-rw-r--r--Documentation/devicetree/bindings/net/dsa/b53.txt88
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt278
-rw-r--r--Documentation/networking/gen_stats.txt2
-rw-r--r--MAINTAINERS15
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev-rev-b.dts328
-rw-r--r--drivers/net/bonding/bond_main.c22
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/Makefile2
-rw-r--r--drivers/net/dsa/b53/Kconfig33
-rw-r--r--drivers/net/dsa/b53/Makefile6
-rw-r--r--drivers/net/dsa/b53/b53_common.c1787
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c392
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c260
-rw-r--r--drivers/net/dsa/b53/b53_priv.h387
-rw-r--r--drivers/net/dsa/b53/b53_regs.h434
-rw-r--r--drivers/net/dsa/b53/b53_spi.c331
-rw-r--r--drivers/net/dsa/b53/b53_srab.c415
-rw-r--r--drivers/net/dsa/bcm_sf2.c701
-rw-r--r--drivers/net/dsa/bcm_sf2.h16
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h70
-rw-r--r--drivers/net/dsa/mv88e6xxx.c267
-rw-r--r--drivers/net/dsa/mv88e6xxx.h6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c135
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h16
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c147
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c35
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c203
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c247
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c105
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h33
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c250
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c90
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c150
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c1347
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c1623
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c542
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h10947
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c184
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c115
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c27
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c57
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h41
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c40
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c506
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h13
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_dcbnl.c348
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c26
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig14
-rw-r--r--drivers/net/ethernet/ti/cpsw.c3
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c3
-rw-r--r--drivers/net/fjes/fjes_main.c5
-rw-r--r--drivers/net/hyperv/hyperv_net.h19
-rw-r--r--drivers/net/hyperv/netvsc.c25
-rw-r--r--drivers/net/hyperv/netvsc_drv.c22
-rw-r--r--drivers/net/hyperv/rndis_filter.c159
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c22
-rw-r--r--drivers/net/loopback.c5
-rw-r--r--drivers/net/macvlan.c59
-rw-r--r--drivers/net/ppp/ppp_generic.c3
-rw-r--r--drivers/net/team/team.c19
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/vrf.c353
-rw-r--r--drivers/net/wan/Kconfig11
-rw-r--r--drivers/net/wan/Makefile1
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c1192
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.h147
-rw-r--r--drivers/soc/fsl/qe/Kconfig6
-rw-r--r--drivers/soc/fsl/qe/Makefile1
-rw-r--r--drivers/soc/fsl/qe/qe.c6
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c276
-rw-r--r--drivers/soc/fsl/qe/ucc.c450
-rw-r--r--drivers/soc/fsl/qe/ucc_fast.c36
-rw-r--r--drivers/vhost/net.c64
-rw-r--r--include/linux/acpi.h13
-rw-r--r--include/linux/ipv6.h7
-rw-r--r--include/linux/netdev_features.h7
-rw-r--r--include/linux/netdevice.h22
-rw-r--r--include/linux/platform_data/b53.h33
-rw-r--r--include/linux/qed/common_hsi.h397
-rw-r--r--include/linux/qed/eth_common.h124
-rw-r--r--include/linux/qed/iscsi_common.h1439
-rw-r--r--include/linux/qed/qed_chain.h556
-rw-r--r--include/linux/qed/qed_eth_if.h63
-rw-r--r--include/linux/qed/qed_if.h158
-rw-r--r--include/linux/qed/rdma_common.h44
-rw-r--r--include/linux/qed/roce_common.h17
-rw-r--r--include/linux/qed/storage_common.h91
-rw-r--r--include/linux/qed/tcp_common.h226
-rw-r--r--include/linux/rxrpc.h18
-rw-r--r--include/linux/skbuff.h8
-rw-r--r--include/net/act_api.h24
-rw-r--r--include/net/dsa.h56
-rw-r--r--include/net/fib_rules.h24
-rw-r--r--include/net/gen_stats.h12
-rw-r--r--include/net/l3mdev.h12
-rw-r--r--include/net/sch_generic.h90
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--include/net/tc_act/tc_defact.h4
-rw-r--r--include/net/udp.h4
-rw-r--r--include/soc/fsl/qe/immap_qe.h5
-rw-r--r--include/soc/fsl/qe/qe.h19
-rw-r--r--include/soc/fsl/qe/qe_tdm.h94
-rw-r--r--include/soc/fsl/qe/ucc.h4
-rw-r--r--include/soc/fsl/qe/ucc_fast.h27
-rw-r--r--include/uapi/linux/fib_rules.h1
-rw-r--r--include/uapi/linux/pkt_cls.h7
-rw-r--r--include/uapi/linux/virtio_net.h3
-rw-r--r--net/bluetooth/6lowpan.c13
-rw-r--r--net/core/dev.c32
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/fib_rules.c33
-rw-r--r--net/core/gen_estimator.c24
-rw-r--r--net/core/gen_stats.c35
-rw-r--r--net/core/skbuff.c46
-rw-r--r--net/dsa/Makefile2
-rw-r--r--net/dsa/dsa.c253
-rw-r--r--net/dsa/dsa2.c690
-rw-r--r--net/dsa/dsa_priv.h9
-rw-r--r--net/dsa/slave.c80
-rw-r--r--net/dsa/tag_brcm.c4
-rw-r--r--net/dsa/tag_dsa.c10
-rw-r--r--net/dsa/tag_edsa.c10
-rw-r--r--net/dsa/tag_trailer.c4
-rw-r--r--net/ieee802154/6lowpan/core.c15
-rw-r--r--net/ipv4/fib_rules.c6
-rw-r--r--net/ipv4/fou.c81
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/tcp_input.c26
-rw-r--r--net/ipv6/af_inet6.c6
-rw-r--r--net/ipv6/fib6_rules.c6
-rw-r--r--net/ipv6/ila/ila.h3
-rw-r--r--net/ipv6/ila/ila_common.c6
-rw-r--r--net/ipv6/ila/ila_lwt.c4
-rw-r--r--net/ipv6/ila/ila_xlat.c8
-rw-r--r--net/ipv6/ip6_input.c1
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/sit.c3
-rw-r--r--net/l2tp/l2tp_eth.c4
-rw-r--r--net/l3mdev/l3mdev.c38
-rw-r--r--net/mpls/af_mpls.c2
-rw-r--r--net/netfilter/xt_RATEEST.c2
-rw-r--r--net/netlink/af_netlink.h14
-rw-r--r--net/openvswitch/vport-internal_dev.c2
-rw-r--r--net/rxrpc/af_rxrpc.c195
-rw-r--r--net/rxrpc/ar-accept.c2
-rw-r--r--net/rxrpc/ar-ack.c2
-rw-r--r--net/rxrpc/ar-call.c170
-rw-r--r--net/rxrpc/ar-connection.c19
-rw-r--r--net/rxrpc/ar-connevent.c2
-rw-r--r--net/rxrpc/ar-input.c2
-rw-r--r--net/rxrpc/ar-internal.h52
-rw-r--r--net/rxrpc/ar-key.c4
-rw-r--r--net/rxrpc/ar-local.c2
-rw-r--r--net/rxrpc/ar-output.c188
-rw-r--r--net/rxrpc/ar-peer.c2
-rw-r--r--net/rxrpc/ar-recvmsg.c4
-rw-r--r--net/rxrpc/ar-skbuff.c2
-rw-r--r--net/rxrpc/ar-transport.c2
-rw-r--r--net/rxrpc/rxkad.c2
-rw-r--r--net/sched/act_api.c24
-rw-r--r--net/sched/act_bpf.c8
-rw-r--r--net/sched/act_connmark.c6
-rw-r--r--net/sched/act_csum.c7
-rw-r--r--net/sched/act_gact.c7
-rw-r--r--net/sched/act_ife.c10
-rw-r--r--net/sched/act_ipt.c14
-rw-r--r--net/sched/act_mirred.c6
-rw-r--r--net/sched/act_nat.c7
-rw-r--r--net/sched/act_pedit.c8
-rw-r--r--net/sched/act_police.c4
-rw-r--r--net/sched/act_simple.c7
-rw-r--r--net/sched/act_skbedit.c7
-rw-r--r--net/sched/act_vlan.c9
-rw-r--r--net/sched/cls_api.c11
-rw-r--r--net/sched/cls_flower.c31
-rw-r--r--net/sched/sch_api.c21
-rw-r--r--net/sched/sch_atm.c18
-rw-r--r--net/sched/sch_cbq.c294
-rw-r--r--net/sched/sch_choke.c17
-rw-r--r--net/sched/sch_drr.c31
-rw-r--r--net/sched/sch_dsmark.c18
-rw-r--r--net/sched/sch_fifo.c7
-rw-r--r--net/sched/sch_fq_codel.c25
-rw-r--r--net/sched/sch_generic.c10
-rw-r--r--net/sched/sch_gred.c35
-rw-r--r--net/sched/sch_hfsc.c37
-rw-r--r--net/sched/sch_hhf.c10
-rw-r--r--net/sched/sch_htb.c37
-rw-r--r--net/sched/sch_mq.c2
-rw-r--r--net/sched/sch_mqprio.c11
-rw-r--r--net/sched/sch_multiq.c25
-rw-r--r--net/sched/sch_netem.c34
-rw-r--r--net/sched/sch_plug.c2
-rw-r--r--net/sched/sch_prio.c23
-rw-r--r--net/sched/sch_qfq.c56
-rw-r--r--net/sched/sch_red.c21
-rw-r--r--net/sched/sch_sfq.c1
-rw-r--r--net/sched/sch_tbf.c18
-rw-r--r--net/sctp/Makefile3
-rw-r--r--net/sctp/input.c57
-rw-r--r--net/sctp/inqueue.c78
-rw-r--r--net/sctp/offload.c98
-rw-r--r--net/sctp/output.c366
-rw-r--r--net/sctp/protocol.c3
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/node.c22
246 files changed, 25661 insertions, 8060 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
new file mode 100644
index 0000000..ca752db
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -0,0 +1,88 @@
+Broadcom BCM53xx Ethernet switches
+==================================
+
+Required properties:
+
+- compatible: For external switch chips, compatible string must be exactly one
+ of: "brcm,bcm5325"
+ "brcm,bcm53115"
+ "brcm,bcm53125"
+ "brcm,bcm53128"
+ "brcm,bcm5365"
+ "brcm,bcm5395"
+ "brcm,bcm5397"
+ "brcm,bcm5398"
+
+ For the BCM5301x SoCs with an integrated switch, must be one of:
+ "brcm,bcm53010-srab"
+ "brcm,bcm53011-srab"
+ "brcm,bcm53012-srab"
+ "brcm,bcm53018-srab"
+ "brcm,bcm53019-srab" and the mandatory "brcm,bcm5301x-srab" string
+
+ For the BCM63xx/33xx SoCs with an integrated switch, must be one of:
+ "brcm,bcm3384-switch"
+ "brcm,bcm6328-switch"
+ "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
+
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
+required and optional properties.
+
+Examples:
+
+Ethernet switch connected via MDIO to the host, CPU port wired to eth0:
+
+ eth0: ethernet@10001000 {
+ compatible = "brcm,unimac";
+ reg = <0x10001000 0x1000>;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ mdio0: mdio@10000000 {
+ compatible = "brcm,unimac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch0: ethernet-switch@30 {
+ compatible = "brcm,bcm53125";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ port0@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+
+ port1@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+
+ port5@5 {
+ reg = <5>;
+ label = "cable-modem";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ phy-mode = "rgmii-txid";
+ };
+
+ port8@8 {
+ reg = <8>;
+ label = "cpu";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ phy-mode = "rgmii-txid";
+ ethernet = <&eth0>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index 9f4807f..9bbbe7f 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -1,5 +1,279 @@
-Marvell Distributed Switch Architecture Device Tree Bindings
-------------------------------------------------------------
+Distributed Switch Architecture Device Tree Bindings
+----------------------------------------------------
+
+Two bindings exist, one of which has been deprecated due to
+limitations.
+
+Current Binding
+---------------
+
+Switches are true Linux devices and can be probed by any means. Once
+probed, they register to the DSA framework, passing a node
+pointer. This node is expected to fulfil the following binding, and
+may contain additional properties as required by the device it is
+embedded within.
+
+Required properties:
+
+- ports : A container for child nodes representing switch ports.
+
+Optional properties:
+
+- dsa,member : A two element list indicating which DSA cluster, and position
+ within the cluster, a switch takes. <0 0> is cluster 0,
+ switch 0. <0 1> is cluster 0, switch 1. <1 0> is cluster 1,
+ switch 0. A switch that is not part of any cluster (a single
+ device hanging off a CPU port) must not specify this property.
+
+The ports container has the following properties:
+
+Required properties:
+
+- #address-cells : Must be 1
+- #size-cells : Must be 0
+
+Each port child node must have the following mandatory properties:
+- reg : Describes the port address in the switch
+- label : Describes the label associated with this port, which
+ will become the netdev name. Special labels are
+ "cpu" to indicate a CPU port and "dsa" to
+ indicate an uplink/downlink port between switches in
+ the cluster.
+
+A port labelled "dsa" has the following mandatory property:
+
+- link : Should be a list of phandles to the other switches' DSA
+ ports. This port is used as the outgoing port
+ towards the phandle ports. The full routing
+ information must be given, not just the one hop
+ routes to neighbouring switches.
+
+A port labelled "cpu" has the following mandatory property:
+
+- ethernet : Should be a phandle to a valid Ethernet device node.
+ This host device is what the switch port is
+ connected to.
+
+Port child nodes may also contain the following optional standardised
+properties, described in binding documents:
+
+- phy-handle : Phandle to a PHY on an MDIO bus. See
+ Documentation/devicetree/bindings/net/ethernet.txt
+ for details.
+
+- phy-mode : See
+ Documentation/devicetree/bindings/net/ethernet.txt
+ for details.
+
+- fixed-link : Fixed-link subnode describing a link to a non-MDIO
+ managed entity. See
+ Documentation/devicetree/bindings/net/fixed-link.txt
+ for details.
+
+Example
+
+The following example shows three switches on three MDIO busses,
+linked into one DSA cluster.
+
+&mdio1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch0: switch0@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ dsa,member = <0 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ switch0port5: port@5 {
+ reg = <5>;
+ label = "dsa";
+ phy-mode = "rgmii-txid";
+ link = <&switch1port6
+ &switch2port9>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&fec1>;
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+ };
+ };
+};
+
+&mdio2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch1: switch1@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ dsa,member = <0 1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan3";
+ phy-handle = <&switch1phy0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan4";
+ phy-handle = <&switch1phy1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan5";
+ phy-handle = <&switch1phy2>;
+ };
+
+ switch1port5: port@5 {
+ reg = <5>;
+ label = "dsa";
+ link = <&switch2port9>;
+ phy-mode = "rgmii-txid";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ switch1port6: port@6 {
+ reg = <6>;
+ label = "dsa";
+ phy-mode = "rgmii-txid";
+ link = <&switch0port5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy0: switch1phy0@0 {
+ reg = <0>;
+ };
+ switch1phy1: switch1phy0@1 {
+ reg = <1>;
+ };
+ switch1phy2: switch1phy0@2 {
+ reg = <2>;
+ };
+ };
+ };
+};
+
+&mdio4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch2: switch2@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ dsa,member = <0 2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan6";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan7";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan8";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "optical3";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ link-gpios = <&gpio6 2
+ GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "optical4";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ link-gpios = <&gpio6 3
+ GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ switch2port9: port@9 {
+ reg = <9>;
+ label = "dsa";
+ phy-mode = "rgmii-txid";
+ link = <&switch1port5
+ &switch0port5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+};
+
+Deprecated Binding
+------------------
+
+The deprecated binding makes use of a platform device to represent the
+switches. The switches themselves are not Linux devices, and make use
+of an MDIO bus for management.
Required properties:
- compatible : Should be "marvell,dsa"
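
The "Current Binding" section above notes that switches are probed as ordinary
Linux devices and then register with the DSA framework, passing a device node
that fulfils the binding. A minimal sketch of such a probe is shown below; it
is not part of this patch, and the structure layout and the exact
dsa_register_switch() signature are assumptions made for illustration.

	static int foo_switch_probe(struct platform_device *pdev)
	{
		struct dsa_switch *ds;

		/* Allocate the switch; driver-private data and the
		 * driver-specific operations would also be attached here.
		 */
		ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
		if (!ds)
			return -ENOMEM;

		ds->dev = &pdev->dev;

		/* Hand the node carrying the "ports" container to DSA */
		return dsa_register_switch(ds, pdev->dev.of_node);
	}
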
diff --git a/Documentation/networking/gen_stats.txt b/Documentation/networking/gen_stats.txt
index ff630a8..179b18c 100644
--- a/Documentation/networking/gen_stats.txt
+++ b/Documentation/networking/gen_stats.txt
@@ -21,7 +21,7 @@ struct mystruct {
...
};
-Update statistics:
+Update statistics, in dequeue() methods only (while owning qdisc->running):
mystruct->tstats.packet++;
mystruct->qstats.backlog += skb->pkt_len;
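
The updated text above restricts statistics updates to dequeue() methods run
while the qdisc's running state is owned by the caller. A hedged sketch of
such a dequeue handler follows; the private structure, its queue and the
helper usage are illustrative assumptions, not part of this patch.

	struct foo_sched_data {
		struct sk_buff_head queue;
	};

	static struct sk_buff *foo_dequeue(struct Qdisc *sch)
	{
		struct foo_sched_data *q = qdisc_priv(sch);
		struct sk_buff *skb;

		skb = __skb_dequeue(&q->queue);
		if (!skb)
			return NULL;

		/* Safe here: dequeue() runs while the caller owns
		 * qdisc->running.
		 */
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;

		return skb;
	}
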
diff --git a/MAINTAINERS b/MAINTAINERS
index 2ebe195..0e26025 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2454,6 +2454,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*
+BROADCOM B53 ETHERNET SWITCH DRIVER
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: netdev@vger.kernel.org
+L: openwrt-devel@lists.openwrt.org (subscribers-only)
+S: Supported
+F: drivers/net/dsa/b53/*
+F: include/linux/platform_data/b53.h
+
BROADCOM GENET ETHERNET DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
@@ -4871,6 +4879,13 @@ F: drivers/net/ethernet/freescale/gianfar*
X: drivers/net/ethernet/freescale/gianfar_ptp.c
F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+FREESCALE QUICC ENGINE UCC HDLC DRIVER
+M: Zhao Qiang <qiang.zhao@nxp.com>
+L: netdev@vger.kernel.org
+L: linuxppc-dev@lists.ozlabs.org
+S: Maintained
+F: drivers/net/wan/fsl_ucc_hdlc*
+
FREESCALE QUICC ENGINE UCC UART DRIVER
M: Timur Tabi <timur@tabi.org>
L: linuxppc-dev@lists.ozlabs.org
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 6c60b7f..5c1fcab 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -85,187 +85,199 @@
reg = <1>;
#address-cells = <1>;
#size-cells = <0>;
+
+ switch0: switch0@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ dsa,member = <0 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ switch0port5: port@5 {
+ reg = <5>;
+ label = "dsa";
+ phy-mode = "rgmii-txid";
+ link = <&switch1port6
+ &switch2port9>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&fec1>;
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+ };
+ };
};
mdio_mux_2: mdio@2 {
reg = <2>;
#address-cells = <1>;
#size-cells = <0>;
- };
-
- mdio_mux_4: mdio@4 {
- reg = <4>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mdio_mux_8: mdio@8 {
- reg = <8>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
- };
-
- dsa {
- compatible = "marvell,dsa";
- #address-cells = <2>;
- #size-cells = <0>;
- dsa,ethernet = <&fec1>;
- dsa,mii-bus = <&mdio_mux_1>;
-
- /* 6352 - Primary - 7 ports */
- switch0: switch@0-0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x00 0>;
- eeprom-length = <512>;
- port@0 {
+ switch1: switch1@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0>;
- label = "lan0";
- };
-
- port@1 {
- reg = <1>;
- label = "lan1";
- };
-
- port@2 {
- reg = <2>;
- label = "lan2";
- };
-
- switch0port5: port@5 {
- reg = <5>;
- label = "dsa";
- phy-mode = "rgmii-txid";
- link = <&switch1port6
- &switch2port9>;
-
- fixed-link {
- speed = <1000>;
- full-duplex;
+ dsa,member = <0 1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan3";
+ phy-handle = <&switch1phy0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan4";
+ phy-handle = <&switch1phy1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan5";
+ phy-handle = <&switch1phy2>;
+ };
+
+ switch1port5: port@5 {
+ reg = <5>;
+ label = "dsa";
+ link = <&switch2port9>;
+ phy-mode = "rgmii-txid";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ switch1port6: port@6 {
+ reg = <6>;
+ label = "dsa";
+ phy-mode = "rgmii-txid";
+ link = <&switch0port5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
};
- };
-
- port@6 {
- reg = <6>;
- label = "cpu";
-
- fixed-link {
- speed = <100>;
- full-duplex;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy0: switch1phy0@0 {
+ reg = <0>;
+ };
+ switch1phy1: switch1phy0@1 {
+ reg = <1>;
+ };
+ switch1phy2: switch1phy0@2 {
+ reg = <2>;
+ };
};
};
-
};
- /* 6352 - Secondary - 7 ports */
- switch1: switch@0-1 {
+ mdio_mux_4: mdio@4 {
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x00 1>;
- eeprom-length = <512>;
- mii-bus = <&mdio_mux_2>;
+ reg = <4>;
- port@0 {
+ switch2: switch2@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0>;
- label = "lan3";
- };
-
- port@1 {
- reg = <1>;
- label = "lan4";
- };
-
- port@2 {
- reg = <2>;
- label = "lan5";
- };
-
- switch1port5: port@5 {
- reg = <5>;
- label = "dsa";
- link = <&switch2port9>;
- phy-mode = "rgmii-txid";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
-
- switch1port6: port@6 {
- reg = <6>;
- label = "dsa";
- phy-mode = "rgmii-txid";
- link = <&switch0port5>;
-
- fixed-link {
- speed = <1000>;
- full-duplex;
+ dsa,member = <0 2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan6";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan7";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan8";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "optical3";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ link-gpios = <&gpio6 2
+ GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "optical4";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ link-gpios = <&gpio6 3
+ GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ switch2port9: port@9 {
+ reg = <9>;
+ label = "dsa";
+ phy-mode = "rgmii-txid";
+ link = <&switch1port5
+ &switch0port5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
};
};
};
- /* 6185 - 10 ports */
- switch2: switch@0-2 {
+ mdio_mux_8: mdio@8 {
+ reg = <8>;
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x00 2>;
- mii-bus = <&mdio_mux_4>;
-
- port@0 {
- reg = <0>;
- label = "lan6";
- };
-
- port@1 {
- reg = <1>;
- label = "lan7";
- };
-
- port@2 {
- reg = <2>;
- label = "lan8";
- };
-
- port@3 {
- reg = <3>;
- label = "optical3";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- link-gpios = <&gpio6 2
- GPIO_ACTIVE_HIGH>;
- };
- };
-
- port@4 {
- reg = <4>;
- label = "optical4";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- link-gpios = <&gpio6 3
- GPIO_ACTIVE_HIGH>;
- };
- };
-
- switch2port9: port@9 {
- reg = <9>;
- label = "dsa";
- phy-mode = "rgmii-txid";
- link = <&switch1port5
- &switch0port5>;
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
};
};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 941ec99..90157e2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4607,26 +4607,6 @@ static int bond_check_params(struct bond_params *params)
return 0;
}
-static struct lock_class_key bonding_netdev_xmit_lock_key;
-static struct lock_class_key bonding_netdev_addr_lock_key;
-static struct lock_class_key bonding_tx_busylock_key;
-
-static void bond_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &bonding_netdev_xmit_lock_key);
-}
-
-static void bond_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &bonding_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
- dev->qdisc_tx_busylock = &bonding_tx_busylock_key;
-}
-
/* Called from registration process */
static int bond_init(struct net_device *bond_dev)
{
@@ -4639,7 +4619,7 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- bond_set_lockdep_class(bond_dev);
+ netdev_lockdep_set_classes(bond_dev);
list_add_tail(&bond->bond_list, &bn->dev_list);
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 200663c..be481e1 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -28,4 +28,6 @@ config NET_DSA_BCM_SF2
This enables support for the Broadcom Starfighter 2 Ethernet
switch chips.
+source "drivers/net/dsa/b53/Kconfig"
+
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 76b751d..97bc70a 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
+
+obj-y += b53/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
new file mode 100644
index 0000000..27f32a5
--- /dev/null
+++ b/drivers/net/dsa/b53/Kconfig
@@ -0,0 +1,33 @@
+menuconfig B53
+ tristate "Broadcom BCM53xx managed switch support"
+ depends on NET_DSA
+ help
+ This driver adds support for Broadcom managed switch chips. It supports
+ BCM5325E, BCM5365, BCM539x, BCM53115 and BCM53125 as well as BCM63XX
+ integrated switches.
+
+config B53_SPI_DRIVER
+ tristate "B53 SPI connected switch driver"
+ depends on B53 && SPI
+ help
+ Select to enable support for registering switches configured through SPI.
+
+config B53_MDIO_DRIVER
+ tristate "B53 MDIO connected switch driver"
+ depends on B53
+ help
+ Select to enable support for registering switches configured through MDIO.
+
+config B53_MMAP_DRIVER
+ tristate "B53 MMAP connected switch driver"
+ depends on B53 && HAS_IOMEM
+ help
+ Select to enable support for memory-mapped switches like the BCM63XX
+ integrated switches.
+
+config B53_SRAB_DRIVER
+ tristate "B53 SRAB connected switch driver"
+ depends on B53 && HAS_IOMEM
+ help
+ Select to enable support for memory-mapped Switch Register Access
+ Bridge Registers (SRAB), as found on the BCM53010.
diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile
new file mode 100644
index 0000000..7e6f9a8
--- /dev/null
+++ b/drivers/net/dsa/b53/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_B53) += b53_common.o
+
+obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o
+obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o
+obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o
+obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
new file mode 100644
index 0000000..5321083
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -0,0 +1,1787 @@
+/*
+ * B53 switch driver main logic
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/b53.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "b53_regs.h"
+#include "b53_priv.h"
+
+struct b53_mib_desc {
+ u8 size;
+ u8 offset;
+ const char *name;
+};
+
+/* BCM5365 MIB counters */
+static const struct b53_mib_desc b53_mibs_65[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x44, "RxOctets" },
+ { 4, 0x4c, "RxUndersizePkts" },
+ { 4, 0x50, "RxPausePkts" },
+ { 4, 0x54, "Pkts64Octets" },
+ { 4, 0x58, "Pkts65to127Octets" },
+ { 4, 0x5c, "Pkts128to255Octets" },
+ { 4, 0x60, "Pkts256to511Octets" },
+ { 4, 0x64, "Pkts512to1023Octets" },
+ { 4, 0x68, "Pkts1024to1522Octets" },
+ { 4, 0x6c, "RxOversizePkts" },
+ { 4, 0x70, "RxJabbers" },
+ { 4, 0x74, "RxAlignmentErrors" },
+ { 4, 0x78, "RxFCSErrors" },
+ { 8, 0x7c, "RxGoodOctets" },
+ { 4, 0x84, "RxDropPkts" },
+ { 4, 0x88, "RxUnicastPkts" },
+ { 4, 0x8c, "RxMulticastPkts" },
+ { 4, 0x90, "RxBroadcastPkts" },
+ { 4, 0x94, "RxSAChanges" },
+ { 4, 0x98, "RxFragments" },
+};
+
+#define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
+
+/* BCM63xx MIB counters */
+static const struct b53_mib_desc b53_mibs_63xx[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x0c, "TxQoSPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x3c, "TxQoSOctets" },
+ { 8, 0x44, "RxOctets" },
+ { 4, 0x4c, "RxUndersizePkts" },
+ { 4, 0x50, "RxPausePkts" },
+ { 4, 0x54, "Pkts64Octets" },
+ { 4, 0x58, "Pkts65to127Octets" },
+ { 4, 0x5c, "Pkts128to255Octets" },
+ { 4, 0x60, "Pkts256to511Octets" },
+ { 4, 0x64, "Pkts512to1023Octets" },
+ { 4, 0x68, "Pkts1024to1522Octets" },
+ { 4, 0x6c, "RxOversizePkts" },
+ { 4, 0x70, "RxJabbers" },
+ { 4, 0x74, "RxAlignmentErrors" },
+ { 4, 0x78, "RxFCSErrors" },
+ { 8, 0x7c, "RxGoodOctets" },
+ { 4, 0x84, "RxDropPkts" },
+ { 4, 0x88, "RxUnicastPkts" },
+ { 4, 0x8c, "RxMulticastPkts" },
+ { 4, 0x90, "RxBroadcastPkts" },
+ { 4, 0x94, "RxSAChanges" },
+ { 4, 0x98, "RxFragments" },
+ { 4, 0xa0, "RxSymbolErrors" },
+ { 4, 0xa4, "RxQoSPkts" },
+ { 8, 0xa8, "RxQoSOctets" },
+ { 4, 0xb0, "Pkts1523to2047Octets" },
+ { 4, 0xb4, "Pkts2048to4095Octets" },
+ { 4, 0xb8, "Pkts4096to8191Octets" },
+ { 4, 0xbc, "Pkts8192to9728Octets" },
+ { 4, 0xc0, "RxDiscarded" },
+};
+
+#define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
+
+/* MIB counters */
+static const struct b53_mib_desc b53_mibs[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x50, "RxOctets" },
+ { 4, 0x58, "RxUndersizePkts" },
+ { 4, 0x5c, "RxPausePkts" },
+ { 4, 0x60, "Pkts64Octets" },
+ { 4, 0x64, "Pkts65to127Octets" },
+ { 4, 0x68, "Pkts128to255Octets" },
+ { 4, 0x6c, "Pkts256to511Octets" },
+ { 4, 0x70, "Pkts512to1023Octets" },
+ { 4, 0x74, "Pkts1024to1522Octets" },
+ { 4, 0x78, "RxOversizePkts" },
+ { 4, 0x7c, "RxJabbers" },
+ { 4, 0x80, "RxAlignmentErrors" },
+ { 4, 0x84, "RxFCSErrors" },
+ { 8, 0x88, "RxGoodOctets" },
+ { 4, 0x90, "RxDropPkts" },
+ { 4, 0x94, "RxUnicastPkts" },
+ { 4, 0x98, "RxMulticastPkts" },
+ { 4, 0x9c, "RxBroadcastPkts" },
+ { 4, 0xa0, "RxSAChanges" },
+ { 4, 0xa4, "RxFragments" },
+ { 4, 0xa8, "RxJumboPkts" },
+ { 4, 0xac, "RxSymbolErrors" },
+ { 4, 0xc0, "RxDiscarded" },
+};
+
+#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
+
+static int b53_do_vlan_op(struct b53_device *dev, u8 op)
+{
+ unsigned int i;
+
+ b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
+
+ for (i = 0; i < 10; i++) {
+ u8 vta;
+
+ b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
+ if (!(vta & VTA_START_CMD))
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EIO;
+}
+
+static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
+ struct b53_vlan *vlan)
+{
+ if (is5325(dev)) {
+ u32 entry = 0;
+
+ if (vlan->members) {
+ entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
+ VA_UNTAG_S_25) | vlan->members;
+ if (dev->core_rev >= 3)
+ entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
+ else
+ entry |= VA_VALID_25;
+ }
+
+ b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ } else if (is5365(dev)) {
+ u16 entry = 0;
+
+ if (vlan->members)
+ entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
+ VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ } else {
+ b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
+ b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
+ (vlan->untag << VTE_UNTAG_S) | vlan->members);
+
+ b53_do_vlan_op(dev, VTA_CMD_WRITE);
+ }
+
+ dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
+ vid, vlan->members, vlan->untag);
+}
+
+static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
+ struct b53_vlan *vlan)
+{
+ if (is5325(dev)) {
+ u32 entry = 0;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
+ VTA_RW_STATE_RD | VTA_RW_OP_EN);
+ b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
+
+ if (dev->core_rev >= 3)
+ vlan->valid = !!(entry & VA_VALID_25_R4);
+ else
+ vlan->valid = !!(entry & VA_VALID_25);
+ vlan->members = entry & VA_MEMBER_MASK;
+ vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
+
+ } else if (is5365(dev)) {
+ u16 entry = 0;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
+
+ vlan->valid = !!(entry & VA_VALID_65);
+ vlan->members = entry & VA_MEMBER_MASK;
+ vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
+ } else {
+ u32 entry = 0;
+
+ b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
+ b53_do_vlan_op(dev, VTA_CMD_READ);
+ b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
+ vlan->members = entry & VTE_MEMBERS;
+ vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
+ vlan->valid = true;
+ }
+}
+
+static void b53_set_forwarding(struct b53_device *dev, int enable)
+{
+ u8 mgmt;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (enable)
+ mgmt |= SM_SW_FWD_EN;
+ else
+ mgmt &= ~SM_SW_FWD_EN;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+}
+
+static void b53_enable_vlan(struct b53_device *dev, bool enable)
+{
+ u8 mgmt, vc0, vc1, vc4 = 0, vc5;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
+
+ if (is5325(dev) || is5365(dev)) {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
+ } else if (is63xx(dev)) {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
+ } else {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
+ }
+
+ mgmt &= ~SM_SW_FWD_MODE;
+
+ if (enable) {
+ vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
+ vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ vc5 |= VC5_DROP_VTABLE_MISS;
+
+ if (is5325(dev))
+ vc0 &= ~VC0_RESERVED_1;
+
+ if (is5325(dev) || is5365(dev))
+ vc1 |= VC1_RX_MCST_TAG_EN;
+
+ } else {
+ vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
+ vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+
+ if (is5325(dev) || is5365(dev))
+ vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ else
+ vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
+
+ if (is5325(dev) || is5365(dev))
+ vc1 &= ~VC1_RX_MCST_TAG_EN;
+ }
+
+ if (!is5325(dev) && !is5365(dev))
+ vc5 &= ~VC5_VID_FFF_EN;
+
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
+
+ if (is5325(dev) || is5365(dev)) {
+ /* enable the high 8 bit vid check on 5325 */
+ if (is5325(dev) && enable)
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
+ VC3_HIGH_8BIT_EN);
+ else
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
+
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
+ } else if (is63xx(dev)) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
+ } else {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
+ }
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+}
+
+static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
+{
+ u32 port_mask = 0;
+ u16 max_size = JMS_MIN_SIZE;
+
+ if (is5325(dev) || is5365(dev))
+ return -EINVAL;
+
+ if (enable) {
+ port_mask = dev->enabled_ports;
+ max_size = JMS_MAX_SIZE;
+ if (allow_10_100)
+ port_mask |= JPM_10_100_JUMBO_EN;
+ }
+
+ b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
+ return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
+}
+
+static int b53_flush_arl(struct b53_device *dev, u8 mask)
+{
+ unsigned int i;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
+ FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
+
+ for (i = 0; i < 10; i++) {
+ u8 fast_age_ctrl;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
+ &fast_age_ctrl);
+
+ if (!(fast_age_ctrl & FAST_AGE_DONE))
+ goto out;
+
+ msleep(1);
+ }
+
+ return -ETIMEDOUT;
+out:
+ /* Only age dynamic entries (default behavior) */
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
+ return 0;
+}
+
+static int b53_fast_age_port(struct b53_device *dev, int port)
+{
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
+
+ return b53_flush_arl(dev, FAST_AGE_PORT);
+}
+
+static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
+{
+ b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
+
+ return b53_flush_arl(dev, FAST_AGE_VLAN);
+}
+
+static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ unsigned int i;
+ u16 pvlan;
+
+ /* Enable the IMP port to be in the same VLAN as the other ports
+ * on a per-port basis such that we only have Port i and IMP in
+ * the same VLAN.
+ */
+ b53_for_each_port(dev, i) {
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
+ pvlan |= BIT(cpu_port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
+ }
+}
+
+static int b53_enable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ unsigned int cpu_port = dev->cpu_port;
+ u16 pvlan;
+
+ /* Clear the Rx and Tx disable bits and set to no spanning tree */
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
+
+ /* Set this port, and only this one, to be in the default VLAN;
+ * if it is a member of a bridge, restore the membership it had
+ * prior to the port being brought down.
+ */
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+ pvlan &= ~0x1ff;
+ pvlan |= BIT(port);
+ pvlan |= dev->ports[port].vlan_ctl_mask;
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+
+ b53_imp_vlan_setup(ds, cpu_port);
+
+ return 0;
+}
+
+static void b53_disable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u8 reg;
+
+ /* Disable Tx/Rx for the port */
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+}
+
+static void b53_enable_cpu_port(struct b53_device *dev)
+{
+ unsigned int cpu_port = dev->cpu_port;
+ u8 port_ctrl;
+
+ /* BCM5325 CPU port is at 8 */
+ if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
+ cpu_port = B53_CPU_PORT;
+
+ port_ctrl = PORT_CTRL_RX_BCST_EN |
+ PORT_CTRL_RX_MCST_EN |
+ PORT_CTRL_RX_UCST_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl);
+}
+
+static void b53_enable_mib(struct b53_device *dev)
+{
+ u8 gc;
+
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+ gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+}
+
+static int b53_configure_vlan(struct b53_device *dev)
+{
+ struct b53_vlan vl = { 0 };
+ int i;
+
+ /* clear all vlan entries */
+ if (is5325(dev) || is5365(dev)) {
+ for (i = 1; i < dev->num_vlans; i++)
+ b53_set_vlan_entry(dev, i, &vl);
+ } else {
+ b53_do_vlan_op(dev, VTA_CMD_CLEAR);
+ }
+
+ b53_enable_vlan(dev, false);
+
+ b53_for_each_port(dev, i)
+ b53_write16(dev, B53_VLAN_PAGE,
+ B53_VLAN_PORT_DEF_TAG(i), 1);
+
+ if (!is5325(dev) && !is5365(dev))
+ b53_set_jumbo(dev, dev->enable_jumbo, false);
+
+ return 0;
+}
+
+static void b53_switch_reset_gpio(struct b53_device *dev)
+{
+ int gpio = dev->reset_gpio;
+
+ if (gpio < 0)
+ return;
+
+ /* Reset sequence: RESET low(50ms)->high(20ms)
+ */
+ gpio_set_value(gpio, 0);
+ mdelay(50);
+
+ gpio_set_value(gpio, 1);
+ mdelay(20);
+
+ dev->current_page = 0xff;
+}
+
+static int b53_switch_reset(struct b53_device *dev)
+{
+ u8 mgmt;
+
+ b53_switch_reset_gpio(dev);
+
+ if (is539x(dev)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (!(mgmt & SM_SW_FWD_EN)) {
+ mgmt &= ~SM_SW_FWD_MODE;
+ mgmt |= SM_SW_FWD_EN;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (!(mgmt & SM_SW_FWD_EN)) {
+ dev_err(dev->dev, "Failed to enable switch!\n");
+ return -EINVAL;
+ }
+ }
+
+ b53_enable_mib(dev);
+
+ return b53_flush_arl(dev, FAST_AGE_STATIC);
+}
+
+static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+ u16 value = 0;
+ int ret;
+
+ if (priv->ops->phy_read16)
+ ret = priv->ops->phy_read16(priv, addr, reg, &value);
+ else
+ ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
+ reg * 2, &value);
+
+ return ret ? ret : value;
+}
+
+static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ if (priv->ops->phy_write16)
+ return priv->ops->phy_write16(priv, addr, reg, val);
+
+ return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
+}
+
+static int b53_reset_switch(struct b53_device *priv)
+{
+ /* reset vlans */
+ priv->enable_jumbo = false;
+
+ memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
+ memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
+
+ return b53_switch_reset(priv);
+}
+
+static int b53_apply_config(struct b53_device *priv)
+{
+ /* disable switching */
+ b53_set_forwarding(priv, 0);
+
+ b53_configure_vlan(priv);
+
+ /* enable switching */
+ b53_set_forwarding(priv, 1);
+
+ return 0;
+}
+
+static void b53_reset_mib(struct b53_device *priv)
+{
+ u8 gc;
+
+ b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+
+ b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
+ msleep(1);
+ b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
+ msleep(1);
+}
+
+static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
+{
+ if (is5365(dev))
+ return b53_mibs_65;
+ else if (is63xx(dev))
+ return b53_mibs_63xx;
+ else
+ return b53_mibs;
+}
+
+static unsigned int b53_get_mib_size(struct b53_device *dev)
+{
+ if (is5365(dev))
+ return B53_MIBS_65_SIZE;
+ else if (is63xx(dev))
+ return B53_MIBS_63XX_SIZE;
+ else
+ return B53_MIBS_SIZE;
+}
+
+static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ const struct b53_mib_desc *mibs = b53_get_mib(dev);
+ unsigned int mib_size = b53_get_mib_size(dev);
+ unsigned int i;
+
+ for (i = 0; i < mib_size; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mibs[i].name, ETH_GSTRING_LEN);
+}
+
+static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ const struct b53_mib_desc *mibs = b53_get_mib(dev);
+ unsigned int mib_size = b53_get_mib_size(dev);
+ const struct b53_mib_desc *s;
+ unsigned int i;
+ u64 val = 0;
+
+ if (is5365(dev) && port == 5)
+ port = 8;
+
+ mutex_lock(&dev->stats_mutex);
+
+ for (i = 0; i < mib_size; i++) {
+ s = &mibs[i];
+
+ if (s->size == 8) {
+ b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
+ } else {
+ u32 val32;
+
+ b53_read32(dev, B53_MIB_PAGE(port), s->offset,
+ &val32);
+ val = val32;
+ }
+ data[i] = (u64)val;
+ }
+
+ mutex_unlock(&dev->stats_mutex);
+}
+
+static int b53_get_sset_count(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+
+ return b53_get_mib_size(dev);
+}
+
+static int b53_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+ return 0;
+}
+
+static int b53_setup(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ unsigned int port;
+ int ret;
+
+ ret = b53_reset_switch(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ b53_reset_mib(dev);
+
+ ret = b53_apply_config(dev);
+ if (ret)
+ dev_err(ds->dev, "failed to apply configuration\n");
+
+ for (port = 0; port < dev->num_ports; port++) {
+ if (BIT(port) & ds->enabled_port_mask)
+ b53_enable_port(ds, port, NULL);
+ else if (dsa_is_cpu_port(ds, port))
+ b53_enable_cpu_port(dev);
+ else
+ b53_disable_port(ds, port, NULL);
+ }
+
+ return ret;
+}
+
+static void b53_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u8 rgmii_ctrl = 0, reg = 0, off;
+
+ if (!phy_is_pseudo_fixed_link(phydev))
+ return;
+
+ /* Override the port settings */
+ if (port == dev->cpu_port) {
+ off = B53_PORT_OVERRIDE_CTRL;
+ reg = PORT_OVERRIDE_EN;
+ } else {
+ off = B53_GMII_PORT_OVERRIDE_CTRL(port);
+ reg = GMII_PO_EN;
+ }
+
+ /* Set the link UP */
+ if (phydev->link)
+ reg |= PORT_OVERRIDE_LINK;
+
+ if (phydev->duplex == DUPLEX_FULL)
+ reg |= PORT_OVERRIDE_FULL_DUPLEX;
+
+ switch (phydev->speed) {
+ case 2000:
+ reg |= PORT_OVERRIDE_SPEED_2000M;
+ /* fallthrough */
+ case SPEED_1000:
+ reg |= PORT_OVERRIDE_SPEED_1000M;
+ break;
+ case SPEED_100:
+ reg |= PORT_OVERRIDE_SPEED_100M;
+ break;
+ case SPEED_10:
+ reg |= PORT_OVERRIDE_SPEED_10M;
+ break;
+ default:
+ dev_err(ds->dev, "unknown speed: %d\n", phydev->speed);
+ return;
+ }
+
+ /* Enable flow control on BCM5301x's CPU port */
+ if (is5301x(dev) && port == dev->cpu_port)
+ reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW;
+
+ if (phydev->pause) {
+ if (phydev->asym_pause)
+ reg |= PORT_OVERRIDE_TX_FLOW;
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ }
+
+ b53_write8(dev, B53_CTRL_PAGE, off, reg);
+
+ if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
+ if (port == 8)
+ off = B53_RGMII_CTRL_IMP;
+ else
+ off = B53_RGMII_CTRL_P(port);
+
+ /* Configure the port RGMII clock delay with the DLL disabled and
+ * tx_clk aligned timing (restoring the reset defaults)
+ */
+ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
+ RGMII_CTRL_TIMING_SEL);
+
+ /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
+ * sure that we enable the port TX clock internal delay to
+ * account for this internal delay that is inserted, otherwise
+ * the switch won't be able to receive correctly.
+ *
+ * PHY_INTERFACE_MODE_RGMII means that we are not introducing
+ * any delay on either transmission or reception, so the
+ * BCM53125 must also be configured accordingly to account for
+ * the lack of delay by introducing the delays itself.
+ *
+ * The BCM53125 switch has its RX clock and TX clock control
+ * swapped, hence the reason why we modify the TX clock path in
+ * the "RGMII" case
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+ b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+
+ dev_info(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(phydev->interface));
+ }
+
+ /* configure MII port if necessary */
+ if (is5325(dev)) {
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
+
+ /* reverse mii needs to be enabled */
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ reg | PORT_OVERRIDE_RV_MII_25);
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
+
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ dev_err(ds->dev,
+ "Failed to enable reverse MII mode\n");
+ return;
+ }
+ }
+ } else if (is5301x(dev)) {
+ if (port != dev->cpu_port) {
+ u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port);
+ u8 gmii_po;
+
+ b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po);
+ gmii_po |= GMII_PO_LINK |
+ GMII_PO_RX_FLOW |
+ GMII_PO_TX_FLOW |
+ GMII_PO_EN |
+ GMII_PO_SPEED_2000M;
+ b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po);
+ }
+ }
+}
+
+static int b53_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ return 0;
+}
+
+static int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+
+ if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
+ return -EOPNOTSUPP;
+
+ if (vlan->vid_end > dev->num_vlans)
+ return -ERANGE;
+
+ b53_enable_vlan(dev, true);
+
+ return 0;
+}
+
+static void b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ unsigned int cpu_port = dev->cpu_port;
+ struct b53_vlan *vl;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &dev->vlans[vid];
+
+ b53_get_vlan_entry(dev, vid, vl);
+
+ vl->members |= BIT(port) | BIT(cpu_port);
+ if (untagged)
+ vl->untag |= BIT(port) | BIT(cpu_port);
+ else
+ vl->untag &= ~(BIT(port) | BIT(cpu_port));
+
+ b53_set_vlan_entry(dev, vid, vl);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ if (pvid) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ vlan->vid_end);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
+ vlan->vid_end);
+ b53_fast_age_vlan(dev, vid);
+ }
+}
+
+static int b53_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ unsigned int cpu_port = dev->cpu_port;
+ struct b53_vlan *vl;
+ u16 vid;
+ u16 pvid;
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &dev->vlans[vid];
+
+ b53_get_vlan_entry(dev, vid, vl);
+
+ vl->members &= ~BIT(port);
+ if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
+ vl->members = 0;
+
+ if (pvid == vid) {
+ if (is5325(dev) || is5365(dev))
+ pvid = 1;
+ else
+ pvid = 0;
+ }
+
+ if (untagged) {
+ vl->untag &= ~(BIT(port));
+ if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
+ vl->untag = 0;
+ }
+
+ b53_set_vlan_entry(dev, vid, vl);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
+ b53_fast_age_vlan(dev, pvid);
+
+ return 0;
+}
+
+static int b53_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u16 vid, vid_start = 0, pvid;
+ struct b53_vlan *vl;
+ int err = 0;
+
+ if (is5325(dev) || is5365(dev))
+ vid_start = 1;
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+
+ /* Use our software cache for dumps, since we do not have any HW
+ * operation returning only the used/valid VLANs
+ */
+ for (vid = vid_start; vid < dev->num_vlans; vid++) {
+ vl = &dev->vlans[vid];
+
+ if (!vl->valid)
+ continue;
+
+ if (!(vl->members & BIT(port)))
+ continue;
+
+ vlan->vid_begin = vlan->vid_end = vid;
+ vlan->flags = 0;
+
+ if (vl->untag & BIT(port))
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+ if (pvid == vid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Address Resolution Logic routines */
+static int b53_arl_op_wait(struct b53_device *dev)
+{
+ unsigned int timeout = 10;
+ u8 reg;
+
+ do {
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
+ if (!(reg & ARLTBL_START_DONE))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
+
+ return -ETIMEDOUT;
+}
+
+static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
+{
+ u8 reg;
+
+ if (op > ARLTBL_RW)
+ return -EINVAL;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
+ reg |= ARLTBL_START_DONE;
+ if (op)
+ reg |= ARLTBL_RW;
+ else
+ reg &= ~ARLTBL_RW;
+ b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
+
+ return b53_arl_op_wait(dev);
+}
+
+static int b53_arl_read(struct b53_device *dev, u64 mac,
+ u16 vid, struct b53_arl_entry *ent, u8 *idx,
+ bool is_valid)
+{
+ unsigned int i;
+ int ret;
+
+ ret = b53_arl_op_wait(dev);
+ if (ret)
+ return ret;
+
+ /* Read the bins */
+ for (i = 0; i < dev->num_arl_entries; i++) {
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+
+ if (!(fwd_entry & ARLTBL_VALID))
+ continue;
+ if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ continue;
+ *idx = i;
+ }
+
+ return -ENOENT;
+}
+
+static int b53_arl_op(struct b53_device *dev, int op, int port,
+ const unsigned char *addr, u16 vid, bool is_valid)
+{
+ struct b53_arl_entry ent;
+ u32 fwd_entry;
+ u64 mac, mac_vid = 0;
+ u8 idx = 0;
+ int ret;
+
+ /* Convert the array into a 64-bit MAC */
+ mac = b53_mac_to_u64(addr);
+
+ /* Perform a read for the given MAC and VID */
+ b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
+ b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+
+ /* Issue a read operation for this MAC */
+ ret = b53_arl_rw_op(dev, 1);
+ if (ret)
+ return ret;
+
+ ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
+ /* If this is a read, just finish now */
+ if (op)
+ return ret;
+
+ /* We could not find a matching MAC, so reset to a new entry */
+ if (ret) {
+ fwd_entry = 0;
+ idx = 1;
+ }
+
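+ /* Build a static entry for this MAC/VID and write it back into the
+ * selected bin; is_valid set to false clears the entry when removing
+ * an address
+ */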
+ memset(&ent, 0, sizeof(ent));
+ ent.port = port;
+ ent.is_valid = is_valid;
+ ent.vid = vid;
+ ent.is_static = true;
+ memcpy(ent.mac, addr, ETH_ALEN);
+ b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+
+ b53_write64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
+ b53_write32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+
+ return b53_arl_rw_op(dev, 0);
+}
+
+static int b53_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ /* 5325 and 5365 require some more massaging, but could
+ * be supported eventually
+ */
+ if (is5325(priv) || is5365(priv))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void b53_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
+ pr_err("%s: failed to add MAC address\n", __func__);
+}
+
+static int b53_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
+}
+
+static int b53_arl_search_wait(struct b53_device *dev)
+{
+ unsigned int timeout = 1000;
+ u8 reg;
+
+ do {
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+ if (!(reg & ARL_SRCH_STDN))
+ return 0;
+
+ if (reg & ARL_SRCH_VLID)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE,
+ B53_ARL_SRCH_RSTL(idx), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static int b53_fdb_copy(struct net_device *dev, int port,
+ const struct b53_arl_entry *ent,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ if (!ent->is_valid)
+ return 0;
+
+ if (port != ent->port)
+ return 0;
+
+ ether_addr_copy(fdb->addr, ent->mac);
+ fdb->vid = ent->vid;
+ fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ return cb(&fdb->obj);
+}
+
+static int b53_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct b53_device *priv = ds_to_priv(ds);
+ struct net_device *dev = ds->ports[port].netdev;
+ struct b53_arl_entry results[2];
+ unsigned int count = 0;
+ int ret;
+ u8 reg;
+
+ /* Start search operation */
+ reg = ARL_SRCH_STDN;
+ b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+
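+ /* Each iteration reads the first search result, plus a second one on
+ * switches with more than two ARL bins; the walk stops once both results
+ * are invalid or after 1024 iterations
+ */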
+ do {
+ ret = b53_arl_search_wait(priv);
+ if (ret)
+ return ret;
+
+ b53_arl_search_rd(priv, 0, &results[0]);
+ ret = b53_fdb_copy(dev, port, &results[0], fdb, cb);
+ if (ret)
+ return ret;
+
+ if (priv->num_arl_entries > 2) {
+ b53_arl_search_rd(priv, 1, &results[1]);
+ ret = b53_fdb_copy(dev, port, &results[1], fdb, cb);
+ if (ret)
+ return ret;
+
+ if (!results[0].is_valid && !results[1].is_valid)
+ break;
+ }
+
+ } while (count++ < 1024);
+
+ return 0;
+}
+
+static int b53_br_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u16 pvlan, reg;
+ unsigned int i;
+
+ dev->ports[port].bridge_dev = bridge;
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+
+ b53_for_each_port(dev, i) {
+ if (dev->ports[i].bridge_dev != bridge)
+ continue;
+
+ /* Add this local port to the remote port VLAN control
+ * membership and update the remote port bitmask
+ */
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
+ reg |= BIT(port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
+ dev->ports[i].vlan_ctl_mask = reg;
+
+ pvlan |= BIT(i);
+ }
+
+ /* Configure the local port VLAN control membership to include
+ * remote ports and update the local port bitmask
+ */
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ dev->ports[port].vlan_ctl_mask = pvlan;
+
+ return 0;
+}
+
+static void b53_br_leave(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ struct net_device *bridge = dev->ports[port].bridge_dev;
+ struct b53_vlan *vl = &dev->vlans[0];
+ unsigned int i;
+ u16 pvlan, reg, pvid;
+
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+
+ b53_for_each_port(dev, i) {
+ /* Don't touch the remaining ports */
+ if (dev->ports[i].bridge_dev != bridge)
+ continue;
+
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
+ dev->ports[port].vlan_ctl_mask = reg;
+
+ /* Prevent self removal to preserve isolation */
+ if (port != i)
+ pvlan &= ~BIT(i);
+ }
+
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ dev->ports[port].vlan_ctl_mask = pvlan;
+ dev->ports[port].bridge_dev = NULL;
+
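+ /* Re-join the default VLAN untagged, together with the CPU port, so the
+ * port keeps a data path once it has left the bridge
+ */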
+ if (is5325(dev) || is5365(dev))
+ pvid = 1;
+ else
+ pvid = 0;
+
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members |= BIT(port) | BIT(dev->cpu_port);
+ vl->untag |= BIT(port) | BIT(dev->cpu_port);
+ b53_set_vlan_entry(dev, pvid, vl);
+}
+
+static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u8 hw_state, cur_hw_state;
+ u8 reg;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = PORT_CTRL_DIS_STATE;
+ break;
+ case BR_STATE_LISTENING:
+ hw_state = PORT_CTRL_LISTEN_STATE;
+ break;
+ case BR_STATE_LEARNING:
+ hw_state = PORT_CTRL_LEARN_STATE;
+ break;
+ case BR_STATE_FORWARDING:
+ hw_state = PORT_CTRL_FWD_STATE;
+ break;
+ case BR_STATE_BLOCKING:
+ hw_state = PORT_CTRL_BLOCK_STATE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ /* Fast-age ARL entries if we are moving a port from Learning or
+ * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
+ * state (hw_state)
+ */
+ if (cur_hw_state != hw_state) {
+ if (cur_hw_state >= PORT_CTRL_LEARN_STATE &&
+ hw_state <= PORT_CTRL_LISTEN_STATE) {
+ if (b53_fast_age_port(dev, port)) {
+ dev_err(ds->dev, "fast ageing failed\n");
+ return;
+ }
+ }
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ reg &= ~PORT_CTRL_STP_STATE_MASK;
+ reg |= hw_state;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+}
+
+static struct dsa_switch_driver b53_switch_ops = {
+ .tag_protocol = DSA_TAG_PROTO_NONE,
+ .setup = b53_setup,
+ .set_addr = b53_set_addr,
+ .get_strings = b53_get_strings,
+ .get_ethtool_stats = b53_get_ethtool_stats,
+ .get_sset_count = b53_get_sset_count,
+ .phy_read = b53_phy_read16,
+ .phy_write = b53_phy_write16,
+ .adjust_link = b53_adjust_link,
+ .port_enable = b53_enable_port,
+ .port_disable = b53_disable_port,
+ .port_bridge_join = b53_br_join,
+ .port_bridge_leave = b53_br_leave,
+ .port_stp_state_set = b53_br_set_stp_state,
+ .port_vlan_filtering = b53_vlan_filtering,
+ .port_vlan_prepare = b53_vlan_prepare,
+ .port_vlan_add = b53_vlan_add,
+ .port_vlan_del = b53_vlan_del,
+ .port_vlan_dump = b53_vlan_dump,
+ .port_fdb_prepare = b53_fdb_prepare,
+ .port_fdb_dump = b53_fdb_dump,
+ .port_fdb_add = b53_fdb_add,
+ .port_fdb_del = b53_fdb_del,
+};
+
+struct b53_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ u16 vlans;
+ u16 enabled_ports;
+ u8 cpu_port;
+ u8 vta_regs[3];
+ u8 arl_entries;
+ u8 duplex_reg;
+ u8 jumbo_pm_reg;
+ u8 jumbo_size_reg;
+};
+
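+ /* Per-family VLAN table access/index/entry register offsets */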
+#define B53_VTA_REGS \
+ { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
+#define B53_VTA_REGS_9798 \
+ { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
+#define B53_VTA_REGS_63XX \
+ { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
+
+static const struct b53_chip_data b53_switch_chips[] = {
+ {
+ .chip_id = BCM5325_DEVICE_ID,
+ .dev_name = "BCM5325",
+ .vlans = 16,
+ .enabled_ports = 0x1f,
+ .arl_entries = 2,
+ .cpu_port = B53_CPU_PORT_25,
+ .duplex_reg = B53_DUPLEX_STAT_FE,
+ },
+ {
+ .chip_id = BCM5365_DEVICE_ID,
+ .dev_name = "BCM5365",
+ .vlans = 256,
+ .enabled_ports = 0x1f,
+ .arl_entries = 2,
+ .cpu_port = B53_CPU_PORT_25,
+ .duplex_reg = B53_DUPLEX_STAT_FE,
+ },
+ {
+ .chip_id = BCM5395_DEVICE_ID,
+ .dev_name = "BCM5395",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM5397_DEVICE_ID,
+ .dev_name = "BCM5397",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS_9798,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM5398_DEVICE_ID,
+ .dev_name = "BCM5398",
+ .vlans = 4096,
+ .enabled_ports = 0x7f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS_9798,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53115_DEVICE_ID,
+ .dev_name = "BCM53115",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .vta_regs = B53_VTA_REGS,
+ .cpu_port = B53_CPU_PORT,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53125_DEVICE_ID,
+ .dev_name = "BCM53125",
+ .vlans = 4096,
+ .enabled_ports = 0xff,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53128_DEVICE_ID,
+ .dev_name = "BCM53128",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM63XX_DEVICE_ID,
+ .dev_name = "BCM63xx",
+ .vlans = 4096,
+ .enabled_ports = 0, /* pdata must provide them */
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS_63XX,
+ .duplex_reg = B53_DUPLEX_STAT_63XX,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
+ },
+ {
+ .chip_id = BCM53010_DEVICE_ID,
+ .dev_name = "BCM53010",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53011_DEVICE_ID,
+ .dev_name = "BCM53011",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53012_DEVICE_ID,
+ .dev_name = "BCM53012",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53018_DEVICE_ID,
+ .dev_name = "BCM53018",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53019_DEVICE_ID,
+ .dev_name = "BCM53019",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+};
+
+static int b53_switch_init(struct b53_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
+ const struct b53_chip_data *chip = &b53_switch_chips[i];
+
+ if (chip->chip_id == dev->chip_id) {
+ if (!dev->enabled_ports)
+ dev->enabled_ports = chip->enabled_ports;
+ dev->name = chip->dev_name;
+ dev->duplex_reg = chip->duplex_reg;
+ dev->vta_regs[0] = chip->vta_regs[0];
+ dev->vta_regs[1] = chip->vta_regs[1];
+ dev->vta_regs[2] = chip->vta_regs[2];
+ dev->jumbo_pm_reg = chip->jumbo_pm_reg;
+ ds->drv = &b53_switch_ops;
+ dev->cpu_port = chip->cpu_port;
+ dev->num_vlans = chip->vlans;
+ dev->num_arl_entries = chip->arl_entries;
+ break;
+ }
+ }
+
+ /* check which BCM5325x version we have */
+ if (is5325(dev)) {
+ u8 vc4;
+
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
+
+ /* check reserved bits */
+ switch (vc4 & 3) {
+ case 1:
+ /* BCM5325E */
+ break;
+ case 3:
+ /* BCM5325F - do not use port 4 */
+ dev->enabled_ports &= ~BIT(4);
+ break;
+ default:
+/* On the BCM47XX SoCs this is the supported internal switch. */
+#ifndef CONFIG_BCM47XX
+ /* BCM5325M */
+ return -EINVAL;
+#else
+ break;
+#endif
+ }
+ } else if (dev->chip_id == BCM53115_DEVICE_ID) {
+ u64 strap_value;
+
+ b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
+ /* use second IMP port if GMII is enabled */
+ if (strap_value & SV_GMII_CTRL_115)
+ dev->cpu_port = 5;
+ }
+
+ /* cpu port is always last */
+ dev->num_ports = dev->cpu_port + 1;
+ dev->enabled_ports |= BIT(dev->cpu_port);
+
+ dev->ports = devm_kzalloc(dev->dev,
+ sizeof(struct b53_port) * dev->num_ports,
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+
+ dev->vlans = devm_kzalloc(dev->dev,
+ sizeof(struct b53_vlan) * dev->num_vlans,
+ GFP_KERNEL);
+ if (!dev->vlans)
+ return -ENOMEM;
+
+ dev->reset_gpio = b53_switch_get_reset_gpio(dev);
+ if (dev->reset_gpio >= 0) {
+ ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "robo_reset");
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
+ void *priv)
+{
+ struct dsa_switch *ds;
+ struct b53_device *dev;
+
+ ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ dev = (struct b53_device *)(ds + 1);
+
+ ds->priv = dev;
+ ds->dev = base;
+ dev->dev = base;
+
+ dev->ds = ds;
+ dev->priv = priv;
+ dev->ops = ops;
+ mutex_init(&dev->reg_mutex);
+ mutex_init(&dev->stats_mutex);
+
+ return dev;
+}
+EXPORT_SYMBOL(b53_switch_alloc);
+
+int b53_switch_detect(struct b53_device *dev)
+{
+ u32 id32;
+ u16 tmp;
+ u8 id8;
+ int ret;
+
+ ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
+ if (ret)
+ return ret;
+
+ switch (id8) {
+ case 0:
+ /* BCM5325 and BCM5365 do not have this register so reads
+ * return 0. But the read operation did succeed, so assume this
+ * is one of them.
+ *
+ * Next check if we can write to the 5325's VTA register; for
+ * 5365 it is read only.
+ */
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
+
+ if (tmp == 0xf)
+ dev->chip_id = BCM5325_DEVICE_ID;
+ else
+ dev->chip_id = BCM5365_DEVICE_ID;
+ break;
+ case BCM5395_DEVICE_ID:
+ case BCM5397_DEVICE_ID:
+ case BCM5398_DEVICE_ID:
+ dev->chip_id = id8;
+ break;
+ default:
+ ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
+ if (ret)
+ return ret;
+
+ switch (id32) {
+ case BCM53115_DEVICE_ID:
+ case BCM53125_DEVICE_ID:
+ case BCM53128_DEVICE_ID:
+ case BCM53010_DEVICE_ID:
+ case BCM53011_DEVICE_ID:
+ case BCM53012_DEVICE_ID:
+ case BCM53018_DEVICE_ID:
+ case BCM53019_DEVICE_ID:
+ dev->chip_id = id32;
+ break;
+ default:
+ pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
+ id8, id32);
+ return -ENODEV;
+ }
+ }
+
+ if (dev->chip_id == BCM5325_DEVICE_ID)
+ return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
+ &dev->core_rev);
+ else
+ return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
+ &dev->core_rev);
+}
+EXPORT_SYMBOL(b53_switch_detect);
+
+int b53_switch_register(struct b53_device *dev)
+{
+ int ret;
+
+ if (dev->pdata) {
+ dev->chip_id = dev->pdata->chip_id;
+ dev->enabled_ports = dev->pdata->enabled_ports;
+ }
+
+ if (!dev->chip_id && b53_switch_detect(dev))
+ return -EINVAL;
+
+ ret = b53_switch_init(dev);
+ if (ret)
+ return ret;
+
+ pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
+
+ return dsa_register_switch(dev->ds, dev->ds->dev->of_node);
+}
+EXPORT_SYMBOL(b53_switch_register);
+
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 switch library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
new file mode 100644
index 0000000..aa87c3f
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -0,0 +1,392 @@
+/*
+ * B53 register access through MII registers
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/brcmphy.h>
+#include <linux/rtnetlink.h>
+#include <net/dsa.h>
+
+#include "b53_priv.h"
+
+/* MII registers */
+#define REG_MII_PAGE 0x10 /* MII Page register */
+#define REG_MII_ADDR 0x11 /* MII Address register */
+#define REG_MII_DATA0 0x18 /* MII Data register 0 */
+#define REG_MII_DATA1 0x19 /* MII Data register 1 */
+#define REG_MII_DATA2 0x1a /* MII Data register 2 */
+#define REG_MII_DATA3 0x1b /* MII Data register 3 */
+
+#define REG_MII_PAGE_ENABLE BIT(0)
+#define REG_MII_ADDR_WRITE BIT(0)
+#define REG_MII_ADDR_READ BIT(1)
+
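+ /* Switch registers are reached indirectly through the Broadcom pseudo-PHY:
+ * select the page, latch the register address with a read or write strobe,
+ * then poll until the strobe clears
+ */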
+static int b53_mdio_op(struct b53_device *dev, u8 page, u8 reg, u16 op)
+{
+ int i;
+ u16 v;
+ int ret;
+ struct mii_bus *bus = dev->priv;
+
+ if (dev->current_page != page) {
+ /* set page number */
+ v = (page << 8) | REG_MII_PAGE_ENABLE;
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_PAGE, v);
+ if (ret)
+ return ret;
+ dev->current_page = page;
+ }
+
+ /* set register address */
+ v = (reg << 8) | op;
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_ADDR, v);
+ if (ret)
+ return ret;
+
+ /* check if operation completed */
+ for (i = 0; i < 5; ++i) {
+ v = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_ADDR);
+ if (!(v & (REG_MII_ADDR_WRITE | REG_MII_ADDR_READ)))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (WARN_ON(i == 5))
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_mdio_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0) & 0xff;
+
+ return 0;
+}
+
+static int b53_mdio_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
+
+ return 0;
+}
+
+static int b53_mdio_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
+ *val |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA1) << 16;
+
+ return 0;
+}
+
+static int b53_mdio_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ u64 temp = 0;
+ int i;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
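+ /* DATA0..DATA2 hold the 48-bit value in 16-bit slices, DATA0 carrying
+ * the least significant word
+ */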
+ for (i = 2; i >= 0; i--) {
+ temp <<= 16;
+ temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i);
+ }
+
+ *val = temp;
+
+ return 0;
+}
+
+static int b53_mdio_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ u64 temp = 0;
+ int i;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ for (i = 3; i >= 0; i--) {
+ temp <<= 16;
+ temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i);
+ }
+
+ *val = temp;
+
+ return 0;
+}
+
+static int b53_mdio_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0, value);
+ if (ret)
+ return ret;
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0, value);
+ if (ret)
+ return ret;
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u32 temp = value;
+
+ for (i = 0; i < 2; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u64 temp = value;
+
+ for (i = 0; i < 3; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u64 temp = value;
+
+ for (i = 0; i < 4; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_phy_read16(struct b53_device *dev, int addr, int reg,
+ u16 *value)
+{
+ struct mii_bus *bus = dev->priv;
+
+ *value = mdiobus_read_nested(bus, addr, reg);
+
+ return 0;
+}
+
+static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg,
+ u16 value)
+{
+ struct mii_bus *bus = dev->bus;
+
+ return mdiobus_write_nested(bus, addr, reg, value);
+}
+
+static struct b53_io_ops b53_mdio_ops = {
+ .read8 = b53_mdio_read8,
+ .read16 = b53_mdio_read16,
+ .read32 = b53_mdio_read32,
+ .read48 = b53_mdio_read48,
+ .read64 = b53_mdio_read64,
+ .write8 = b53_mdio_write8,
+ .write16 = b53_mdio_write16,
+ .write32 = b53_mdio_write32,
+ .write48 = b53_mdio_write48,
+ .write64 = b53_mdio_write64,
+ .phy_read16 = b53_mdio_phy_read16,
+ .phy_write16 = b53_mdio_phy_write16,
+};
+
+#define B53_BRCM_OUI_1 0x0143bc00
+#define B53_BRCM_OUI_2 0x03625c00
+#define B53_BRCM_OUI_3 0x00406000
+
+static int b53_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev;
+ u32 phy_id;
+ int ret;
+
+ /* allow the generic PHY driver to take over the non-management MDIO
+ * addresses
+ */
+ if (mdiodev->addr != BRCM_PSEUDO_PHY_ADDR && mdiodev->addr != 0) {
+ dev_err(&mdiodev->dev, "leaving address %d to PHY\n",
+ mdiodev->addr);
+ return -ENODEV;
+ }
+
+ /* read the first port's id */
+ phy_id = mdiobus_read(mdiodev->bus, 0, 2) << 16;
+ phy_id |= mdiobus_read(mdiodev->bus, 0, 3);
+
+ /* BCM5325, BCM539x (OUI_1)
+ * BCM53125, BCM53128 (OUI_2)
+ * BCM5365 (OUI_3)
+ */
+ if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
+ dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
+ return -ENODEV;
+ }
+
+ /* First probe will come from SWITCH_MDIO controller on the 7445D0
+ * switch, which will conflict with the 7445 integrated switch
+ * pseudo-phy (we end-up programming both). In that case, we return
+ * -EPROBE_DEFER for the first time we get here, and wait until we come
+ * back with the slave MDIO bus which has the correct indirection
+ * layer setup
+ */
+ if (of_machine_is_compatible("brcm,bcm7445d0") &&
+ strcmp(mdiodev->bus->name, "sf2 slave mii"))
+ return -EPROBE_DEFER;
+
+ dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus);
+ if (!dev)
+ return -ENOMEM;
+
+ /* we don't use page 0xff, so force a page set */
+ dev->current_page = 0xff;
+ dev->bus = mdiodev->bus;
+
+ dev_set_drvdata(&mdiodev->dev, dev);
+
+ ret = b53_switch_register(dev);
+ if (ret) {
+ dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void b53_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+ struct dsa_switch *ds = dev->ds;
+
+ dsa_unregister_switch(ds);
+}
+
+static const struct of_device_id b53_of_match[] = {
+ { .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm53115" },
+ { .compatible = "brcm,bcm53125" },
+ { .compatible = "brcm,bcm53128" },
+ { .compatible = "brcm,bcm5365" },
+ { .compatible = "brcm,bcm5395" },
+ { .compatible = "brcm,bcm5397" },
+ { .compatible = "brcm,bcm5398" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, b53_of_match);
+
+static struct mdio_driver b53_mdio_driver = {
+ .probe = b53_mdio_probe,
+ .remove = b53_mdio_remove,
+ .mdiodrv.driver = {
+ .name = "bcm53xx",
+ .of_match_table = b53_of_match,
+ },
+};
+
+static int __init b53_mdio_driver_register(void)
+{
+ return mdio_driver_register(&b53_mdio_driver);
+}
+module_init(b53_mdio_driver_register);
+
+static void __exit b53_mdio_driver_unregister(void)
+{
+ mdio_driver_unregister(&b53_mdio_driver);
+}
+module_exit(b53_mdio_driver_unregister);
+
+MODULE_DESCRIPTION("B53 MDIO access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
new file mode 100644
index 0000000..f115ee2
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -0,0 +1,260 @@
+/*
+ * B53 register access through memory mapped registers
+ *
+ * Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kconfig.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+struct b53_mmap_priv {
+ void __iomem *regs;
+};
+
+static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ *val = readb(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && dev->pdata &&
+ dev->pdata->big_endian)
+ *val = __raw_readw(regs + (page << 8) + reg);
+ else
+ *val = readw(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && dev->pdata &&
+ dev->pdata->big_endian)
+ *val = __raw_readl(regs + (page << 8) + reg);
+ else
+ *val = readl(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
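+ /* A 16-bit aligned register is split into a 16-bit low word followed by
+ * a 32-bit high part; a 32-bit aligned one uses the opposite split
+ */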
+ if (reg % 4) {
+ u16 lo;
+ u32 hi;
+
+ b53_mmap_read16(dev, page, reg, &lo);
+ b53_mmap_read32(dev, page, reg + 2, &hi);
+
+ *val = ((u64)hi << 16) | lo;
+ } else {
+ u32 lo;
+ u16 hi;
+
+ b53_mmap_read32(dev, page, reg, &lo);
+ b53_mmap_read16(dev, page, reg + 4, &hi);
+
+ *val = ((u64)hi << 32) | lo;
+ }
+
+ return 0;
+}
+
+static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ u32 hi, lo;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ b53_mmap_read32(dev, page, reg, &lo);
+ b53_mmap_read32(dev, page, reg + 4, &hi);
+
+ *val = ((u64)hi << 32) | lo;
+
+ return 0;
+}
+
+static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ u8 __iomem *regs = dev->priv;
+
+ writeb(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && dev->pdata &&
+ dev->pdata->big_endian)
+ __raw_writew(value, regs + (page << 8) + reg);
+ else
+ writew(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && dev->pdata &&
+ dev->pdata->big_endian)
+ __raw_writel(value, regs + (page << 8) + reg);
+ else
+ writel(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (reg % 4) {
+ u32 hi = (u32)(value >> 16);
+ u16 lo = (u16)value;
+
+ b53_mmap_write16(dev, page, reg, lo);
+ b53_mmap_write32(dev, page, reg + 2, hi);
+ } else {
+ u16 hi = (u16)(value >> 32);
+ u32 lo = (u32)value;
+
+ b53_mmap_write32(dev, page, reg, lo);
+ b53_mmap_write16(dev, page, reg + 4, hi);
+ }
+
+ return 0;
+}
+
+static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ u32 hi, lo;
+
+ hi = upper_32_bits(value);
+ lo = lower_32_bits(value);
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ b53_mmap_write32(dev, page, reg, lo);
+ b53_mmap_write32(dev, page, reg + 4, hi);
+
+ return 0;
+}
+
+static struct b53_io_ops b53_mmap_ops = {
+ .read8 = b53_mmap_read8,
+ .read16 = b53_mmap_read16,
+ .read32 = b53_mmap_read32,
+ .read48 = b53_mmap_read48,
+ .read64 = b53_mmap_read64,
+ .write8 = b53_mmap_write8,
+ .write16 = b53_mmap_write16,
+ .write32 = b53_mmap_write32,
+ .write48 = b53_mmap_write48,
+ .write64 = b53_mmap_write64,
+};
+
+static int b53_mmap_probe(struct platform_device *pdev)
+{
+ struct b53_platform_data *pdata = pdev->dev.platform_data;
+ struct b53_device *dev;
+
+ if (!pdata)
+ return -EINVAL;
+
+ dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, pdata->regs);
+ if (!dev)
+ return -ENOMEM;
+
+ if (pdata)
+ dev->pdata = pdata;
+
+ platform_set_drvdata(pdev, dev);
+
+ return b53_switch_register(dev);
+}
+
+static int b53_mmap_remove(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static const struct of_device_id b53_mmap_of_table[] = {
+ { .compatible = "brcm,bcm3384-switch" },
+ { .compatible = "brcm,bcm6328-switch" },
+ { .compatible = "brcm,bcm6368-switch" },
+ { .compatible = "brcm,bcm63xx-switch" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver b53_mmap_driver = {
+ .probe = b53_mmap_probe,
+ .remove = b53_mmap_remove,
+ .driver = {
+ .name = "b53-switch",
+ .of_match_table = b53_mmap_of_table,
+ },
+};
+
+module_platform_driver(b53_mmap_driver);
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 MMAP access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
new file mode 100644
index 0000000..5d8c602
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -0,0 +1,387 @@
+/*
+ * B53 common definitions
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_PRIV_H
+#define __B53_PRIV_H
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#include "b53_regs.h"
+
+struct b53_device;
+struct net_device;
+
+struct b53_io_ops {
+ int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
+ int (*read16)(struct b53_device *dev, u8 page, u8 reg, u16 *value);
+ int (*read32)(struct b53_device *dev, u8 page, u8 reg, u32 *value);
+ int (*read48)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
+ int (*read64)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
+ int (*write8)(struct b53_device *dev, u8 page, u8 reg, u8 value);
+ int (*write16)(struct b53_device *dev, u8 page, u8 reg, u16 value);
+ int (*write32)(struct b53_device *dev, u8 page, u8 reg, u32 value);
+ int (*write48)(struct b53_device *dev, u8 page, u8 reg, u64 value);
+ int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value);
+ int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value);
+ int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
+};
+
+enum {
+ BCM5325_DEVICE_ID = 0x25,
+ BCM5365_DEVICE_ID = 0x65,
+ BCM5395_DEVICE_ID = 0x95,
+ BCM5397_DEVICE_ID = 0x97,
+ BCM5398_DEVICE_ID = 0x98,
+ BCM53115_DEVICE_ID = 0x53115,
+ BCM53125_DEVICE_ID = 0x53125,
+ BCM53128_DEVICE_ID = 0x53128,
+ BCM63XX_DEVICE_ID = 0x6300,
+ BCM53010_DEVICE_ID = 0x53010,
+ BCM53011_DEVICE_ID = 0x53011,
+ BCM53012_DEVICE_ID = 0x53012,
+ BCM53018_DEVICE_ID = 0x53018,
+ BCM53019_DEVICE_ID = 0x53019,
+};
+
+#define B53_N_PORTS 9
+#define B53_N_PORTS_25 6
+
+struct b53_port {
+ u16 vlan_ctl_mask;
+ struct net_device *bridge_dev;
+};
+
+struct b53_vlan {
+ u16 members;
+ u16 untag;
+ bool valid;
+};
+
+struct b53_device {
+ struct dsa_switch *ds;
+ struct b53_platform_data *pdata;
+ const char *name;
+
+ struct mutex reg_mutex;
+ struct mutex stats_mutex;
+ const struct b53_io_ops *ops;
+
+ /* chip specific data */
+ u32 chip_id;
+ u8 core_rev;
+ u8 vta_regs[3];
+ u8 duplex_reg;
+ u8 jumbo_pm_reg;
+ u8 jumbo_size_reg;
+ int reset_gpio;
+ u8 num_arl_entries;
+
+ /* used ports mask */
+ u16 enabled_ports;
+ unsigned int cpu_port;
+
+ /* connect specific data */
+ u8 current_page;
+ struct device *dev;
+
+ /* Master MDIO bus we got probed from */
+ struct mii_bus *bus;
+
+ void *priv;
+
+ /* run time configuration */
+ bool enable_jumbo;
+
+ unsigned int num_vlans;
+ struct b53_vlan *vlans;
+ unsigned int num_ports;
+ struct b53_port *ports;
+};
+
+#define b53_for_each_port(dev, i) \
+ for (i = 0; i < B53_N_PORTS; i++) \
+ if (dev->enabled_ports & BIT(i))
+
+
+static inline int is5325(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5325_DEVICE_ID;
+}
+
+static inline int is5365(struct b53_device *dev)
+{
+#ifdef CONFIG_BCM47XX
+ return dev->chip_id == BCM5365_DEVICE_ID;
+#else
+ return 0;
+#endif
+}
+
+static inline int is5397_98(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5397_DEVICE_ID ||
+ dev->chip_id == BCM5398_DEVICE_ID;
+}
+
+static inline int is539x(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5395_DEVICE_ID ||
+ dev->chip_id == BCM5397_DEVICE_ID ||
+ dev->chip_id == BCM5398_DEVICE_ID;
+}
+
+static inline int is531x5(struct b53_device *dev)
+{
+ return dev->chip_id == BCM53115_DEVICE_ID ||
+ dev->chip_id == BCM53125_DEVICE_ID ||
+ dev->chip_id == BCM53128_DEVICE_ID;
+}
+
+static inline int is63xx(struct b53_device *dev)
+{
+#ifdef CONFIG_BCM63XX
+ return dev->chip_id == BCM63XX_DEVICE_ID;
+#else
+ return 0;
+#endif
+}
+
+static inline int is5301x(struct b53_device *dev)
+{
+ return dev->chip_id == BCM53010_DEVICE_ID ||
+ dev->chip_id == BCM53011_DEVICE_ID ||
+ dev->chip_id == BCM53012_DEVICE_ID ||
+ dev->chip_id == BCM53018_DEVICE_ID ||
+ dev->chip_id == BCM53019_DEVICE_ID;
+}
+
+#define B53_CPU_PORT_25 5
+#define B53_CPU_PORT 8
+
+static inline int is_cpu_port(struct b53_device *dev, int port)
+{
+ return dev->cpu_port == port;
+}
+
+struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
+ void *priv);
+
+int b53_switch_detect(struct b53_device *dev);
+
+int b53_switch_register(struct b53_device *dev);
+
+static inline void b53_switch_remove(struct b53_device *dev)
+{
+ dsa_unregister_switch(dev->ds);
+}
+
+static inline int b53_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read8(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read16(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read32(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read48(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read64(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write8(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write16(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write32(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write48(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write64(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+struct b53_arl_entry {
+ u8 port;
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ u8 is_valid:1;
+ u8 is_age:1;
+ u8 is_static:1;
+};
+
+static inline void b53_mac_from_u64(u64 src, u8 *dst)
+{
+ unsigned int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
+}
+
+static inline u64 b53_mac_to_u64(const u8 *src)
+{
+ unsigned int i;
+ u64 dst = 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
+
+ return dst;
+}
+
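+ /* Helpers to convert between the packed ARL MAC/VID + data register
+ * layout and struct b53_arl_entry
+ */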
+static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
+ u64 mac_vid, u32 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK;
+ ent->is_valid = !!(fwd_entry & ARLTBL_VALID);
+ ent->is_age = !!(fwd_entry & ARLTBL_AGE);
+ ent->is_static = !!(fwd_entry & ARLTBL_STATIC);
+ b53_mac_from_u64(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+}
+
+static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = b53_mac_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
+ *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK;
+ if (ent->is_valid)
+ *fwd_entry |= ARLTBL_VALID;
+ if (ent->is_static)
+ *fwd_entry |= ARLTBL_STATIC;
+ if (ent->is_age)
+ *fwd_entry |= ARLTBL_AGE;
+}
+
+#ifdef CONFIG_BCM47XX
+
+#include <linux/version.h>
+#include <linux/bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
+static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
+{
+ enum bcm47xx_board board = bcm47xx_board_get();
+
+ switch (board) {
+ case BCM47XX_BOARD_LINKSYS_WRT300NV11:
+ case BCM47XX_BOARD_LINKSYS_WRT310NV1:
+ return 8;
+ default:
+ return bcm47xx_nvram_gpio_pin("robo_reset");
+ }
+}
+#else
+static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
+{
+ return -ENOENT;
+}
+#endif
+#endif
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
new file mode 100644
index 0000000..8f12bdd
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -0,0 +1,434 @@
+/*
+ * B53 register definitions
+ *
+ * Copyright (C) 2004 Broadcom Corporation
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_REGS_H
+#define __B53_REGS_H
+
+/* Management Port (SMP) Page offsets */
+#define B53_CTRL_PAGE 0x00 /* Control */
+#define B53_STAT_PAGE 0x01 /* Status */
+#define B53_MGMT_PAGE 0x02 /* Management Mode */
+#define B53_MIB_AC_PAGE 0x03 /* MIB Autocast */
+#define B53_ARLCTRL_PAGE 0x04 /* ARL Control */
+#define B53_ARLIO_PAGE 0x05 /* ARL Access */
+#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
+#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
+
+/* PHY Registers */
+#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
+#define B53_IM_PORT_PAGE 0x18 /* Inverse MII Port (to EMAC) */
+#define B53_ALL_PORT_PAGE 0x19 /* All ports MII (broadcast) */
+
+/* MIB registers */
+#define B53_MIB_PAGE(i) (0x20 + (i))
+
+/* Quality of Service (QoS) Registers */
+#define B53_QOS_PAGE 0x30
+
+/* Port VLAN Page */
+#define B53_PVLAN_PAGE 0x31
+
+/* VLAN Registers */
+#define B53_VLAN_PAGE 0x34
+
+/* Jumbo Frame Registers */
+#define B53_JUMBO_PAGE 0x40
+
+/* CFP Configuration Registers Page */
+#define B53_CFP_PAGE 0xa1
+
+/*************************************************************************
+ * Control Page registers
+ *************************************************************************/
+
+/* Port Control Register (8 bit) */
+#define B53_PORT_CTRL(i) (0x00 + (i))
+#define PORT_CTRL_RX_DISABLE BIT(0)
+#define PORT_CTRL_TX_DISABLE BIT(1)
+#define PORT_CTRL_RX_BCST_EN BIT(2) /* Broadcast RX (P8 only) */
+#define PORT_CTRL_RX_MCST_EN BIT(3) /* Multicast RX (P8 only) */
+#define PORT_CTRL_RX_UCST_EN BIT(4) /* Unicast RX (P8 only) */
+#define PORT_CTRL_STP_STATE_S 5
+#define PORT_CTRL_NO_STP (0 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_DIS_STATE (1 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_BLOCK_STATE (2 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_LISTEN_STATE (3 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_LEARN_STATE (4 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_FWD_STATE (5 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_STP_STATE_MASK (0x7 << PORT_CTRL_STP_STATE_S)
+
+/* SMP Control Register (8 bit) */
+#define B53_SMP_CTRL 0x0a
+
+/* Switch Mode Control Register (8 bit) */
+#define B53_SWITCH_MODE 0x0b
+#define SM_SW_FWD_MODE BIT(0) /* 1 = Managed Mode */
+#define SM_SW_FWD_EN BIT(1) /* Forwarding Enable */
+
+/* IMP Port state override register (8 bit) */
+#define B53_PORT_OVERRIDE_CTRL 0x0e
+#define PORT_OVERRIDE_LINK BIT(0)
+#define PORT_OVERRIDE_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
+#define PORT_OVERRIDE_SPEED_S 2
+#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
+#define PORT_OVERRIDE_RX_FLOW BIT(4)
+#define PORT_OVERRIDE_TX_FLOW BIT(5)
+#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
+#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
+
+/* Power-down mode control */
+#define B53_PD_MODE_CTRL_25 0x0f
+
+/* IP Multicast control (8 bit) */
+#define B53_IP_MULTICAST_CTRL 0x21
+#define B53_IPMC_FWD_EN BIT(1)
+#define B53_UC_FWD_EN BIT(6)
+#define B53_MC_FWD_EN BIT(7)
+
+/* (16 bit) */
+#define B53_UC_FLOOD_MASK 0x32
+#define B53_MC_FLOOD_MASK 0x34
+#define B53_IPMC_FLOOD_MASK 0x36
+
+/*
+ * Override Ports 0-7 State on devices with xMII interfaces (8 bit)
+ *
+ * For port 8 still use B53_PORT_OVERRIDE_CTRL
+ * Note that not every port is available on all devices: e.g. the BCM5301X
+ * has no override register for port 6, and the BCM63xx has similar limitations.
+ */
+#define B53_GMII_PORT_OVERRIDE_CTRL(i) (0x58 + (i))
+#define GMII_PO_LINK BIT(0)
+#define GMII_PO_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
+#define GMII_PO_SPEED_S 2
+#define GMII_PO_SPEED_10M (0 << GMII_PO_SPEED_S)
+#define GMII_PO_SPEED_100M (1 << GMII_PO_SPEED_S)
+#define GMII_PO_SPEED_1000M (2 << GMII_PO_SPEED_S)
+#define GMII_PO_RX_FLOW BIT(4)
+#define GMII_PO_TX_FLOW BIT(5)
+#define GMII_PO_EN BIT(6) /* Use the register contents */
+#define GMII_PO_SPEED_2000M BIT(7) /* BCM5301X only, requires setting 1000M */
+
+#define B53_RGMII_CTRL_IMP 0x60
+#define RGMII_CTRL_ENABLE_GMII BIT(7)
+#define RGMII_CTRL_TIMING_SEL BIT(2)
+#define RGMII_CTRL_DLL_RXC BIT(1)
+#define RGMII_CTRL_DLL_TXC BIT(0)
+
+#define B53_RGMII_CTRL_P(i) (B53_RGMII_CTRL_IMP + (i))
+
+/* Software reset register (8 bit) */
+#define B53_SOFTRESET 0x79
+#define SW_RST BIT(7)
+#define EN_SW_RST BIT(4)
+
+/* Fast Aging Control register (8 bit) */
+#define B53_FAST_AGE_CTRL 0x88
+#define FAST_AGE_STATIC BIT(0)
+#define FAST_AGE_DYNAMIC BIT(1)
+#define FAST_AGE_PORT BIT(2)
+#define FAST_AGE_VLAN BIT(3)
+#define FAST_AGE_STP BIT(4)
+#define FAST_AGE_MC BIT(5)
+#define FAST_AGE_DONE BIT(7)
+
+/* Fast Aging Port Control register (8 bit) */
+#define B53_FAST_AGE_PORT_CTRL 0x89
+
+/* Fast Aging VID Control register (16 bit) */
+#define B53_FAST_AGE_VID_CTRL 0x8a
+
+/*************************************************************************
+ * Status Page registers
+ *************************************************************************/
+
+/* Link Status Summary Register (16bit) */
+#define B53_LINK_STAT 0x00
+
+/* Link Status Change Register (16 bit) */
+#define B53_LINK_STAT_CHANGE 0x02
+
+/* Port Speed Summary Register (16 bit for FE, 32 bit for GE) */
+#define B53_SPEED_STAT 0x04
+#define SPEED_PORT_FE(reg, port) (((reg) >> (port)) & 1)
+#define SPEED_PORT_GE(reg, port) (((reg) >> 2 * (port)) & 3)
+#define SPEED_STAT_10M 0
+#define SPEED_STAT_100M 1
+#define SPEED_STAT_1000M 2
+
+/* Duplex Status Summary (16 bit) */
+#define B53_DUPLEX_STAT_FE 0x06
+#define B53_DUPLEX_STAT_GE 0x08
+#define B53_DUPLEX_STAT_63XX 0x0c
+
+/* Revision ID register for BCM5325 */
+#define B53_REV_ID_25 0x50
+
+/* Strap Value (48 bit) */
+#define B53_STRAP_VALUE 0x70
+#define SV_GMII_CTRL_115 BIT(27)
+
+/*************************************************************************
+ * Management Mode Page Registers
+ *************************************************************************/
+
+/* Global Management Config Register (8 bit) */
+#define B53_GLOBAL_CONFIG 0x00
+#define GC_RESET_MIB 0x01
+#define GC_RX_BPDU_EN 0x02
+#define GC_MIB_AC_HDR_EN 0x10
+#define GC_MIB_AC_EN 0x20
+#define GC_FRM_MGMT_PORT_M 0xC0
+#define GC_FRM_MGMT_PORT_04 0x00
+#define GC_FRM_MGMT_PORT_MII 0x80
+
+/* Broadcom Header control register (8 bit) */
+#define B53_BRCM_HDR 0x03
+#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */
+#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
+
+/* Device ID register (8 or 32 bit) */
+#define B53_DEVICE_ID 0x30
+
+/* Revision ID register (8 bit) */
+#define B53_REV_ID 0x40
+
+/*************************************************************************
+ * ARL Access Page Registers
+ *************************************************************************/
+
+/* VLAN Table Access Register (8 bit) */
+#define B53_VT_ACCESS 0x80
+#define B53_VT_ACCESS_9798 0x60 /* for BCM5397/BCM5398 */
+#define B53_VT_ACCESS_63XX 0x60 /* for BCM6328/62/68 */
+#define VTA_CMD_WRITE 0
+#define VTA_CMD_READ 1
+#define VTA_CMD_CLEAR 2
+#define VTA_START_CMD BIT(7)
+
+/* VLAN Table Index Register (16 bit) */
+#define B53_VT_INDEX 0x81
+#define B53_VT_INDEX_9798 0x61
+#define B53_VT_INDEX_63XX 0x62
+
+/* VLAN Table Entry Register (32 bit) */
+#define B53_VT_ENTRY 0x83
+#define B53_VT_ENTRY_9798 0x63
+#define B53_VT_ENTRY_63XX 0x64
+#define VTE_MEMBERS 0x1ff
+#define VTE_UNTAG_S 9
+#define VTE_UNTAG (0x1ff << 9)
+
+/*************************************************************************
+ * ARL I/O Registers
+ *************************************************************************/
+
+/* ARL Table Read/Write Register (8 bit) */
+#define B53_ARLTBL_RW_CTRL 0x00
+#define ARLTBL_RW BIT(0)
+#define ARLTBL_START_DONE BIT(7)
+
+/* MAC Address Index Register (48 bit) */
+#define B53_MAC_ADDR_IDX 0x02
+
+/* VLAN ID Index Register (16 bit) */
+#define B53_VLAN_ID_IDX 0x08
+
+/* ARL Table MAC/VID Entry N Registers (64 bit)
+ *
+ * BCM5325 and BCM5365 share most definitions below
+ */
+#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
+#define ARLTBL_MAC_MASK 0xffffffffffff
+#define ARLTBL_VID_S 48
+#define ARLTBL_VID_MASK_25 0xff
+#define ARLTBL_VID_MASK 0xfff
+#define ARLTBL_DATA_PORT_ID_S_25 48
+#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
+#define ARLTBL_AGE_25 BIT(61)
+#define ARLTBL_STATIC_25 BIT(62)
+#define ARLTBL_VALID_25 BIT(63)
+
+/* ARL Table Data Entry N Registers (32 bit) */
+#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08)
+#define ARLTBL_DATA_PORT_ID_MASK 0x1ff
+#define ARLTBL_TC(tc) ((3 & tc) << 11)
+#define ARLTBL_AGE BIT(14)
+#define ARLTBL_STATIC BIT(15)
+#define ARLTBL_VALID BIT(16)
+
+/* ARL Search Control Register (8 bit) */
+#define B53_ARL_SRCH_CTL 0x50
+#define B53_ARL_SRCH_CTL_25 0x20
+#define ARL_SRCH_VLID BIT(0)
+#define ARL_SRCH_STDN BIT(7)
+
+/* ARL Search Address Register (16 bit) */
+#define B53_ARL_SRCH_ADDR 0x51
+#define B53_ARL_SRCH_ADDR_25 0x22
+#define B53_ARL_SRCH_ADDR_65 0x24
+#define ARL_ADDR_MASK GENMASK(14, 0)
+
+/* ARL Search MAC/VID Result (64 bit) */
+#define B53_ARL_SRCH_RSTL_0_MACVID 0x60
+
+/* Single register search result on 5325 */
+#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24
+/* Single register search result on 5365 */
+#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30
+
+/* ARL Search Data Result (32 bit) */
+#define B53_ARL_SRCH_RSTL_0 0x68
+
+#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10))
+#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
+
+/*************************************************************************
+ * Port VLAN Registers
+ *************************************************************************/
+
+/* Port VLAN mask (16 bit); the IMP port is always 8, also on 5325 & co */
+#define B53_PVLAN_PORT_MASK(i) ((i) * 2)
+
+/*************************************************************************
+ * 802.1Q Page Registers
+ *************************************************************************/
+
+/* Global QoS Control (8 bit) */
+#define B53_QOS_GLOBAL_CTL 0x00
+
+/* Enable 802.1Q for individual Ports (16 bit) */
+#define B53_802_1P_EN 0x04
+
+/*************************************************************************
+ * VLAN Page Registers
+ *************************************************************************/
+
+/* VLAN Control 0 (8 bit) */
+#define B53_VLAN_CTRL0 0x00
+#define VC0_8021PF_CTRL_MASK 0x3
+#define VC0_8021PF_CTRL_NONE 0x0
+#define VC0_8021PF_CTRL_CHANGE_PRI 0x1
+#define VC0_8021PF_CTRL_CHANGE_VID 0x2
+#define VC0_8021PF_CTRL_CHANGE_BOTH 0x3
+#define VC0_8021QF_CTRL_MASK 0xc
+#define VC0_8021QF_CTRL_CHANGE_PRI 0x1
+#define VC0_8021QF_CTRL_CHANGE_VID 0x2
+#define VC0_8021QF_CTRL_CHANGE_BOTH 0x3
+#define VC0_RESERVED_1 BIT(1)
+#define VC0_DROP_VID_MISS BIT(4)
+#define VC0_VID_HASH_VID BIT(5)
+#define VC0_VID_CHK_EN BIT(6) /* Use VID,DA or VID,SA */
+#define VC0_VLAN_EN BIT(7) /* 802.1Q VLAN Enabled */
+
+/* VLAN Control 1 (8 bit) */
+#define B53_VLAN_CTRL1 0x01
+#define VC1_RX_MCST_TAG_EN BIT(1)
+#define VC1_RX_MCST_FWD_EN BIT(2)
+#define VC1_RX_MCST_UNTAG_EN BIT(3)
+
+/* VLAN Control 2 (8 bit) */
+#define B53_VLAN_CTRL2 0x02
+
+/* VLAN Control 3 (8 bit when BCM5325, 16 bit else) */
+#define B53_VLAN_CTRL3 0x03
+#define B53_VLAN_CTRL3_63XX 0x04
+#define VC3_MAXSIZE_1532 BIT(6) /* 5325 only */
+#define VC3_HIGH_8BIT_EN BIT(7) /* 5325 only */
+
+/* VLAN Control 4 (8 bit) */
+#define B53_VLAN_CTRL4 0x05
+#define B53_VLAN_CTRL4_25 0x04
+#define B53_VLAN_CTRL4_63XX 0x06
+#define VC4_ING_VID_CHECK_S 6
+#define VC4_ING_VID_CHECK_MASK (0x3 << VC4_ING_VID_CHECK_S)
+#define VC4_ING_VID_VIO_FWD 0 /* forward, but do not learn */
+#define VC4_ING_VID_VIO_DROP 1 /* drop VID violations */
+#define VC4_NO_ING_VID_CHK 2 /* do not check */
+#define VC4_ING_VID_VIO_TO_IMP 3 /* redirect to MII port */
+
+/* VLAN Control 5 (8 bit) */
+#define B53_VLAN_CTRL5 0x06
+#define B53_VLAN_CTRL5_25 0x05
+#define B53_VLAN_CTRL5_63XX 0x07
+#define VC5_VID_FFF_EN BIT(2)
+#define VC5_DROP_VTABLE_MISS BIT(3)
+
+/* VLAN Control 6 (8 bit) */
+#define B53_VLAN_CTRL6 0x07
+#define B53_VLAN_CTRL6_63XX 0x08
+
+/* VLAN Table Access Register (16 bit) */
+#define B53_VLAN_TABLE_ACCESS_25 0x06 /* BCM5325E/5350 */
+#define B53_VLAN_TABLE_ACCESS_65 0x08 /* BCM5365 */
+#define VTA_VID_LOW_MASK_25 0xf
+#define VTA_VID_LOW_MASK_65 0xff
+#define VTA_VID_HIGH_S_25 4
+#define VTA_VID_HIGH_S_65 8
+#define VTA_VID_HIGH_MASK_25 (0xff << VTA_VID_HIGH_S_25)
+#define VTA_VID_HIGH_MASK_65 (0xf << VTA_VID_HIGH_S_65)
+#define VTA_RW_STATE BIT(12)
+#define VTA_RW_STATE_RD 0
+#define VTA_RW_STATE_WR BIT(12)
+#define VTA_RW_OP_EN BIT(13)
+
+/* VLAN Read/Write Registers for (16/32 bit) */
+#define B53_VLAN_WRITE_25 0x08
+#define B53_VLAN_WRITE_65 0x0a
+#define B53_VLAN_READ 0x0c
+#define VA_MEMBER_MASK 0x3f
+#define VA_UNTAG_S_25 6
+#define VA_UNTAG_MASK_25 0x3f
+#define VA_UNTAG_S_65 7
+#define VA_UNTAG_MASK_65 0x1f
+#define VA_VID_HIGH_S 12
+#define VA_VID_HIGH_MASK (0xffff << VA_VID_HIGH_S)
+#define VA_VALID_25 BIT(20)
+#define VA_VALID_25_R4 BIT(24)
+#define VA_VALID_65 BIT(14)
+
+/* VLAN Port Default Tag (16 bit) */
+#define B53_VLAN_PORT_DEF_TAG(i) (0x10 + 2 * (i))
+
+/*************************************************************************
+ * Jumbo Frame Page Registers
+ *************************************************************************/
+
+/* Jumbo Enable Port Mask (bit i == port i enabled) (32 bit) */
+#define B53_JUMBO_PORT_MASK 0x01
+#define B53_JUMBO_PORT_MASK_63XX 0x04
+#define JPM_10_100_JUMBO_EN BIT(24) /* GigE always enabled */
+
+/* Good Frame Max Size without 802.1Q TAG (16 bit) */
+#define B53_JUMBO_MAX_SIZE 0x05
+#define B53_JUMBO_MAX_SIZE_63XX 0x08
+#define JMS_MIN_SIZE 1518
+#define JMS_MAX_SIZE 9724
+
+/*************************************************************************
+ * CFP Configuration Page Registers
+ *************************************************************************/
+
+/* CFP Control Register with ports map (8 bit) */
+#define B53_CFP_CTRL 0x00
+
+#endif /* !__B53_REGS_H */
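The offsets above are only half of the story; access goes through the page-aware accessors declared in b53_priv.h. A minimal sketch of how a caller might program a port's default 802.1Q tag with them, assuming the b53_write16() helper and the B53_VLAN_PAGE page number defined elsewhere in this series (the wrapper function itself is purely illustrative):

/* Illustrative only: make VID 5 the default (PVID) tag on a given port.
 * b53_write16() and B53_VLAN_PAGE are assumed from b53_priv.h/b53_regs.h
 * in this series; the return value follows the usual 0/negative convention.
 */
static int example_set_default_vid(struct b53_device *dev, int port, u16 vid)
{
	return b53_write16(dev, B53_VLAN_PAGE,
			   B53_VLAN_PORT_DEF_TAG(port), vid);
}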
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
new file mode 100644
index 0000000..2bda0b5
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -0,0 +1,331 @@
+/*
+ * B53 register access through SPI
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+#define B53_SPI_DATA 0xf0
+
+#define B53_SPI_STATUS 0xfe
+#define B53_SPI_CMD_SPIF BIT(7)
+#define B53_SPI_CMD_RACK BIT(5)
+
+#define B53_SPI_CMD_READ 0x00
+#define B53_SPI_CMD_WRITE 0x01
+#define B53_SPI_CMD_NORMAL 0x60
+#define B53_SPI_CMD_FAST 0x10
+
+#define B53_SPI_PAGE_SELECT 0xff
+
+static inline int b53_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val,
+ unsigned int len)
+{
+ u8 txbuf[2];
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_READ;
+ txbuf[1] = reg;
+
+ return spi_write_then_read(spi, txbuf, 2, val, len);
+}
+
+static inline int b53_spi_clear_status(struct spi_device *spi)
+{
+ unsigned int i;
+ u8 rxbuf;
+ int ret;
+
+ for (i = 0; i < 10; i++) {
+ ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ if (!(rxbuf & B53_SPI_CMD_SPIF))
+ break;
+
+ mdelay(1);
+ }
+
+ if (i == 10)
+ return -EIO;
+
+ return 0;
+}
+
+static inline int b53_spi_set_page(struct spi_device *spi, u8 page)
+{
+ u8 txbuf[3];
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = B53_SPI_PAGE_SELECT;
+ txbuf[2] = page;
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static inline int b53_prepare_reg_access(struct spi_device *spi, u8 page)
+{
+ int ret = b53_spi_clear_status(spi);
+
+ if (ret)
+ return ret;
+
+ return b53_spi_set_page(spi, page);
+}
+
+static int b53_spi_prepare_reg_read(struct spi_device *spi, u8 reg)
+{
+ u8 rxbuf;
+ int retry_count;
+ int ret;
+
+ ret = b53_spi_read_reg(spi, reg, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ for (retry_count = 0; retry_count < 10; retry_count++) {
+ ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ if (rxbuf & B53_SPI_CMD_RACK)
+ break;
+
+ mdelay(1);
+ }
+
+ if (retry_count == 10)
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_spi_read(struct b53_device *dev, u8 page, u8 reg, u8 *data,
+ unsigned int len)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ ret = b53_spi_prepare_reg_read(spi, reg);
+ if (ret)
+ return ret;
+
+ return b53_spi_read_reg(spi, B53_SPI_DATA, data, len);
+}
+
+static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ return b53_spi_read(dev, page, reg, val, 1);
+}
+
+static int b53_spi_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ int ret = b53_spi_read(dev, page, reg, (u8 *)val, 2);
+
+ if (!ret)
+ *val = le16_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ int ret = b53_spi_read(dev, page, reg, (u8 *)val, 4);
+
+ if (!ret)
+ *val = le32_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret;
+
+ *val = 0;
+ ret = b53_spi_read(dev, page, reg, (u8 *)val, 6);
+ if (!ret)
+ *val = le64_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret = b53_spi_read(dev, page, reg, (u8 *)val, 8);
+
+ if (!ret)
+ *val = le64_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[3];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ txbuf[2] = value;
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write16(struct b53_device *dev, u8 page, u8 reg, u16 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[4];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le16(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write32(struct b53_device *dev, u8 page, u8 reg, u32 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[6];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le32(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write48(struct b53_device *dev, u8 page, u8 reg, u64 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[10];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le64(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf) - 2);
+}
+
+static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[10];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le64(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static struct b53_io_ops b53_spi_ops = {
+ .read8 = b53_spi_read8,
+ .read16 = b53_spi_read16,
+ .read32 = b53_spi_read32,
+ .read48 = b53_spi_read48,
+ .read64 = b53_spi_read64,
+ .write8 = b53_spi_write8,
+ .write16 = b53_spi_write16,
+ .write32 = b53_spi_write32,
+ .write48 = b53_spi_write48,
+ .write64 = b53_spi_write64,
+};
+
+static int b53_spi_probe(struct spi_device *spi)
+{
+ struct b53_device *dev;
+ int ret;
+
+ dev = b53_switch_alloc(&spi->dev, &b53_spi_ops, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ ret = b53_switch_register(dev);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static int b53_spi_remove(struct spi_device *spi)
+{
+ struct b53_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static struct spi_driver b53_spi_driver = {
+ .driver = {
+ .name = "b53-switch",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = b53_spi_probe,
+ .remove = b53_spi_remove,
+};
+
+module_spi_driver(b53_spi_driver);
+
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 SPI access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
new file mode 100644
index 0000000..70fd472
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -0,0 +1,415 @@
+/*
+ * B53 register access through Switch Register Access Bridge Registers
+ *
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+/* command and status register of the SRAB */
+#define B53_SRAB_CMDSTAT 0x2c
+#define B53_SRAB_CMDSTAT_RST BIT(2)
+#define B53_SRAB_CMDSTAT_WRITE BIT(1)
+#define B53_SRAB_CMDSTAT_GORDYN BIT(0)
+#define B53_SRAB_CMDSTAT_PAGE 24
+#define B53_SRAB_CMDSTAT_REG 16
+
+/* high order word of write data to switch register */
+#define B53_SRAB_WD_H 0x30
+
+/* low order word of write data to switch register */
+#define B53_SRAB_WD_L 0x34
+
+/* high order word of read data from switch register */
+#define B53_SRAB_RD_H 0x38
+
+/* low order word of read data from switch register */
+#define B53_SRAB_RD_L 0x3c
+
+/* command and status register of the SRAB */
+#define B53_SRAB_CTRLS 0x40
+#define B53_SRAB_CTRLS_RCAREQ BIT(3)
+#define B53_SRAB_CTRLS_RCAGNT BIT(4)
+#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6)
+
+/* the register captures interrupt pulses from the switch */
+#define B53_SRAB_INTR 0x44
+#define B53_SRAB_INTR_P(x) BIT(x)
+#define B53_SRAB_SWITCH_PHY BIT(8)
+#define B53_SRAB_1588_SYNC BIT(9)
+#define B53_SRAB_IMP1_SLEEP_TIMER BIT(10)
+#define B53_SRAB_P7_SLEEP_TIMER BIT(11)
+#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12)
+
+struct b53_srab_priv {
+ void __iomem *regs;
+};
+
+static int b53_srab_request_grant(struct b53_device *dev)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ u32 ctrls;
+ int i;
+
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ ctrls |= B53_SRAB_CTRLS_RCAREQ;
+ writel(ctrls, regs + B53_SRAB_CTRLS);
+
+ for (i = 0; i < 20; i++) {
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ if (ctrls & B53_SRAB_CTRLS_RCAGNT)
+ break;
+ usleep_range(10, 100);
+ }
+ if (WARN_ON(i == 20))
+ return -EIO;
+
+ return 0;
+}
+
+static void b53_srab_release_grant(struct b53_device *dev)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ u32 ctrls;
+
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ ctrls &= ~B53_SRAB_CTRLS_RCAREQ;
+ writel(ctrls, regs + B53_SRAB_CTRLS);
+}
+
+static int b53_srab_op(struct b53_device *dev, u8 page, u8 reg, u32 op)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int i;
+ u32 cmdstat;
+
+ /* set register address */
+ cmdstat = (page << B53_SRAB_CMDSTAT_PAGE) |
+ (reg << B53_SRAB_CMDSTAT_REG) |
+ B53_SRAB_CMDSTAT_GORDYN |
+ op;
+ writel(cmdstat, regs + B53_SRAB_CMDSTAT);
+
+ /* check if operation completed */
+ for (i = 0; i < 5; ++i) {
+ cmdstat = readl(regs + B53_SRAB_CMDSTAT);
+ if (!(cmdstat & B53_SRAB_CMDSTAT_GORDYN))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (WARN_ON(i == 5))
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_srab_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L) & 0xff;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L) & 0xffff;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+ *val += ((u64)readl(regs + B53_SRAB_RD_H) & 0xffff) << 32;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+ *val += (u64)readl(regs + B53_SRAB_RD_H) << 32;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel((u32)value, regs + B53_SRAB_WD_L);
+ writel((u16)(value >> 32), regs + B53_SRAB_WD_H);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel((u32)value, regs + B53_SRAB_WD_L);
+ writel((u32)(value >> 32), regs + B53_SRAB_WD_H);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static struct b53_io_ops b53_srab_ops = {
+ .read8 = b53_srab_read8,
+ .read16 = b53_srab_read16,
+ .read32 = b53_srab_read32,
+ .read48 = b53_srab_read48,
+ .read64 = b53_srab_read64,
+ .write8 = b53_srab_write8,
+ .write16 = b53_srab_write16,
+ .write32 = b53_srab_write32,
+ .write48 = b53_srab_write48,
+ .write64 = b53_srab_write64,
+};
+
+static int b53_srab_probe(struct platform_device *pdev)
+{
+ struct b53_srab_priv *priv;
+ struct b53_device *dev;
+ struct resource *r;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv);
+ if (!dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dev);
+
+ return b53_switch_register(dev);
+}
+
+static int b53_srab_remove(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static const struct of_device_id b53_srab_of_match[] = {
+ { .compatible = "brcm,bcm53010-srab" },
+ { .compatible = "brcm,bcm53011-srab" },
+ { .compatible = "brcm,bcm53012-srab" },
+ { .compatible = "brcm,bcm53018-srab" },
+ { .compatible = "brcm,bcm53019-srab" },
+ { .compatible = "brcm,bcm5301x-srab" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver b53_srab_driver = {
+ .probe = b53_srab_probe,
+ .remove = b53_srab_remove,
+ .driver = {
+ .name = "b53-srab-switch",
+ .of_match_table = b53_srab_of_match,
+ },
+};
+
+module_platform_driver(b53_srab_driver);
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_DESCRIPTION("B53 Switch Register Access Bridge Registers (SRAB) access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 10ddd5a..cd1d630 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
+#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
@@ -460,19 +461,13 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
return 0;
}
-/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
- * flush for that port.
- */
-static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
+static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
unsigned int timeout = 1000;
u32 reg;
- core_writel(priv, port, CORE_FAST_AGE_PORT);
-
reg = core_readl(priv, CORE_FAST_AGE_CTRL);
- reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
+ reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
core_writel(priv, reg, CORE_FAST_AGE_CTRL);
do {
@@ -491,13 +486,98 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
return 0;
}
+/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
+ * flush for that port.
+ */
+static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+ core_writel(priv, port, CORE_FAST_AGE_PORT);
+
+ return bcm_sf2_fast_age_op(priv);
+}
+
+static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
+{
+ core_writel(priv, vid, CORE_FAST_AGE_VID);
+
+ return bcm_sf2_fast_age_op(priv);
+}
+
+static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
+{
+ unsigned int timeout = 10;
+ u32 reg;
+
+ do {
+ reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
+ if (!(reg & ARLA_VTBL_STDN))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
+{
+ core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
+
+ return bcm_sf2_vlan_op_wait(priv);
+}
+
+static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
+ struct bcm_sf2_vlan *vlan)
+{
+ int ret;
+
+ core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
+ core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
+ CORE_ARLA_VTBL_ENTRY);
+
+ ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
+ if (ret)
+ pr_err("failed to write VLAN entry\n");
+}
+
+static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
+ struct bcm_sf2_vlan *vlan)
+{
+ u32 entry;
+ int ret;
+
+ core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
+
+ ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
+ if (ret)
+ return ret;
+
+ entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
+ vlan->members = entry & FWD_MAP_MASK;
+ vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;
+
+ return 0;
+}
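The two helpers above give the usual read-modify-write cycle on the hardware VLAN table: read the entry into the cached bcm_sf2_vlan, adjust the members/untag bitmaps, and write it back, roughly as in this sketch (the port and VID values are arbitrary, and the function itself is illustrative only):

/* Sketch only: add port 1 as a tagged member of VLAN 100. */
static int example_join_vlan(struct bcm_sf2_priv *priv)
{
	struct bcm_sf2_vlan *vl = &priv->vlans[100];
	int ret;

	ret = bcm_sf2_get_vlan_entry(priv, 100, vl);
	if (ret)
		return ret;

	vl->members |= BIT(1);
	vl->untag &= ~BIT(1);
	bcm_sf2_set_vlan_entry(priv, 100, vl);

	return 0;
}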
+
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ s8 cpu_port = ds->dst->cpu_port;
unsigned int i;
u32 reg, p_ctl;
+ /* Take this port out of the "join all VLANs" map since we will have
+ * proper VLAN entries from now on
+ */
+ reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
+
priv->port_sts[port].bridge_dev = bridge;
p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
@@ -529,6 +609,7 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
struct net_device *bridge = priv->port_sts[port].bridge_dev;
+ s8 cpu_port = ds->dst->cpu_port;
unsigned int i;
u32 reg, p_ctl;
@@ -552,6 +633,13 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
priv->port_sts[port].vlan_ctl_mask = p_ctl;
priv->port_sts[port].bridge_dev = NULL;
+
+ /* Make this port join all VLANs without VLAN entries */
+ reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
}
static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
@@ -804,7 +892,7 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
int (*cb)(struct switchdev_obj *obj))
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
- struct net_device *dev = ds->ports[port];
+ struct net_device *dev = ds->ports[port].netdev;
struct bcm_sf2_arl_entry results[2];
unsigned int count = 0;
int ret;
@@ -836,6 +924,66 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
+static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
+ int regnum, u16 val)
+{
+ int ret = 0;
+ u32 reg;
+
+ reg = reg_readl(priv, REG_SWITCH_CNTRL);
+ reg |= MDIO_MASTER_SEL;
+ reg_writel(priv, reg, REG_SWITCH_CNTRL);
+
+ /* Page << 8 | offset */
+ reg = 0x70;
+ reg <<= 2;
+ core_writel(priv, addr, reg);
+
+ /* Page << 8 | offset */
+ reg = 0x80 << 8 | regnum << 1;
+ reg <<= 2;
+
+ if (op)
+ ret = core_readl(priv, reg);
+ else
+ core_writel(priv, val, reg);
+
+ reg = reg_readl(priv, REG_SWITCH_CNTRL);
+ reg &= ~MDIO_MASTER_SEL;
+ reg_writel(priv, reg, REG_SWITCH_CNTRL);
+
+ return ret & 0xffff;
+}
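The two magic constants implement the switch's page/offset addressing over the memory-mapped core window: a core register appears to live at ((page << 8) | offset) << 2, with 0x70 selecting the register that holds the target PHY address and page 0x80 holding the indirectly accessed PHY registers. This reading is inferred from the shifts; the arithmetic below is a worked example only.

/* Worked example (illustrative only): forwarding a read of MII_BMSR
 * (regnum = 1) to the pseudo-PHY uses core offset
 *     ((0x80 << 8) | (1 << 1)) << 2 = 0x8002 << 2 = 0x20008
 * i.e. page 0x80, register offset 0x02, scaled by 4 because each
 * page/offset pair maps onto a 32-bit core register.
 */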
+
+static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct bcm_sf2_priv *priv = bus->priv;
+
+ /* Intercept reads from the Broadcom pseudo-PHY address; otherwise
+ * send them to our master MDIO bus controller
+ */
+ if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
+ return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
+ else
+ return mdiobus_read(priv->master_mii_bus, addr, regnum);
+}
+
+static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct bcm_sf2_priv *priv = bus->priv;
+
+ /* Intercept writes to the Broadcom pseudo-PHY address; otherwise
+ * send them to our master MDIO bus controller
+ */
+ if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
+ bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
+ else
+ mdiobus_write(priv->master_mii_bus, addr, regnum, val);
+
+ return 0;
+}
+
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
struct bcm_sf2_priv *priv = dev_id;
@@ -932,133 +1080,70 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
}
}
-static int bcm_sf2_sw_setup(struct dsa_switch *ds)
+static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
- const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
struct bcm_sf2_priv *priv = ds_to_priv(ds);
struct device_node *dn;
- void __iomem **base;
- unsigned int port;
- unsigned int i;
- u32 reg, rev;
- int ret;
-
- spin_lock_init(&priv->indir_lock);
- mutex_init(&priv->stats_mutex);
-
- /* All the interesting properties are at the parent device_node
- * level
- */
- dn = ds->cd->of_node->parent;
- bcm_sf2_identify_ports(priv, ds->cd->of_node);
-
- priv->irq0 = irq_of_parse_and_map(dn, 0);
- priv->irq1 = irq_of_parse_and_map(dn, 1);
-
- base = &priv->core;
- for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- *base = of_iomap(dn, i);
- if (*base == NULL) {
- pr_err("unable to find register: %s\n", reg_names[i]);
- ret = -ENOMEM;
- goto out_unmap;
- }
- base++;
- }
-
- ret = bcm_sf2_sw_rst(priv);
- if (ret) {
- pr_err("unable to software reset switch: %d\n", ret);
- goto out_unmap;
- }
-
- /* Disable all interrupts and request them */
- bcm_sf2_intr_disable(priv);
-
- ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
- "switch_0", priv);
- if (ret < 0) {
- pr_err("failed to request switch_0 IRQ\n");
- goto out_unmap;
- }
-
- ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
- "switch_1", priv);
- if (ret < 0) {
- pr_err("failed to request switch_1 IRQ\n");
- goto out_free_irq0;
- }
-
- /* Reset the MIB counters */
- reg = core_readl(priv, CORE_GMNCFGCFG);
- reg |= RST_MIB_CNT;
- core_writel(priv, reg, CORE_GMNCFGCFG);
- reg &= ~RST_MIB_CNT;
- core_writel(priv, reg, CORE_GMNCFGCFG);
-
- /* Get the maximum number of ports for this switch */
- priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
- if (priv->hw_params.num_ports > DSA_MAX_PORTS)
- priv->hw_params.num_ports = DSA_MAX_PORTS;
-
- /* Assume a single GPHY setup if we can't read that property */
- if (of_property_read_u32(dn, "brcm,num-gphy",
- &priv->hw_params.num_gphy))
- priv->hw_params.num_gphy = 1;
-
- /* Enable all valid ports and disable those unused */
- for (port = 0; port < priv->hw_params.num_ports; port++) {
- /* IMP port receives special treatment */
- if ((1 << port) & ds->enabled_port_mask)
- bcm_sf2_port_setup(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- bcm_sf2_imp_setup(ds, port);
- else
- bcm_sf2_port_disable(ds, port, NULL);
- }
-
- /* Include the pseudo-PHY address and the broadcast PHY address to
- * divert reads towards our workaround. This is only required for
- * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
- * that we can use the regular SWITCH_MDIO master controller instead.
+ static int index;
+ int err;
+
+ /* Find our integrated MDIO bus node */
+ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ priv->master_mii_bus = of_mdio_find_bus(dn);
+ if (!priv->master_mii_bus)
+ return -EPROBE_DEFER;
+
+ get_device(&priv->master_mii_bus->dev);
+ priv->master_mii_dn = dn;
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ if (!priv->slave_mii_bus)
+ return -ENOMEM;
+
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "sf2 slave mii";
+ priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
+ priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
+ snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
+ index++);
+ priv->slave_mii_bus->dev.of_node = dn;
+
+ /* Include the pseudo-PHY address to divert reads towards our
+ * workaround. This is only required for 7445D0, since 7445E0
+ * disconnects the internal switch pseudo-PHY such that we can use the
+ * regular SWITCH_MDIO master controller instead.
*
- * By default, DSA initializes ds->phys_mii_mask to
- * ds->enabled_port_mask to have a 1:1 mapping between Port address
- * and PHY address in order to utilize the slave_mii_bus instance to
- * read from Port PHYs. This is not what we want here, so we
- * initialize phys_mii_mask 0 to always utilize the "master" MDIO
- * bus backed by the "mdio-unimac" driver.
+ * Here we flag the pseudo-PHY as needing special treatment; all other
+ * PHY reads/writes go to the master MDIO bus controller that comes
+ * with this switch, backed by the "mdio-unimac" driver.
*/
if (of_machine_is_compatible("brcm,bcm7445d0"))
- ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+ priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
else
- ds->phys_mii_mask = 0;
+ priv->indir_phy_mask = 0;
- rev = reg_readl(priv, REG_SWITCH_REVISION);
- priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
- SWITCH_TOP_REV_MASK;
- priv->hw_params.core_rev = (rev & SF2_REV_MASK);
+ ds->phys_mii_mask = priv->indir_phy_mask;
+ ds->slave_mii_bus = priv->slave_mii_bus;
+ priv->slave_mii_bus->parent = ds->dev->parent;
+ priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
- rev = reg_readl(priv, REG_PHY_REVISION);
- priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
+ if (dn)
+ err = of_mdiobus_register(priv->slave_mii_bus, dn);
+ else
+ err = mdiobus_register(priv->slave_mii_bus);
- pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
- priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
- priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
- priv->core, priv->irq0, priv->irq1);
+ if (err)
+ of_node_put(dn);
- return 0;
+ return err;
+}
-out_free_irq0:
- free_irq(priv->irq0, priv);
-out_unmap:
- base = &priv->core;
- for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- if (*base)
- iounmap(*base);
- base++;
- }
- return ret;
+static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
+{
+ mdiobus_unregister(priv->slave_mii_bus);
+ if (priv->master_mii_dn)
+ of_node_put(priv->master_mii_dn);
}
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
@@ -1078,68 +1163,6 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
return priv->hw_params.gphy_rev;
}
-static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
- int regnum, u16 val)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- int ret = 0;
- u32 reg;
-
- reg = reg_readl(priv, REG_SWITCH_CNTRL);
- reg |= MDIO_MASTER_SEL;
- reg_writel(priv, reg, REG_SWITCH_CNTRL);
-
- /* Page << 8 | offset */
- reg = 0x70;
- reg <<= 2;
- core_writel(priv, addr, reg);
-
- /* Page << 8 | offset */
- reg = 0x80 << 8 | regnum << 1;
- reg <<= 2;
-
- if (op)
- ret = core_readl(priv, reg);
- else
- core_writel(priv, val, reg);
-
- reg = reg_readl(priv, REG_SWITCH_CNTRL);
- reg &= ~MDIO_MASTER_SEL;
- reg_writel(priv, reg, REG_SWITCH_CNTRL);
-
- return ret & 0xffff;
-}
-
-static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
-{
- /* Intercept reads from the MDIO broadcast address or Broadcom
- * pseudo-PHY address
- */
- switch (addr) {
- case 0:
- case BRCM_PSEUDO_PHY_ADDR:
- return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
- default:
- return 0xffff;
- }
-}
-
-static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
- u16 val)
-{
- /* Intercept writes to the MDIO broadcast address or Broadcom
- * pseudo-PHY address
- */
- switch (addr) {
- case 0:
- case BRCM_PSEUDO_PHY_ADDR:
- bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
- break;
- }
-
- return 0;
-}
-
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
@@ -1248,7 +1271,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
* state machine and make it go in PHY_FORCING state instead.
*/
if (!status->link)
- netif_carrier_off(ds->ports[port]);
+ netif_carrier_off(ds->ports[port].netdev);
status->duplex = 1;
} else {
status->link = 1;
@@ -1370,14 +1393,309 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
return p->ethtool_ops->set_wol(p, wol);
}
+static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
+{
+ u32 mgmt, vc0, vc1, vc4, vc5;
+
+ mgmt = core_readl(priv, CORE_SWMODE);
+ vc0 = core_readl(priv, CORE_VLAN_CTRL0);
+ vc1 = core_readl(priv, CORE_VLAN_CTRL1);
+ vc4 = core_readl(priv, CORE_VLAN_CTRL4);
+ vc5 = core_readl(priv, CORE_VLAN_CTRL5);
+
+ mgmt &= ~SW_FWDG_MODE;
+
+ if (enable) {
+ vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
+ vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
+ vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
+ vc4 |= INGR_VID_CHK_DROP;
+ vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
+ } else {
+ vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
+ vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
+ vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
+ vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
+ vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
+ }
+
+ core_writel(priv, vc0, CORE_VLAN_CTRL0);
+ core_writel(priv, vc1, CORE_VLAN_CTRL1);
+ core_writel(priv, 0, CORE_VLAN_CTRL3);
+ core_writel(priv, vc4, CORE_VLAN_CTRL4);
+ core_writel(priv, vc5, CORE_VLAN_CTRL5);
+ core_writel(priv, mgmt, CORE_SWMODE);
+}
+
+static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ unsigned int port;
+
+ /* Clear all VLANs */
+ bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);
+
+ for (port = 0; port < priv->hw_params.num_ports; port++) {
+ if (!((1 << port) & ds->enabled_port_mask))
+ continue;
+
+ core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
+ }
+}
+
+static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ return 0;
+}
+
+static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+ bcm_sf2_enable_vlan(priv, true);
+
+ return 0;
+}
+
+static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ s8 cpu_port = ds->dst->cpu_port;
+ struct bcm_sf2_vlan *vl;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &priv->vlans[vid];
+
+ bcm_sf2_get_vlan_entry(priv, vid, vl);
+
+ vl->members |= BIT(port) | BIT(cpu_port);
+ if (untagged)
+ vl->untag |= BIT(port) | BIT(cpu_port);
+ else
+ vl->untag &= ~(BIT(port) | BIT(cpu_port));
+
+ bcm_sf2_set_vlan_entry(priv, vid, vl);
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+ }
+
+ if (pvid) {
+ core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
+ core_writel(priv, vlan->vid_end,
+ CORE_DEFAULT_1Q_TAG_P(cpu_port));
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+ }
+}
+
+static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ s8 cpu_port = ds->dst->cpu_port;
+ struct bcm_sf2_vlan *vl;
+ u16 vid, pvid;
+ int ret;
+
+ pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &priv->vlans[vid];
+
+ ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
+ if (ret)
+ return ret;
+
+ vl->members &= ~BIT(port);
+ if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
+ vl->members = 0;
+ if (pvid == vid)
+ pvid = 0;
+ if (untagged) {
+ vl->untag &= ~BIT(port);
+ if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
+ vl->untag = 0;
+ }
+
+ bcm_sf2_set_vlan_entry(priv, vid, vl);
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+ }
+
+ core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
+ core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+
+ return 0;
+}
+
+static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_port_status *p = &priv->port_sts[port];
+ struct bcm_sf2_vlan *vl;
+ u16 vid, pvid;
+ int err = 0;
+
+ pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
+
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ vl = &priv->vlans[vid];
+
+ if (!(vl->members & BIT(port)))
+ continue;
+
+ vlan->vid_begin = vlan->vid_end = vid;
+ vlan->flags = 0;
+
+ if (vl->untag & BIT(port))
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+ if (p->pvid == vid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int bcm_sf2_sw_setup(struct dsa_switch *ds)
+{
+ const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct device_node *dn;
+ void __iomem **base;
+ unsigned int port;
+ unsigned int i;
+ u32 reg, rev;
+ int ret;
+
+ spin_lock_init(&priv->indir_lock);
+ mutex_init(&priv->stats_mutex);
+
+ /* All the interesting properties are at the parent device_node
+ * level
+ */
+ dn = ds->cd->of_node->parent;
+ bcm_sf2_identify_ports(priv, ds->cd->of_node);
+
+ priv->irq0 = irq_of_parse_and_map(dn, 0);
+ priv->irq1 = irq_of_parse_and_map(dn, 1);
+
+ base = &priv->core;
+ for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
+ *base = of_iomap(dn, i);
+ if (*base == NULL) {
+ pr_err("unable to find register: %s\n", reg_names[i]);
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+ base++;
+ }
+
+ ret = bcm_sf2_sw_rst(priv);
+ if (ret) {
+ pr_err("unable to software reset switch: %d\n", ret);
+ goto out_unmap;
+ }
+
+ ret = bcm_sf2_mdio_register(ds);
+ if (ret) {
+ pr_err("failed to register MDIO bus\n");
+ goto out_unmap;
+ }
+
+ /* Disable all interrupts and request them */
+ bcm_sf2_intr_disable(priv);
+
+ ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
+ "switch_0", priv);
+ if (ret < 0) {
+ pr_err("failed to request switch_0 IRQ\n");
+ goto out_unmap;
+ }
+
+ ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
+ "switch_1", priv);
+ if (ret < 0) {
+ pr_err("failed to request switch_1 IRQ\n");
+ goto out_free_irq0;
+ }
+
+ /* Reset the MIB counters */
+ reg = core_readl(priv, CORE_GMNCFGCFG);
+ reg |= RST_MIB_CNT;
+ core_writel(priv, reg, CORE_GMNCFGCFG);
+ reg &= ~RST_MIB_CNT;
+ core_writel(priv, reg, CORE_GMNCFGCFG);
+
+ /* Get the maximum number of ports for this switch */
+ priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
+ if (priv->hw_params.num_ports > DSA_MAX_PORTS)
+ priv->hw_params.num_ports = DSA_MAX_PORTS;
+
+ /* Assume a single GPHY setup if we can't read that property */
+ if (of_property_read_u32(dn, "brcm,num-gphy",
+ &priv->hw_params.num_gphy))
+ priv->hw_params.num_gphy = 1;
+
+ /* Enable all valid ports and disable those unused */
+ for (port = 0; port < priv->hw_params.num_ports; port++) {
+ /* IMP port receives special treatment */
+ if ((1 << port) & ds->enabled_port_mask)
+ bcm_sf2_port_setup(ds, port, NULL);
+ else if (dsa_is_cpu_port(ds, port))
+ bcm_sf2_imp_setup(ds, port);
+ else
+ bcm_sf2_port_disable(ds, port, NULL);
+ }
+
+ bcm_sf2_sw_configure_vlan(ds);
+
+ rev = reg_readl(priv, REG_SWITCH_REVISION);
+ priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
+ SWITCH_TOP_REV_MASK;
+ priv->hw_params.core_rev = (rev & SF2_REV_MASK);
+
+ rev = reg_readl(priv, REG_PHY_REVISION);
+ priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
+
+ pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
+ priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
+ priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
+ priv->core, priv->irq0, priv->irq1);
+
+ return 0;
+
+out_free_irq0:
+ free_irq(priv->irq0, priv);
+out_unmap:
+ base = &priv->core;
+ for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
+ if (*base)
+ iounmap(*base);
+ base++;
+ }
+ bcm_sf2_mdio_unregister(priv);
+ return ret;
+}
+
static struct dsa_switch_driver bcm_sf2_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_BRCM,
.probe = bcm_sf2_sw_drv_probe,
.setup = bcm_sf2_sw_setup,
.set_addr = bcm_sf2_sw_set_addr,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .phy_read = bcm_sf2_sw_phy_read,
- .phy_write = bcm_sf2_sw_phy_write,
.get_strings = bcm_sf2_sw_get_strings,
.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
.get_sset_count = bcm_sf2_sw_get_sset_count,
@@ -1398,6 +1716,11 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
.port_fdb_add = bcm_sf2_sw_fdb_add,
.port_fdb_del = bcm_sf2_sw_fdb_del,
.port_fdb_dump = bcm_sf2_sw_fdb_dump,
+ .port_vlan_filtering = bcm_sf2_sw_vlan_filtering,
+ .port_vlan_prepare = bcm_sf2_sw_vlan_prepare,
+ .port_vlan_add = bcm_sf2_sw_vlan_add,
+ .port_vlan_del = bcm_sf2_sw_vlan_del,
+ .port_vlan_dump = bcm_sf2_sw_vlan_dump,
};
static int __init bcm_sf2_init(void)
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 200b1f5..463bed8 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -21,6 +21,7 @@
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
@@ -50,6 +51,7 @@ struct bcm_sf2_port_status {
struct ethtool_eee eee;
u32 vlan_ctl_mask;
+ u16 pvid;
struct net_device *bridge_dev;
};
@@ -63,6 +65,11 @@ struct bcm_sf2_arl_entry {
u8 is_static:1;
};
+struct bcm_sf2_vlan {
+ u16 members;
+ u16 untag;
+};
+
static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst)
{
unsigned int i;
@@ -142,6 +149,15 @@ struct bcm_sf2_priv {
/* Bitmask of ports having an integrated PHY */
unsigned int int_phy_mask;
+
+ /* Master and slave MDIO bus controller */
+ unsigned int indir_phy_mask;
+ struct device_node *master_mii_dn;
+ struct mii_bus *slave_mii_bus;
+ struct mii_bus *master_mii_bus;
+
+ /* Cache of programmed VLANs */
+ struct bcm_sf2_vlan vlans[VLAN_N_VID];
};
struct bcm_sf2_hw_stats {
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 97780d4..9f2a9cb 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -274,6 +274,23 @@
#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40))
#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40))
+#define CORE_ARLA_VTBL_RWCTRL 0x1600
+#define ARLA_VTBL_CMD_WRITE 0
+#define ARLA_VTBL_CMD_READ 1
+#define ARLA_VTBL_CMD_CLEAR 2
+#define ARLA_VTBL_STDN (1 << 7)
+
+#define CORE_ARLA_VTBL_ADDR 0x1604
+#define VTBL_ADDR_INDEX_MASK 0xfff
+
+#define CORE_ARLA_VTBL_ENTRY 0x160c
+#define FWD_MAP_MASK 0x1ff
+#define UNTAG_MAP_MASK 0x1ff
+#define UNTAG_MAP_SHIFT 9
+#define MSTP_INDEX_MASK 0x7
+#define MSTP_INDEX_SHIFT 18
+#define FWD_MODE (1 << 21)
+
#define CORE_MEM_PSM_VDD_CTRL 0x2380
#define P_TXQ_PSM_VDD_SHIFT 2
#define P_TXQ_PSM_VDD_MASK 0x3
@@ -287,6 +304,59 @@
#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
#define PORT_VLAN_CTRL_MASK 0x1ff
+#define CORE_VLAN_CTRL0 0xd000
+#define CHANGE_1P_VID_INNER (1 << 0)
+#define CHANGE_1P_VID_OUTER (1 << 1)
+#define CHANGE_1Q_VID (1 << 3)
+#define VLAN_LEARN_MODE_SVL (0 << 5)
+#define VLAN_LEARN_MODE_IVL (3 << 5)
+#define VLAN_EN (1 << 7)
+
+#define CORE_VLAN_CTRL1 0xd004
+#define EN_RSV_MCAST_FWDMAP (1 << 2)
+#define EN_RSV_MCAST_UNTAG (1 << 3)
+#define EN_IPMC_BYPASS_FWDMAP (1 << 5)
+#define EN_IPMC_BYPASS_UNTAG (1 << 6)
+
+#define CORE_VLAN_CTRL2 0xd008
+#define EN_MIIM_BYPASS_V_FWDMAP (1 << 2)
+#define EN_GMRP_GVRP_V_FWDMAP (1 << 5)
+#define EN_GMRP_GVRP_UNTAG_MAP (1 << 6)
+
+#define CORE_VLAN_CTRL3 0xd00c
+#define EN_DROP_NON1Q_MASK 0x1ff
+
+#define CORE_VLAN_CTRL4 0xd014
+#define RESV_MCAST_FLOOD (1 << 1)
+#define EN_DOUBLE_TAG_MASK 0x3
+#define EN_DOUBLE_TAG_SHIFT 2
+#define EN_MGE_REV_GMRP (1 << 4)
+#define EN_MGE_REV_GVRP (1 << 5)
+#define INGR_VID_CHK_SHIFT 6
+#define INGR_VID_CHK_MASK 0x3
+#define INGR_VID_CHK_FWD (0 << INGR_VID_CHK_SHIFT)
+#define INGR_VID_CHK_DROP (1 << INGR_VID_CHK_SHIFT)
+#define INGR_VID_CHK_NO_CHK (2 << INGR_VID_CHK_SHIFT)
+#define INGR_VID_CHK_VID_VIOL_IMP (3 << INGR_VID_CHK_SHIFT)
+
+#define CORE_VLAN_CTRL5 0xd018
+#define EN_CPU_RX_BYP_INNER_CRCCHCK (1 << 0)
+#define EN_VID_FFF_FWD (1 << 2)
+#define DROP_VTABLE_MISS (1 << 3)
+#define EGRESS_DIR_FRM_BYP_TRUNK_EN (1 << 4)
+#define PRESV_NON1Q (1 << 6)
+
+#define CORE_VLAN_CTRL6 0xd01c
+#define STRICT_SFD_DETECT (1 << 0)
+#define DIS_ARL_BUST_LMIT (1 << 4)
+
+#define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8))
+#define CFI_SHIFT 12
+#define PRI_SHIFT 13
+#define PRI_MASK 0x7
+
+#define CORE_JOIN_ALL_VLAN_EN 0xd140
+
#define CORE_EEE_EN_CTRL 0x24800
#define CORE_EEE_LPI_INDICATE 0x24810
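The CFI/PRI shifts grouped under CORE_DEFAULT_1Q_TAG_P suggest the per-port default tag register holds a complete 802.1Q tag with the VID in the low 12 bits; the driver itself currently writes only a VID there. Under that assumption a full tag would be composed roughly as in this sketch (priority, VID and the helper are illustrative; priv and port are the usual bcm_sf2.c context):

/* Sketch only: default tag of priority 5, CFI 0, VID 100, using the
 * layout implied by the PRI_SHIFT/CFI_SHIFT definitions above.
 */
static void example_set_default_tag(struct bcm_sf2_priv *priv, int port)
{
	u32 tag = ((5 & PRI_MASK) << PRI_SHIFT) | (0 << CFI_SHIFT) | 100;

	core_writel(priv, tag, CORE_DEFAULT_1Q_TAG_P(port));
}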
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index ba9dfc9..ee06055 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/mdio.h>
#include <linux/module.h>
+#include <linux/of_mdio.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/phy.h>
@@ -238,16 +239,16 @@ int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
return mv88e6xxx_set_addr_direct(ds, addr);
}
-static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
- int regnum)
+static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_priv_state *ps,
+ int addr, int regnum)
{
if (addr >= 0)
return _mv88e6xxx_reg_read(ps, addr, regnum);
return 0xffff;
}
-static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
- int regnum, u16 val)
+static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_priv_state *ps,
+ int addr, int regnum, u16 val)
{
if (addr >= 0)
return _mv88e6xxx_reg_write(ps, addr, regnum, val);
@@ -288,18 +289,18 @@ static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
int ret, err;
unsigned long timeout;
- ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
if (ret < 0)
return ret;
- err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
- ret | GLOBAL_CONTROL_PPU_ENABLE);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+ ret | GLOBAL_CONTROL_PPU_ENABLE);
if (err)
return err;
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
if (ret < 0)
return ret;
@@ -317,11 +318,16 @@ static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
struct mv88e6xxx_priv_state *ps;
ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
+
+ mutex_lock(&ps->smi_mutex);
+
if (mutex_trylock(&ps->ppu_mutex)) {
if (mv88e6xxx_ppu_enable(ps) == 0)
ps->ppu_disabled = 0;
mutex_unlock(&ps->ppu_mutex);
}
+
+ mutex_unlock(&ps->smi_mutex);
}
static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
@@ -373,8 +379,8 @@ void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}
-static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
- int regnum)
+static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum)
{
int ret;
@@ -387,8 +393,8 @@ static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
return ret;
}
-static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
- int regnum, u16 val)
+static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum, u16 val)
{
int ret;
@@ -824,7 +830,7 @@ static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
return ret;
}
-static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
+static int mv88e6xxx_mdio_wait(struct mv88e6xxx_priv_state *ps)
{
return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_BUSY);
@@ -1071,7 +1077,7 @@ static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
GLOBAL_ATU_OP_BUSY);
}
-static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_priv_state *ps,
int addr, int regnum)
{
int ret;
@@ -1082,7 +1088,7 @@ static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
if (ret < 0)
return ret;
- ret = _mv88e6xxx_phy_wait(ps);
+ ret = mv88e6xxx_mdio_wait(ps);
if (ret < 0)
return ret;
@@ -1091,7 +1097,7 @@ static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
return ret;
}
-static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
+static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_priv_state *ps,
int addr, int regnum, u16 val)
{
int ret;
@@ -1104,7 +1110,7 @@ static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
regnum);
- return _mv88e6xxx_phy_wait(ps);
+ return mv88e6xxx_mdio_wait(ps);
}
static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
@@ -1118,7 +1124,7 @@ static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
mutex_lock(&ps->smi_mutex);
- reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
+ reg = mv88e6xxx_mdio_read_indirect(ps, port, 16);
if (reg < 0)
goto out;
@@ -1149,7 +1155,7 @@ static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
+ ret = mv88e6xxx_mdio_read_indirect(ps, port, 16);
if (ret < 0)
goto out;
@@ -1159,7 +1165,7 @@ static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
if (e->tx_lpi_enabled)
reg |= 0x0100;
- ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
+ ret = mv88e6xxx_mdio_write_indirect(ps, port, 16, reg);
out:
mutex_unlock(&ps->smi_mutex);
@@ -1322,7 +1328,7 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
if (ret)
return ret;
- netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
+ netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
mv88e6xxx_port_state_names[state],
mv88e6xxx_port_state_names[oldstate]);
}
@@ -1400,7 +1406,8 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
mutex_unlock(&ps->smi_mutex);
if (err)
- netdev_err(ds->ports[port], "failed to update state to %s\n",
+ netdev_err(ds->ports[port].netdev,
+ "failed to update state to %s\n",
mv88e6xxx_port_state_names[stp_state]);
}
@@ -1426,8 +1433,8 @@ static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
if (ret < 0)
return ret;
- netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
- pvid);
+ netdev_dbg(ds->ports[port].netdev,
+ "DefaultVID %d (was %d)\n", *new, pvid);
}
if (old)
@@ -1842,7 +1849,8 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
if (ret < 0)
return ret;
- netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
+ netdev_dbg(ds->ports[port].netdev,
+ "FID %d (was %d)\n", *new, fid);
}
if (old)
@@ -2023,7 +2031,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
ps->ports[port].bridge_dev)
break; /* same bridge, check next VLAN */
- netdev_warn(ds->ports[port],
+ netdev_warn(ds->ports[port].netdev,
"hardware VLAN %d already used by %s\n",
vlan.vid,
netdev_name(ps->ports[i].bridge_dev));
@@ -2073,7 +2081,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
if (ret < 0)
goto unlock;
- netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
+ netdev_dbg(ds->ports[port].netdev, "802.1Q Mode %s (was %s)\n",
mv88e6xxx_port_8021q_mode_names[new],
mv88e6xxx_port_8021q_mode_names[old]);
}
@@ -2142,11 +2150,12 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
- netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
+ netdev_err(ds->ports[port].netdev,
+ "failed to add VLAN %d%c\n",
vid, untagged ? 'u' : 't');
if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
- netdev_err(ds->ports[port], "failed to set PVID %d\n",
+ netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n",
vlan->vid_end);
mutex_unlock(&ps->smi_mutex);
@@ -2331,7 +2340,8 @@ static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
mutex_lock(&ps->smi_mutex);
if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
- netdev_err(ds->ports[port], "failed to load MAC address\n");
+ netdev_err(ds->ports[port].netdev,
+ "failed to load MAC address\n");
mutex_unlock(&ps->smi_mutex);
}
@@ -2532,39 +2542,40 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
for (i = 0; i < ps->info->num_ports; ++i)
if (i == port || ps->ports[i].bridge_dev == bridge)
if (_mv88e6xxx_port_based_vlan_map(ps, i))
- netdev_warn(ds->ports[i], "failed to remap\n");
+ netdev_warn(ds->ports[i].netdev,
+ "failed to remap\n");
mutex_unlock(&ps->smi_mutex);
}
-static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
- int port, int page, int reg, int val)
+static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_priv_state *ps,
+ int port, int page, int reg, int val)
{
int ret;
- ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
+ ret = mv88e6xxx_mdio_write_indirect(ps, port, 0x16, page);
if (ret < 0)
goto restore_page_0;
- ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
+ ret = mv88e6xxx_mdio_write_indirect(ps, port, reg, val);
restore_page_0:
- _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
+ mv88e6xxx_mdio_write_indirect(ps, port, 0x16, 0x0);
return ret;
}
-static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
- int port, int page, int reg)
+static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_priv_state *ps,
+ int port, int page, int reg)
{
int ret;
- ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
+ ret = mv88e6xxx_mdio_write_indirect(ps, port, 0x16, page);
if (ret < 0)
goto restore_page_0;
- ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
+ ret = mv88e6xxx_mdio_read_indirect(ps, port, reg);
restore_page_0:
- _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
+ mv88e6xxx_mdio_write_indirect(ps, port, 0x16, 0x0);
return ret;
}
@@ -2635,16 +2646,16 @@ static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
{
int ret;
- ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
- MII_BMCR);
+ ret = _mv88e6xxx_mdio_page_read(ps, REG_FIBER_SERDES,
+ PAGE_FIBER_SERDES, MII_BMCR);
if (ret < 0)
return ret;
if (ret & BMCR_PDOWN) {
ret &= ~BMCR_PDOWN;
- ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
- PAGE_FIBER_SERDES, MII_BMCR,
- ret);
+ ret = _mv88e6xxx_mdio_page_write(ps, REG_FIBER_SERDES,
+ PAGE_FIBER_SERDES, MII_BMCR,
+ ret);
}
return ret;
@@ -2715,11 +2726,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
mv88e6xxx_6320_family(ps)) {
- if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
- reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
- else
- reg |= PORT_CONTROL_FRAME_MODE_DSA;
- reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+ reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA |
+ PORT_CONTROL_FORWARD_UNKNOWN |
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
@@ -2727,7 +2735,6 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
- if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
reg |= PORT_CONTROL_EGRESS_ADD_TAG;
}
}
@@ -3014,9 +3021,8 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
for (i = 0; i < 32; i++) {
int nexthop = 0x1f;
- if (ps->ds->cd->rtable &&
- i != ps->ds->index && i < ps->ds->dst->pd->nr_chips)
- nexthop = ps->ds->cd->rtable[i] & 0x1f;
+ if (i != ds->index && i < DSA_MAX_SWITCHES)
+ nexthop = ds->rtable[i] & 0x1f;
err = _mv88e6xxx_reg_write(
ps, REG_GLOBAL2,
@@ -3125,13 +3131,11 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
int i;
ps->ds = ds;
+ ds->slave_mii_bus = ps->mdio_bus;
if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
mutex_init(&ps->eeprom_mutex);
- if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
- mv88e6xxx_ppu_state_init(ps);
-
mutex_lock(&ps->smi_mutex);
err = mv88e6xxx_switch_reset(ps);
@@ -3154,43 +3158,43 @@ unlock:
return err;
}
-int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
+int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, int reg)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
+ ret = _mv88e6xxx_mdio_page_read(ps, port, page, reg);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
- int reg, int val)
+int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
+ int reg, int val)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
+ ret = _mv88e6xxx_mdio_page_write(ps, port, page, reg, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
- int port)
+static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_priv_state *ps,
+ int port)
{
if (port >= 0 && port < ps->info->num_ports)
return port;
return -EINVAL;
}
-static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ps, port);
+ struct mv88e6xxx_priv_state *ps = bus->priv;
+ int addr = mv88e6xxx_port_to_mdio_addr(ps, port);
int ret;
if (addr < 0)
@@ -3199,21 +3203,21 @@ static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
mutex_lock(&ps->smi_mutex);
if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
- ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
+ ret = mv88e6xxx_mdio_read_ppu(ps, addr, regnum);
else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
- ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
+ ret = mv88e6xxx_mdio_read_indirect(ps, addr, regnum);
else
- ret = _mv88e6xxx_phy_read(ps, addr, regnum);
+ ret = mv88e6xxx_mdio_read_direct(ps, addr, regnum);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum,
- u16 val)
+static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum,
+ u16 val)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ps, port);
+ struct mv88e6xxx_priv_state *ps = bus->priv;
+ int addr = mv88e6xxx_port_to_mdio_addr(ps, port);
int ret;
if (addr < 0)
@@ -3222,16 +3226,76 @@ static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum,
mutex_lock(&ps->smi_mutex);
if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
- ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
+ ret = mv88e6xxx_mdio_write_ppu(ps, addr, regnum, val);
else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
- ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
+ ret = mv88e6xxx_mdio_write_indirect(ps, addr, regnum, val);
else
- ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);
+ ret = mv88e6xxx_mdio_write_direct(ps, addr, regnum, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
+static int mv88e6xxx_mdio_register(struct mv88e6xxx_priv_state *ps,
+ struct device_node *np)
+{
+ static int index;
+ struct mii_bus *bus;
+ int err;
+
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+ mv88e6xxx_ppu_state_init(ps);
+
+ if (np)
+ ps->mdio_np = of_get_child_by_name(np, "mdio");
+
+ bus = devm_mdiobus_alloc(ps->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->priv = (void *)ps;
+ if (np) {
+ bus->name = np->full_name;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name);
+ } else {
+ bus->name = "mv88e6xxx SMI";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++);
+ }
+
+ bus->read = mv88e6xxx_mdio_read;
+ bus->write = mv88e6xxx_mdio_write;
+ bus->parent = ps->dev;
+
+ if (ps->mdio_np)
+ err = of_mdiobus_register(bus, ps->mdio_np);
+ else
+ err = mdiobus_register(bus);
+ if (err) {
+ dev_err(ps->dev, "Cannot register MDIO bus (%d)\n", err);
+ goto out;
+ }
+ ps->mdio_bus = bus;
+
+ return 0;
+
+out:
+ if (ps->mdio_np)
+ of_node_put(ps->mdio_np);
+
+ return err;
+}
+
+static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_priv_state *ps)
+{
+ struct mii_bus *bus = ps->mdio_bus;
+
+ mdiobus_unregister(bus);
+
+ if (ps->mdio_np)
+ of_node_put(ps->mdio_np);
+}
+
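Once the internal MDIO bus is registered as a regular mii_bus, PHYs behind the switch are reachable through the standard mdiobus accessors, and DSA attaches slave ports via ds->slave_mii_bus (set in mv88e6xxx_setup() and mv88e6xxx_probe() above). A small consumer sketch, assuming bus is the ps->mdio_bus registered here:

#include <linux/mii.h>
#include <linux/phy.h>

/* Sketch only: check link state of the PHY at address 1 on the bus
 * registered above; mdiobus_read() dispatches to mv88e6xxx_mdio_read().
 */
static int example_phy_link_up(struct mii_bus *bus)
{
	int bmsr = mdiobus_read(bus, 1, MII_BMSR);

	if (bmsr < 0)
		return bmsr;
	return !!(bmsr & BMSR_LSTATUS);
}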
#ifdef CONFIG_NET_DSA_HWMON
static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
@@ -3244,37 +3308,37 @@ static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
+ ret = mv88e6xxx_mdio_write_direct(ps, 0x0, 0x16, 0x6);
if (ret < 0)
goto error;
/* Enable temperature sensor */
- ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
+ ret = mv88e6xxx_mdio_read_direct(ps, 0x0, 0x1a);
if (ret < 0)
goto error;
- ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
+ ret = mv88e6xxx_mdio_write_direct(ps, 0x0, 0x1a, ret | (1 << 5));
if (ret < 0)
goto error;
/* Wait for temperature to stabilize */
usleep_range(10000, 12000);
- val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
+ val = mv88e6xxx_mdio_read_direct(ps, 0x0, 0x1a);
if (val < 0) {
ret = val;
goto error;
}
/* Disable temperature sensor */
- ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret & ~(1 << 5));
+ ret = mv88e6xxx_mdio_write_direct(ps, 0x0, 0x1a, ret & ~(1 << 5));
if (ret < 0)
goto error;
*temp = ((val & 0x1f) - 5) * 5;
error:
- _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
+ mv88e6xxx_mdio_write_direct(ps, 0x0, 0x16, 0x0);
mutex_unlock(&ps->smi_mutex);
return ret;
}
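The conversion at the end of mv88e61xx_get_temp() turns the 5-bit raw sensor reading into degrees Celsius in 5-degree steps with an offset of five steps; restated as a sketch with a worked value (helper name is illustrative only):

/* Sketch of the raw-to-Celsius conversion used above.
 * Example: raw = 12 -> (12 - 5) * 5 = 35 degrees C.
 */
static int example_raw_to_celsius(int raw)
{
	return ((raw & 0x1f) - 5) * 5;
}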
@@ -3287,7 +3351,7 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
*temp = 0;
- ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
+ ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 27);
if (ret < 0)
return ret;
@@ -3320,7 +3384,7 @@ static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
*temp = 0;
- ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+ ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
if (ret < 0)
return ret;
@@ -3338,12 +3402,12 @@ static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
- ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+ ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
if (ret < 0)
return ret;
temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
- return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
- (ret & 0xe0ff) | (temp << 8));
+ return mv88e6xxx_mdio_page_write(ds, phy, 6, 26,
+ (ret & 0xe0ff) | (temp << 8));
}
static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
@@ -3357,7 +3421,7 @@ static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
*alarm = false;
- ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+ ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
if (ret < 0)
return ret;
@@ -3544,6 +3608,7 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
struct mii_bus *bus;
const char *name;
int id, prod_num, rev;
+ int err;
bus = dsa_host_dev_to_mii_bus(host_dev);
if (!bus)
@@ -3570,8 +3635,13 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
ps->bus = bus;
ps->sw_addr = sw_addr;
ps->info = info;
+ ps->dev = dsa_dev;
mutex_init(&ps->smi_mutex);
+ err = mv88e6xxx_mdio_register(ps, NULL);
+ if (err)
+ return NULL;
+
*priv = ps;
dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
@@ -3585,8 +3655,6 @@ struct dsa_switch_driver mv88e6xxx_switch_driver = {
.probe = mv88e6xxx_drv_probe,
.setup = mv88e6xxx_setup,
.set_addr = mv88e6xxx_set_addr,
- .phy_read = mv88e6xxx_phy_read,
- .phy_write = mv88e6xxx_phy_write,
.adjust_link = mv88e6xxx_adjust_link,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
@@ -3672,8 +3740,20 @@ int mv88e6xxx_probe(struct mdio_device *mdiodev)
!of_property_read_u32(np, "eeprom-length", &eeprom_len))
ps->eeprom_len = eeprom_len;
+ err = mv88e6xxx_mdio_register(ps, mdiodev->dev.of_node);
+ if (err)
+ return err;
+
+ ds->slave_mii_bus = ps->mdio_bus;
+
dev_set_drvdata(dev, ds);
+ err = dsa_register_switch(ds, mdiodev->dev.of_node);
+ if (err) {
+ mv88e6xxx_mdio_unregister(ps);
+ return err;
+ }
+
dev_info(dev, "switch 0x%x probed: %s, revision %u\n",
prod_num, ps->info->name, rev);
@@ -3685,7 +3765,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ dsa_unregister_switch(ds);
put_device(&ps->bus->dev);
+
+ mv88e6xxx_mdio_unregister(ps);
}
static const struct of_device_id mv88e6xxx_of_match[] = {
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 36d0e15..8221c3c 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -600,6 +600,12 @@ struct mv88e6xxx_priv_state {
/* set to size of eeprom if supported by the switch */
int eeprom_len;
+
+ /* Device node for the MDIO bus */
+ struct device_node *mdio_np;
+
+ /* And the MDIO bus itself */
+ struct mii_bus *mdio_bus;
};
enum stat_type {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index ee5f431..0ee34cc 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -246,6 +246,8 @@ err_dma_head:
err_drop:
dev_kfree_skb(skb);
+ net_dev->stats.tx_dropped++;
+ net_dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -284,6 +286,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
DMA_TO_DEVICE);
if (slot->skb) {
+ bgmac->net_dev->stats.tx_bytes += slot->skb->len;
+ bgmac->net_dev->stats.tx_packets++;
bytes_compl += slot->skb->len;
pkts_compl++;
@@ -464,6 +468,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
ring->start);
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_errors++;
break;
}
@@ -471,6 +476,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
ring->start);
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_length_errors++;
+ bgmac->net_dev->stats.rx_errors++;
break;
}
@@ -481,6 +488,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
if (unlikely(!skb)) {
bgmac_err(bgmac, "build_skb failed\n");
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_errors++;
break;
}
skb_put(skb, BGMAC_RX_FRAME_OFFSET +
@@ -490,6 +498,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+ bgmac->net_dev->stats.rx_bytes += len;
+ bgmac->net_dev->stats.rx_packets++;
napi_gro_receive(&bgmac->napi, skb);
handled++;
} while (0);
@@ -1382,6 +1392,127 @@ static const struct net_device_ops bgmac_netdev_ops = {
* ethtool_ops
**************************************************/
+struct bgmac_stat {
+ u8 size;
+ u32 offset;
+ const char *name;
+};
+
+static struct bgmac_stat bgmac_get_strings_stats[] = {
+ { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
+ { 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
+ { 8, BGMAC_TX_OCTETS, "tx_octets" },
+ { 4, BGMAC_TX_PKTS, "tx_pkts" },
+ { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
+ { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
+ { 4, BGMAC_TX_LEN_64, "tx_64" },
+ { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
+ { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
+ { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
+ { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
+ { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
+ { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
+ { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
+ { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
+ { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
+ { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
+ { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
+ { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
+ { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
+ { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
+ { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
+ { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
+ { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
+ { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
+ { 4, BGMAC_TX_DEFERED, "tx_defered" },
+ { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
+ { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
+ { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
+ { 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
+ { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
+ { 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
+ { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
+ { 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
+ { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
+ { 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
+ { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
+ { 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
+ { 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
+ { 8, BGMAC_RX_OCTETS, "rx_octets" },
+ { 4, BGMAC_RX_PKTS, "rx_pkts" },
+ { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
+ { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
+ { 4, BGMAC_RX_LEN_64, "rx_64" },
+ { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
+ { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
+ { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
+ { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
+ { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
+ { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
+ { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
+ { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
+ { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
+ { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
+ { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
+ { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
+ { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
+ { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
+ { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
+ { 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
+ { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
+ { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
+ { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
+ { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
+ { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
+ { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
+};
+
+#define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats)
+
+static int bgmac_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BGMAC_STATS_LEN;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static void bgmac_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < BGMAC_STATS_LEN; i++)
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static void bgmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *ss, uint64_t *data)
+{
+ struct bgmac *bgmac = netdev_priv(dev);
+ const struct bgmac_stat *s;
+ unsigned int i;
+ u64 val;
+
+ if (!netif_running(dev))
+ return;
+
+ for (i = 0; i < BGMAC_STATS_LEN; i++) {
+ s = &bgmac_get_strings_stats[i];
+ val = 0;
+ if (s->size == 8)
+ val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
+ val |= bgmac_read(bgmac, s->offset);
+ data[i] = val;
+ }
+}
+
static int bgmac_get_settings(struct net_device *net_dev,
struct ethtool_cmd *cmd)
{
@@ -1406,6 +1537,9 @@ static void bgmac_get_drvinfo(struct net_device *net_dev,
}
static const struct ethtool_ops bgmac_ethtool_ops = {
+ .get_strings = bgmac_get_strings,
+ .get_sset_count = bgmac_get_sset_count,
+ .get_ethtool_stats = bgmac_get_ethtool_stats,
.get_settings = bgmac_get_settings,
.set_settings = bgmac_set_settings,
.get_drvinfo = bgmac_get_drvinfo,
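The MIB block exposes 64-bit statistics as two 32-bit registers, with the high word at offset + 4; bgmac_get_ethtool_stats() above reads the high word first and ORs in the low word. A simplified sketch of that combine, using a plain readl() in place of the driver's bgmac_read() accessor:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch only: fold a split 64-bit counter; readl() stands in for bgmac_read(). */
static u64 example_read_counter(void __iomem *base, u32 offset, u8 size)
{
	u64 val = 0;

	if (size == 8)
		val = (u64)readl(base + offset + 4) << 32;	/* high 32 bits */
	val |= readl(base + offset);				/* low 32 bits */
	return val;
}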
@@ -1588,6 +1722,7 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->net_dev = net_dev;
bgmac->core = core;
bcma_set_drvdata(core, bgmac);
+ SET_NETDEV_DEV(net_dev, &core->dev);
/* Defaults */
memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 9a03c14..853d72b 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -123,7 +123,7 @@
#define BGMAC_TX_LEN_1024_TO_1522 0x334
#define BGMAC_TX_LEN_1523_TO_2047 0x338
#define BGMAC_TX_LEN_2048_TO_4095 0x33c
-#define BGMAC_TX_LEN_4095_TO_8191 0x340
+#define BGMAC_TX_LEN_4096_TO_8191 0x340
#define BGMAC_TX_LEN_8192_TO_MAX 0x344
#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */
#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */
@@ -166,7 +166,7 @@
#define BGMAC_RX_LEN_1024_TO_1522 0x3e4
#define BGMAC_RX_LEN_1523_TO_2047 0x3e8
#define BGMAC_RX_LEN_2048_TO_4095 0x3ec
-#define BGMAC_RX_LEN_4095_TO_8191 0x3f0
+#define BGMAC_RX_LEN_4096_TO_8191 0x3f0
#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4
#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */
#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8de79ae..655d89e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2036,7 +2036,8 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- lio->txq_status_wq.wq = create_workqueue("txq-status");
+ lio->txq_status_wq.wq = alloc_workqueue("txq-status",
+ WQ_MEM_RECLAIM, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
return;
@@ -2103,7 +2104,6 @@ static int liquidio_stop(struct net_device *netdev)
send_rx_ctrl_cmd(lio, 0);
cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
- flush_workqueue(lio->txq_status_wq.wq);
destroy_workqueue(lio->txq_status_wq.wq);
if (lio->ptp_clock) {
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index a2a2465..9313915 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -144,7 +144,9 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->fn_list.setup_iq_regs(oct, iq_no);
- oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
+ oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
+ WQ_MEM_RECLAIM,
+ 0);
if (!oct->check_db_wq[iq_no].wq) {
lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
@@ -168,7 +170,6 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
- flush_workqueue(oct->check_db_wq[iq_no].wq);
destroy_workqueue(oct->check_db_wq[iq_no].wq);
if (OCTEON_CN6XXX(oct))
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 091f537..6287a7c 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -55,7 +55,7 @@ int octeon_setup_response_list(struct octeon_device *oct)
atomic_set(&oct->response_list[i].pending_req_count, 0);
}
- oct->dma_comp_wq.wq = create_workqueue("dma-comp");
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
if (!oct->dma_comp_wq.wq) {
dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
return -ENOMEM;
@@ -72,7 +72,6 @@ int octeon_setup_response_list(struct octeon_device *oct)
void octeon_delete_response_list(struct octeon_device *oct)
{
cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
- flush_workqueue(oct->dma_comp_wq.wq);
destroy_workqueue(oct->dma_comp_wq.wq);
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index fe3763d..e1e6c40 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -97,7 +97,8 @@
* SURF/DPDK
*/
-#define MAX_RSS_IFACES 15
+#define MAX_PORT_RSS_TABLES 15
+#define MAX_NIC_FUNCS 16
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
#define MAX_TX_QS 32
@@ -444,6 +445,17 @@ struct be_resources {
u16 max_evt_qs;
u32 if_cap_flags;
u32 vf_if_cap_flags; /* VF if capability flags */
+ u32 flags;
+ /* Calculated PF Pool's share of RSS Tables. This is not enforced by
+ * the FW, but is a self-imposed driver limitation.
+ */
+ u16 max_rss_tables;
+};
+
+/* These are port-wide values */
+struct be_port_resources {
+ u16 max_vfs;
+ u16 nic_pfs;
};
#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
@@ -634,6 +646,8 @@ struct be_adapter {
#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
#define be_max_eqs(adapter) (adapter->res.max_evt_qs)
#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
+#define be_max_pf_pool_rss_tables(adapter) \
+ (adapter->pool_res.max_rss_tables)
static inline u16 be_max_qs(struct be_adapter *adapter)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 22402db..29aeb91 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4023,7 +4023,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
adapter->wol_cap = resp->wol_settings;
- if (adapter->wol_cap & BE_WOL_CAP)
+
+ /* Non-zero macaddr indicates WOL is enabled */
+ if (adapter->wol_cap & BE_WOL_CAP &&
+ !is_zero_ether_addr(resp->magic_mac))
adapter->wol_en = true;
}
err:
@@ -4360,9 +4363,35 @@ err:
return status;
}
+/* This routine returns a list of all the NIC PF_nums in the adapter */
+u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_pcie_res_desc *pcie = NULL;
+ int i;
+ u16 nic_pf_count = 0;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_state && (pcie->pf_type == MISSION_NIC ||
+ pcie->pf_type == MISSION_RDMA)) {
+ nic_pf_nums[nic_pf_count++] = pcie->pf_num;
+ }
+ }
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return nic_pf_count;
+}
+
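be_get_nic_pf_num_list() walks the GET_PROFILE_CONFIG response as a sequence of variable-length resource descriptors, advancing by each descriptor's own desc_len and falling back to the V0 size when that field is zero. The generic walk pattern, sketched with a hypothetical header type:

/* Sketch only: walking a buffer of variable-length descriptors. */
struct example_desc_hdr {
	u8 type;
	u8 len;		/* descriptor length in bytes; 0 means "default size" */
};

static void example_walk_descs(u8 *buf, u32 count, u8 default_len)
{
	struct example_desc_hdr *hdr = (struct example_desc_hdr *)buf;
	u32 i;

	for (i = 0; i < count; i++) {
		/* inspect hdr->type here and cast to the matching descriptor */
		hdr = (void *)hdr + (hdr->len ? hdr->len : default_len);
	}
}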
/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 query, u8 domain)
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain)
{
struct be_cmd_resp_get_profile_config *resp;
struct be_cmd_req_get_profile_config *req;
@@ -4389,7 +4418,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (!lancer_chip(adapter))
req->hdr.version = 1;
- req->type = ACTIVE_PROFILE_TYPE;
+ req->type = profile_type;
req->hdr.domain = domain;
/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
@@ -4406,6 +4435,28 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le16_to_cpu(resp->desc_count);
+ if (port_res) {
+ u16 nic_pf_cnt = 0, i;
+ u16 nic_pf_num_list[MAX_NIC_FUNCS];
+
+ nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param,
+ desc_count,
+ nic_pf_num_list);
+
+ for (i = 0; i < nic_pf_cnt; i++) {
+ nic = be_get_func_nic_desc(resp->func_param, desc_count,
+ nic_pf_num_list[i]);
+ if (nic->link_param == adapter->port_num) {
+ port_res->nic_pfs++;
+ pcie = be_get_pcie_desc(resp->func_param,
+ desc_count,
+ nic_pf_num_list[i]);
+ port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
+ }
+ }
+ return status;
+ }
+
pcie = be_get_pcie_desc(resp->func_param, desc_count,
adapter->pf_num);
if (pcie)
@@ -4465,7 +4516,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
}
/* Mark all fields invalid */
-static void be_reset_nic_desc(struct be_nic_res_desc *nic)
+void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
memset(nic, 0, sizeof(*nic));
nic->unicast_mac_count = 0xFFFF;
@@ -4534,73 +4585,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
1, version, domain);
}
-static void be_fill_vf_res_template(struct be_adapter *adapter,
- struct be_resources pool_res,
- u16 num_vfs, u16 num_vf_qs,
- struct be_nic_res_desc *nic_vft)
-{
- u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
- struct be_resources res_mod = {0};
-
- /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
- * which are modifiable using SET_PROFILE_CONFIG cmd.
- */
- be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
-
- /* If RSS IFACE capability flags are modifiable for a VF, set the
- * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
- * more than 1 RSSQ is available for a VF.
- * Otherwise, provision only 1 queue pair for VF.
- */
- if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
- nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
- if (num_vf_qs > 1) {
- vf_if_cap_flags |= BE_IF_FLAGS_RSS;
- if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
- vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
- } else {
- vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
- BE_IF_FLAGS_DEFQ_RSS);
- }
- } else {
- num_vf_qs = 1;
- }
-
- if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
- nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
- vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
- }
-
- nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
- nic_vft->rq_count = cpu_to_le16(num_vf_qs);
- nic_vft->txq_count = cpu_to_le16(num_vf_qs);
- nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
- nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
- (num_vfs + 1));
-
- /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
- * among the PF and it's VFs, if the fields are changeable
- */
- if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
- nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
- (num_vfs + 1));
-
- if (res_mod.max_vlans == FIELD_MODIFIABLE)
- nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
- (num_vfs + 1));
-
- if (res_mod.max_iface_count == FIELD_MODIFIABLE)
- nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
- (num_vfs + 1));
-
- if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
- nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
- (num_vfs + 1));
-}
-
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources pool_res, u16 num_vfs,
- u16 num_vf_qs)
+ struct be_resources *vft_res)
{
struct {
struct be_pcie_res_desc pcie;
@@ -4620,12 +4607,26 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter,
be_reset_nic_desc(&desc.nic_vft);
desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
- desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
+ desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) |
+ BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
desc.nic_vft.pf_num = adapter->pdev->devfn;
desc.nic_vft.vf_num = 0;
-
- be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
- &desc.nic_vft);
+ desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags);
+ desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs);
+ desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs);
+ desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs);
+ desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count);
+
+ if (vft_res->max_uc_mac)
+ desc.nic_vft.unicast_mac_count =
+ cpu_to_le16(vft_res->max_uc_mac);
+ if (vft_res->max_vlans)
+ desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans);
+ if (vft_res->max_iface_count)
+ desc.nic_vft.iface_count =
+ cpu_to_le16(vft_res->max_iface_count);
+ if (vft_res->max_mcc_count)
+ desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count);
return be_cmd_set_profile_config(adapter, &desc,
2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d8540ae..cb96ddd 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1556,7 +1556,9 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
u8 rsvd0[2];
u8 wol_settings;
u8 rsvd1[5];
- u32 rsvd2[295];
+ u32 rsvd2[288];
+ u8 magic_mac[6];
+ u8 rsvd3[22];
} __packed;
#define BE_GET_WOL_CAP 2
@@ -2128,6 +2130,9 @@ struct be_cmd_req_set_ext_fat_caps {
#define IMM_SHIFT 6 /* Immediate */
#define NOSV_SHIFT 7 /* No save */
+#define MISSION_NIC 1
+#define MISSION_RDMA 8
+
struct be_res_desc_hdr {
u8 desc_type;
u8 desc_len;
@@ -2244,6 +2249,7 @@ struct be_cmd_req_get_profile_config {
struct be_cmd_req_hdr hdr;
u8 rsvd;
#define ACTIVE_PROFILE_TYPE 0x2
+#define SAVED_PROFILE_TYPE 0x0
#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3)
u8 type;
u16 rsvd1;
@@ -2449,7 +2455,9 @@ int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 query, u8 domain);
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain);
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
@@ -2461,4 +2469,4 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources res, u16 num_vfs,
- u16 num_vf_qs);
+ struct be_resources *vft_res);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2ff6916..c569cd7 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -793,6 +793,11 @@ static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_dma_mem cmd;
+ u8 mac[ETH_ALEN];
+ bool enable;
+ int status;
if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
@@ -802,12 +807,32 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
}
- if (wol->wolopts & WAKE_MAGIC)
- adapter->wol_en = true;
- else
- adapter->wol_en = false;
+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
+ cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+ if (!cmd.va)
+ return -ENOMEM;
- return 0;
+ eth_zero_addr(mac);
+
+ enable = wol->wolopts & WAKE_MAGIC;
+ if (enable)
+ ether_addr_copy(mac, adapter->netdev->dev_addr);
+
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ if (status) {
+ dev_err(dev, "Could not set Wake-on-lan mac address\n");
+ status = be_cmd_status(status);
+ goto err;
+ }
+
+ pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+
+ adapter->wol_en = enable ? true : false;
+
+err:
+ dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
+ return status;
}
static int be_test_ddr_dma(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ed98ef1..2451a47 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3636,40 +3636,6 @@ err:
return -EIO;
}
-static int be_setup_wol(struct be_adapter *adapter, bool enable)
-{
- struct device *dev = &adapter->pdev->dev;
- struct be_dma_mem cmd;
- u8 mac[ETH_ALEN];
- int status;
-
- eth_zero_addr(mac);
-
- cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
- if (!cmd.va)
- return -ENOMEM;
-
- if (enable) {
- status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET,
- PCICFG_PM_CONTROL_MASK);
- if (status) {
- dev_err(dev, "Could not enable Wake-on-lan\n");
- goto err;
- }
- } else {
- ether_addr_copy(mac, adapter->netdev->dev_addr);
- }
-
- status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
- pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
-err:
- dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
- return status;
-}
-
static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
@@ -3810,35 +3776,86 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
}
#endif
-static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
+static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
+ struct be_resources *vft_res)
{
struct be_resources res = adapter->pool_res;
+ u32 vf_if_cap_flags = res.vf_if_cap_flags;
+ struct be_resources res_mod = {0};
u16 num_vf_qs = 1;
- /* Distribute the queue resources among the PF and it's VFs
- * Do not distribute queue resources in multi-channel configuration.
- */
- if (num_vfs && !be_is_mc(adapter)) {
- /* Divide the qpairs evenly among the VFs and the PF, capped
- * at VF-EQ-count. Any remainder qpairs belong to the PF.
- */
+ /* Distribute the queue resources among the PF and its VFs */
+ if (num_vfs) {
+ /* Divide the rx queues evenly among the VFs and the PF, capped
+ * at VF-EQ-count. Any remainder queues belong to the PF.
+ */
num_vf_qs = min(SH_VF_MAX_NIC_EQS,
res.max_rss_qs / (num_vfs + 1));
- /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
- * interfaces per port. Provide RSS on VFs, only if number
- * of VFs requested is less than MAX_RSS_IFACES limit.
+ /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
+ * RSS Tables per port. Provide RSS on VFs only if the number of
+ * VFs requested is less than its PF Pool's RSS Tables limit.
*/
- if (num_vfs >= MAX_RSS_IFACES)
+ if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
num_vf_qs = 1;
}
- return num_vf_qs;
+
+ /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+ * which are modifiable using SET_PROFILE_CONFIG cmd.
+ */
+ be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_MODIFIABLE, 0);
+
+ /* If RSS IFACE capability flags are modifiable for a VF, set the
+ * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+ * more than 1 RSSQ is available for a VF.
+ * Otherwise, provision only 1 queue pair for VF.
+ */
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ if (num_vf_qs > 1) {
+ vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+ if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+ vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+ } else {
+ vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+ BE_IF_FLAGS_DEFQ_RSS);
+ }
+ } else {
+ num_vf_qs = 1;
+ }
+
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
+
+ vft_res->vf_if_cap_flags = vf_if_cap_flags;
+ vft_res->max_rx_qs = num_vf_qs;
+ vft_res->max_rss_qs = num_vf_qs;
+ vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
+ vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
+
+ /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+ * among the PF and its VFs, if the fields are changeable
+ */
+ if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+ vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
+
+ if (res_mod.max_vlans == FIELD_MODIFIABLE)
+ vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
+
+ if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+ vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
+
+ if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+ vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
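To make the split above concrete: with, say, 16 RSS queues in the PF pool and 7 VFs requested, each function gets 16 / (7 + 1) = 2 RSS queue pairs (further capped at SH_VF_MAX_NIC_EQS), and once num_vfs reaches the PF pool's RSS-table limit every VF falls back to a single queue pair. A small sketch of that calculation; EXAMPLE_VF_MAX_EQS and the numbers are illustrative stand-ins, not values from a real adapter:

#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_VF_MAX_EQS	4	/* illustrative stand-in for SH_VF_MAX_NIC_EQS */

/* Sketch only: per-VF queue count, mirroring the logic above. */
static u16 example_vf_queue_count(u16 max_rss_qs, u16 num_vfs, u16 pool_rss_tables)
{
	u16 num_vf_qs = 1;

	if (num_vfs) {
		num_vf_qs = min_t(u16, EXAMPLE_VF_MAX_EQS,
				  max_rss_qs / (num_vfs + 1));
		if (num_vfs >= pool_rss_tables)
			num_vf_qs = 1;	/* no RSS once the table limit is hit */
	}
	return num_vf_qs;	/* e.g. 16 queues, 7 VFs -> 2 per function */
}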
static int be_clear(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
be_cancel_worker(adapter);
@@ -3850,11 +3867,12 @@ static int be_clear(struct be_adapter *adapter)
*/
if (skyhawk_chip(adapter) && be_physfn(adapter) &&
!pci_vfs_assigned(pdev)) {
- num_vf_qs = be_calculate_vf_qs(adapter,
- pci_sriov_get_totalvfs(pdev));
+ be_calculate_vf_res(adapter,
+ pci_sriov_get_totalvfs(pdev),
+ &vft_res);
be_cmd_set_sriov_config(adapter, adapter->pool_res,
pci_sriov_get_totalvfs(pdev),
- num_vf_qs);
+ &vft_res);
}
#ifdef CONFIG_BE2NET_VXLAN
@@ -3884,7 +3902,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, &res,
+ status = be_cmd_get_profile_config(adapter, &res, NULL,
+ ACTIVE_PROFILE_TYPE,
RESOURCE_LIMITS,
vf + 1);
if (!status) {
@@ -4069,8 +4088,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
/* On a SuperNIC profile, the driver needs to use the
* GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
*/
- be_cmd_get_profile_config(adapter, &super_nic_res,
- RESOURCE_LIMITS, 0);
+ be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
+ ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
+ 0);
/* Some old versions of BE3 FW don't report max_tx_qs value */
res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
} else {
@@ -4109,12 +4129,38 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->cmd_privileges = MIN_PRIVILEGES;
}
+/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
+ * However, this HW limitation is not exposed to the host via any SLI cmd.
+ * As a result, in the case of SRIOV and in particular multi-partition configs
+ * the driver needs to calculate a proportional share of RSS Tables per PF-pool
+ * for distribution among the VFs. This self-imposed limit determines the
+ * number of VFs for which RSS can be enabled.
+ */
+void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+{
+ struct be_port_resources port_res = {0};
+ u8 rss_tables_on_port;
+ u16 max_vfs = be_max_vfs(adapter);
+
+ be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
+
+ rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
+
+ /* Each PF Pool's RSS Tables limit =
+ * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
+ */
+ adapter->pool_res.max_rss_tables =
+ max_vfs * rss_tables_on_port / port_res.max_vfs;
+}
+
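Worked example of the share computed above, with illustrative numbers: 15 RSS tables per port and 2 NIC PFs on the port leave 13 tables; a PF that owns 32 of the port's 64 VFs gets 32 * 13 / 64 = 6 tables, so RSS can be offered to at most 6 of its VFs. As a sketch:

/* Sketch only: per-PF-pool RSS table share; the numbers in the
 * comments are illustrative, not from a real adapter.
 */
static u16 example_pf_pool_rss_tables(u16 pf_max_vfs, u16 port_max_vfs,
				      u16 nic_pfs, u16 port_rss_tables)
{
	u16 rss_tables_on_port = port_rss_tables - nic_pfs;	/* 15 - 2 = 13 */

	return pf_max_vfs * rss_tables_on_port / port_max_vfs;	/* 32 * 13 / 64 = 6 */
}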
static int be_get_sriov_config(struct be_adapter *adapter)
{
struct be_resources res = {0};
int max_vfs, old_vfs;
- be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
+ be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
/* Some old versions of BE3 FW don't report max_vfs value */
if (BE3_chip(adapter) && !res.max_vfs) {
@@ -4138,13 +4184,19 @@ static int be_get_sriov_config(struct be_adapter *adapter)
adapter->num_vfs = old_vfs;
}
+ if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
+ be_calculate_pf_pool_rss_tables(adapter);
+ dev_info(&adapter->pdev->dev,
+ "RSS can be enabled for all VFs if num_vfs <= %d\n",
+ be_max_pf_pool_rss_tables(adapter));
+ }
return 0;
}
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
int old_vfs = pci_num_vf(adapter->pdev);
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
int status;
be_get_sriov_config(adapter);
@@ -4158,9 +4210,9 @@ static void be_alloc_sriov_res(struct be_adapter *adapter)
* Also, this is done by FW in Lancer chip.
*/
if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
- num_vf_qs = be_calculate_vf_qs(adapter, 0);
+ be_calculate_vf_res(adapter, 0, &vft_res);
status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
- num_vf_qs);
+ &vft_res);
if (status)
dev_err(&adapter->pdev->dev,
"Failed to optimize SRIOV resources\n");
@@ -4241,6 +4293,8 @@ static int be_get_config(struct be_adapter *adapter)
}
be_cmd_get_acpi_wol_cap(adapter);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
be_cmd_query_port_name(adapter);
@@ -4251,15 +4305,6 @@ static int be_get_config(struct be_adapter *adapter)
"Using profile 0x%x\n", profile_id);
}
- status = be_get_resources(adapter);
- if (status)
- return status;
-
- adapter->pmac_id = kcalloc(be_max_uc(adapter),
- sizeof(*adapter->pmac_id), GFP_KERNEL);
- if (!adapter->pmac_id)
- return -ENOMEM;
-
return 0;
}
@@ -4460,13 +4505,22 @@ static int be_setup(struct be_adapter *adapter)
return status;
}
+ status = be_get_config(adapter);
+ if (status)
+ goto err;
+
if (!BE2_chip(adapter) && be_physfn(adapter))
be_alloc_sriov_res(adapter);
- status = be_get_config(adapter);
+ status = be_get_resources(adapter);
if (status)
goto err;
+ adapter->pmac_id = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
status = be_msix_enable(adapter);
if (status)
goto err;
@@ -5410,9 +5464,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- if (adapter->wol_en)
- be_setup_wol(adapter, true);
-
be_intr_set(adapter, false);
be_cancel_err_detection(adapter);
@@ -5441,9 +5492,6 @@ static int be_pci_resume(struct pci_dev *pdev)
be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
- if (adapter->wol_en)
- be_setup_wol(adapter, false);
-
return 0;
}
@@ -5552,7 +5600,7 @@ err:
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
int status;
if (!num_vfs)
@@ -5575,9 +5623,10 @@ static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
* Also, this is done by FW in Lancer chip.
*/
if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
- num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
+ be_calculate_vf_res(adapter, adapter->num_vfs,
+ &vft_res);
status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
- adapter->num_vfs, num_vf_qs);
+ adapter->num_vfs, &vft_res);
if (status)
dev_err(&pdev->dev,
"Failed to optimize SR-IOV resources\n");
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 3bfe36f..5d3047c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -96,16 +96,22 @@ static int __ae_match(struct device *dev, const void *data)
{
struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
- return hdev->dev->of_node == data;
+ if (dev_of_node(hdev->dev))
+ return (data == &hdev->dev->of_node->fwnode);
+ else if (is_acpi_node(hdev->dev->fwnode))
+ return (data == hdev->dev->fwnode);
+
+ dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
+ return 0;
}
-static struct hnae_ae_dev *find_ae(const struct device_node *ae_node)
+static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
struct device *dev;
- WARN_ON(!ae_node);
+ WARN_ON(!fwnode);
- dev = class_find_device(hnae_class, NULL, ae_node, __ae_match);
+ dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
return dev ? cls_to_ae_dev(dev) : NULL;
}
@@ -312,7 +318,7 @@ EXPORT_SYMBOL(hnae_reinit_handle);
* return handle ptr or ERR_PTR
*/
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
- const struct device_node *ae_node,
+ const struct fwnode_handle *fwnode,
u32 port_id,
struct hnae_buf_ops *bops)
{
@@ -321,7 +327,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
int i, j;
int ret;
- dev = find_ae(ae_node);
+ dev = find_ae(fwnode);
if (!dev)
return ERR_PTR(-ENODEV);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e8d36aa..529cb13 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -27,6 +27,7 @@
* "cb" means control block
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -512,7 +513,7 @@ struct hnae_ae_dev {
struct hnae_handle {
struct device *owner_dev; /* the device which make use of this handle */
struct hnae_ae_dev *dev; /* the device who provides this handle */
- struct device_node *phy_node;
+ struct phy_device *phy_dev;
phy_interface_t phy_if;
u32 if_support;
int q_num;
@@ -528,7 +529,7 @@ struct hnae_handle {
#define ring_to_dev(ring) ((ring)->q->dev->dev)
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
- const struct device_node *ae_node,
+ const struct fwnode_handle *fwnode,
u32 port_id,
struct hnae_buf_ops *bops);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 7a757e8..d37b778 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -131,7 +131,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
ae_handle->phy_if = vf_cb->mac_cb->phy_if;
- ae_handle->phy_node = vf_cb->mac_cb->phy_node;
+ ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
ae_handle->if_support = vf_cb->mac_cb->if_support;
ae_handle->port_type = vf_cb->mac_cb->mac_type;
ae_handle->dport_id = port_id;
@@ -637,13 +637,15 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
int ret;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
switch (loop) {
case MAC_INTERNALLOOP_PHY:
ret = 0;
break;
case MAC_INTERNALLOOP_SERDES:
- ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
+ ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb,
+ !!en);
break;
case MAC_INTERNALLOOP_MAC:
ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 44abb08..1235c7f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -110,7 +110,7 @@ static void hns_gmac_free(void *mac_drv)
u32 mac_id = drv->mac_id;
- hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, mac_id, 0);
}
static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
@@ -317,9 +317,9 @@ static void hns_gmac_init(void *mac_drv)
port = drv->mac_id;
- hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 0);
mdelay(10);
- hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 1);
mdelay(10);
hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
hns_gmac_tx_loop_pkt_dis(mac_drv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 611581f..c526558 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -7,6 +7,7 @@
* (at your option) any later version.
*/
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -15,7 +16,8 @@
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/phy_fixed.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
#include <linux/platform_device.h>
#include "hns_dsaf_main.h"
@@ -94,7 +96,7 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
else
*link_status = 0;
- ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+ ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt);
if (!ret)
*link_status = *link_status && sfp_prsnt;
@@ -511,7 +513,7 @@ void hns_mac_stop(struct hns_mac_cb *mac_cb)
mac_ctrl_drv->mac_en_flg = 0;
mac_cb->link = 0;
- cpld_led_reset(mac_cb);
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
}
/**
@@ -637,6 +639,115 @@ free_mac_drv:
return ret;
}
+static int
+hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode)
+{
+ u32 addr;
+ int ret;
+
+ ret = fwnode_property_read_u32(fwnode, "phy-addr", &addr);
+ if (ret) {
+ dev_err(dev, "has invalid PHY address ret:%d\n", ret);
+ return ret;
+ }
+
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "PHY address %i is too large\n", addr);
+ return -EINVAL;
+ }
+
+ return addr;
+}
+
+static int hns_mac_phydev_match(struct device *dev, void *fwnode)
+{
+ return dev->fwnode == fwnode;
+}
+
+static struct
+platform_device *hns_mac_find_platform_device(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, hns_mac_phydev_match);
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int
+hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
+ u32 addr)
+{
+ struct phy_device *phy;
+ const char *phy_type;
+ bool is_c45;
+ int rc;
+
+ rc = fwnode_property_read_string(mac_cb->fw_port,
+ "phy-mode", &phy_type);
+ if (rc < 0)
+ return rc;
+
+ if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII)))
+ is_c45 = 1;
+ else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII)))
+ is_c45 = 0;
+ else
+ return -ENODATA;
+
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (!phy || IS_ERR(phy))
+ return -EIO;
+
+ if (mdio->irq)
+ phy->irq = mdio->irq[addr];
+
+ /* All data is now stored in the phy struct;
+ * register it
+ */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ return -ENODEV;
+ }
+
+ mac_cb->phy_dev = phy;
+
+ dev_dbg(&mdio->dev, "registered phy at address %i\n", addr);
+
+ return 0;
+}
+
+static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
+{
+ struct acpi_reference_args args;
+ struct platform_device *pdev;
+ struct mii_bus *mii_bus;
+ int rc;
+ int addr;
+
+ /* Loop over the child nodes and register a phy_device for each one */
+ if (!to_acpi_device_node(mac_cb->fw_port))
+ return;
+
+ rc = acpi_node_get_property_reference(
+ mac_cb->fw_port, "mdio-node", 0, &args);
+ if (rc)
+ return;
+
+ addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
+ if (addr < 0)
+ return;
+
+ /* dev address in adev */
+ pdev = hns_mac_find_platform_device(acpi_fwnode_handle(args.adev));
+ mii_bus = platform_get_drvdata(pdev);
+ rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
+ if (!rc)
+ dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n",
+ mac_cb->mac_id, addr);
+}
+
/**
*hns_mac_get_info - get mac information from device node
*@mac_cb: mac device
@@ -645,7 +756,7 @@ free_mac_drv:
*/
static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
{
- struct device_node *np = mac_cb->dev->of_node;
+ struct device_node *np;
struct regmap *syscon;
struct of_phandle_args cpld_args;
u32 ret;
@@ -672,63 +783,85 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
* from dsaf node
*/
if (!mac_cb->fw_port) {
- mac_cb->phy_node = of_parse_phandle(np, "phy-handle",
- mac_cb->mac_id);
- if (mac_cb->phy_node)
+ np = of_parse_phandle(mac_cb->dev->of_node, "phy-handle",
+ mac_cb->mac_id);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+
dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
- mac_cb->mac_id, mac_cb->phy_node->name);
- return 0;
- }
- if (!is_of_node(mac_cb->fw_port))
- return -EINVAL;
- /* parse property from port subnode in dsaf */
- mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port),
- "phy-handle", 0);
- if (mac_cb->phy_node)
- dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
- mac_cb->mac_id, mac_cb->phy_node->name);
- syscon = syscon_node_to_regmap(
- of_parse_phandle(to_of_node(mac_cb->fw_port),
- "serdes-syscon", 0));
- if (IS_ERR_OR_NULL(syscon)) {
- dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
- return -EINVAL;
- }
- mac_cb->serdes_ctrl = syscon;
+ mac_cb->mac_id, np->name);
+ }
- ret = fwnode_property_read_u32(mac_cb->fw_port,
- "port-rst-offset",
- &mac_cb->port_rst_off);
- if (ret) {
- dev_dbg(mac_cb->dev,
- "mac%d port-rst-offset not found, use default value.\n",
- mac_cb->mac_id);
+ return 0;
}
- ret = fwnode_property_read_u32(mac_cb->fw_port,
- "port-mode-offset",
- &mac_cb->port_mode_off);
- if (ret) {
- dev_dbg(mac_cb->dev,
- "mac%d port-mode-offset not found, use default value.\n",
- mac_cb->mac_id);
- }
+ if (is_of_node(mac_cb->fw_port)) {
+ /* parse property from port subnode in dsaf */
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "phy-handle", 0);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+ mac_cb->mac_id, np->name);
+ }
- ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port),
- "cpld-syscon", 1, 0, &cpld_args);
- if (ret) {
- dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
- mac_cb->mac_id);
- mac_cb->cpld_ctrl = NULL;
- } else {
- syscon = syscon_node_to_regmap(cpld_args.np);
+ syscon = syscon_node_to_regmap(
+ of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "serdes-syscon", 0));
if (IS_ERR_OR_NULL(syscon)) {
- dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
+ return -EINVAL;
+ }
+ mac_cb->serdes_ctrl = syscon;
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-rst-offset",
+ &mac_cb->port_rst_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-rst-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-mode-offset",
+ &mac_cb->port_mode_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-mode-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = of_parse_phandle_with_fixed_args(
+ to_of_node(mac_cb->fw_port), "cpld-syscon", 1, 0,
+ &cpld_args);
+ if (ret) {
+ dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
+ mac_cb->mac_id);
mac_cb->cpld_ctrl = NULL;
} else {
- mac_cb->cpld_ctrl = syscon;
- mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+ syscon = syscon_node_to_regmap(cpld_args.np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ mac_cb->cpld_ctrl = syscon;
+ mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+ }
}
+ } else if (is_acpi_node(mac_cb->fw_port)) {
+ hns_mac_register_phy(mac_cb);
+ } else {
+ dev_err(mac_cb->dev, "mac%d cannot find phy node\n",
+ mac_cb->mac_id);
}
return 0;
@@ -790,7 +923,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
else
mac_cb->mac_type = HNAE_PORT_DEBUG;
- mac_cb->phy_if = hns_mac_get_phy_if(mac_cb);
+ mac_cb->phy_if = dsaf_dev->misc_op->get_phy_if(mac_cb);
ret = hns_mac_get_mode(mac_cb->phy_if);
if (ret < 0) {
@@ -805,7 +938,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
if (ret)
return ret;
- cpld_led_reset(mac_cb);
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
return 0;
@@ -892,7 +1025,7 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
for (i = 0; i < max_port_num; i++) {
- cpld_led_reset(dsaf_dev->mac_cb[i]);
+ dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
dsaf_dev->mac_cb[i] = NULL;
}
}
@@ -975,7 +1108,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
nic_data = 0;
mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
- hns_cpld_set_led(mac_cb, (int)mac_cb->link,
+ mac_cb->dsaf_dev->misc_op->cpld_set_led(mac_cb, (int)mac_cb->link,
mac_cb->speed, nic_data);
}
@@ -985,5 +1118,5 @@ int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
if (!mac_cb || !mac_cb->cpld_ctrl)
return 0;
- return cpld_set_led_id(mac_cb, status);
+ return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 97ce9a7..05a6e8f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -338,7 +338,7 @@ struct hns_mac_cb {
phy_interface_t phy_if;
enum hnae_loop loop_mode;
- struct device_node *phy_node;
+ struct phy_device *phy_dev;
struct mac_hw_stats hw_stats;
};
@@ -448,8 +448,6 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
u8 *auto_neg, u16 *speed, u8 *duplex);
-phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb);
-int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en);
int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
enum hnae_loop loop, int en);
void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 1c2ddb2..ac03c4a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -7,6 +7,7 @@
* (at your option) any later version.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -24,6 +25,7 @@
#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"
+#include "hns_dsaf_misc.h"
const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
@@ -32,6 +34,13 @@ const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_SP] = "single-port",
};
+static const struct acpi_device_id hns_dsaf_acpi_match[] = {
+ { "HISI00B1", 0 },
+ { "HISI00B2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
+
int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
{
int ret, i;
@@ -45,12 +54,24 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
struct device_node *np = dsaf_dev->dev->of_node;
struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
- if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
- dsaf_dev->dsaf_ver = AE_VERSION_1;
- else
- dsaf_dev->dsaf_ver = AE_VERSION_2;
+ if (dev_of_node(dsaf_dev->dev)) {
+ if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ if (acpi_dev_found(hns_dsaf_acpi_match[0].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_dsaf_acpi_match[1].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ else
+ return -ENXIO;
+ } else {
+ dev_err(dsaf_dev->dev, "cannot get cfg data from of or acpi\n");
+ return -ENXIO;
+ }
- ret = of_property_read_string(np, "mode", &mode_str);
+ ret = device_property_read_string(dsaf_dev->dev, "mode", &mode_str);
if (ret) {
dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
return ret;
@@ -80,32 +101,40 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
else
dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
- syscon = syscon_node_to_regmap(
- of_parse_phandle(np, "subctrl-syscon", 0));
- if (IS_ERR_OR_NULL(syscon)) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
- if (!res) {
- dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
- return -ENOMEM;
- }
- dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsaf_dev->sc_base) {
- dev_err(dsaf_dev->dev, "subctrl can not map!\n");
- return -ENOMEM;
- }
+ if (dev_of_node(dsaf_dev->dev)) {
+ syscon = syscon_node_to_regmap(
+ of_parse_phandle(np, "subctrl-syscon", 0));
+ if (IS_ERR_OR_NULL(syscon)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
+ return -ENOMEM;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
- if (!res) {
- dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
- return -ENOMEM;
- }
- dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsaf_dev->sds_base) {
- dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
- return -ENOMEM;
+ dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (!dsaf_dev->sc_base) {
+ dev_err(dsaf_dev->dev, "subctrl can not map!\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
+ return -ENOMEM;
+ }
+
+ dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (!dsaf_dev->sds_base) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
+ return -ENOMEM;
+ }
+ } else {
+ dsaf_dev->sub_ctrl = syscon;
}
- } else {
- dsaf_dev->sub_ctrl = syscon;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
@@ -142,7 +171,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
}
- ret = of_property_read_u32(np, "desc-num", &desc_num);
+ ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num);
if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
desc_num > HNS_DSAF_MAX_DESC_CNT) {
dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
@@ -151,14 +180,15 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
dsaf_dev->desc_num = desc_num;
- ret = of_property_read_u32(np, "reset-field-offset", &reset_offset);
+ ret = device_property_read_u32(dsaf_dev->dev, "reset-field-offset",
+ &reset_offset);
if (ret < 0) {
dev_dbg(dsaf_dev->dev,
"get reset-field-offset fail, ret=%d!\r\n", ret);
}
dsaf_dev->reset_offset = reset_offset;
- ret = of_property_read_u32(np, "buf-size", &buf_size);
+ ret = device_property_read_u32(dsaf_dev->dev, "buf-size", &buf_size);
if (ret < 0) {
dev_err(dsaf_dev->dev,
"get buf-size fail, ret=%d!\r\n", ret);
@@ -173,6 +203,10 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
goto unmap_base_addr;
}
+ dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);
+ if (!dsaf_dev->misc_op)
+ return -ENOMEM;
+
if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL)))
dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
else
@@ -1295,9 +1329,9 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
dev_dbg(dsaf_dev->dev,
"hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
- hns_dsaf_rst(dsaf_dev, 0);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
mdelay(10);
- hns_dsaf_rst(dsaf_dev, 1);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1);
hns_dsaf_comm_init(dsaf_dev);
@@ -1325,7 +1359,7 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
{
/*reset*/
- hns_dsaf_rst(dsaf_dev, 0);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
}
/**
@@ -2680,6 +2714,7 @@ static struct platform_driver g_dsaf_driver = {
.driver = {
.name = DSAF_DRV_NAME,
.of_match_table = g_dsaf_match,
+ .acpi_match_table = hns_dsaf_acpi_match,
},
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index f0502ba..2e55b3c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -268,6 +268,27 @@ struct dsaf_int_stat {
};
+struct dsaf_misc_op {
+ void (*cpld_set_led)(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data);
+ void (*cpld_reset_led)(struct hns_mac_cb *mac_cb);
+ int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+ /* reset series functions; the block is held in reset when dereset is 0 */
+ void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
+ void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset);
+ void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+
+ phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
+ int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+
+ int (*cfg_serdes_loopback)(struct hns_mac_cb *mac_cb, bool en);
+};
+
/* Dsaf device struct define ,and mac -> dsaf */
struct dsaf_device {
struct device *dev;
@@ -292,6 +313,7 @@ struct dsaf_device {
struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
+ struct dsaf_misc_op *misc_op;
struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
struct dsaf_int_stat int_stat;
@@ -388,22 +410,11 @@ int hns_dsaf_get_mac_entry_by_index(
u16 entry_index,
struct dsaf_drv_mac_multi_dest_entry *mac_entry);
-void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val);
-
-void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
-
-void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val);
-
void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
-void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
-void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
-void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
- u32 port, u32 val);
-
void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
int hns_dsaf_get_sset_count(int stringset);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a837bb9..96cb628 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -12,6 +12,27 @@
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_reg.h"
+enum _dsm_op_index {
+ HNS_OP_RESET_FUNC = 0x1,
+ HNS_OP_SERDES_LP_FUNC = 0x2,
+ HNS_OP_LED_SET_FUNC = 0x3,
+ HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
+ HNS_OP_GET_SFP_STAT_FUNC = 0x5,
+};
+
+enum _dsm_rst_type {
+ HNS_DSAF_RESET_FUNC = 0x1,
+ HNS_PPE_RESET_FUNC = 0x2,
+ HNS_XGE_CORE_RESET_FUNC = 0x3,
+ HNS_XGE_RESET_FUNC = 0x4,
+ HNS_GE_RESET_FUNC = 0x5,
+};
+
+const u8 hns_dsaf_acpi_dsm_uuid[] = {
+ 0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41,
+ 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A
+};
+
static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
{
if (dsaf_dev->sub_ctrl)
@@ -32,8 +53,8 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
return ret;
}
-void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
- u16 speed, int data)
+static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
{
int speed_reg = 0;
u8 value;
@@ -71,7 +92,7 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
}
}
-void cpld_led_reset(struct hns_mac_cb *mac_cb)
+static void cpld_led_reset(struct hns_mac_cb *mac_cb)
{
if (!mac_cb || !mac_cb->cpld_ctrl)
return;
@@ -81,8 +102,8 @@ void cpld_led_reset(struct hns_mac_cb *mac_cb)
mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
}
-int cpld_set_led_id(struct hns_mac_cb *mac_cb,
- enum hnae_led_state status)
+static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
{
switch (status) {
case HNAE_LED_ACTIVE:
@@ -109,12 +130,40 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
#define RESET_REQ_OR_DREQ 1
-void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
+static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
+ u32 port_type, u32 port, u32 val)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = port_type;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = val;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
+ hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
+ port_type, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
+static void hns_dsaf_rst(struct dsaf_device *dsaf_dev, bool dereset)
{
u32 xbar_reg_addr;
u32 nt_reg_addr;
- if (!val) {
+ if (!dereset) {
xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
} else {
@@ -126,7 +175,15 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
}
-void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+static void hns_dsaf_rst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_DSAF_RESET_FUNC,
+ 0, dereset);
+}
+
+static void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
{
u32 reg_val = 0;
u32 reg_addr;
@@ -137,7 +194,7 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
reg_val |= RESET_REQ_OR_DREQ;
reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
@@ -145,8 +202,15 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
-void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
- u32 port, u32 val)
+static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_XGE_RESET_FUNC, port, dereset);
+}
+
+static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
{
u32 reg_val = 0;
u32 reg_addr;
@@ -157,7 +221,7 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
reg_val |= XGMAC_TRX_CORE_SRST_M
<< dsaf_dev->mac_cb[port]->port_rst_off;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
@@ -165,7 +229,16 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
-void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+static void
+hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_XGE_CORE_RESET_FUNC, port, dereset);
+}
+
+static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
{
u32 reg_val_1;
u32 reg_val_2;
@@ -183,7 +256,7 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
else
reg_val_2 = 0x2082082 << port_rst_off;
- if (val == 0) {
+ if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
@@ -200,7 +273,7 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
reg_val_1 = 0x15540 << dsaf_dev->reset_offset;
reg_val_2 = 0x100 << dsaf_dev->reset_offset;
- if (val == 0) {
+ if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
@@ -216,14 +289,22 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
}
}
-void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+static void hns_dsaf_ge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_GE_RESET_FUNC, port, dereset);
+}
+
+static void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
{
u32 reg_val = 0;
u32 reg_addr;
reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
@@ -231,15 +312,24 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
-void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
+static void
+hns_ppe_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_PPE_RESET_FUNC, port, dereset);
+}
+
+static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset)
{
- struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
u32 reg_val;
u32 reg_addr;
+ if (!(dev_of_node(dsaf_dev->dev)))
+ return;
+
if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
reg_val = RESET_REQ_OR_DREQ;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
@@ -247,7 +337,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
} else {
reg_val = 0x100 << dsaf_dev->reset_offset;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
@@ -261,7 +351,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
* @mac_cb: mac control block
 * return phy interface
*/
-phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
+static phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
{
u32 mode;
u32 reg;
@@ -293,6 +383,36 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
return phy_if;
}
+static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
+{
+ phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+ union acpi_object *obj;
+ union acpi_object obj_args, argv4;
+
+ obj_args.integer.type = ACPI_TYPE_INTEGER;
+ obj_args.integer.value = mac_cb->mac_id;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ hns_dsaf_acpi_dsm_uuid, 0,
+ HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return phy_if;
+
+ phy_if = obj->integer.value ?
+ PHY_INTERFACE_MODE_XGMII : PHY_INTERFACE_MODE_SGMII;
+
+ dev_dbg(mac_cb->dev, "mac_id=%d, phy_if=%d\n", mac_cb->mac_id, phy_if);
+
+ ACPI_FREE(obj);
+
+ return phy_if;
+}
+
int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
if (!mac_cb->cpld_ctrl)
@@ -309,7 +429,7 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
* @mac_cb: mac control block
 * return 0 on success
*/
-int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
+static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
{
/* port 0-3 hilink4 base is serdes_vaddr + 0x00280000
* port 4-7 hilink3 base is serdes_vaddr + 0x00200000
@@ -332,7 +452,7 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
int sfp_prsnt;
int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
- if (!mac_cb->phy_node) {
+ if (!mac_cb->phy_dev) {
if (ret)
pr_info("please confirm sfp is present or not\n");
else
@@ -343,11 +463,89 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
if (mac_cb->serdes_ctrl) {
u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
- dsaf_set_field(origin, 1ull << 10, 10, !!en);
+ dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
} else {
- dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+ dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
+ }
+
+ return 0;
+}
+
+static int
+hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = mac_cb->mac_id;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = !!en;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
+ hns_dsaf_acpi_dsm_uuid, 0,
+ HNS_OP_SERDES_LP_FUNC, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",
+ mac_cb->mac_id);
+
+ return -ENOTSUPP;
}
+ ACPI_FREE(obj);
+
return 0;
}
+
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_misc_op *misc_op;
+
+ misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL);
+ if (!misc_op)
+ return NULL;
+
+ if (dev_of_node(dsaf_dev->dev)) {
+ misc_op->cpld_set_led = hns_cpld_set_led;
+ misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led_id = cpld_set_led_id;
+
+ misc_op->dsaf_reset = hns_dsaf_rst;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port;
+ misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
+ misc_op->ppe_srst = hns_ppe_srst_by_port;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ misc_op->cpld_set_led = hns_cpld_set_led;
+ misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led_id = cpld_set_led_id;
+
+ misc_op->dsaf_reset = hns_dsaf_rst_acpi;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
+ misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
+ misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
+ } else {
+ devm_kfree(dsaf_dev->dev, (void *)misc_op);
+ misc_op = NULL;
+ }
+
+ return (void *)misc_op;
+}
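
The ACPI paths added above all funnel through the same _DSM convention: pack the integer arguments into an ACPI package, evaluate the method by UUID and function index, and free the returned object. A minimal sketch of that calling pattern, using a placeholder device, UUID and function index rather than the driver's own:

#include <linux/acpi.h>

/* Hypothetical helper: evaluate a two-argument _DSM function. */
static int example_dsm_call(struct device *dev, const u8 *uuid, int func,
			    u32 arg0, u32 arg1)
{
	union acpi_object args[2], argv4, *obj;

	args[0].integer.type = ACPI_TYPE_INTEGER;
	args[0].integer.value = arg0;
	args[1].integer.type = ACPI_TYPE_INTEGER;
	args[1].integer.value = arg1;

	argv4.type = ACPI_TYPE_PACKAGE;
	argv4.package.count = 2;
	argv4.package.elements = args;

	/* revision 0; a NULL return means the method failed or is absent */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), uuid, 0, func, &argv4);
	if (!obj)
		return -EIO;

	ACPI_FREE(obj);
	return 0;
}
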
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
index 419f07a..f06bb03 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -33,11 +33,6 @@
#define DSAF_LED_DATA_B 4
#define DSAF_LED_ANCHOR_B 5
-void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
- u16 speed, int data);
-void cpld_led_reset(struct hns_mac_cb *mac_cb);
-int cpld_set_led_id(struct hns_mac_cb *mac_cb,
- enum hnae_led_state status);
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 8cd151a..ff8b6a4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -112,7 +112,6 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
int ppe_idx)
{
-
return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
@@ -200,11 +199,12 @@ static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
{
enum ppe_qid_mode qid_mode;
- enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode;
+ struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
+ enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
- hns_ppe_com_srst(ppe_common, 0);
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
mdelay(100);
- hns_ppe_com_srst(ppe_common, 1);
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
mdelay(100);
if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
@@ -288,9 +288,9 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
/* get default RSS key */
netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
- hns_ppe_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
mdelay(10);
- hns_ppe_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 1);
/* clr and msk except irq*/
hns_ppe_exc_irq_en(ppe_cb, 0);
@@ -328,10 +328,11 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
{
u32 port;
+ struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
if (ppe_cb->ppe_common_cb) {
port = ppe_cb->index;
- hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 4ef6d23..3ce2409 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,6 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
u32 i;
u32 ring_num = rcb_common->ring_num;
int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
- struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
struct platform_device *pdev =
to_platform_device(rcb_common->dsaf_dev->dev);
bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
@@ -473,10 +472,10 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
ring_pair_cb->port_id_in_comm =
hns_rcb_get_port_in_comm(rcb_common, i);
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
- is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
- is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) :
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
platform_get_irq(pdev, base_irq_idx + i * 3);
ring_pair_cb->q.phy_base =
RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index fd90f37..8f4f0e8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -119,7 +119,7 @@ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
= (struct dsaf_device *)dev_get_drvdata(drv->dev);
u32 port = drv->mac_id;
- hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 1);
mdelay(10);
/*enable XGE rX/tX */
@@ -157,7 +157,7 @@ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
}
mdelay(10);
- hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 0);
}
/**
@@ -198,9 +198,9 @@ static void hns_xgmac_init(void *mac_drv)
= (struct dsaf_device *)dev_get_drvdata(drv->dev);
u32 port = drv->mac_id;
- hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
mdelay(100);
- hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
mdelay(100);
hns_xgmac_exc_irq_en(drv, 0);
@@ -425,7 +425,7 @@ static void hns_xgmac_free(void *mac_drv)
u32 mac_id = drv->mac_id;
- hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index e621636..ad742a6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -132,6 +132,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring_ptr_move_fw(ring, next_to_use);
}
+static const struct acpi_device_id hns_enet_acpi_match[] = {
+ { "HISI00C1", 0 },
+ { "HISI00C2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
+
static void fill_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
@@ -996,19 +1003,22 @@ static void hns_nic_adjust_link(struct net_device *ndev)
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = NULL;
+ struct phy_device *phy_dev = h->phy_dev;
+ int ret;
- if (!h->phy_node)
+ if (!h->phy_dev)
return 0;
- if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
- phy_dev = of_phy_connect(ndev, h->phy_node,
- hns_nic_adjust_link, 0, h->phy_if);
- else
- phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ phy_dev->dev_flags = 0;
- if (unlikely(!phy_dev) || IS_ERR(phy_dev))
- return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);
+ ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
+ h->phy_if);
+ } else {
+ ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
+ }
+ if (unlikely(ret))
+ return -ENODEV;
phy_dev->supported &= h->if_support;
phy_dev->advertising = phy_dev->supported;
@@ -1067,13 +1077,8 @@ void hns_nic_update_stats(struct net_device *netdev)
static void hns_init_mac_addr(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct device_node *node = priv->dev->of_node;
- const void *mac_addr_temp;
- mac_addr_temp = of_get_mac_address(node);
- if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
- memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
- } else {
+ if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
eth_hw_addr_random(ndev);
dev_warn(priv->dev, "No valid mac, use random mac %pM",
ndev->dev_addr);
@@ -1812,7 +1817,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
int ret;
h = hnae_get_handle(&priv->netdev->dev,
- priv->ae_node, priv->port_id, NULL);
+ priv->fwnode, priv->port_id, NULL);
if (IS_ERR_OR_NULL(h)) {
ret = -ENODEV;
dev_dbg(priv->dev, "has not handle, register notifier!\n");
@@ -1872,7 +1877,6 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct hns_nic_priv *priv;
- struct device_node *node = dev->of_node;
u32 port_id;
int ret;
@@ -1886,22 +1890,49 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
priv->dev = dev;
priv->netdev = ndev;
- if (of_device_is_compatible(node, "hisilicon,hns-nic-v1"))
- priv->enet_ver = AE_VERSION_1;
- else
- priv->enet_ver = AE_VERSION_2;
+ if (dev_of_node(dev)) {
+ struct device_node *ae_node;
+
+ if (of_device_is_compatible(dev->of_node,
+ "hisilicon,hns-nic-v1"))
+ priv->enet_ver = AE_VERSION_1;
+ else
+ priv->enet_ver = AE_VERSION_2;
- priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
- if (IS_ERR_OR_NULL(priv->ae_node)) {
- ret = PTR_ERR(priv->ae_node);
- dev_err(dev, "not find ae-handle\n");
- goto out_read_prop_fail;
+ ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
+ if (IS_ERR_OR_NULL(ae_node)) {
+ ret = PTR_ERR(ae_node);
+ dev_err(dev, "not find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = &ae_node->fwnode;
+ } else if (is_acpi_node(dev->fwnode)) {
+ struct acpi_reference_args args;
+
+ if (acpi_dev_found(hns_enet_acpi_match[0].id))
+ priv->enet_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_enet_acpi_match[1].id))
+ priv->enet_ver = AE_VERSION_2;
+ else
+ return -ENXIO;
+
+ /* try to find port-idx-in-ae first */
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "ae-handle", 0, &args);
+ if (ret) {
+ dev_err(dev, "not find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = acpi_fwnode_handle(args.adev);
+ } else {
+ dev_err(dev, "cannot read cfg data from OF or acpi\n");
+ return -ENXIO;
}
- /* try to find port-idx-in-ae first */
- ret = of_property_read_u32(node, "port-idx-in-ae", &port_id);
+
+ ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
if (ret) {
/* only for old code compatible */
- ret = of_property_read_u32(node, "port-id", &port_id);
+ ret = device_property_read_u32(dev, "port-id", &port_id);
if (ret)
goto out_read_prop_fail;
 /* for old dts, we need to calculate the port offset */
@@ -2014,6 +2045,7 @@ static struct platform_driver hns_nic_dev_driver = {
.driver = {
.name = "hns-nic",
.of_match_table = hns_enet_of_match,
+ .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
},
.probe = hns_nic_dev_probe,
.remove = hns_nic_dev_remove,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 337efa5..44bb301 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -54,7 +54,7 @@ struct hns_nic_ops {
};
struct hns_nic_priv {
- const struct device_node *ae_node;
+ const struct fwnode_handle *fwnode;
u32 enet_ver;
u32 port_id;
int phy_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 67a648c..a809f52 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -596,7 +596,7 @@ static void hns_nic_self_test(struct net_device *ndev,
st_param[1][0] = MAC_INTERNALLOOP_SERDES;
st_param[1][1] = 1; /*serdes must exist*/
 st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supported with a PHY */
- st_param[2][1] = ((!!(priv->ae_handle->phy_node)) &&
+ st_param[2][1] = ((!!(priv->ae_handle->phy_dev)) &&
(priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 765ddb3..761a32f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -7,6 +7,7 @@
* (at your option) any later version.
*/
+#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
@@ -354,67 +355,64 @@ static int hns_mdio_reset(struct mii_bus *bus)
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
int ret;
- if (!mdio_dev->subctrl_vbase) {
- dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
- return -ENODEV;
- }
-
- /*1. reset req, and read reset st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
- MDIO_SC_RESET_ST, 0x1,
- MDIO_CHECK_SET_ST);
- if (ret) {
- dev_err(&bus->dev, "MDIO reset fail\n");
- return ret;
- }
+ if (dev_of_node(bus->parent)) {
+ if (!mdio_dev->subctrl_vbase) {
+ dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
+ return -ENODEV;
+ }
- /*2. dis clk, and read clk st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
- 0x1, MDIO_SC_CLK_ST, 0x1,
- MDIO_CHECK_CLR_ST);
- if (ret) {
- dev_err(&bus->dev, "MDIO dis clk fail\n");
- return ret;
- }
+ /* 1. reset req, and read reset st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
+ MDIO_SC_RESET_ST, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO reset fail\n");
+ return ret;
+ }
- /*3. reset dreq, and read reset st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
- MDIO_SC_RESET_ST, 0x1,
- MDIO_CHECK_CLR_ST);
- if (ret) {
- dev_err(&bus->dev, "MDIO dis clk fail\n");
- return ret;
- }
+ /* 2. dis clk, and read clk st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
+ 0x1, MDIO_SC_CLK_ST, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
- /*4. en clk, and read clk st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
- 0x1, MDIO_SC_CLK_ST, 0x1,
- MDIO_CHECK_SET_ST);
- if (ret)
- dev_err(&bus->dev, "MDIO en clk fail\n");
+ /* 3. reset dreq, and read reset st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
+ MDIO_SC_RESET_ST, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
+ /* 4. en clk, and read clk st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
+ 0x1, MDIO_SC_CLK_ST, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret)
+ dev_err(&bus->dev, "MDIO en clk fail\n");
+ } else if (is_acpi_node(bus->parent->fwnode)) {
+ acpi_status s;
+
+ s = acpi_evaluate_object(ACPI_HANDLE(bus->parent),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(s)) {
+ dev_err(&bus->dev, "Reset failed, return:%#x\n", s);
+ ret = -EBUSY;
+ } else {
+ ret = 0;
+ }
+ } else {
+ dev_err(&bus->dev, "Can not get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
return ret;
}
/**
- * hns_mdio_bus_name - get mdio bus name
- * @name: mdio bus name
- * @np: mdio device node pointer
- */
-static void hns_mdio_bus_name(char *name, struct device_node *np)
-{
- const u32 *addr;
- u64 taddr = OF_BAD_ADDR;
-
- addr = of_get_address(np, 0, NULL, NULL);
- if (addr)
- taddr = of_translate_address(np, addr);
-
- snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
- (unsigned long long)taddr);
-}
-
-/**
* hns_mdio_probe - probe mdio device
* @pdev: mdio platform device
*
@@ -422,17 +420,16 @@ static void hns_mdio_bus_name(char *name, struct device_node *np)
*/
static int hns_mdio_probe(struct platform_device *pdev)
{
- struct device_node *np;
struct hns_mdio_device *mdio_dev;
struct mii_bus *new_bus;
struct resource *res;
- int ret;
+ int ret = -ENODEV;
if (!pdev) {
dev_err(NULL, "pdev is NULL!\r\n");
return -ENODEV;
}
- np = pdev->dev.of_node;
+
mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
if (!mdio_dev)
return -ENOMEM;
@@ -448,7 +445,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
new_bus->write = hns_mdio_write;
new_bus->reset = hns_mdio_reset;
new_bus->priv = mdio_dev;
- hns_mdio_bus_name(new_bus->id, np);
+ new_bus->parent = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res);
@@ -457,16 +454,32 @@ static int hns_mdio_probe(struct platform_device *pdev)
return ret;
}
- mdio_dev->subctrl_vbase =
- syscon_node_to_regmap(of_parse_phandle(np, "subctrl-vbase", 0));
- if (IS_ERR(mdio_dev->subctrl_vbase)) {
- dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
- mdio_dev->subctrl_vbase = NULL;
- }
- new_bus->parent = &pdev->dev;
platform_set_drvdata(pdev, new_bus);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii",
+ dev_name(&pdev->dev));
+ if (dev_of_node(&pdev->dev)) {
+ mdio_dev->subctrl_vbase = syscon_node_to_regmap(
+ of_parse_phandle(pdev->dev.of_node,
+ "subctrl-vbase", 0));
+ if (IS_ERR(mdio_dev->subctrl_vbase)) {
+ dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
+ mdio_dev->subctrl_vbase = NULL;
+ }
+ ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
+ } else if (is_acpi_node(pdev->dev.fwnode)) {
+ /* Clear all the IRQ properties */
+ memset(new_bus->irq, PHY_POLL, 4 * PHY_MAX_ADDR);
+
+ /* Mask out all PHYs from auto probing. */
+ new_bus->phy_mask = ~0;
+
+ /* Register the MDIO bus */
+ ret = mdiobus_register(new_bus);
+ } else {
+ dev_err(&pdev->dev, "Can not get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
- ret = of_mdiobus_register(new_bus, np);
if (ret) {
dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
platform_set_drvdata(pdev, NULL);
@@ -499,12 +512,19 @@ static const struct of_device_id hns_mdio_match[] = {
{}
};
+static const struct acpi_device_id hns_mdio_acpi_match[] = {
+ { "HISI0141", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
+
static struct platform_driver hns_mdio_driver = {
.probe = hns_mdio_probe,
.remove = hns_mdio_remove,
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
+ .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
},
};
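
Taken together, the hns changes above follow one probe-time recipe: match on both an OF table and an ACPI table, read shared properties through the unified device_property_*() API, and branch on dev_of_node()/is_acpi_node() only where the two firmware interfaces genuinely differ. A minimal sketch of that shape, with placeholder driver, compatible, HID and property names:

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	u32 val = 0;

	/* device_property_*() works for both DT and ACPI firmware nodes */
	device_property_read_u32(dev, "example-prop", &val);
	dev_dbg(dev, "example-prop=%u\n", val);

	if (dev_of_node(dev)) {
		/* DT-only setup, e.g. syscon phandles */
	} else if (is_acpi_node(dev->fwnode)) {
		/* ACPI-only setup, e.g. _DSM or _RST evaluation */
	} else {
		return -ENXIO;
	}

	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example" },
	{ }
};

static const struct acpi_device_id example_acpi_match[] = {
	{ "VNDR0001", 0 },
	{ }
};

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example",
		.of_match_table = example_of_match,
		.acpi_match_table = ACPI_PTR(example_acpi_match),
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
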
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fc95aff..51a2e82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1107,7 +1107,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return priv->rx_ring_num;
+ return rounddown_pow_of_two(priv->rx_ring_num);
}
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
@@ -1141,19 +1141,17 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
u8 *hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rss_map *rss_map = &priv->rss_map;
- int rss_rings;
- size_t n = priv->rx_ring_num;
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
+ u32 i, rss_rings;
int err = 0;
- rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
- rss_rings = 1 << ilog2(rss_rings);
+ rss_rings = priv->prof->rss_rings ?: n;
+ rss_rings = rounddown_pow_of_two(rss_rings);
- while (n--) {
+ for (i = 0; i < n; i++) {
if (!ring_index)
break;
- ring_index[n] = rss_map->qps[n % rss_rings].qpn -
- rss_map->base_qpn;
+ ring_index[i] = i % rss_rings;
}
if (key)
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
@@ -1166,6 +1164,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
const u8 *key, const u8 hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int port_up = 0;
int err = 0;
@@ -1175,18 +1174,18 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
/* Calculate RSS table size and make sure flows are spread evenly
* between rings
*/
- for (i = 0; i < priv->rx_ring_num; i++) {
+ for (i = 0; i < n; i++) {
if (!ring_index)
- continue;
+ break;
if (i > 0 && !ring_index[i] && !rss_rings)
rss_rings = i;
- if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
+ if (ring_index[i] != (i % (rss_rings ?: n)))
return -EINVAL;
}
if (!rss_rings)
- rss_rings = priv->rx_ring_num;
+ rss_rings = n;
/* RSS table size must be an order of 2 */
if (!is_power_of_2(rss_rings))
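
After the change above, the table reported to ethtool always covers a power-of-two number of entries, e.g. rounddown_pow_of_two(10) = 8 entries for 10 RX rings, and entry i simply points at ring i % rss_rings. A minimal sketch of that fill, assuming the caller has sized ring_index to match:

#include <linux/log2.h>
#include <linux/types.h>

/* Hypothetical helper mirroring the round-robin indirection fill. */
static void example_fill_rss_indir(u32 *ring_index, u32 rx_ring_num,
				   u32 rss_rings)
{
	u32 n = rounddown_pow_of_two(rx_ring_num);
	u32 i;

	for (i = 0; i < n; i++)
		ring_index[i] = i % rss_rings;
}
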
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 19ceced..973391b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1197,8 +1197,8 @@ static void mlx4_en_netpoll(struct net_device *dev)
struct mlx4_en_cq *cq;
int i;
- for (i = 0; i < priv->rx_ring_num; i++) {
- cq = priv->rx_cq[i];
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ cq = priv->tx_cq[i];
napi_schedule(&cq->napi);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index b0a0b01..01ae548 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1736,7 +1736,7 @@ static int __init mlxsw_core_module_init(void)
{
int err;
- mlxsw_wq = create_workqueue(mlxsw_core_driver_name);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
if (!mlxsw_wq)
return -ENOMEM;
mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1042f2a..9a63df1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -127,6 +127,8 @@ struct qed_tunn_update_params {
*/
enum qed_pci_personality {
QED_PCI_ETH,
+ QED_PCI_ISCSI,
+ QED_PCI_ETH_ROCE,
QED_PCI_DEFAULT /* default in shmem */
};
@@ -170,6 +172,8 @@ enum QED_PORT_MODE {
enum qed_dev_cap {
QED_DEV_CAP_ETH,
+ QED_DEV_CAP_ISCSI,
+ QED_DEV_CAP_ROCE,
};
struct qed_hw_info {
@@ -183,6 +187,8 @@ struct qed_hw_info {
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+ RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
u8 num_tc;
@@ -255,6 +261,7 @@ struct qed_qm_info {
u8 pure_lb_pq;
u8 offload_pq;
u8 pure_ack_pq;
+ u8 ooo_pq;
u8 vf_queues_offset;
u16 num_pqs;
u16 num_vf_pqs;
@@ -267,6 +274,7 @@ struct qed_qm_info {
u8 pf_wfq;
u32 pf_rl;
struct qed_wfq_data *wfq_data;
+ u8 num_pf_rls;
};
struct storm_stats {
@@ -312,6 +320,7 @@ struct qed_hwfn {
bool hw_init_done;
u8 num_funcs_on_engine;
+ u8 enabled_func_idx;
/* BAR access */
void __iomem *regview;
@@ -350,6 +359,9 @@ struct qed_hwfn {
/* Protocol related */
struct qed_pf_params pf_params;
+ bool b_rdma_enabled_in_prs;
+ u32 rdma_prs_search_reg;
+
/* Array of sb_info of all status blocks */
struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
u16 num_sbs;
@@ -555,6 +567,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
}
#define PURE_LB_TC 8
+#define OOO_LB_TC 9
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index ac284c5..1c35f37 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -39,6 +39,14 @@
#define DQ_RANGE_SHIFT 4
#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT 7
+#define TM_ALIGN BIT(TM_SHIFT)
+#define TM_ELEM_SIZE 4
+
/* ILT constants */
#define ILT_DEFAULT_HW_P_SIZE 3
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
@@ -56,26 +64,71 @@
union conn_context {
struct core_conn_context core_ctx;
struct eth_conn_context eth_ctx;
+ struct iscsi_conn_context iscsi_ctx;
+ struct roce_conn_context roce_ctx;
+};
+
+/* TYPE-0 task context - iSCSI */
+union type0_task_context {
+ struct iscsi_task_context iscsi_ctx;
};
+/* TYPE-1 task context - ROCE */
+union type1_task_context {
+ struct rdma_task_context roce_ctx;
+};
+
+struct src_ent {
+ u8 opaque[56];
+ u64 next;
+};
+
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
+
+#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+
+/* Alignment is inherent to the type1_task_context structure */
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
+
 /* PF per protocol configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
struct qed_conn_type_cfg {
u32 cid_count;
u32 cid_start;
u32 cids_per_vf;
+ struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};
/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
+#define SRQ_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
enum ilt_clients {
ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
ILT_CLI_MAX
};
@@ -88,6 +141,7 @@ struct qed_ilt_cli_blk {
u32 total_size; /* 0 means not active */
u32 real_size_in_page;
u32 start_line;
+ u32 dynamic_line_cnt;
};
struct qed_ilt_client_cfg {
@@ -131,18 +185,44 @@ struct qed_cxt_mngr {
/* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
/* total number of VFs for this hwfn -
* ALL VFs are symmetric in terms of HW resources
*/
u32 vf_count;
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
/* ILT shadow table */
struct qed_dma_mem *ilt_shadow;
u32 pf_start_line;
+
+ /* Mutex for a dynamic ILT allocation */
+ struct mutex mutex;
+
+ /* SRC T2 */
+ struct qed_dma_mem *t2;
+ u32 t2_num_pages;
+ u64 first_free;
+ u64 last_free;
};
+static bool src_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_ISCSI ||
+ type == PROTOCOLID_ROCE;
+}
+
+static bool tm_cid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_ISCSI ||
+ type == PROTOCOLID_ROCE;
+}
/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
@@ -161,21 +241,120 @@ static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
}
}
+/* counts the iids for the Searcher block configuration */
+struct qed_src_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_src_iids *iids)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (!src_proto(i))
+ continue;
+
+ iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+ }
+}
+
+/* counts the iids for the Timers block configuration */
+struct qed_tm_iids {
+ u32 pf_cids;
+ u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
+ u32 pf_tids_total;
+ u32 per_vf_cids;
+ u32 per_vf_tids;
+};
+
+static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_tm_iids *iids)
+{
+ u32 i, j;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+ if (tm_cid_proto(i)) {
+ iids->pf_cids += p_cfg->cid_count;
+ iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
+ }
+
+ iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
+ iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
+ iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
+
+ for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+ iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
+ iids->pf_tids_total += iids->pf_tids[j];
+ }
+}
+
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
struct qed_qm_iids *iids)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 vf_cids = 0, type;
+ struct qed_tid_seg *segs;
+ u32 vf_cids = 0, type, j;
+ u32 vf_tids = 0;
for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->cids += p_mngr->conn_cfg[type].cid_count;
vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+ segs = p_mngr->conn_cfg[type].tid_seg;
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->tids += segs[j].count;
+
+ * The last array element is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
}
iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->tids += vf_tids * p_mngr->vf_count;
+
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
- "iids: CIDS %08x vf_cids %08x\n",
- iids->cids, iids->vf_cids);
+ "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+ iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
+ u32 seg)
+{
+ struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ /* Find the protocol with tid count > 0 for this segment.
+ * Note: there can only be one and this is already validated.
+ */
+ for (i = 0; i < MAX_CONN_TYPES; i++)
+ if (p_cfg->conn_cfg[i].tid_seg[seg].count)
+ return &p_cfg->conn_cfg[i].tid_seg[seg];
+ return NULL;
+}
+
+void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ p_mgr->srq_count = num_srqs;
+}
+
+u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ return p_mgr->srq_count;
}
/* set the iids count per protocol */
@@ -188,6 +367,14 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
+
+ if (type == PROTOCOLID_ROCE) {
+ u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
+ u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
+ u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+
+ p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+ }
}
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
@@ -200,6 +387,37 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ u32 cnt = 0;
+ int i;
+
+ for (i = 0; i < TASK_SEGMENTS; i++)
+ cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+
+ return cnt;
+}
+
+static void
+qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg, u8 seg_type, u32 count, bool has_fl)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ p_seg->count = count;
+ p_seg->has_fl_mem = has_fl;
+ p_seg->type = seg_type;
+}
+
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
struct qed_ilt_cli_blk *p_blk,
u32 start_line, u32 total_size,
@@ -241,17 +459,42 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
p_blk->real_size_in_page, p_blk->start_line);
}
+static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+ struct qed_ilt_client_cfg *p_cli;
+ u32 lines_to_skip = 0;
+ u32 cxts_per_p;
+
+ if (ilt_client == ILT_CLI_CDUC) {
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+ cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+ (u32) CONN_CXT_SIZE(p_hwfn);
+
+ lines_to_skip = cid_count / cxts_per_p;
+ }
+
+ return lines_to_skip;
+}
+
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 curr_line, total, i, task_size, line;
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
struct qed_cdu_iids cdu_iids;
+ struct qed_src_iids src_iids;
struct qed_qm_iids qm_iids;
- u32 curr_line, total, i;
+ struct qed_tm_iids tm_iids;
+ struct qed_tid_seg *p_seg;
memset(&qm_iids, 0, sizeof(qm_iids));
memset(&cdu_iids, 0, sizeof(cdu_iids));
+ memset(&src_iids, 0, sizeof(src_iids));
+ memset(&tm_iids, 0, sizeof(tm_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
@@ -279,6 +522,9 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
+ ILT_CLI_CDUC);
+
/* CDUC VF */
p_blk = &p_cli->vf_blks[CDUC_BLK];
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
@@ -293,21 +539,128 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUC);
+ /* CDUT PF */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli->first.val = curr_line;
+
+ /* first the 'working' task memory */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+
+ /* next the 'init' task memory (forced load memory) */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+
+ if (!p_seg->has_fl_mem) {
+ /* The segment is active (total size of 'working'
+ * memory is > 0) but has no FL (forced-load, Init)
+ * memory. Thus:
+ *
+ * 1. The total-size in the corresponding FL block of
+ * the ILT client is set to 0 - no ILT lines are
+ * provisioned and no ILT memory allocated.
+ *
+ * 2. The start-line of said block is set to the
+ * start line of the matching working memory
+ * block in the ILT client. This is later used to
+ * configure the CDU segment offset registers and
+ * results in FL commands for TIDs of this
+ * segment behaving as regular load commands
+ * (loading TIDs from the working memory).
+ */
+ line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ continue;
+ }
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+ /* CDUT VF */
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+ if (p_seg && p_seg->count) {
+ /* Strictly speaking we need to iterate over all VF
+ * task segment types, but a VF has only 1 segment
+ */
+
+ /* 'working' memory */
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ /* 'init' memory */
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ if (!p_seg->has_fl_mem) {
+ /* see comment above */
+ line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+ qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ } else {
+ task_size = p_mngr->task_type_size[p_seg->type];
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total, task_size);
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->vf_total_lines = curr_line -
+ p_cli->vf_blks[0].start_line;
+
+ /* Now for the rest of the VFs */
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ }
+
/* QM */
p_cli = &p_mngr->clients[ILT_CLI_QM];
p_blk = &p_cli->pf_blks[0];
qed_cxt_qm_iids(p_hwfn, &qm_iids);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
- qm_iids.vf_cids, 0,
+ qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
DP_VERBOSE(p_hwfn,
QED_MSG_ILT,
- "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
+ "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
qm_iids.cids,
qm_iids.vf_cids,
+ qm_iids.tids,
p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
qed_ilt_cli_blk_fill(p_cli, p_blk,
@@ -317,6 +670,75 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ /* SRC */
+ p_cli = &p_mngr->clients[ILT_CLI_SRC];
+ qed_cxt_src_iids(p_mngr, &src_iids);
+
+ /* Both the PF and VF searcher connections are stored in the per-PF
+ * database. Thus sum the PF searcher cids and all the VF searcher
+ * cids.
+ */
+ total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (total) {
+ u32 local_max = max_t(u32, total,
+ SRC_MIN_NUM_ELEMS);
+
+ total = roundup_pow_of_two(local_max);
+
+ p_blk = &p_cli->pf_blks[0];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * sizeof(struct src_ent),
+ sizeof(struct src_ent));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_SRC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM PF */
+ p_cli = &p_mngr->clients[ILT_CLI_TM];
+ qed_cxt_tm_iids(p_mngr, &tm_iids);
+ total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+ if (total) {
+ p_blk = &p_cli->pf_blks[0];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM VF */
+ total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+ if (total) {
+ p_blk = &p_cli->vf_blks[0];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ }
+
+ /* TSDM (SRQ CONTEXT) */
+ total = qed_cxt_get_srq_count(p_hwfn);
+
+ if (total) {
+ p_cli = &p_mngr->clients[ILT_CLI_TSDM];
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TSDM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
RESC_NUM(p_hwfn, QED_ILT)) {
DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
@@ -327,8 +749,122 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
return 0;
}
+static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ if (!p_mngr->t2)
+ return;
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++)
+ if (p_mngr->t2[i].p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_mngr->t2[i].size,
+ p_mngr->t2[i].p_virt,
+ p_mngr->t2[i].p_phys);
+
+ kfree(p_mngr->t2);
+ p_mngr->t2 = NULL;
+}
+
+static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_num, total_size, ent_per_page, psz, i;
+ struct qed_ilt_client_cfg *p_src;
+ struct qed_src_iids src_iids;
+ struct qed_dma_mem *p_t2;
+ int rc;
+
+ memset(&src_iids, 0, sizeof(src_iids));
+
+ /* If the SRC ILT client is inactive there are no connections
+ * requiring the searcher - leave.
+ */
+ p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+ if (!p_src->active)
+ return 0;
+
+ qed_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ total_size = conn_num * sizeof(struct src_ent);
+
+ /* use the same page size as the SRC ILT client */
+ psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+ p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+
+ /* allocate t2 */
+ p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
+ GFP_KERNEL);
+ if (!p_mngr->t2) {
+ DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
+ rc = -ENOMEM;
+ goto t2_fail;
+ }
+
+ /* allocate t2 pages */
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 size = min_t(u32, total_size, psz);
+ void **p_virt = &p_mngr->t2[i].p_virt;
+
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_mngr->t2[i].p_phys, GFP_KERNEL);
+ if (!p_mngr->t2[i].p_virt) {
+ rc = -ENOMEM;
+ goto t2_fail;
+ }
+ memset(*p_virt, 0, size);
+ p_mngr->t2[i].size = size;
+ total_size -= size;
+ }
+
+ /* Set the t2 pointers */
+
+ /* entries per page - must be a power of two */
+ ent_per_page = psz / sizeof(struct src_ent);
+
+ p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+
+ p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
+ p_mngr->last_free = (u64) p_t2->p_phys +
+ ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
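+ /* Chain the src_ent entries: within a page each entry's 'next' points
+ * to the following entry, the last entry of a page points to the first
+ * entry of the next page, and the final entry terminates the list
+ * with 0.
+ */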
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 ent_num = min_t(u32,
+ ent_per_page,
+ conn_num);
+ struct src_ent *entries = p_mngr->t2[i].p_virt;
+ u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+ u32 j;
+
+ for (j = 0; j < ent_num - 1; j++) {
+ val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+ entries[j].next = cpu_to_be64(val);
+ }
+
+ if (i < p_mngr->t2_num_pages - 1)
+ val = (u64) p_mngr->t2[i + 1].p_phys;
+ else
+ val = 0;
+ entries[j].next = cpu_to_be64(val);
+
+ conn_num -= ent_num;
+ }
+
+ return 0;
+
+t2_fail:
+ qed_cxt_src_t2_free(p_hwfn);
+ return rc;
+}
+
#define for_each_ilt_valid_client(pos, clients) \
- for (pos = 0; pos < ILT_CLI_MAX; pos++)
+ for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ if (!clients[pos].active) { \
+ continue; \
+ } else \
/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
@@ -336,12 +872,8 @@ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
u32 size = 0;
u32 i;
- for_each_ilt_valid_client(i, ilt_clients) {
- if (!ilt_clients[i].active)
- continue;
- size += (ilt_clients[i].last.val -
- ilt_clients[i].first.val + 1);
- }
+ for_each_ilt_valid_client(i, ilt_clients)
+ size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
return size;
}
@@ -372,15 +904,22 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 start_line_offset)
{
struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
- u32 lines, line, sz_left;
+ u32 lines, line, sz_left, lines_to_skip = 0;
+
+ /* Special handling for RoCE that supports dynamic allocation */
+ if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
+ ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
+ return 0;
+
+ lines_to_skip = p_blk->dynamic_line_cnt;
if (!p_blk->total_size)
return 0;
sz_left = p_blk->total_size;
- lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
line = p_blk->start_line + start_line_offset -
- p_hwfn->p_cxt_mngr->pf_start_line;
+ p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
for (; lines; lines--) {
dma_addr_t p_phys;
@@ -434,8 +973,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
(u32)(size * sizeof(struct qed_dma_mem)));
for_each_ilt_valid_client(i, clients) {
- if (!clients[i].active)
- continue;
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
@@ -514,6 +1051,7 @@ cid_map_fail:
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
+ struct qed_ilt_client_cfg *clients;
struct qed_cxt_mngr *p_mngr;
u32 i;
@@ -524,20 +1062,42 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
}
/* Initialize ILT client registers */
- p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
- p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
- p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
-
- p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
- p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
- p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
-
+ clients = p_mngr->clients;
+ clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+ clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+ clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+ clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+ clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+ clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+ clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+ clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+ clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+ clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
+ clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
+ clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 32K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ /* Initialize task sizes */
+ p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
+ p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
+
if (p_hwfn->cdev->p_iov_info)
p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+ /* Initialize the dynamic ILT allocation mutex */
+ mutex_init(&p_mngr->mutex);
/* Set the cxt mangr pointer priori to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
@@ -556,6 +1116,13 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
goto tables_alloc_fail;
}
+ /* Allocate the T2 table */
+ rc = qed_cxt_src_t2_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
+ goto tables_alloc_fail;
+ }
+
/* Allocate and initialize the acquired cids bitmaps */
rc = qed_cid_map_alloc(p_hwfn);
if (rc) {
@@ -576,6 +1143,7 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
return;
qed_cid_map_free(p_hwfn);
+ qed_cxt_src_t2_free(p_hwfn);
qed_ilt_shadow_free(p_hwfn);
kfree(p_hwfn->p_cxt_mngr);
@@ -620,6 +1188,48 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
#define CDUC_NCIB_MASK \
(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+#define CDUT_TYPE0_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
+ CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
+ CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
+ CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
+ CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE1_NCIB_SHIFT)
+
static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
@@ -634,6 +1244,92 @@ static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-0 tasks configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-1 tasks configuration */
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK 0x1
+#define CDU_SEG_REG_OFFSET_SHIFT 0
+#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_tid_seg *p_seg;
+ u32 cdu_seg_params, offset;
+ int i;
+
+ static const u32 rt_type_offset_arr[] = {
+ CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ static const u32 rt_type_offset_fl_arr[] = {
+ CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+ /* There are initializations only for CDUT during the PF phase */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ /* Segment 0 */
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg)
+ continue;
+
+ /* Note: start_line is already adjusted for the CDU
+ * segment register granularity, so we just need to
+ * divide. Adjustment is implicit as we assume ILT
+ * Page size is larger than 32K!
+ */
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+ }
}
void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
@@ -742,14 +1438,11 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
ilt_clients = p_hwfn->p_cxt_mngr->clients;
for_each_ilt_valid_client(i, ilt_clients) {
- if (!ilt_clients[i].active)
- continue;
STORE_RT_REG(p_hwfn,
ilt_clients[i].first.reg,
ilt_clients[i].first.val);
STORE_RT_REG(p_hwfn,
- ilt_clients[i].last.reg,
- ilt_clients[i].last.val);
+ ilt_clients[i].last.reg, ilt_clients[i].last.val);
STORE_RT_REG(p_hwfn,
ilt_clients[i].p_size.reg,
ilt_clients[i].p_size.val);
@@ -786,6 +1479,33 @@ static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
p_cli->vf_total_lines);
}
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
}
/* ILT (PSWRQ2) PF */
@@ -804,9 +1524,6 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
clients = p_hwfn->p_cxt_mngr->clients;
for_each_ilt_valid_client(i, clients) {
- if (!clients[i].active)
- continue;
-
/** Client's 1st val and RT array are absolute, ILT shadows'
* lines are relative.
*/
@@ -837,6 +1554,137 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
}
}
+/* SRC (Searcher) PF */
+static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rounded_conn_num, conn_num, conn_max;
+ struct qed_src_iids src_iids;
+
+ memset(&src_iids, 0, sizeof(src_iids));
+ qed_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (!conn_num)
+ return;
+
+ conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
+ rounded_conn_num = roundup_pow_of_two(conn_max);
+
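+ /* The searcher hash is sized to the next power of two of the
+ * connection count (bounded below by SRC_MIN_NUM_ELEMS); its log2 is
+ * programmed as the number of hash bits.
+ */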
+ STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
+ STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+ ilog2(rounded_conn_num));
+
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->first_free);
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->last_free);
+}
+
+/* Timers PF */
+#define TM_CFG_NUM_IDS_SHIFT 0
+#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT 25
+#define TM_CFG_PARENT_PF_MASK 0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT 30
+#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 active_seg_mask = 0, tm_offset, rt_reg;
+ struct qed_tm_iids tm_iids;
+ u64 cfg_word;
+ u8 i;
+
+ memset(&tm_iids, 0, sizeof(tm_iids));
+ qed_cxt_tm_iids(p_mngr, &tm_iids);
+
+ /* @@@TBD No pre-scan for now */
+
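+ /* Each 64-bit config word occupies two consecutive 32-bit runtime
+ * registers; VF entries are indexed by the absolute VF id.
+ */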
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+ /* enable scan */
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+ tm_iids.pf_cids ? 0x1 : 0x0);
+
+ /* @@@TBD how to enable the scan for the VFs */
+
+ tm_offset = tm_iids.per_vf_cids;
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ tm_offset = tm_iids.pf_cids;
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->cdev) +
+ p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+ tm_offset += tm_iids.pf_tids[i];
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+ active_seg_mask = 0;
+
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+ /* @@@TBD how to enable the scan for the VFs */
+}
+
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
qed_cdu_init_common(p_hwfn);
@@ -847,7 +1695,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
qed_qm_init_pf(p_hwfn);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
+ qed_cdu_init_pf(p_hwfn);
qed_ilt_init_pf(p_hwfn);
+ qed_src_init_pf(p_hwfn);
+ qed_tm_init_pf(p_hwfn);
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
@@ -968,17 +1819,439 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_pf_params *p_params)
{
- struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+ u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+ enum protocol_type proto;
+
+ num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
+ num_tasks = num_mrs; /* each mr uses a single task id */
+ num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_ROCE:
+ num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
+ num_cons = num_qps * 2; /* each QP requires two connections */
+ proto = PROTOCOLID_ROCE;
+ break;
+ default:
+ return;
+ }
+
+ if (num_cons && num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
+
+ /* Deliberately passing ROCE for the task id. This is because
+ * iWARP / RoCE share the task id.
+ */
+ qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
+ QED_CXT_ROCE_TID_SEG, 1,
+ num_tasks, false);
+ qed_cxt_set_srq_count(p_hwfn, num_srqs);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "RDMA personality used without setting params!\n");
+ }
+}
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
- qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons, 1);
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_ROCE:
+ {
+ qed_rdma_set_pf_params(p_hwfn,
+ &p_hwfn->
+ pf_params.rdma_pf_params);
+ /* no need for a break since RoCE coexists with Ethernet */
+ }
+ case QED_PCI_ETH:
+ {
+ struct qed_eth_pf_params *p_params =
+ &p_hwfn->pf_params.eth_pf_params;
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons, 1);
+ break;
+ }
+ case QED_PCI_ISCSI:
+ {
+ struct qed_iscsi_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.iscsi_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_ISCSI,
+ p_params->num_cons,
+ 0);
+
+ qed_cxt_set_proto_tid_count(p_hwfn,
+ PROTOCOLID_ISCSI,
+ QED_CXT_ISCSI_TID_SEG,
+ 0,
+ p_params->num_tasks,
+ true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "Iscsi personality used without setting params!\n");
+ }
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+ struct qed_tid_mem *p_info)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 proto, seg, total_lines, i, shadow_line;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_fl_seg;
+ struct qed_tid_seg *p_seg_info;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ proto = PROTOCOLID_ISCSI;
+ seg = QED_CXT_ISCSI_TID_SEG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return -EINVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+ if (!p_seg_info->has_fl_mem)
+ return -EINVAL;
+
+ p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+ p_fl_seg->real_size_in_page);
+
+ for (i = 0; i < total_lines; i++) {
+ shadow_line = i + p_fl_seg->start_line -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+ p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+ }
+ p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+ p_fl_seg->real_size_in_page;
+ p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+ p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+ p_info->tid_size;
+
+ return 0;
+}
+
+/* This function is very RoCE oriented; should another protocol want this
+ * feature in the future, the function will need to be made more generic.
+ */
+int
+qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type, u32 iid)
+{
+ u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ struct qed_ptt *p_ptt;
+ dma_addr_t p_phys;
+ u64 ilt_hw_entry;
+ void *p_virt;
+ int rc = 0;
+
+ switch (elem_type) {
+ case QED_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case QED_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
+ return -EINVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ line = p_blk->start_line + (iid / elems_per_p);
+ shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ /* If the line is already allocated, do nothing; otherwise allocate it
+ * and write it to the PSWRQ2 registers.
+ * This section can run in parallel from different contexts, and thus
+ * mutex protection is needed.
+ */
+
+ mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
+
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ goto out0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn,
+ "QED_TIME_OUT on ptt acquire - dynamic allocation");
+ rc = -EBUSY;
+ goto out0;
+ }
+
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ rc = -ENOMEM;
+ goto out1;
+ }
+ memset(p_virt, 0, p_blk->real_size_in_page);
+
+ /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
+ * to compensate for a HW bug, but it is configured even if DIF is not
+ * enabled. This is harmless and allows us to avoid a dedicated API. We
+ * configure the field for all of the contexts on the newly allocated
+ * page.
+ */
+ if (elem_type == QED_ELEM_TASK) {
+ u32 elem_i;
+ u8 *elem_start = (u8 *)p_virt;
+ union type1_task_context *elem;
+
+ for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
+ elem = (union type1_task_context *)elem_start;
+ SET_FIELD(elem->roce_ctx.tdif_context.flags1,
+ TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+ elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
+ }
+ }
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
+ p_blk->real_size_in_page;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
+
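+ /* Build the ILT entry: the valid bit plus the page physical address
+ * in 4K units (hence the 12-bit shift).
+ */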
+ ilt_hw_entry = 0;
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry,
+ ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
+ reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
+
+ if (elem_type == QED_ELEM_CXT) {
+ u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
+ elems_per_p;
+
+ /* Update the relevant register in the parser */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
+ last_cid_allocated - 1);
+
+ if (!p_hwfn->b_rdma_enabled_in_prs) {
+ /* Enable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+ p_hwfn->b_rdma_enabled_in_prs = true;
+ }
+ }
+
+out1:
+ qed_ptt_release(p_hwfn, p_ptt);
+out0:
+ mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
+
+ return rc;
+}
+
+/* This function is very RoCE oriented; should another protocol want this
+ * feature in the future, the function will need to be made more generic.
+ */
+static int
+qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type,
+ u32 start_iid, u32 count)
+{
+ u32 start_line, end_line, shadow_start_line, shadow_end_line;
+ u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 end_iid = start_iid + count;
+ struct qed_ptt *p_ptt;
+ u64 ilt_hw_entry = 0;
+ u32 i;
+
+ switch (elem_type) {
+ case QED_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case QED_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
+ return -EINVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ start_line = p_blk->start_line + (start_iid / elems_per_p);
+ end_line = p_blk->start_line + (end_iid / elems_per_p);
+ if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+ end_line--;
+
+ shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+ shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn,
+ "QED_TIME_OUT on ptt acquire - dynamic allocation");
+ return -EBUSY;
+ }
+
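+ /* Free each allocated shadow page in the range and clear its ILT
+ * entry in hardware by writing the zeroed ilt_hw_entry via DMAE.
+ */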
+ for (i = shadow_start_line; i < shadow_end_line; i++) {
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ continue;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+ ILT_ENTRY_IN_REGS);
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
+ * wide-bus.
+ */
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64) (uintptr_t) &ilt_hw_entry,
+ reg_offset,
+ sizeof(ilt_hw_entry) / sizeof(u32),
+ 0);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
+{
+ int rc;
+ u32 cid;
+
+ /* Free Connection CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ proto),
+ qed_cxt_get_proto_cid_count(p_hwfn,
+ proto, &cid));
+
+ if (rc)
+ return rc;
+
+ /* Free Task CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
+ qed_cxt_get_proto_tid_count(p_hwfn, proto));
+ if (rc)
+ return rc;
+
+ /* Free TSDM CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
+ qed_cxt_get_srq_count(p_hwfn));
+
+ return rc;
+}
+
+int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
+ u32 tid, u8 ctx_type, void **pp_task_ctx)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_seg;
+ struct qed_tid_seg *p_seg_info;
+ u32 proto, seg;
+ u32 total_lines;
+ u32 tid_size, ilt_idx;
+ u32 num_tids_per_block;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ proto = PROTOCOLID_ISCSI;
+ seg = QED_CXT_ISCSI_TID_SEG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return -EINVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ if (ctx_type == QED_CTX_WORKING_MEM) {
+ p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+ } else if (ctx_type == QED_CTX_FL_MEM) {
+ if (!p_seg_info->has_fl_mem)
+ return -EINVAL;
+ p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ } else {
+ return -EINVAL;
+ }
+ total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
+ tid_size = p_mngr->task_type_size[p_seg_info->type];
+ num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+ if (total_lines < tid / num_tids_per_block)
+ return -EINVAL;
+
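+ /* Translate the TID into an ILT shadow line and an offset within that
+ * page to locate the task context.
+ */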
+ ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+ p_mngr->pf_start_line;
+ *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+ (tid % num_tids_per_block) * tid_size;
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 234c0fa..c6f6f2e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -21,6 +21,14 @@ struct qed_cxt_info {
enum protocol_type type;
};
+#define MAX_TID_BLOCKS 512
+struct qed_tid_mem {
+ u32 tid_size;
+ u32 num_tids_per_block;
+ u32 waste;
+ u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
+};
+
/**
* @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
*
@@ -46,8 +54,22 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info);
+/**
+ * @brief qed_cxt_get_tid_mem_info
+ *
+ * @param p_hwfn
+ * @param p_info
+ *
+ * @return int
+ */
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+ struct qed_tid_mem *p_info);
+
+#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI
+#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
enum qed_cxt_elem_type {
QED_ELEM_CXT,
+ QED_ELEM_SRQ,
QED_ELEM_TASK
};
@@ -149,4 +171,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid);
+#define QED_CTX_WORKING_MEM 0
+#define QED_CTX_FL_MEM 1
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 21ec1c2..d0dc28f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
+#include <linux/dcbnl.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -18,6 +19,9 @@
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_sp.h"
+#ifdef CONFIG_DCB
+#include <linux/qed/qed_eth_if.h>
+#endif
#define QED_DCBX_MAX_MIB_READ_TRY (100)
#define QED_ETH_TYPE_DEFAULT (0)
@@ -252,7 +256,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
if (p_data->arr[type].update)
continue;
- enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+ enable = !(type == DCBX_PROTOCOL_ETH);
qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
priority, tc, type);
}
@@ -351,6 +355,293 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
return rc;
}
+#ifdef CONFIG_DCB
+static void
+qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_app_prio *p_prio,
+ struct qed_dcbx_results *p_results)
+{
+ u8 val;
+
+ p_prio->roce = QED_DCBX_INVALID_PRIORITY;
+ p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY;
+ p_prio->iscsi = QED_DCBX_INVALID_PRIORITY;
+ p_prio->fcoe = QED_DCBX_INVALID_PRIORITY;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE].enable)
+ p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) {
+ val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority;
+ p_prio->roce_v2 = val;
+ }
+
+ if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
+ p_results->arr[DCBX_PROTOCOL_ISCSI].enable)
+ p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_FCOE].update &&
+ p_results->arr[DCBX_PROTOCOL_FCOE].enable)
+ p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
+ p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n",
+ p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe,
+ p_prio->eth);
+}
+
+static void
+qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct qed_dcbx_params *p_params)
+{
+ struct qed_app_entry *entry;
+ u8 pri_map;
+ int i;
+
+ p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_WILLING);
+ p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED);
+ p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_params->app_entry[i];
+ entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ entry->prio = ffs(pri_map) - 1;
+ entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ entry->proto_id,
+ &entry->proto_type);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "APP params: willing %d, valid %d error = %d\n",
+ p_params->app_willing, p_params->app_valid,
+ p_params->app_error);
+}
+
+static void
+qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map;
+
+ p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
+ p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
+ p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
+ p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
+ p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
+ p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
+ p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
+ p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "PFC params: willing %d, pfc_bitmap %d\n",
+ p_params->pfc.willing, pfc_map);
+}
+
+static void
+qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ u32 bw_map[2], tsa_map[2], pri_map;
+ int i;
+
+ p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_WILLING);
+ p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_ENABLED);
+ p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
+ p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing,
+ p_params->ets_cbs,
+ p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+
+ /* The 8-bit tsa and bw data corresponding to each of the 8 TCs are
+ * encoded in a u32 array of size 2.
+ */
+ bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
+ bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
+ tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
+ tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
+ pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]);
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+ p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+ p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "elem %d bw_tbl %x tsa_tbl %x\n",
+ i, p_params->ets_tc_bw_tbl[i],
+ p_params->ets_tc_tsa_tbl[i]);
+ }
+}
+
+static void
+qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct dcbx_ets_feature *p_ets,
+ u32 pfc, struct qed_dcbx_params *p_params)
+{
+ qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
+ qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+ qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params);
+ params->local.valid = true;
+}
+
+static void
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params);
+ params->remote.valid = true;
+}
+
+static void
+qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct qed_dcbx_operational_params *p_operational;
+ struct qed_dcbx_results *p_results;
+ struct dcbx_features *p_feat;
+ bool enabled, err;
+ u32 flags;
+ bool val;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+
+ /* If the DCBx version is non-zero, then negotiation
+ * was successfully performed
+ */
+ p_operational = &params->operational;
+ enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
+ if (!enabled) {
+ p_operational->enabled = enabled;
+ p_operational->valid = false;
+ return;
+ }
+
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
+ p_results = &p_hwfn->p_dcbx_info->results;
+
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
+ p_operational->ieee, p_operational->cee);
+
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params);
+ qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
+ err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ p_operational->err = err;
+ p_operational->enabled = enabled;
+ p_operational->valid = true;
+}
+
+static void
+qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_config_params_s *p_local;
+
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
+ ARRAY_SIZE(p_local->local_chassis_id));
+ memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
+ ARRAY_SIZE(p_local->local_port_id));
+}
+
+static void
+qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_status_params_s *p_remote;
+
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
+ ARRAY_SIZE(p_remote->peer_chassis_id));
+ memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ ARRAY_SIZE(p_remote->peer_port_id));
+}
+
+static int
+qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *p_params,
+ enum qed_mib_read_type type)
+{
+ switch (type) {
+ case QED_DCBX_REMOTE_MIB:
+ qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_OPERATIONAL_MIB:
+ qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
static int
qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
@@ -561,3 +852,1333 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
p_dcb_data = &p_dest->eth_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
}
+
+#ifdef CONFIG_DCB
+static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *p_get,
+ enum qed_mib_read_type type)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ goto out;
+
+ rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+static void
+qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 *pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map = 0;
+ int i;
+
+ if (p_params->pfc.willing)
+ *pfc |= DCBX_PFC_WILLING_MASK;
+ else
+ *pfc &= ~DCBX_PFC_WILLING_MASK;
+
+ if (p_params->pfc.enabled)
+ *pfc |= DCBX_PFC_ENABLED_MASK;
+ else
+ *pfc &= ~DCBX_PFC_ENABLED_MASK;
+
+ *pfc &= ~DCBX_PFC_CAPS_MASK;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (p_params->pfc.prio[i])
+ pfc_map |= BIT(i);
+
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
+}
+
+static void
+qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ u8 *bw_map, *tsa_map;
+ u32 val;
+ int i;
+
+ if (p_params->ets_willing)
+ p_ets->flags |= DCBX_ETS_WILLING_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
+
+ if (p_params->ets_cbs)
+ p_ets->flags |= DCBX_ETS_CBS_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_CBS_MASK;
+
+ if (p_params->ets_enabled)
+ p_ets->flags |= DCBX_ETS_ENABLED_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
+
+ p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+
+ bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
+ tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
+ p_ets->pri_tc_tbl[0] = 0;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ bw_map[i] = p_params->ets_tc_bw_tbl[i];
+ tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+ /* Copy the priority value to the corresponding 4 bits in the
+ * traffic class table.
+ */
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
+ }
+ p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
+ for (i = 0; i < 2; i++) {
+ p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
+ p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
+ }
+}
+
+static void
+qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct qed_dcbx_params *p_params)
+{
+ u32 *entry;
+ int i;
+
+ if (p_params->app_willing)
+ p_app->flags |= DCBX_APP_WILLING_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+ if (p_params->app_valid)
+ p_app->flags |= DCBX_APP_ENABLED_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+ p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+ p_app->flags |= (u32)p_params->num_app_entries <<
+ DCBX_APP_NUM_ENTRIES_SHIFT;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_app->app_pri_tbl[i].entry;
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT);
+ *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+ *entry |= ((u32)p_params->app_entry[i].proto_id <<
+ DCBX_APP_PROTOCOL_ID_SHIFT);
+ *entry &= ~DCBX_APP_PRI_MAP_MASK;
+ *entry |= ((u32)(p_params->app_entry[i].prio) <<
+ DCBX_APP_PRI_MAP_SHIFT);
+ }
+}
+
+static void
+qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_local_params *local_admin,
+ struct qed_dcbx_set *params)
+{
+ local_admin->flags = 0;
+ memcpy(&local_admin->features,
+ &p_hwfn->p_dcbx_info->operational.features,
+ sizeof(local_admin->features));
+
+ if (params->enabled)
+ local_admin->config = params->ver_num;
+ else
+ local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
+ qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG)
+ qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
+ qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+ &params->config.params);
+}
+
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit)
+{
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_mib_meta_data data;
+ u32 resp = 0, param = 0;
+ int rc = 0;
+
+ if (!hw_commit) {
+ memcpy(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ /* clear set-params cache */
+ memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set));
+
+ memset(&local_admin, 0, sizeof(local_admin));
+ qed_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n");
+
+ return rc;
+}
+
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params)
+{
+ struct qed_dcbx_get *dcbx_info;
+ int rc;
+
+ if (p_hwfn->p_dcbx_info->set.config.valid) {
+ memcpy(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info) {
+ DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
+ return -ENOMEM;
+ }
+
+ rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return rc;
+ }
+
+ p_hwfn->p_dcbx_info->set.override_flags = 0;
+ p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+ if (dcbx_info->operational.cee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ if (dcbx_info->operational.ieee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+
+ p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ memcpy(&p_hwfn->p_dcbx_info->set.config.params,
+ &dcbx_info->operational.params,
+ sizeof(struct qed_dcbx_admin_params));
+ p_hwfn->p_dcbx_info->set.config.valid = true;
+
+ memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
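+/* Read the requested MIB and return a freshly allocated qed_dcbx_get;
+ * the caller is responsible for freeing it.
+ */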
+static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info) {
+ DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+ return NULL;
+ }
+
+ if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ if ((type == QED_DCBX_OPERATIONAL_MIB) &&
+ !dcbx_info->operational.enabled) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational\n");
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ return dcbx_info;
+}
+
+static u8 qed_dcbnl_getstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ dcbx_set.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc ? 1 : 0;
+}
+
+static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc);
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc];
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ *bw_pct = 0;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio,
+ u8 *bwg_id, u8 *bw_pct, u8 *up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *prio = *bwg_id = *bw_pct = *up_map = 0;
+}
+
+static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev,
+ int bwg_id, u8 *bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *bw_pct = 0;
+}
+
+static void qed_dcbnl_getpfccfg(struct qed_dev *cdev,
+ int priority, u8 *setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *setting = dcbx_info->operational.params.pfc.prio[priority];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n",
+ priority, setting);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.prio[priority] = !!setting;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE);
+ break;
+ default:
+ *cap = false;
+ rc = 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap);
+ kfree(dcbx_info);
+
+ return rc;
+}
+
+static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = dcbx_info->operational.params.max_ets_tc;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = dcbx_info->operational.params.pfc.max_tc;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ kfree(dcbx_info);
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.params.pfc.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ u8 mode = 0;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
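+	/* Translate the operational DCBX state into dcbnl capability flags */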
+ if (dcbx_info->operational.enabled)
+ mode |= DCB_CAP_DCBX_LLD_MANAGED;
+ if (dcbx_info->operational.ieee)
+ mode |= DCB_CAP_DCBX_VER_IEEE;
+ if (dcbx_info->operational.cee)
+ mode |= DCB_CAP_DCBX_VER_CEE;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
+ kfree(dcbx_info);
+
+ return mode;
+}
+
+static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev,
+ int tc,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB,
+ "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n",
+ tc, pri_type, pgid, bw_pct, up_map);
+
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static u8 qed_dcbnl_setall(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
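+	/* Fetch the cached admin configuration and commit it with hw_commit set */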
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = num;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.max_tc = num;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid tcid %d\n", tcid);
+ return -EINVAL;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ bool ethtype;
+ u8 prio = 0;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
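+	/* Match app entries by ethertype or by port number, per the selector */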
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ return prio;
+}
+
+static int qed_dcbnl_setapp(struct qed_dev *cdev,
+ u8 idtype, u16 idval, u8 pri_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ bool ethtype;
+ int rc, i;
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id)
+ break;
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].proto_id = idval;
+ dcbx_set.config.params.app_entry[i].prio = pri_map;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
+
+ if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) {
+		DP_INFO(hwfn, "Allowed modes are cee, ieee or both\n");
+ return 1;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
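+	/* Rebuild the version mask from scratch based on the requested mode */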
+ dcbx_set.ver_num = 0;
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ dcbx_set.enabled = true;
+ }
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ dcbx_set.enabled = true;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ *flags = 0;
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (dcbx_info->operational.params.ets_enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (dcbx_info->operational.params.pfc.enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ if (dcbx_info->operational.params.app_valid)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ kfree(dcbx_info);
+ return 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ bool enabled, willing;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n",
+ featid, flags);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ enabled = !!(flags & DCB_FEATCFG_ENABLE);
+ willing = !!(flags & DCB_FEATCFG_WILLING);
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_enabled = enabled;
+ dcbx_set.config.params.ets_willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = enabled;
+ dcbx_set.config.params.pfc.willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_willing = willing;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ return 1;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ info->willing = dcbx_info->remote.params.app_willing;
+ info->error = dcbx_info->remote.params.app_error;
+ *app_count = dcbx_info->remote.params.num_app_entries;
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev,
+ struct dcb_app *table)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) {
+ if (dcbx_info->remote.params.app_entry[i].ethtype)
+ table[i].selector = DCB_APP_IDTYPE_ETHTYPE;
+ else
+ table[i].selector = DCB_APP_IDTYPE_PORTNUM;
+ table[i].priority = dcbx_info->remote.params.app_entry[i].prio;
+ table[i].protocol =
+ dcbx_info->remote.params.app_entry[i].proto_id;
+ }
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (dcbx_info->remote.params.pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n",
+ pfc->pfc_en, pfc->tcs_supported);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ pg->willing = dcbx_info->remote.params.ets_willing;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i];
+ pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i];
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
+ struct ieee_pfc *pfc, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_params *params;
+ struct qed_dcbx_get *dcbx_info;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+	if (!dcbx_info->operational.ieee) {
+		DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+		kfree(dcbx_info);
+		return -EINVAL;
+	}
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ pfc->pfc_cap = params->pfc.max_tc;
+ pfc->pfc_en = 0;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (params->pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, false);
+}
+
+static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev,
+ struct ieee_ets *ets, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_params *params;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ ets->ets_cap = params->max_ets_tc;
+ ets->willing = params->ets_willing;
+ ets->cbs = params->ets_cbs;
+ memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc));
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, false);
+}
+
+static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = ets->ets_cap;
+ dcbx_set.config.params.ets_willing = ets->willing;
+ dcbx_set.config.params.ets_cbs = ets->cbs;
+ memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc,
+ sizeof(ets->prio_tc));
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, true);
+}
+
+int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
+}
+
+int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ bool ethtype;
+ u8 prio = 0;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ /* ieee defines the selector field value for ethertype to be 1 */
+ ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->ethtype == ethtype) &&
+ (entry->proto_id == app->protocol)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector,
+ app->protocol);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ app->priority = ffs(prio) - 1;
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ bool ethtype;
+ int rc, i;
+
+ if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
+ return -EINVAL;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ /* ieee defines the selector field value for ethertype to be 1 */
+ ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->ethtype == ethtype) &&
+ (entry->proto_id == app->protocol))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id)
+ break;
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
+ dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = {
+ .getstate = qed_dcbnl_getstate,
+ .setstate = qed_dcbnl_setstate,
+ .getpgtccfgtx = qed_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qed_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qed_dcbnl_getpfccfg,
+ .setpfccfg = qed_dcbnl_setpfccfg,
+ .getcap = qed_dcbnl_getcap,
+ .getnumtcs = qed_dcbnl_getnumtcs,
+ .getpfcstate = qed_dcbnl_getpfcstate,
+ .getdcbx = qed_dcbnl_getdcbx,
+ .setpgtccfgtx = qed_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qed_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx,
+ .setall = qed_dcbnl_setall,
+ .setnumtcs = qed_dcbnl_setnumtcs,
+ .setpfcstate = qed_dcbnl_setpfcstate,
+ .setapp = qed_dcbnl_setapp,
+ .setdcbx = qed_dcbnl_setdcbx,
+ .setfeatcfg = qed_dcbnl_setfeatcfg,
+ .getfeatcfg = qed_dcbnl_getfeatcfg,
+ .getapp = qed_dcbnl_getapp,
+ .peer_getappinfo = qed_dcbnl_peer_getappinfo,
+ .peer_getapptable = qed_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qed_dcbnl_cee_peer_getpg,
+ .ieee_getpfc = qed_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qed_dcbnl_ieee_setpfc,
+ .ieee_getets = qed_dcbnl_ieee_getets,
+ .ieee_setets = qed_dcbnl_ieee_setets,
+ .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qed_dcbnl_ieee_peer_getets,
+ .ieee_getapp = qed_dcbnl_ieee_getapp,
+ .ieee_setapp = qed_dcbnl_ieee_setapp,
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index e7f834d..9ba6816 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -33,6 +33,24 @@ struct qed_dcbx_app_data {
u8 tc; /* Traffic Class */
};
+#ifdef CONFIG_DCB
+#define QED_DCBX_VERSION_DISABLED 0
+#define QED_DCBX_VERSION_IEEE 1
+#define QED_DCBX_VERSION_CEE 2
+
+struct qed_dcbx_set {
+#define QED_DCBX_OVERRIDE_STATE BIT(0)
+#define QED_DCBX_OVERRIDE_PFC_CFG BIT(1)
+#define QED_DCBX_OVERRIDE_ETS_CFG BIT(2)
+#define QED_DCBX_OVERRIDE_APP_CFG BIT(3)
+#define QED_DCBX_OVERRIDE_DSCP_CFG BIT(4)
+ u32 override_flags;
+ bool enabled;
+ struct qed_dcbx_admin_params config;
+ u32 ver_num;
+};
+#endif
+
struct qed_dcbx_results {
bool dcbx_enabled;
u8 pf_id;
@@ -55,6 +73,9 @@ struct qed_dcbx_info {
struct qed_dcbx_results results;
struct dcbx_mib operational;
struct dcbx_mib remote;
+#ifdef CONFIG_DCB
+ struct qed_dcbx_set set;
+#endif
u8 dcbx_cap;
};
@@ -67,6 +88,13 @@ struct qed_dcbx_mib_meta_data {
u32 addr;
};
+#ifdef CONFIG_DCB
+int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
+
+int qed_dcbx_config_params(struct qed_hwfn *,
+ struct qed_ptt *, struct qed_dcbx_set *, bool);
+#endif
+
/* QED local interface routines */
int
qed_dcbx_mib_update_event(struct qed_hwfn *,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 2d89e8c..e45cff4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -17,6 +17,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
@@ -160,9 +161,13 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
+ bool init_rdma_offload_pq = false;
+ bool init_pure_ack_pq = false;
+ bool init_ooo_pq = false;
u16 num_pqs, multi_cos_tcs = 1;
u8 pf_wfq = qm_info->pf_wfq;
u32 pf_rl = qm_info->pf_rl;
+ u16 num_pf_rls = 0;
u16 num_vfs = 0;
#ifdef CONFIG_QED_SRIOV
@@ -174,6 +179,25 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_pqs++; /* for RoCE queue */
+ init_rdma_offload_pq = true;
+		/* We subtract num_vfs because each VF requires a rate
+		 * limiter, and one more is reserved for the default
+		 * rate limiter.
+		 */
+ if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
+ num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+
+ num_pqs += num_pf_rls;
+ qm_info->num_pf_rls = (u8) num_pf_rls;
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
+ init_pure_ack_pq = true;
+ init_ooo_pq = true;
+ }
+
/* Sanity checking that setup requires legal number of resources */
if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
DP_ERR(p_hwfn,
@@ -211,12 +235,22 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ /* First init rate limited queues */
+ for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.non_offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ qm_info->qm_pq_params[curr_queue].rl_valid = 1;
+ }
+
/* First init per-TC PQs */
for (i = 0; i < multi_cos_tcs; i++) {
struct init_qm_pq_params *params =
&qm_info->qm_pq_params[curr_queue++];
- if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+ p_hwfn->hw_info.personality == QED_PCI_ETH) {
params->vport_id = vport_id;
params->tc_id = p_hwfn->hw_info.non_offload_tc;
params->wrr_group = 1;
@@ -236,6 +270,32 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
curr_queue++;
qm_info->offload_pq = 0;
+ if (init_rdma_offload_pq) {
+ qm_info->offload_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ if (init_pure_ack_pq) {
+ qm_info->pure_ack_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ if (init_ooo_pq) {
+ qm_info->ooo_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
/* Then init per-VF PQs */
vf_offset = curr_queue;
for (i = 0; i < num_vfs; i++) {
@@ -244,6 +304,7 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
qm_info->qm_pq_params[curr_queue].tc_id =
p_hwfn->hw_info.non_offload_tc;
qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ qm_info->qm_pq_params[curr_queue].rl_valid = 1;
curr_queue++;
}
@@ -256,7 +317,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
for (i = 0; i < num_ports; i++) {
p_qm_port = &qm_info->qm_port_params[i];
p_qm_port->active = 1;
- p_qm_port->num_active_phys_tcs = 4;
+ if (num_ports == 4)
+ p_qm_port->active_phys_tcs = 0x7;
+ else
+ p_qm_port->active_phys_tcs = 0x9f;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
@@ -366,21 +430,20 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (!p_hwfn->p_tx_cids) {
DP_NOTICE(p_hwfn,
"Failed to allocate memory for Tx Cids\n");
- rc = -ENOMEM;
- goto alloc_err;
+ goto alloc_no_mem;
}
p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
if (!p_hwfn->p_rx_cids) {
DP_NOTICE(p_hwfn,
"Failed to allocate memory for Rx Cids\n");
- rc = -ENOMEM;
- goto alloc_err;
+ goto alloc_no_mem;
}
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u32 n_eqes, num_cons;
/* First allocate the context manager structure */
rc = qed_cxt_mngr_alloc(p_hwfn);
@@ -429,18 +492,34 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
/* EQ */
- p_eq = qed_eq_alloc(p_hwfn, 256);
- if (!p_eq) {
- rc = -ENOMEM;
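+	/* Size the EQ to cover the SPQ capacity plus protocol completions */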
+ n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_ROCE,
+ 0) * 2;
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+ } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ num_cons =
+ qed_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_ISCSI, 0);
+ n_eqes += 2 * num_cons;
+ }
+
+ if (n_eqes > 0xFFFF) {
+ DP_ERR(p_hwfn,
+ "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
+ n_eqes, 0xFFFF);
goto alloc_err;
}
+
+ p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ if (!p_eq)
+ goto alloc_no_mem;
p_hwfn->p_eq = p_eq;
p_consq = qed_consq_alloc(p_hwfn);
- if (!p_consq) {
- rc = -ENOMEM;
- goto alloc_err;
- }
+ if (!p_consq)
+ goto alloc_no_mem;
p_hwfn->p_consq = p_consq;
/* DMA info initialization */
@@ -469,6 +548,8 @@ int qed_resc_alloc(struct qed_dev *cdev)
return 0;
+alloc_no_mem:
+ rc = -ENOMEM;
alloc_err:
qed_resc_free(cdev);
return rc;
@@ -634,6 +715,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
u8 vf_id;
@@ -682,9 +764,16 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
- /* Disable relaxed ordering in the PCI config space */
- qed_wr(p_hwfn, p_ptt, 0x20b4,
- qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
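+	/* On BB, clear the parser RoCE/TCP search registers for each PF */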
+ if (QED_IS_BB(p_hwfn->cdev)) {
+ num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
+ for (pf_id = 0; pf_id < num_pfs; pf_id++) {
+ qed_fid_pretend(p_hwfn, p_ptt, pf_id);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ }
+ /* pretend to original PF */
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
@@ -703,8 +792,31 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
{
int rc = 0;
- rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
- hw_mode);
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
+ if (rc != 0)
+ return rc;
+
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ u8 pf_id = 0;
+
+ if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "PF[%08x] is first eth on engine\n", pf_id);
+
+			/* We should have set the bit for the ppfid, i.e., the
+			 * relative function number within the port. But there
+			 * is a bug in the LLH on BB where the ppfid is
+			 * actually engine based, so we need to take this into
+			 * account.
+			 */
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
+ }
+
+ /* Take the protocol-based hit vector if there is a hit,
+ * otherwise take the other vector.
+ */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+ }
return rc;
}
@@ -751,7 +863,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
}
	/* Protocol Configuration */
- STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+ (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
@@ -773,6 +886,21 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ u8 pf_id = 0;
+		u32 val = 0;
+
+ if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+ if (p_hwfn->rel_pf_id == pf_id) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "PF[%d] is first ETH on engine\n",
+ pf_id);
+ val = 1;
+ }
+ qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
+ }
+ }
+
if (b_hw_start) {
/* enable interrupts */
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -1213,8 +1341,9 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
num_features);
}
-static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
+ u8 enabled_func_idx = p_hwfn->enabled_func_idx;
u32 *resc_start = p_hwfn->hw_info.resc_start;
u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num;
@@ -1238,14 +1367,22 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
- resc_num[QED_RL] = 8;
+ resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
num_funcs;
- resc_num[QED_ILT] = 950;
+ resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
for (i = 0; i < QED_MAX_RESC; i++)
- resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+ resc_start[i] = resc_num[i] * enabled_func_idx;
+
+ /* Sanity for ILT */
+ if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
+ DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
+ RESC_START(p_hwfn, QED_ILT),
+ RESC_END(p_hwfn, QED_ILT) - 1);
+ return -EINVAL;
+ }
qed_hw_set_feat(p_hwfn);
@@ -1275,6 +1412,8 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
p_hwfn->hw_info.resc_start[QED_VLAN],
p_hwfn->hw_info.resc_num[QED_ILT],
p_hwfn->hw_info.resc_start[QED_ILT]);
+
+ return 0;
}
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
@@ -1304,31 +1443,31 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
default:
@@ -1373,7 +1512,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
link->speed.forced_speed = 50000;
break;
- case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
link->speed.forced_speed = 100000;
break;
default:
@@ -1429,14 +1568,20 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
__set_bit(QED_DEV_CAP_ETH,
&p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
+ __set_bit(QED_DEV_CAP_ISCSI,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
+ __set_bit(QED_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities);
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 reg_function_hide, tmp, eng_mask;
- u8 num_funcs;
+ u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
+ u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
num_funcs = MAX_NUM_PFS_BB;
@@ -1466,9 +1611,19 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
num_funcs++;
tmp >>= 0x1;
}
+
+ /* Get the PF index within the enabled functions */
+ low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
+ tmp = reg_function_hide & eng_mask & low_pfs_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ enabled_func_idx--;
+ tmp >>= 0x1;
+ }
}
p_hwfn->num_funcs_on_engine = num_funcs;
+ p_hwfn->enabled_func_idx = enabled_func_idx;
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
@@ -1538,9 +1693,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
qed_get_num_funcs(p_hwfn, p_ptt);
- qed_hw_get_resc(p_hwfn);
-
- return rc;
+ return qed_hw_get_resc(p_hwfn);
}
static int qed_get_dev_info(struct qed_dev *cdev)
@@ -1737,92 +1890,285 @@ void qed_hw_remove(struct qed_dev *cdev)
qed_iov_free_hw_info(cdev);
}
-int qed_chain_alloc(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- u16 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain)
+static void qed_chain_free_next_ptr(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
+{
+ void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
+ dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
+ struct qed_chain_next *p_next;
+ u32 size, i;
+
+ if (!p_virt)
+ return;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ if (!p_virt)
+ break;
+
+ p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
+ p_virt_next = p_next->next_virt;
+ p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
+
+ p_virt = p_virt_next;
+ p_phys = p_phys_next;
+ }
+}
+
+static void qed_chain_free_single(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
+{
+ if (!p_chain->p_virt_addr)
+ return;
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_chain->p_virt_addr, p_chain->p_phys_addr);
+}
+
+static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+ u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+
+ if (!pp_virt_addr_tbl)
+ return;
+
+ if (!p_chain->pbl.p_virt_table)
+ goto out;
+
+ for (i = 0; i < page_cnt; i++) {
+ if (!pp_virt_addr_tbl[i])
+ break;
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ pp_virt_addr_tbl[i],
+ *(dma_addr_t *)p_pbl_virt);
+
+ p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ dma_free_coherent(&cdev->pdev->dev,
+ pbl_size,
+ p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+out:
+ vfree(p_chain->pbl.pp_virt_addr_tbl);
+}
+
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ switch (p_chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ qed_chain_free_next_ptr(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ qed_chain_free_single(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ qed_chain_free_pbl(cdev, p_chain);
+ break;
+ }
+}
+
+static int
+qed_chain_alloc_sanity_check(struct qed_dev *cdev,
+ enum qed_chain_cnt_type cnt_type,
+ size_t elem_size, u32 page_cnt)
+{
+ u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+ /* The actual chain size can be larger than the maximal possible value
+ * after rounding up the requested elements number to pages, and after
+	 * taking into account the unusable elements (next-ptr elements).
+ * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+ * size/capacity fields are of a u32 type.
+ */
+ if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
+ chain_size > 0x10000) ||
+ (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
+ chain_size > 0x100000000ULL)) {
+ DP_NOTICE(cdev,
+ "The actual chain size (0x%llx) is larger than the maximal possible value\n",
+ chain_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
{
- dma_addr_t p_pbl_phys = 0;
- void *p_pbl_virt = NULL;
+ void *p_virt = NULL, *p_virt_prev = NULL;
dma_addr_t p_phys = 0;
- void *p_virt = NULL;
- u16 page_cnt = 0;
- size_t size;
+ u32 i;
- if (mode == QED_CHAIN_MODE_SINGLE)
- page_cnt = 1;
- else
- page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ return -ENOMEM;
+ }
+
+ if (i == 0) {
+ qed_chain_init_mem(p_chain, p_virt, p_phys);
+ qed_chain_reset(p_chain);
+ } else {
+ qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_virt, p_phys);
+ }
+
+ p_virt_prev = p_virt;
+ }
+ /* Last page's next element should point to the beginning of the
+ * chain.
+ */
+ qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+
+ return 0;
+}
+
+static int
+qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ dma_addr_t p_phys = 0;
+ void *p_virt = NULL;
- size = page_cnt * QED_CHAIN_PAGE_SIZE;
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_phys, GFP_KERNEL);
+ QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain mem\n");
- goto nomem;
+ DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ return -ENOMEM;
}
- if (mode == QED_CHAIN_MODE_PBL) {
- size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_pbl_phys,
- GFP_KERNEL);
- if (!p_pbl_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
- goto nomem;
- }
+ qed_chain_init_mem(p_chain, p_virt, p_phys);
+ qed_chain_reset(p_chain);
- qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
- (u8)elem_size, intended_use,
- p_pbl_phys, p_pbl_virt);
- } else {
- qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
- (u8)elem_size, intended_use, mode);
+ return 0;
+}
+
+static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ u32 page_cnt = p_chain->page_cnt, size, i;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+ void **pp_virt_addr_tbl = NULL;
+ u8 *p_pbl_virt = NULL;
+ void *p_virt = NULL;
+
+ size = page_cnt * sizeof(*pp_virt_addr_tbl);
+ pp_virt_addr_tbl = vmalloc(size);
+ if (!pp_virt_addr_tbl) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for the chain virtual addresses table\n");
+ return -ENOMEM;
}
+ memset(pp_virt_addr_tbl, 0, size);
- return 0;
+ /* The allocation of the PBL table is done with its full size, since it
+	 * is expected to be contiguous.
+ * qed_chain_init_pbl_mem() is called even in a case of an allocation
+ * failure, since pp_virt_addr_tbl was previously allocated, and it
+ * should be saved to allow its freeing during the error flow.
+ */
+ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys, GFP_KERNEL);
+ qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+ pp_virt_addr_tbl);
+ if (!p_pbl_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
+ return -ENOMEM;
+ }
-nomem:
- dma_free_coherent(&cdev->pdev->dev,
- page_cnt * QED_CHAIN_PAGE_SIZE,
- p_virt, p_phys);
- dma_free_coherent(&cdev->pdev->dev,
- page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
- p_pbl_virt, p_pbl_phys);
+ for (i = 0; i < page_cnt; i++) {
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ return -ENOMEM;
+ }
- return -ENOMEM;
+ if (i == 0) {
+ qed_chain_init_mem(p_chain, p_virt, p_phys);
+ qed_chain_reset(p_chain);
+ }
+
+ /* Fill the PBL table with the physical address of the page */
+ *(dma_addr_t *)p_pbl_virt = p_phys;
+ /* Keep the virtual address of the page */
+ p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+
+ p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ return 0;
}
-void qed_chain_free(struct qed_dev *cdev,
- struct qed_chain *p_chain)
+int qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ enum qed_chain_cnt_type cnt_type,
+ u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
{
- size_t size;
+ u32 page_cnt;
+ int rc = 0;
- if (!p_chain->p_virt_addr)
- return;
+ if (mode == QED_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
- if (p_chain->mode == QED_CHAIN_MODE_PBL) {
- size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- dma_free_coherent(&cdev->pdev->dev, size,
- p_chain->pbl.p_virt_table,
- p_chain->pbl.p_phys_table);
+ rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Cannot allocate a chain with the given arguments:\n"
+ "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
+ intended_use, mode, cnt_type, num_elems, elem_size);
+ return rc;
}
- size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
- dma_free_coherent(&cdev->pdev->dev, size,
- p_chain->p_virt_addr,
- p_chain->p_phys_addr);
+ qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
+ mode, cnt_type);
+
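+	/* Delegate the per-page allocation to the mode-specific helper */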
+ switch (mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ rc = qed_chain_alloc_next_ptr(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ rc = qed_chain_alloc_single(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ rc = qed_chain_alloc_pbl(cdev, p_chain);
+ break;
+ }
+ if (rc)
+ goto nomem;
+
+ return 0;
+
+nomem:
+ qed_chain_free(cdev, p_chain);
+ return rc;
}
-int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
- u16 src_id, u16 *dst_id)
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
u16 min, max;
- min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
DP_NOTICE(p_hwfn,
"l2_queue id [%d] is not valid, available indices [%d - %d]\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index dde364d..f810ce4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -245,9 +245,8 @@ int
qed_chain_alloc(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
- u16 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain);
+ enum qed_chain_cnt_type cnt_type,
+ u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
/**
* @brief qed_chain_free - Free chain DMA memory
@@ -255,8 +254,7 @@ qed_chain_alloc(struct qed_dev *cdev,
* @param p_hwfn
* @param p_chain
*/
-void qed_chain_free(struct qed_dev *cdev,
- struct qed_chain *p_chain);
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
/**
* @@brief qed_fw_l2_queue - Get absolute L2 queue ID
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 9afc15f..5927840 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -17,13 +17,15 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/tcp_common.h>
#include <linux/qed/eth_common.h>
+#include <linux/qed/iscsi_common.h>
+#include <linux/qed/rdma_common.h>
+#include <linux/qed/roce_common.h>
struct qed_hwfn;
struct qed_ptt;
-/********************************/
-/* Add include to common target */
-/********************************/
/* opcodes for the event ring */
enum common_event_opcode {
@@ -32,9 +34,10 @@ enum common_event_opcode {
COMMON_EVENT_VF_START,
COMMON_EVENT_VF_STOP,
COMMON_EVENT_VF_PF_CHANNEL,
- COMMON_EVENT_RESERVED4,
- COMMON_EVENT_RESERVED5,
- COMMON_EVENT_RESERVED6,
+ COMMON_EVENT_VF_FLR,
+ COMMON_EVENT_PF_UPDATE,
+ COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
};
@@ -42,11 +45,12 @@ enum common_event_opcode {
/* Common Ramrod Command IDs */
enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED,
- COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
- COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+ COMMON_RAMROD_PF_START,
+ COMMON_RAMROD_PF_STOP,
COMMON_RAMROD_VF_START,
COMMON_RAMROD_VF_STOP,
COMMON_RAMROD_PF_UPDATE,
+ COMMON_RAMROD_RL_UPDATE,
COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
@@ -63,448 +67,448 @@ struct pstorm_core_conn_st_ctx {
/* Core Slowpath Connection storm context of Xstorm */
struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
- __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
- struct regpair consolid_base_addr;
- __le16 spq_cons /* SPQ Ring Consumer */;
- __le16 consolid_cons /* Consolidation Ring Consumer */;
- __le32 reserved0[55] /* Pad to 15 cycles */;
+ __le32 spq_base_lo;
+ __le32 spq_base_hi;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons;
+ __le16 consolid_cons;
+ __le32 reserved0[55];
};
struct xstorm_core_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 core_state /* state */;
- u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 reserved0;
+ u8 core_state;
+ u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
- u8 byte2 /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 consolid_prod /* physical_q1 */;
- __le16 reserved16 /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_or_spq_prod /* word4 */;
- __le16 word5 /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
- u8 byte3 /* byte3 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- u8 byte6 /* byte6 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* cf_array0 */;
- __le32 reg6 /* cf_array1 */;
- __le16 word7 /* word7 */;
- __le16 word8 /* word8 */;
- __le16 word9 /* word9 */;
- __le16 word10 /* word10 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- __le32 reg9 /* reg9 */;
- u8 byte7 /* byte7 */;
- u8 byte8 /* byte8 */;
- u8 byte9 /* byte9 */;
- u8 byte10 /* byte10 */;
- u8 byte11 /* byte11 */;
- u8 byte12 /* byte12 */;
- u8 byte13 /* byte13 */;
- u8 byte14 /* byte14 */;
- u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
- __le16 word11 /* word11 */;
- __le32 reg10 /* reg10 */;
- __le32 reg11 /* reg11 */;
- __le32 reg12 /* reg12 */;
- __le32 reg13 /* reg13 */;
- __le32 reg14 /* reg14 */;
- __le32 reg15 /* reg15 */;
- __le32 reg16 /* reg16 */;
- __le32 reg17 /* reg17 */;
- __le32 reg18 /* reg18 */;
- __le32 reg19 /* reg19 */;
- __le16 word12 /* word12 */;
- __le16 word13 /* word13 */;
- __le16 word14 /* word14 */;
- __le16 word15 /* word15 */;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 consolid_prod;
+ __le16 reserved16;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_or_spq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 byte16;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
};
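[Editor's note, not part of the patch: every *_MASK/*_SHIFT pair above describes a sub-field of the preceding flagsN byte, packed and extracted by shift-and-mask. A minimal, self-contained sketch follows, assuming only the definitions above are in scope; the driver's own field-access helpers may differ.]

#include <stdint.h>

#define EX_FIELD_SET(reg, name, val) \
	((reg) = ((reg) & ~((name##_MASK) << (name##_SHIFT))) | \
		 (((val) & (name##_MASK)) << (name##_SHIFT)))
#define EX_FIELD_GET(reg, name) \
	(((reg) >> (name##_SHIFT)) & (name##_MASK))

static inline void example_enable_cf0(uint8_t *flags7)
{
	/* CF0EN is a 1-bit field at bit 6 of flags7 (see the defines above). */
	EX_FIELD_SET(*flags7, XSTORM_CORE_CONN_AG_CTX_CF0EN, 1);
}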
struct tstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 word1 /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
};
struct ustorm_core_conn_ag_ctx {
- u8 reserved /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 word1 /* word1 */;
- __le32 rx_producers /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 rx_producers;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
};
/* The core storm context for the Mstorm */
@@ -519,122 +523,186 @@ struct ustorm_core_conn_st_ctx {
/* core connection context */
struct core_conn_context {
- struct ystorm_core_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2] /* padding */;
- struct pstorm_core_conn_st_ctx pstorm_st_context;
- struct regpair pstorm_st_padding[2];
- struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct ustorm_core_conn_ag_ctx ustorm_ag_context;
- struct mstorm_core_conn_st_ctx mstorm_st_context;
- struct ustorm_core_conn_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2] /* padding */;
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
+struct eth_mstorm_per_pf_stat {
+ struct regpair gre_discard_pkts;
+ struct regpair vxlan_discard_pkts;
+ struct regpair geneve_discard_pkts;
+ struct regpair lb_discard_pkts;
};
struct eth_mstorm_per_queue_stat {
- struct regpair ttl0_discard;
- struct regpair packet_too_big_discard;
- struct regpair no_buff_discard;
- struct regpair not_active_discard;
- struct regpair tpa_coalesced_pkts;
- struct regpair tpa_coalesced_events;
- struct regpair tpa_aborts_num;
- struct regpair tpa_coalesced_bytes;
+ struct regpair ttl0_discard;
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+ struct regpair not_active_discard;
+ struct regpair tpa_coalesced_pkts;
+ struct regpair tpa_coalesced_events;
+ struct regpair tpa_aborts_num;
+ struct regpair tpa_coalesced_bytes;
+};
+
+/* Ethernet TX Per PF */
+struct eth_pstorm_per_pf_stat {
+ struct regpair sent_lb_ucast_bytes;
+ struct regpair sent_lb_mcast_bytes;
+ struct regpair sent_lb_bcast_bytes;
+ struct regpair sent_lb_ucast_pkts;
+ struct regpair sent_lb_mcast_pkts;
+ struct regpair sent_lb_bcast_pkts;
+ struct regpair sent_gre_bytes;
+ struct regpair sent_vxlan_bytes;
+ struct regpair sent_geneve_bytes;
+ struct regpair sent_gre_pkts;
+ struct regpair sent_vxlan_pkts;
+ struct regpair sent_geneve_pkts;
+ struct regpair gre_drop_pkts;
+ struct regpair vxlan_drop_pkts;
+ struct regpair geneve_drop_pkts;
+};
+
+/* Ethernet TX Per Queue Stats */
+struct eth_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
+};
+
+/* ETH Rx producers data */
+struct eth_rx_rate_limit {
+ __le16 mult;
+ __le16 cnst;
+ u8 add_sub_cnst;
+ u8 reserved0;
+ __le16 reserved1;
};
-struct eth_pstorm_per_queue_stat {
- struct regpair sent_ucast_bytes;
- struct regpair sent_mcast_bytes;
- struct regpair sent_bcast_bytes;
- struct regpair sent_ucast_pkts;
- struct regpair sent_mcast_pkts;
- struct regpair sent_bcast_pkts;
- struct regpair error_drop_pkts;
+struct eth_ustorm_per_pf_stat {
+ struct regpair rcv_lb_ucast_bytes;
+ struct regpair rcv_lb_mcast_bytes;
+ struct regpair rcv_lb_bcast_bytes;
+ struct regpair rcv_lb_ucast_pkts;
+ struct regpair rcv_lb_mcast_pkts;
+ struct regpair rcv_lb_bcast_pkts;
+ struct regpair rcv_gre_bytes;
+ struct regpair rcv_vxlan_bytes;
+ struct regpair rcv_geneve_bytes;
+ struct regpair rcv_gre_pkts;
+ struct regpair rcv_vxlan_pkts;
+ struct regpair rcv_geneve_pkts;
};
struct eth_ustorm_per_queue_stat {
- struct regpair rcv_ucast_bytes;
- struct regpair rcv_mcast_bytes;
- struct regpair rcv_bcast_bytes;
- struct regpair rcv_ucast_pkts;
- struct regpair rcv_mcast_pkts;
- struct regpair rcv_bcast_pkts;
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
};
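[Editor's note, not part of the patch: the per-queue statistics above are 64-bit regpair counters. A sketch of folding one into a plain 64-bit value, assuming the usual regpair layout of little-endian low and high 32-bit halves; kernel code would apply le32_to_cpu() to each half.]

#include <stdint.h>

struct regpair_example {	/* stand-in mirroring struct regpair: low word first */
	uint32_t lo;
	uint32_t hi;
};

static inline uint64_t example_regpair_to_u64(const struct regpair_example *rp)
{
	/* Host-endian sketch; in-kernel code would le32_to_cpu() each half. */
	return ((uint64_t)rp->hi << 32) | rp->lo;
}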
/* Event Ring Next Page Address */
struct event_ring_next_addr {
- struct regpair addr /* Next Page Address */;
- __le32 reserved[2] /* Reserved */;
+ struct regpair addr;
+ __le32 reserved[2];
};
+/* Event Ring Element */
union event_ring_element {
- struct event_ring_entry entry /* Event Ring Entry */;
- struct event_ring_next_addr next_addr;
+ struct event_ring_entry entry;
+ struct event_ring_next_addr next_addr;
+};
+
+/* Major and Minor hsi Versions */
+struct hsi_fp_ver_struct {
+ u8 minor_ver_arr[2];
+ u8 major_ver_arr[2];
};
+/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
};
+/* Mstorm VF zone */
struct mstorm_vf_zone {
struct mstorm_non_trigger_vf_zone non_trigger;
+
};
+/* personality per PF */
enum personality_type {
BAD_PERSONALITY_TYP,
- PERSONALITY_RESERVED,
+ PERSONALITY_ISCSI,
PERSONALITY_RESERVED2,
- PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */,
+ PERSONALITY_RDMA_AND_ETH,
PERSONALITY_RESERVED3,
PERSONALITY_CORE,
- PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_ETH,
PERSONALITY_RESERVED4,
MAX_PERSONALITY_TYPE
};
+/* tunnel configuration */
struct pf_start_tunnel_config {
- u8 set_vxlan_udp_port_flg;
- u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
- u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
- u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
- u8 tunnel_clss_l2geneve;
- u8 tunnel_clss_ipgeneve;
- u8 tunnel_clss_l2gre;
- u8 tunnel_clss_ipgre;
- __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
- __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre;
+ u8 tx_enable_ipgre;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
};
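[Editor's note, not part of the patch: a hedged sketch of how the VXLAN-related fields of pf_start_tunnel_config might be filled before the PF-start ramrod. The IANA VXLAN UDP port (4789) and the use of cpu_to_le16() are assumptions of the example, not values taken from this patch.]

static void example_cfg_vxlan(struct pf_start_tunnel_config *cfg)
{
	cfg->set_vxlan_udp_port_flg = 1;
	cfg->vxlan_udp_port = cpu_to_le16(4789);	/* IANA VXLAN port (assumed) */
	cfg->tx_enable_vxlan = 1;			/* enable VXLAN on the TX path */
	cfg->tunnel_clss_vxlan = TUNNEL_CLSS_MAC_VLAN;	/* classify by outer MAC/VLAN */
}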
/* Ramrod data for PF start ramrod */
struct pf_start_ramrod_data {
- struct regpair event_ring_pbl_addr;
- struct regpair consolid_q_pbl_addr;
- struct pf_start_tunnel_config tunnel_config;
- __le16 event_ring_sb_id;
- u8 base_vf_id;
- u8 num_vfs;
- u8 event_ring_num_pages;
- u8 event_ring_sb_index;
- u8 path_id;
- u8 warning_as_error;
- u8 dont_log_ramrods;
- u8 personality;
- __le16 log_type_mask;
- u8 mf_mode /* Multi function mode */;
- u8 integ_phase /* Integration phase */;
- u8 allow_npar_tx_switching;
- u8 inner_to_outer_pri_map[8];
- u8 pri_map_valid;
- u32 outer_tag;
- u8 reserved0[4];
-};
-
-/* Data for port update ramrod */
+ struct regpair event_ring_pbl_addr;
+ struct regpair consolid_q_pbl_addr;
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id;
+ u8 base_vf_id;
+ u8 num_vfs;
+ u8 event_ring_num_pages;
+ u8 event_ring_sb_index;
+ u8 path_id;
+ u8 warning_as_error;
+ u8 dont_log_ramrods;
+ u8 personality;
+ __le16 log_type_mask;
+ u8 mf_mode;
+ u8 integ_phase;
+ u8 allow_npar_tx_switching;
+ u8 inner_to_outer_pri_map[8];
+ u8 pri_map_valid;
+ __le32 outer_tag;
+ struct hsi_fp_ver_struct hsi_fp_ver;
+
+};
+
struct protocol_dcb_data {
u8 dcb_enable_flag;
u8 dcb_priority;
@@ -642,25 +710,24 @@ struct protocol_dcb_data {
u8 reserved;
};
-/* tunnel configuration */
struct pf_update_tunnel_config {
- u8 update_rx_pf_clss;
- u8 update_tx_pf_clss;
- u8 set_vxlan_udp_port_flg;
- u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre;
- u8 tx_enable_ipgre;
- u8 tunnel_clss_vxlan;
- u8 tunnel_clss_l2geneve;
- u8 tunnel_clss_ipgeneve;
- u8 tunnel_clss_l2gre;
- u8 tunnel_clss_ipgre;
- __le16 vxlan_udp_port;
- __le16 geneve_udp_port;
- __le16 reserved[3];
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre;
+ u8 tx_enable_ipgre;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
+ __le16 reserved[3];
};
struct pf_update_ramrod_data {
@@ -669,38 +736,43 @@ struct pf_update_ramrod_data {
u8 update_fcoe_dcb_data_flag;
u8 update_iscsi_dcb_data_flag;
u8 update_roce_dcb_data_flag;
+ u8 update_iwarp_dcb_data_flag;
u8 update_mf_vlan_flag;
- __le16 mf_vlan;
+ u8 reserved;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
struct protocol_dcb_data roce_dcb_data;
- struct pf_update_tunnel_config tunnel_config;
-};
-
-/* Tunnel classification scheme */
-enum tunnel_clss {
- TUNNEL_CLSS_MAC_VLAN = 0,
- TUNNEL_CLSS_MAC_VNI,
- TUNNEL_CLSS_INNER_MAC_VLAN,
- TUNNEL_CLSS_INNER_MAC_VNI,
- MAX_TUNNEL_CLSS
+ struct protocol_dcb_data iwarp_dcb_data;
+ __le16 mf_vlan;
+ __le16 reserved2;
+ struct pf_update_tunnel_config tunnel_config;
};
+/* Ports mode */
enum ports_mode {
- ENGX2_PORTX1 /* 2 engines x 1 port */,
- ENGX2_PORTX2 /* 2 engines x 2 ports */,
- ENGX1_PORTX1 /* 1 engine x 1 port */,
- ENGX1_PORTX2 /* 1 engine x 2 ports */,
- ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ ENGX2_PORTX1,
+ ENGX2_PORTX2,
+ ENGX1_PORTX1,
+ ENGX1_PORTX2,
+ ENGX1_PORTX4,
MAX_PORTS_MODE
};
+/* use to index in hsi_fp_[major|minor]_ver_arr per protocol */
+enum protocol_version_array_key {
+ ETH_VER_KEY = 0,
+ ROCE_VER_KEY,
+ MAX_PROTOCOL_VERSION_ARRAY_KEY
+};
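[Editor's note, not part of the patch: the key enum above indexes the two-entry version arrays in hsi_fp_ver_struct. A sketch with placeholder numbers; the real values come from firmware HSI version constants not shown in this hunk.]

static void example_set_eth_hsi_ver(struct hsi_fp_ver_struct *ver)
{
	ver->major_ver_arr[ETH_VER_KEY] = 3;	/* placeholder major version */
	ver->minor_ver_arr[ETH_VER_KEY] = 0;	/* placeholder minor version */
}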
+
+/* Pstorm non-triggering VF zone */
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct regpair reserved[2];
};
+/* Pstorm VF zone */
struct pstorm_vf_zone {
struct pstorm_non_trigger_vf_zone non_trigger;
struct regpair reserved[7];
@@ -708,56 +780,89 @@ struct pstorm_vf_zone {
/* Ramrod Header of SPQE */
struct ramrod_header {
- __le32 cid /* Slowpath Connection CID */;
- u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
- u8 protocol_id /* Ramrod Protocol ID */;
- __le16 echo /* Ramrod echo */;
+ __le32 cid;
+ u8 cmd_id;
+ u8 protocol_id;
+ __le16 echo;
};
/* Slowpath Element (SPQE) */
struct slow_path_element {
- struct ramrod_header hdr /* Ramrod Header */;
- struct regpair data_ptr;
+ struct ramrod_header hdr;
+ struct regpair data_ptr;
+};
+
+/* Tstorm non-triggering VF zone */
+struct tstorm_non_trigger_vf_zone {
+ struct regpair reserved[2];
};
struct tstorm_per_port_stat {
- struct regpair trunc_error_discard;
- struct regpair mac_error_discard;
- struct regpair mftag_filter_discard;
- struct regpair eth_mac_filter_discard;
- struct regpair ll2_mac_filter_discard;
- struct regpair ll2_conn_disabled_discard;
- struct regpair iscsi_irregular_pkt;
- struct regpair fcoe_irregular_pkt;
- struct regpair roce_irregular_pkt;
- struct regpair eth_irregular_pkt;
- struct regpair toe_irregular_pkt;
- struct regpair preroce_irregular_pkt;
+ struct regpair trunc_error_discard;
+ struct regpair mac_error_discard;
+ struct regpair mftag_filter_discard;
+ struct regpair eth_mac_filter_discard;
+ struct regpair reserved[5];
+ struct regpair eth_irregular_pkt;
+ struct regpair reserved1[2];
+ struct regpair eth_gre_tunn_filter_discard;
+ struct regpair eth_vxlan_tunn_filter_discard;
+ struct regpair eth_geneve_tunn_filter_discard;
+};
+
+/* Tstorm VF zone */
+struct tstorm_vf_zone {
+ struct tstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+ TUNNEL_CLSS_MAC_VLAN = 0,
+ TUNNEL_CLSS_MAC_VNI,
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+ TUNNEL_CLSS_INNER_MAC_VNI,
+ TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
+ MAX_TUNNEL_CLSS
};
+/* Ustorm non-triggering VF zone */
struct ustorm_non_trigger_vf_zone {
struct eth_ustorm_per_queue_stat eth_queue_stat;
struct regpair vf_pf_msg_addr;
};
+/* Ustorm triggering VF zone */
struct ustorm_trigger_vf_zone {
u8 vf_pf_msg_valid;
u8 reserved[7];
};
+/* Ustorm VF zone */
struct ustorm_vf_zone {
struct ustorm_non_trigger_vf_zone non_trigger;
struct ustorm_trigger_vf_zone trigger;
};
+/* VF-PF channel data */
+struct vf_pf_channel_data {
+ __le32 ready;
+ u8 valid;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+/* Ramrod data for VF start ramrod */
struct vf_start_ramrod_data {
u8 vf_id;
u8 enable_flr_ack;
__le16 opaque_fid;
u8 personality;
- u8 reserved[3];
+ u8 reserved[7];
+ struct hsi_fp_ver_struct hsi_fp_ver;
+
};
+/* Ramrod data for VF stop ramrod */

struct vf_stop_ramrod_data {
u8 vf_id;
u8 reserved0;
@@ -765,94 +870,474 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
+/* Attentions status block */
struct atten_status_block {
- __le32 atten_bits;
- __le32 atten_ack;
- __le16 reserved0;
- __le16 sb_index /* status block running index */;
- __le32 reserved1;
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index;
+ __le32 reserved1;
+};
+
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+/* DMAE command */
+struct dmae_cmd {
+ __le32 opcode;
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+ __le32 src_addr_lo;
+ __le32 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 dst_addr_hi;
+ __le16 length_dw;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+ __le32 comp_addr_lo;
+ __le32 comp_addr_hi;
+ __le32 comp_val;
+ __le32 crc32;
+ __le32 crc_32_c;
+ __le16 crc16;
+ __le16 crc16_c;
+ __le16 crc10;
+ __le16 reserved;
+ __le16 xsum16;
+ __le16 xsum8;
+};
+
+enum dmae_cmd_comp_crc_en_enum {
+ dmae_cmd_comp_crc_disabled,
+ dmae_cmd_comp_crc_enabled,
+ MAX_DMAE_CMD_COMP_CRC_EN_ENUM
+};
+
+enum dmae_cmd_comp_func_enum {
+ dmae_cmd_comp_func_to_src,
+ dmae_cmd_comp_func_to_dst,
+ MAX_DMAE_CMD_COMP_FUNC_ENUM
+};
+
+enum dmae_cmd_comp_word_en_enum {
+ dmae_cmd_comp_word_disabled,
+ dmae_cmd_comp_word_enabled,
+ MAX_DMAE_CMD_COMP_WORD_EN_ENUM
+};
+
+enum dmae_cmd_c_dst_enum {
+ dmae_cmd_c_dst_pcie,
+ dmae_cmd_c_dst_grc,
+ MAX_DMAE_CMD_C_DST_ENUM
+};
+
+enum dmae_cmd_dst_enum {
+ dmae_cmd_dst_none_0,
+ dmae_cmd_dst_pcie,
+ dmae_cmd_dst_grc,
+ dmae_cmd_dst_none_3,
+ MAX_DMAE_CMD_DST_ENUM
+};
+
+enum dmae_cmd_error_handling_enum {
+ dmae_cmd_error_handling_send_regular_comp,
+ dmae_cmd_error_handling_send_comp_with_err,
+ dmae_cmd_error_handling_dont_send_comp,
+ MAX_DMAE_CMD_ERROR_HANDLING_ENUM
+};
+
+enum dmae_cmd_src_enum {
+ dmae_cmd_src_pcie,
+ dmae_cmd_src_grc,
+ MAX_DMAE_CMD_SRC_ENUM
+};
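[Editor's note, not part of the patch: a sketch of composing the 32-bit dmae_cmd opcode from the field definitions and enums above, for a PCIe-to-GRC copy that raises its completion toward the destination. The packing is open-coded so the snippet does not presume any particular driver helper.]

static u32 example_dmae_opcode(void)
{
	u32 opcode = 0;

	opcode |= (dmae_cmd_src_pcie & DMAE_CMD_SRC_MASK) << DMAE_CMD_SRC_SHIFT;
	opcode |= (dmae_cmd_dst_grc & DMAE_CMD_DST_MASK) << DMAE_CMD_DST_SHIFT;
	opcode |= (dmae_cmd_comp_func_to_dst & DMAE_CMD_COMP_FUNC_MASK) <<
		  DMAE_CMD_COMP_FUNC_SHIFT;
	return opcode;
}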
+
+/* IGU cleanup command */
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+/* IGU firmware driver command */
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+/* IGU firmware driver command */
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
};
+/* IGU mapping line structure */
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
+
+/* IGU MSIX line structure */
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR,
+ TPH_ST_HINT_REQUESTER,
+ TPH_ST_HINT_TARGET,
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+ __le32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
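[Editor's note, not part of the patch: a sketch of packing one QM PQ-map entry with the fields above. The PQ and VOQ numbers are arbitrary, and cpu_to_le32() is the usual kernel byte-order helper rather than anything introduced by this patch.]

static void example_fill_pq_map(struct qm_rf_pq_map *map, u16 vport_pq_id, u8 voq)
{
	u32 reg = 0;

	reg |= (1 & QM_RF_PQ_MAP_PQ_VALID_MASK) << QM_RF_PQ_MAP_PQ_VALID_SHIFT;
	reg |= ((u32)vport_pq_id & QM_RF_PQ_MAP_VP_PQ_ID_MASK) <<
	       QM_RF_PQ_MAP_VP_PQ_ID_SHIFT;
	reg |= ((u32)voq & QM_RF_PQ_MAP_VOQ_MASK) << QM_RF_PQ_MAP_VOQ_SHIFT;
	map->reg = cpu_to_le32(reg);
}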
+
+/* Completion params for aggregated interrupt completion */
+struct sdm_agg_int_comp_params {
+ __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
enum block_addr {
- GRCBASE_GRC = 0x50000,
- GRCBASE_MISCS = 0x9000,
- GRCBASE_MISC = 0x8000,
- GRCBASE_DBU = 0xa000,
- GRCBASE_PGLUE_B = 0x2a8000,
- GRCBASE_CNIG = 0x218000,
- GRCBASE_CPMU = 0x30000,
- GRCBASE_NCSI = 0x40000,
- GRCBASE_OPTE = 0x53000,
- GRCBASE_BMB = 0x540000,
- GRCBASE_PCIE = 0x54000,
- GRCBASE_MCP = 0xe00000,
- GRCBASE_MCP2 = 0x52000,
- GRCBASE_PSWHST = 0x2a0000,
- GRCBASE_PSWHST2 = 0x29e000,
- GRCBASE_PSWRD = 0x29c000,
- GRCBASE_PSWRD2 = 0x29d000,
- GRCBASE_PSWWR = 0x29a000,
- GRCBASE_PSWWR2 = 0x29b000,
- GRCBASE_PSWRQ = 0x280000,
- GRCBASE_PSWRQ2 = 0x240000,
- GRCBASE_PGLCS = 0x0,
- GRCBASE_PTU = 0x560000,
- GRCBASE_DMAE = 0xc000,
- GRCBASE_TCM = 0x1180000,
- GRCBASE_MCM = 0x1200000,
- GRCBASE_UCM = 0x1280000,
- GRCBASE_XCM = 0x1000000,
- GRCBASE_YCM = 0x1080000,
- GRCBASE_PCM = 0x1100000,
- GRCBASE_QM = 0x2f0000,
- GRCBASE_TM = 0x2c0000,
- GRCBASE_DORQ = 0x100000,
- GRCBASE_BRB = 0x340000,
- GRCBASE_SRC = 0x238000,
- GRCBASE_PRS = 0x1f0000,
- GRCBASE_TSDM = 0xfb0000,
- GRCBASE_MSDM = 0xfc0000,
- GRCBASE_USDM = 0xfd0000,
- GRCBASE_XSDM = 0xf80000,
- GRCBASE_YSDM = 0xf90000,
- GRCBASE_PSDM = 0xfa0000,
- GRCBASE_TSEM = 0x1700000,
- GRCBASE_MSEM = 0x1800000,
- GRCBASE_USEM = 0x1900000,
- GRCBASE_XSEM = 0x1400000,
- GRCBASE_YSEM = 0x1500000,
- GRCBASE_PSEM = 0x1600000,
- GRCBASE_RSS = 0x238800,
- GRCBASE_TMLD = 0x4d0000,
- GRCBASE_MULD = 0x4e0000,
- GRCBASE_YULD = 0x4c8000,
- GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PRM = 0x230000,
- GRCBASE_PBF_PB1 = 0xda0000,
- GRCBASE_PBF_PB2 = 0xda4000,
- GRCBASE_RPB = 0x23c000,
- GRCBASE_BTB = 0xdb0000,
- GRCBASE_PBF = 0xd80000,
- GRCBASE_RDIF = 0x300000,
- GRCBASE_TDIF = 0x310000,
- GRCBASE_CDU = 0x580000,
- GRCBASE_CCFC = 0x2e0000,
- GRCBASE_TCFC = 0x2d0000,
- GRCBASE_IGU = 0x180000,
- GRCBASE_CAU = 0x1c0000,
- GRCBASE_UMAC = 0x51000,
- GRCBASE_XMAC = 0x210000,
- GRCBASE_DBG = 0x10000,
- GRCBASE_NIG = 0x500000,
- GRCBASE_WOL = 0x600000,
- GRCBASE_BMBN = 0x610000,
- GRCBASE_IPC = 0x20000,
- GRCBASE_NWM = 0x800000,
- GRCBASE_NWS = 0x700000,
- GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x620000,
- GRCBASE_MISC_AEU = 0x8000,
- GRCBASE_BAR0_MAP = 0x1c00000,
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x620000,
+ GRCBASE_LED = 0x6b8000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
};
@@ -879,8 +1364,8 @@ enum block_id {
BLOCK_PSWRQ,
BLOCK_PSWRQ2,
BLOCK_PGLCS,
- BLOCK_PTU,
BLOCK_DMAE,
+ BLOCK_PTU,
BLOCK_TCM,
BLOCK_MCM,
BLOCK_UCM,
@@ -934,141 +1419,216 @@ enum block_id {
BLOCK_NWS,
BLOCK_MS,
BLOCK_PHY_PCIE,
+ BLOCK_LED,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
};
-enum command_type_bit {
- IGU_COMMAND_TYPE_NOP = 0,
- IGU_COMMAND_TYPE_SET = 1,
- MAX_COMMAND_TYPE_BIT
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE,
+ BIN_BUF_DBG_DUMP_REG,
+ BIN_BUF_DBG_DUMP_MEM,
+ BIN_BUF_DBG_IDLE_CHK_REGS,
+ BIN_BUF_DBG_IDLE_CHK_IMMS,
+ BIN_BUF_DBG_IDLE_CHK_RULES,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+ BIN_BUF_DBG_ATTN_BLOCKS,
+ BIN_BUF_DBG_ATTN_REGS,
+ BIN_BUF_DBG_ATTN_INDEXES,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_PARSING_STRINGS,
+ MAX_BIN_DBG_BUFFER_TYPE
};
-struct dmae_cmd {
- __le32 opcode;
-#define DMAE_CMD_SRC_MASK 0x1
-#define DMAE_CMD_SRC_SHIFT 0
-#define DMAE_CMD_DST_MASK 0x3
-#define DMAE_CMD_DST_SHIFT 1
-#define DMAE_CMD_C_DST_MASK 0x1
-#define DMAE_CMD_C_DST_SHIFT 3
-#define DMAE_CMD_CRC_RESET_MASK 0x1
-#define DMAE_CMD_CRC_RESET_SHIFT 4
-#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
-#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
-#define DMAE_CMD_COMP_FUNC_MASK 0x1
-#define DMAE_CMD_COMP_FUNC_SHIFT 7
-#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
-#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
-#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
-#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
-#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
-#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
-#define DMAE_CMD_RESERVED1_MASK 0x1
-#define DMAE_CMD_RESERVED1_SHIFT 13
-#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
-#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
-#define DMAE_CMD_ERR_HANDLING_MASK 0x3
-#define DMAE_CMD_ERR_HANDLING_SHIFT 16
-#define DMAE_CMD_PORT_ID_MASK 0x3
-#define DMAE_CMD_PORT_ID_SHIFT 18
-#define DMAE_CMD_SRC_PF_ID_MASK 0xF
-#define DMAE_CMD_SRC_PF_ID_SHIFT 20
-#define DMAE_CMD_DST_PF_ID_MASK 0xF
-#define DMAE_CMD_DST_PF_ID_SHIFT 24
-#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
-#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
-#define DMAE_CMD_RESERVED2_MASK 0x3
-#define DMAE_CMD_RESERVED2_SHIFT 30
- __le32 src_addr_lo;
- __le32 src_addr_hi;
- __le32 dst_addr_lo;
- __le32 dst_addr_hi;
- __le16 length /* Length in DW */;
- __le16 opcode_b;
-#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
-#define DMAE_CMD_SRC_VF_ID_SHIFT 0
-#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
-#define DMAE_CMD_DST_VF_ID_SHIFT 8
- __le32 comp_addr_lo /* PCIe completion address low or grc address */;
- __le32 comp_addr_hi;
- __le32 comp_val /* Value to write to copmletion address */;
- __le32 crc32 /* crc16 result */;
- __le32 crc_32_c /* crc32_c result */;
- __le16 crc16 /* crc16 result */;
- __le16 crc16_c /* crc16_c result */;
- __le16 crc10 /* crc_t10 result */;
- __le16 reserved;
- __le16 xsum16 /* checksum16 result */;
- __le16 xsum8 /* checksum8 result */;
+/* Chip IDs */
+enum chip_ids {
+ CHIP_RESERVED,
+ CHIP_BB_B0,
+ CHIP_RESERVED2,
+ MAX_CHIP_IDS
};
-struct igu_cleanup {
- __le32 sb_id_and_flags;
-#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
-#define IGU_CLEANUP_RESERVED0_SHIFT 0
-#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */
-#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
-#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
-#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
-#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
-#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
- __le32 reserved1;
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+ __le16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
};
-union igu_command {
- struct igu_prod_cons_update prod_cons_update;
- struct igu_cleanup cleanup;
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+ __le16 names_offset;
+ __le16 reserved1;
+ u8 num_regs;
+ u8 reserved2;
+ __le16 regs_offset;
};
-struct igu_command_reg_ctrl {
- __le16 opaque_fid;
- __le16 igu_command_reg_ctrl_fields;
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
-#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
-#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+/* Block attentions */
+struct dbg_attn_block {
+ struct dbg_attn_block_type_data per_type_data[2];
};
-struct igu_mapping_line {
- __le32 igu_mapping_line_fields;
-#define IGU_MAPPING_LINE_VALID_MASK 0x1
-#define IGU_MAPPING_LINE_VALID_SHIFT 0
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
-#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
-#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
-#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
-#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
-#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
-#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+/* Attention register result */
+struct dbg_attn_reg_result {
+ __le32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
+ __le16 attn_idx_offset;
+ __le16 reserved;
+ __le32 sts_val;
+ __le32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+ u8 block_id;
+ u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+ __le16 names_offset;
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+/* mode header */
+struct dbg_mode_hdr {
+ __le16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode;
+ __le16 attn_idx_offset;
+ __le32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+ __le32 sts_clr_address;
+ __le32 mask_address;
+};
+
+/* attention types */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+/* Debug status codes */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_TOO_MANY_INPUTS,
+ DBG_STATUS_INPUT_OVERLAP,
+ DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ MAX_DBG_STATUS
};
-struct igu_msix_vector {
- struct regpair address;
- __le32 data;
- __le32 msix_vector_fields;
-#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
-#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
-#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
-#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
-#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
-#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
-#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
-#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+ u8 active;
+ u8 active_phys_tcs;
+ __le16 num_pbf_cmd_lines;
+ __le16 num_btb_blocks;
+ __le16 reserved;
};
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+ u8 vport_id;
+ u8 tc_id;
+ u8 wrr_group;
+ u8 rl_valid;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+ __le32 vport_rl;
+ __le16 vport_wfq;
+ __le16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
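Worked out from the two definitions above: MAX_GRC_ADDR = (1 << 23) - 1 = 0x7FFFFF. Since GRC addresses are expressed in dwords, the 23-bit width spans 0x800000 dwords (32 MB) of GRC space; the same width reappears below as the 0x7FFFFF ADDRESS masks of the init read/write operations.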
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
enum init_modes {
- MODE_BB_A0,
+ MODE_RESERVED,
MODE_BB_B0,
MODE_RESERVED2,
MODE_ASIC,
@@ -1083,7 +1643,8 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_EAGLE_ENG1_WORKAROUND,
+ MODE_40G,
+ MODE_RESERVED7,
MAX_INIT_MODES
};
@@ -1096,484 +1657,302 @@ enum init_phases {
MAX_INIT_PHASES
};
-/* per encapsulation type enabling flags */
-struct prs_reg_encapsulation_type_en {
- u8 flags;
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
-};
-
-enum pxp_tph_st_hint {
- TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
- TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
- TPH_ST_HINT_TARGET,
- TPH_ST_HINT_TARGET_PRIO,
- MAX_PXP_TPH_ST_HINT
-};
-
-/* QM hardware structure of enable bypass credit mask */
-struct qm_rf_bypass_mask {
- u8 flags;
-#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
-#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
-#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
-#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
-#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
-};
-
-/* QM hardware structure of opportunistic credit mask */
-struct qm_rf_opportunistic_mask {
- __le16 flags;
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
-};
-
-/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
- u32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
-};
-
-/* Completion params for aggregated interrupt completion */
-struct sdm_agg_int_comp_params {
- __le16 params;
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
};
-/* SDM operation gen command (generate aggregative interrupt) */
-struct sdm_op_gen {
- __le32 command;
-#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */
-#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
-#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
-#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
-#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
-#define SDM_OP_GEN_RESERVED_SHIFT 20
-};
-
-/*********************************** Init ************************************/
-
-/* Width of GRC address in bits (addresses are specified in dwords) */
-#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
-
-/* indicates an init that should be applied to any phase ID */
-#define ANY_PHASE_ID 0xffff
-
-/* init pattern size in bytes */
-#define INIT_PATTERN_SIZE_BITS 4
-#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS)
-
-/* Max size in dwords of a zipped array */
-#define MAX_ZIPPED_SIZE 8192
-
-/* Global PXP window */
-#define NUM_OF_PXP_WIN 19
-#define PXP_WIN_DWORD_SIZE_BITS 10
-#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS)
-#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
-#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
-
-/********************************* GRC Dump **********************************/
-
-/* width of GRC dump register sequence length in bits */
-#define DUMP_SEQ_LEN_BITS 8
-#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
-
-/* width of GRC dump memory length in bits */
-#define DUMP_MEM_LEN_BITS 18
-#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
-
-/* width of register type ID in bits */
-#define REG_TYPE_ID_BITS 6
-#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
-
-/* width of block ID in bits */
-#define BLOCK_ID_BITS 8
-#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
-
-/******************************** Idle Check *********************************/
-
-/* max number of idle check predicate immediates */
-#define MAX_IDLE_CHK_PRED_IMM 3
-
-/* max number of idle check argument registers */
-#define MAX_IDLE_CHK_READ_REGS 3
-
-/* max number of idle check loops */
-#define MAX_IDLE_CHK_LOOPS 0x10000
-
-/* max idle check address increment */
-#define MAX_IDLE_CHK_INCREMENT 0x10000
-
-/* inicates an undefined idle check line index */
-#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
-
-/* max number of register values following the idle check header */
-#define IDLE_CHK_MAX_DUMP_REGS 2
-
-/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
-#define IDLE_CHK_QM_RD_WR_PTR 0
-#define IDLE_CHK_QM_RD_WR_BANK 1
-
-/**************************************/
-/* HSI Functions constants and macros */
-/**************************************/
-
-/* Number of VLAN priorities */
-#define NUM_OF_VLAN_PRIORITIES 8
-
-/* the MCP Trace meta data signautre is duplicated in the perl script that
- * generats the NVRAM images.
- */
-#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
-
/* Binary buffer header */
struct bin_buffer_hdr {
- u32 offset;
- u32 length /* buffer length in bytes */;
-};
-
-/* binary buffer types */
-enum bin_buffer_type {
- BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
- BIN_BUF_INIT_CMD /* init commands */,
- BIN_BUF_INIT_VAL /* init data */,
- BIN_BUF_INIT_MODE_TREE /* init modes tree */,
- BIN_BUF_IRO /* internal RAM offsets array */,
- MAX_BIN_BUFFER_TYPE
+ __le32 offset;
+ __le32 length;
};
-/* Chip IDs */
-enum chip_ids {
- CHIP_BB_A0 /* BB A0 chip ID */,
- CHIP_BB_B0 /* BB B0 chip ID */,
- CHIP_K2 /* AH chip ID */,
- MAX_CHIP_IDS
+/* binary init buffer types */
+enum bin_init_buffer_type {
+ BIN_BUF_FW_VER_INFO,
+ BIN_BUF_INIT_CMD,
+ BIN_BUF_INIT_VAL,
+ BIN_BUF_INIT_MODE_TREE,
+ BIN_BUF_IRO,
+ MAX_BIN_INIT_BUFFER_TYPE
};
+/* init array header: raw */
struct init_array_raw_hdr {
__le32 data;
-#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
-#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
};
+/* init array header: standard */
struct init_array_standard_hdr {
__le32 data;
-#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
};
+/* init array header: zipped */
struct init_array_zipped_hdr {
__le32 data;
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
};
+/* init array header: pattern */
struct init_array_pattern_hdr {
__le32 data;
-#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
};
+/* init array header union */
union init_array_hdr {
- struct init_array_raw_hdr raw /* raw init array header */;
- struct init_array_standard_hdr standard;
- struct init_array_zipped_hdr zipped /* zipped init array header */;
- struct init_array_pattern_hdr pattern /* pattern init array header */;
+ struct init_array_raw_hdr raw;
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped;
+ struct init_array_pattern_hdr pattern;
};
+/* init array types */
enum init_array_types {
- INIT_ARR_STANDARD /* standard init array */,
- INIT_ARR_ZIPPED /* zipped init array */,
- INIT_ARR_PATTERN /* a repeated pattern */,
+ INIT_ARR_STANDARD,
+ INIT_ARR_ZIPPED,
+ INIT_ARR_PATTERN,
MAX_INIT_ARRAY_TYPES
};
/* init operation: callback */
struct init_callback_op {
- __le32 op_data;
-#define INIT_CALLBACK_OP_OP_MASK 0xF
-#define INIT_CALLBACK_OP_OP_SHIFT 0
-#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
- __le16 callback_id /* Callback ID */;
- __le16 block_id /* Blocks ID */;
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id;
+ __le16 block_id;
};
/* init operation: delay */
struct init_delay_op {
- __le32 op_data;
-#define INIT_DELAY_OP_OP_MASK 0xF
-#define INIT_DELAY_OP_OP_SHIFT 0
-#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_DELAY_OP_RESERVED_SHIFT 4
- __le32 delay /* delay in us */;
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay;
};
/* init operation: if_mode */
struct init_if_mode_op {
__le32 op_data;
-#define INIT_IF_MODE_OP_OP_MASK 0xF
-#define INIT_IF_MODE_OP_OP_SHIFT 0
-#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
-#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
-#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
- __le16 reserved2;
- __le16 modes_buf_offset;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset;
};
-/* init operation: if_phase */
+/* init operation: if_phase */
struct init_if_phase_op {
__le32 op_data;
-#define INIT_IF_PHASE_OP_OP_MASK 0xF
-#define INIT_IF_PHASE_OP_OP_SHIFT 0
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
-#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
-#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
__le32 phase_data;
-#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
-#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
-#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
-#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
-#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
-#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
};
/* init mode operators */
enum init_mode_ops {
- INIT_MODE_OP_NOT /* init mode not operator */,
- INIT_MODE_OP_OR /* init mode or operator */,
- INIT_MODE_OP_AND /* init mode and operator */,
+ INIT_MODE_OP_NOT,
+ INIT_MODE_OP_OR,
+ INIT_MODE_OP_AND,
MAX_INIT_MODE_OPS
};
/* init operation: raw */
struct init_raw_op {
- __le32 op_data;
-#define INIT_RAW_OP_OP_MASK 0xF
-#define INIT_RAW_OP_OP_SHIFT 0
-#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
-#define INIT_RAW_OP_PARAM1_SHIFT 4
- __le32 param2 /* Init param 2 */;
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2;
};
/* init array params */
struct init_op_array_params {
- __le16 size /* array size in dwords */;
- __le16 offset /* array start offset in dwords */;
+ __le16 size;
+ __le16 offset;
};
/* Write init operation arguments */
union init_write_args {
- __le32 inline_val;
- __le32 zeros_count;
- __le32 array_offset;
- struct init_op_array_params runtime;
+ __le32 inline_val;
+ __le32 zeros_count;
+ __le32 array_offset;
+ struct init_op_array_params runtime;
};
/* init operation: write */
struct init_write_op {
__le32 data;
-#define INIT_WRITE_OP_OP_MASK 0xF
-#define INIT_WRITE_OP_OP_SHIFT 0
-#define INIT_WRITE_OP_SOURCE_MASK 0x7
-#define INIT_WRITE_OP_SOURCE_SHIFT 4
-#define INIT_WRITE_OP_RESERVED_MASK 0x1
-#define INIT_WRITE_OP_RESERVED_SHIFT 7
-#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
-#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
-#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_WRITE_OP_ADDRESS_SHIFT 9
- union init_write_args args /* Write init operation arguments */;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args;
};
/* init operation: read */
struct init_read_op {
__le32 op_data;
-#define INIT_READ_OP_OP_MASK 0xF
-#define INIT_READ_OP_OP_SHIFT 0
-#define INIT_READ_OP_POLL_TYPE_MASK 0xF
-#define INIT_READ_OP_POLL_TYPE_SHIFT 4
-#define INIT_READ_OP_RESERVED_MASK 0x1
-#define INIT_READ_OP_RESERVED_SHIFT 8
-#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_READ_OP_ADDRESS_SHIFT 9
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val;
+
};
/* Init operations union */
union init_op {
- struct init_raw_op raw /* raw init operation */;
- struct init_write_op write /* write init operation */;
- struct init_read_op read /* read init operation */;
- struct init_if_mode_op if_mode /* if_mode init operation */;
- struct init_if_phase_op if_phase /* if_phase init operation */;
- struct init_callback_op callback /* callback init operation */;
- struct init_delay_op delay /* delay init operation */;
+ struct init_raw_op raw;
+ struct init_write_op write;
+ struct init_read_op read;
+ struct init_if_mode_op if_mode;
+ struct init_if_phase_op if_phase;
+ struct init_callback_op callback;
+ struct init_delay_op delay;
};
/* Init command operation types */
enum init_op_types {
- INIT_OP_READ /* GRC read init command */,
- INIT_OP_WRITE /* GRC write init command */,
+ INIT_OP_READ,
+ INIT_OP_WRITE,
INIT_OP_IF_MODE,
INIT_OP_IF_PHASE,
- INIT_OP_DELAY /* delay init command */,
- INIT_OP_CALLBACK /* callback init command */,
+ INIT_OP_DELAY,
+ INIT_OP_CALLBACK,
MAX_INIT_OP_TYPES
};
+/* init polling types */
enum init_poll_types {
- INIT_POLL_NONE /* No polling */,
- INIT_POLL_EQ /* init value is included in the init command */,
- INIT_POLL_OR /* init value is all zeros */,
- INIT_POLL_AND /* init value is an array of values */,
+ INIT_POLL_NONE,
+ INIT_POLL_EQ,
+ INIT_POLL_OR,
+ INIT_POLL_AND,
MAX_INIT_POLL_TYPES
};
/* init source types */
enum init_source_types {
- INIT_SRC_INLINE /* init value is included in the init command */,
- INIT_SRC_ZEROS /* init value is all zeros */,
- INIT_SRC_ARRAY /* init value is an array of values */,
- INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ INIT_SRC_INLINE,
+ INIT_SRC_ZEROS,
+ INIT_SRC_ARRAY,
+ INIT_SRC_RUNTIME,
MAX_INIT_SOURCE_TYPES
};
/* Internal RAM Offsets macro data */
struct iro {
- u32 base /* RAM field offset */;
- u16 m1 /* multiplier 1 */;
- u16 m2 /* multiplier 2 */;
- u16 m3 /* multiplier 3 */;
- u16 size /* RAM field size */;
+ __le32 base;
+ __le16 m1;
+ __le16 m2;
+ __le16 m3;
+ __le16 size;
};
-/* QM per-port init parameters */
-struct init_qm_port_params {
- u8 active /* Indicates if this port is active */;
- u8 num_active_phys_tcs;
- u16 num_pbf_cmd_lines;
- u16 num_btb_blocks;
- __le16 reserved;
-};
-
-/* QM per-PQ init parameters */
-struct init_qm_pq_params {
- u8 vport_id /* VPORT ID */;
- u8 tc_id /* TC ID */;
- u8 wrr_group /* WRR group */;
- u8 reserved;
-};
+/**
+ * @brief qed_dbg_print_attn - Prints attention register values in the specified results struct.
+ *
+ * @param p_hwfn
+ * @param results - Pointer to the attention read results
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
-/* QM per-vport init parameters */
-struct init_qm_vport_params {
- u32 vport_rl;
- u16 vport_wfq;
- u16 first_tx_pq_id[NUM_OF_TCS];
-};
+#define MAX_NAME_LEN 16
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD \
0x00f000UL
+
/* Win 3 */
#define GTT_BAR0_MAP_REG_TSDM_RAM \
0x010000UL
+
/* Win 4 */
#define GTT_BAR0_MAP_REG_MSDM_RAM \
0x011000UL
+
/* Win 5 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
0x012000UL
+
/* Win 6 */
#define GTT_BAR0_MAP_REG_USDM_RAM \
0x013000UL
+
/* Win 7 */
#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
0x014000UL
+
/* Win 8 */
#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
0x015000UL
+
/* Win 9 */
#define GTT_BAR0_MAP_REG_XSDM_RAM \
0x016000UL
+
/* Win 10 */
#define GTT_BAR0_MAP_REG_YSDM_RAM \
0x017000UL
+
/* Win 11 */
#define GTT_BAR0_MAP_REG_PSDM_RAM \
0x018000UL
@@ -1584,785 +1963,718 @@ struct init_qm_vport_params {
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
*
* @return The required host memory size in 4KB units.
*/
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs);
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
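A usage sketch only; the PF and queue counts below are invented for illustration. The point is that the return value is in 4KB units and has to be scaled before sizing the QM PF memory:

/* Hypothetical caller; all counts are illustrative. */
u32 mem_4kb = qed_qm_pf_mem_size(0,    /* pf_id */
				 1024, /* num_pf_cids */
				 256,  /* num_vf_cids */
				 0,    /* num_tids */
				 16,   /* num_pf_pqs */
				 8);   /* num_vf_pqs */
size_t mem_bytes = (size_t)mem_4kb * 4096; /* result is in 4KB units */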
struct qed_qm_common_rt_init_params {
- u8 max_ports_per_engine;
- u8 max_phys_tcs_per_port;
- bool pf_rl_en;
- bool pf_wfq_en;
- bool vport_rl_en;
- bool vport_wfq_en;
- struct init_qm_port_params *port_params;
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
};
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_first_pf;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u8 start_vport;
+ u8 num_vports;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
/**
- * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
- * engine phase.
+ * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
*
* @param p_hwfn
- * @param max_ports_per_engine - max number of ports per engine in HW
- * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
- * @param port_params - array of size MAX_NUM_PORTS with
- * arameters for each port
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
-int qed_qm_common_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_qm_common_rt_init_params *p_params);
-
-struct qed_qm_pf_rt_init_params {
- u8 port_id;
- u8 pf_id;
- u8 max_phys_tcs_per_port;
- bool is_first_pf;
- u32 num_pf_cids;
- u32 num_vf_cids;
- u32 num_tids;
- u16 start_pq;
- u16 num_pf_pqs;
- u16 num_vf_pqs;
- u8 start_vport;
- u8 num_vports;
- u8 pf_wfq;
- u32 pf_rl;
- struct init_qm_pq_params *pq_params;
- struct init_qm_vport_params *vport_params;
-};
-
-int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params);
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
/**
- * @brief qed_init_pf_rl Initializes the rate limit of the specified PF
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
-int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id,
- u32 pf_rl);
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
/**
- * @brief qed_init_vport_rl Initializes the rate limit of the specified VPORT
+ * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id - An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * qed_qm_pf_rt_init
+ * @param vport_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id,
- u32 vport_rl);
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
/**
* @brief qed_send_qm_stop_cmd Sends a stop command to the QM
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt
* @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occurred while waiting
- * for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for QM command done.
*/
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs);
-bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool is_release_cmd,
- bool is_tx_pq,
- u16 start_pq,
- u16 num_pqs);
-
+/**
+ * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 dest_port);
+ struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable);
+
+/**
+ * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
+ */
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, bool eth_gre_enable,
- bool ip_gre_enable);
+ struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable);
+
+/**
+ * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_geneve_enable - enable or disable GENEVE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
+ */
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, bool eth_geneve_enable,
- bool ip_geneve_enable);
-
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
- (IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
- (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
- (IRO[6].base + ((global_queue_id) * IRO[6].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
-/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
-/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
-/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size)
-/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
-/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
-/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
-/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[17].base + ((stat_counter_id) * IRO[17].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size)
-/* Mstorm producers */
-#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1))
-#define MSTORM_PRODS_SIZE (IRO[18].size)
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size)
-/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[20].base + ((stat_counter_id) * IRO[20].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
-/* Ustorm queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[21].base + ((queue_id) * IRO[21].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size)
-/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size)
-/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size)
-/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size)
-/* Ystorm queue zone */
-#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[25].base + ((queue_id) * IRO[25].m1))
-#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size)
-/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[26].base + ((rss_id) * IRO[26].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size)
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[27].base + ((rss_id) * IRO[27].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size)
-/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[28].base + ((pf_id) * IRO[28].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size)
-/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
-/* Mstorm rq-cons of given queue-id */
-#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
- (IRO[30].base + ((rq_queue_id) * IRO[30].m1))
-#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size)
-/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size)
-/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
-/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size)
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[34].base + ((pf_id) * IRO[34].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size)
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[35].base + ((pf_id) * IRO[35].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size)
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[36].base + ((pf_id) * IRO[36].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size)
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size)
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size)
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size)
-/* Mstorm FCoE RX stats */
-#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size)
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size)
-/* Pstorm RoCE statistics */
-#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
- (IRO[42].base + ((stat_counter_id) * IRO[42].m1))
-#define PSTORM_ROCE_STAT_SIZE (IRO[42].size)
-/* Tstorm RoCE statistics */
-#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
- (IRO[43].base + ((stat_counter_id) * IRO[43].m1))
-#define TSTORM_ROCE_STAT_SIZE (IRO[43].size)
-
-static const struct iro iro_arr[44] = {
- { 0x10, 0x0, 0x0, 0x0, 0x8 },
- { 0x47c8, 0x60, 0x0, 0x0, 0x60 },
- { 0x5e30, 0x20, 0x0, 0x0, 0x20 },
- { 0x510, 0x8, 0x0, 0x0, 0x4 },
- { 0x490, 0x8, 0x0, 0x0, 0x4 },
- { 0x10, 0x8, 0x0, 0x0, 0x2 },
- { 0x90, 0x8, 0x0, 0x0, 0x2 },
- { 0x4940, 0x0, 0x0, 0x0, 0x78 },
- { 0x3de0, 0x0, 0x0, 0x0, 0x78 },
- { 0x2998, 0x0, 0x0, 0x0, 0x78 },
- { 0x4750, 0x0, 0x0, 0x0, 0x78 },
- { 0x56d0, 0x0, 0x0, 0x0, 0x78 },
- { 0x7e50, 0x0, 0x0, 0x0, 0x78 },
- { 0x100, 0x8, 0x0, 0x0, 0x8 },
- { 0x5c10, 0x10, 0x0, 0x0, 0x10 },
- { 0xb508, 0x30, 0x0, 0x0, 0x30 },
- { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
- { 0x58a0, 0x40, 0x0, 0x0, 0x40 },
- { 0x200, 0x10, 0x0, 0x0, 0x8 },
- { 0xa230, 0x0, 0x0, 0x0, 0x4 },
- { 0x8058, 0x40, 0x0, 0x0, 0x30 },
- { 0xd00, 0x8, 0x0, 0x0, 0x8 },
- { 0x2b30, 0x80, 0x0, 0x0, 0x38 },
- { 0xa808, 0x0, 0x0, 0x0, 0xf0 },
- { 0xa8f8, 0x8, 0x0, 0x0, 0x8 },
- { 0x80, 0x8, 0x0, 0x0, 0x8 },
- { 0xac0, 0x8, 0x0, 0x0, 0x8 },
- { 0x2580, 0x8, 0x0, 0x0, 0x8 },
- { 0x2500, 0x8, 0x0, 0x0, 0x8 },
- { 0x440, 0x8, 0x0, 0x0, 0x2 },
- { 0x1800, 0x8, 0x0, 0x0, 0x2 },
- { 0x1a00, 0x10, 0x8, 0x0, 0x2 },
- { 0x640, 0x10, 0x8, 0x0, 0x2 },
- { 0xd9b8, 0x38, 0x0, 0x0, 0x24 },
- { 0x11048, 0x10, 0x0, 0x0, 0x8 },
- { 0x11678, 0x38, 0x0, 0x0, 0x18 },
- { 0xaec0, 0x30, 0x0, 0x0, 0x10 },
- { 0x8700, 0x28, 0x0, 0x0, 0x18 },
- { 0xec00, 0x10, 0x0, 0x0, 0x10 },
- { 0xde38, 0x40, 0x0, 0x0, 0x30 },
- { 0x121a8, 0x38, 0x0, 0x0, 0x8 },
- { 0xf068, 0x20, 0x0, 0x0, 0x20 },
- { 0x2b68, 0x80, 0x0, 0x0, 0x10 },
- { 0x4ab8, 0x10, 0x0, 0x0, 0x10 },
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable);
+
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+ (IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+ (IRO[4].base + (pf_id) * IRO[4].m1)
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
+ (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+ (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+ (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+ (IRO[19].base + ((queue_id) * IRO[19].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size)
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[21].base + ((pf_id) * IRO[21].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[22].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[23].base + ((pf_id) * IRO[23].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size)
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[25].base + ((pf_id) * IRO[25].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size)
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
+ (IRO[26].base + ((ethtype) * IRO[26].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size)
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[28].base + ((pf_id) * IRO[28].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size)
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+ (IRO[29].base + ((queue_id) * IRO[29].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
+
+static const struct iro iro_arr[46] = {
+ {0x0, 0x0, 0x0, 0x0, 0x8},
+ {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ {0x6318, 0x20, 0x0, 0x0, 0x20},
+ {0xb00, 0x8, 0x0, 0x0, 0x4},
+ {0xa80, 0x8, 0x0, 0x0, 0x4},
+ {0x0, 0x8, 0x0, 0x0, 0x2},
+ {0x80, 0x8, 0x0, 0x0, 0x4},
+ {0x84, 0x8, 0x0, 0x0, 0x2},
+ {0x4bc0, 0x0, 0x0, 0x0, 0x78},
+ {0x3df0, 0x0, 0x0, 0x0, 0x78},
+ {0x29b0, 0x0, 0x0, 0x0, 0x78},
+ {0x4c38, 0x0, 0x0, 0x0, 0x78},
+ {0x4a48, 0x0, 0x0, 0x0, 0x78},
+ {0x7e48, 0x0, 0x0, 0x0, 0x78},
+ {0xa28, 0x8, 0x0, 0x0, 0x8},
+ {0x60f8, 0x10, 0x0, 0x0, 0x10},
+ {0xb820, 0x30, 0x0, 0x0, 0x30},
+ {0x95b8, 0x30, 0x0, 0x0, 0x30},
+ {0x4c18, 0x80, 0x0, 0x0, 0x40},
+ {0x1f8, 0x4, 0x0, 0x0, 0x4},
+ {0xc9a8, 0x0, 0x0, 0x0, 0x4},
+ {0x4c58, 0x80, 0x0, 0x0, 0x20},
+ {0x8050, 0x40, 0x0, 0x0, 0x30},
+ {0xe770, 0x60, 0x0, 0x0, 0x60},
+ {0x2b48, 0x80, 0x0, 0x0, 0x38},
+ {0xdf88, 0x78, 0x0, 0x0, 0x78},
+ {0x1f8, 0x4, 0x0, 0x0, 0x4},
+ {0xacf0, 0x0, 0x0, 0x0, 0xf0},
+ {0xade0, 0x8, 0x0, 0x0, 0x8},
+ {0x1f8, 0x8, 0x0, 0x0, 0x8},
+ {0xac0, 0x8, 0x0, 0x0, 0x8},
+ {0x2578, 0x8, 0x0, 0x0, 0x8},
+ {0x24f8, 0x8, 0x0, 0x0, 0x8},
+ {0x0, 0x8, 0x0, 0x0, 0x8},
+ {0x200, 0x10, 0x8, 0x0, 0x8},
+ {0xb78, 0x10, 0x8, 0x0, 0x2},
+ {0xd888, 0x38, 0x0, 0x0, 0x24},
+ {0x12120, 0x10, 0x0, 0x0, 0x8},
+ {0x11b20, 0x38, 0x0, 0x0, 0x18},
+ {0xa8c0, 0x30, 0x0, 0x0, 0x10},
+ {0x86f8, 0x28, 0x0, 0x0, 0x18},
+ {0xeff8, 0x10, 0x0, 0x0, 0x10},
+ {0xdd08, 0x48, 0x0, 0x0, 0x38},
+ {0xf460, 0x20, 0x0, 0x0, 0x20},
+ {0x2b80, 0x80, 0x0, 0x0, 0x10},
+ {0x5000, 0x10, 0x0, 0x0, 0x10},
};
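
Each *_OFFSET() macro above resolves against iro_arr[] (referenced as IRO[] by the macros) as base + id * m1, with the matching *_SIZE taken from the entry's last field. A minimal standalone sketch, assuming struct iro is laid out as { base, m1, m2, m3, size }, which the five-element initializers above suggest:

#include <stdio.h>

/* Assumed layout of struct iro, matching the initializers above. */
struct iro {
	unsigned int base;
	unsigned short m1;
	unsigned short m2;
	unsigned short m3;
	unsigned short size;
};

/* Entry 5 of iro_arr above, the one used by USTORM_EQE_CONS_OFFSET(). */
static const struct iro IRO[6] = {
	[5] = { 0x0, 0x8, 0x0, 0x0, 0x2 },
};

#define USTORM_EQE_CONS_OFFSET(pf_id) \
	(IRO[5].base + ((pf_id) * IRO[5].m1))

int main(void)
{
	/* pf_id 3 -> 0x0 + 3 * 0x8 = 0x18 bytes into the Ustorm RAM. */
	printf("EQE consumer offset for PF 3: 0x%x\n",
	       (unsigned int)USTORM_EQE_CONS_OFFSET(3));
	return 0;
}
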
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29836
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29966
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30507
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30764
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30798
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30814
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30848
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31008
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31009
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31522
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32546
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33058
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922
-
-#define RUNTIME_ARRAY_SIZE 33923
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30799
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30815
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30849
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31523
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32547
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33059
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
+
+#define RUNTIME_ARRAY_SIZE 33925
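
The *_RT_OFFSET and *_RT_SIZE values above index a flat runtime-init array of RUNTIME_ARRAY_SIZE entries that the init code fills before downloading it to the device. A hypothetical sketch of that use, assuming the definitions above are in scope; the rt_data structure and rt_store() helper are illustrative names only, not the driver's actual API:

/* Illustrative only -- not the qed driver's real runtime-array API. */
struct rt_data {
	u32 init_val[RUNTIME_ARRAY_SIZE];
	bool b_valid[RUNTIME_ARRAY_SIZE];	/* slot was written */
};

static void rt_store(struct rt_data *rt, u32 rt_offset, u32 val)
{
	rt->init_val[rt_offset] = val;
	rt->b_valid[rt_offset] = true;
}

static void rt_fill_example(struct rt_data *rt)
{
	u32 i;

	/* Single register: one slot at its *_RT_OFFSET. */
	rt_store(rt, DORQ_REG_PF_WAKE_ALL_RT_OFFSET, 1);

	/* Register array: *_RT_SIZE consecutive slots from *_RT_OFFSET. */
	for (i = 0; i < QM_REG_VOQCRDLINE_RT_SIZE; i++)
		rt_store(rt, QM_REG_VOQCRDLINE_RT_OFFSET + i, 0);
}
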
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -2380,266 +2692,266 @@ struct xstorm_eth_conn_st_ctx {
};
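
The aggregation context that follows packs its flagsN bytes with the _MASK/_SHIFT pairs defined below. A minimal sketch of the usual accessor pattern; the helper macros here are written out for illustration (the header's own field accessors, if any, follow the same shape):

/* Illustrative accessors for the _MASK/_SHIFT pairs below. */
#define FIELD_GET_EX(val, name) \
	(((val) >> name##_SHIFT) & name##_MASK)

#define FIELD_SET_EX(val, name, fieldval)				\
	do {								\
		(val) &= ~(name##_MASK << name##_SHIFT);		\
		(val) |= ((fieldval) & name##_MASK) << name##_SHIFT;	\
	} while (0)

static void ag_ctx_flags_example(struct xstorm_eth_conn_ag_ctx *ctx)
{
	u8 flush_q0;

	/* Set the single-bit DQ_CF_ACTIVE field (bit 7 of flags1). */
	FIELD_SET_EX(ctx->flags1, XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE, 1);

	/* Read the two-bit FLUSH_Q0 field (bits 1:0 of flags7). */
	flush_q0 = FIELD_GET_EX(ctx->flags7, XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0);
	(void)flush_q0;
}
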
struct xstorm_eth_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 word1 /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 go_to_bd_cons /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
- u8 byte3 /* byte3 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- u8 byte6 /* byte6 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* cf_array0 */;
- __le32 reg6 /* cf_array1 */;
- __le16 word7 /* word7 */;
- __le16 word8 /* word8 */;
- __le16 word9 /* word9 */;
- __le16 word10 /* word10 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- __le32 reg9 /* reg9 */;
- u8 byte7 /* byte7 */;
- u8 byte8 /* byte8 */;
- u8 byte9 /* byte9 */;
- u8 byte10 /* byte10 */;
- u8 byte11 /* byte11 */;
- u8 byte12 /* byte12 */;
- u8 byte13 /* byte13 */;
- u8 byte14 /* byte14 */;
- u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
- __le16 word11 /* word11 */;
- __le32 reg10 /* reg10 */;
- __le32 reg11 /* reg11 */;
- __le32 reg12 /* reg12 */;
- __le32 reg13 /* reg13 */;
- __le32 reg14 /* reg14 */;
- __le32 reg15 /* reg15 */;
- __le32 reg16 /* reg16 */;
- __le32 reg17 /* reg17 */;
- __le32 reg18 /* reg18 */;
- __le32 reg19 /* reg19 */;
- __le16 word12 /* word12 */;
- __le16 word13 /* word13 */;
- __le16 word14 /* word14 */;
- __le16 word15 /* word15 */;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 byte16;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
};
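
The flagsN bytes in these aggregative-context structures are driven through the *_MASK/*_SHIFT pairs rather than C bitfields. A minimal sketch of that access pattern, assuming the GET_FIELD()/SET_FIELD() helpers the qed common headers define for this purpose (the function below is illustrative, not part of the patch):

static void example_set_dq_cf_en(struct xstorm_eth_conn_ag_ctx *ag, bool en)
{
	/* Clear and set the single-bit field carried inside flags10. */
	SET_FIELD(ag->flags10, XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN, en ? 1 : 0);

	if (GET_FIELD(ag->flags10, XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN))
		pr_debug("DQ completion flow enabled\n");
}
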
/* The eth storm context for the Ystorm */
@@ -2648,220 +2960,220 @@ struct ystorm_eth_conn_st_ctx {
};
struct ystorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 byte0;
+ u8 state;
+ u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- __le32 terminate_spqe /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le16 tx_bd_cons_upd /* word1 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
- __le16 word4 /* word4 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 tx_q0_int_coallecing_timeset;
+ u8 byte3;
+ __le16 word0;
+ __le32 terminate_spqe;
+ __le32 reg1;
+ __le16 tx_bd_cons_upd;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
};
struct tstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 rx_bd_cons /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 rx_bd_prod /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 rx_bd_cons;
+ u8 byte4;
+ u8 byte5;
+ __le16 rx_bd_prod;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
};
struct ustorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 tx_bd_cons /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 tx_int_coallecing_timeset /* reg3 */;
- __le16 tx_drv_bd_cons /* word2 */;
- __le16 rx_drv_cqe_cons /* word3 */;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 tx_bd_cons;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 tx_int_coallecing_timeset;
+ __le16 tx_drv_bd_cons;
+ __le16 rx_drv_cqe_cons;
};
/* The eth storm context for the Ustorm */
@@ -2876,47 +3188,75 @@ struct mstorm_eth_conn_st_ctx {
/* eth connection context */
struct eth_conn_context {
- struct tstorm_eth_conn_st_ctx tstorm_st_context;
- struct regpair tstorm_st_padding[2];
- struct pstorm_eth_conn_st_ctx pstorm_st_context;
- struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
- struct ustorm_eth_conn_st_ctx ustorm_st_context;
- struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
+/* opcodes for the event ring */
+enum eth_event_opcode {
+ ETH_EVENT_UNUSED,
+ ETH_EVENT_VPORT_START,
+ ETH_EVENT_VPORT_UPDATE,
+ ETH_EVENT_VPORT_STOP,
+ ETH_EVENT_TX_QUEUE_START,
+ ETH_EVENT_TX_QUEUE_STOP,
+ ETH_EVENT_RX_QUEUE_START,
+ ETH_EVENT_RX_QUEUE_UPDATE,
+ ETH_EVENT_RX_QUEUE_STOP,
+ ETH_EVENT_FILTERS_UPDATE,
+ ETH_EVENT_RESERVED,
+ ETH_EVENT_RESERVED2,
+ ETH_EVENT_RESERVED3,
+ ETH_EVENT_RX_ADD_UDP_FILTER,
+ ETH_EVENT_RX_DELETE_UDP_FILTER,
+ ETH_EVENT_RESERVED4,
+ ETH_EVENT_RESERVED5,
+ MAX_ETH_EVENT_OPCODE
+};
+
+/* Actions for Ethernet classification rules */
enum eth_filter_action {
+ ETH_FILTER_ACTION_UNUSED,
ETH_FILTER_ACTION_REMOVE,
ETH_FILTER_ACTION_ADD,
ETH_FILTER_ACTION_REMOVE_ALL,
MAX_ETH_FILTER_ACTION
};
+/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */
struct eth_filter_cmd {
- u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
- u8 vport_id /* the vport id */;
- u8 action /* filter command action: add/remove/replace */;
- u8 reserved0;
- __le32 vni;
- __le16 mac_lsb;
- __le16 mac_mid;
- __le16 mac_msb;
- __le16 vlan_id;
+ u8 type;
+ u8 vport_id;
+ u8 action;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
};
+/* $$KEEP_ENDIANNESS$$ */
struct eth_filter_cmd_header {
- u8 rx;
- u8 tx;
- u8 cmd_cnt;
- u8 assert_on_error;
- u8 reserved1[4];
+ u8 rx;
+ u8 tx;
+ u8 cmd_cnt;
+ u8 assert_on_error;
+ u8 reserved1[4];
};
+/* Ethernet filter types: mac/vlan/pair */
enum eth_filter_type {
+ ETH_FILTER_TYPE_UNUSED,
ETH_FILTER_TYPE_MAC,
ETH_FILTER_TYPE_VLAN,
ETH_FILTER_TYPE_PAIR,
@@ -2929,463 +3269,3515 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
+/* Ethernet Ramrod Command IDs */
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
- ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
- ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
- ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
- ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
- ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
- ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
- ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
- ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
- ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
- ETH_RAMROD_RESERVED,
- ETH_RAMROD_RESERVED2,
- ETH_RAMROD_RESERVED3,
- ETH_RAMROD_RESERVED4,
- ETH_RAMROD_RESERVED5,
- ETH_RAMROD_RESERVED6,
- ETH_RAMROD_RESERVED7,
- ETH_RAMROD_RESERVED8,
+ ETH_RAMROD_VPORT_START,
+ ETH_RAMROD_VPORT_UPDATE,
+ ETH_RAMROD_VPORT_STOP,
+ ETH_RAMROD_RX_QUEUE_START,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ ETH_RAMROD_TX_QUEUE_START,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ ETH_RAMROD_FILTERS_UPDATE,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
+ ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_ADD_UDP_FILTER,
+ ETH_RAMROD_RX_DELETE_UDP_FILTER,
+ ETH_RAMROD_RX_CREATE_GFT_ACTION,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
MAX_ETH_RAMROD_CMD_ID
};
+/* return code from eth sp ramrods */
+struct eth_return_code {
+ u8 value;
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
+
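For orientation, the new eth_return_code byte unpacks with the same MASK/SHIFT convention; the helper below is an illustrative sketch that only reads the fields back, without interpreting them:

static void example_decode_eth_return(struct eth_return_code rc)
{
	u8 err = GET_FIELD(rc.value, ETH_RETURN_CODE_ERR_CODE);
	u8 rx_tx = GET_FIELD(rc.value, ETH_RETURN_CODE_RX_TX);

	pr_debug("eth ramrod completion: err_code %u, rx_tx %u\n", err, rx_tx);
}
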
+/* What to do in case an error occurs */
enum eth_tx_err {
- ETH_TX_ERR_DROP /* Drop erronous packet. */,
+ ETH_TX_ERR_DROP,
ETH_TX_ERR_ASSERT_MALICIOUS,
MAX_ETH_TX_ERR
};
+/* Array of the different error type behaviors */
struct eth_tx_err_vals {
__le16 values;
-#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
-#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
-#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
-#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
-#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
-#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
-#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
-#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
-#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
-#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
-#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
-#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
-#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
-};
-
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
+
+/* vport rss configuration data */
struct eth_vport_rss_config {
__le16 capabilities;
-#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
-#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
-#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
-#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
-#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
-#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
- u8 rss_id;
- u8 rss_mode;
- u8 update_rss_key;
- u8 update_rss_ind_table;
- u8 update_rss_capabilities;
- u8 tbl_size;
- __le32 reserved2[2];
- __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
- __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
- __le32 reserved3[2];
-};
-
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
+ u8 rss_id;
+ u8 rss_mode;
+ u8 update_rss_key;
+ u8 update_rss_ind_table;
+ u8 update_rss_capabilities;
+ u8 tbl_size;
+ __le32 reserved2[2];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
+
+/* eth vport RSS mode */
enum eth_vport_rss_mode {
ETH_VPORT_RSS_MODE_DISABLED,
ETH_VPORT_RSS_MODE_REGULAR,
MAX_ETH_VPORT_RSS_MODE
};
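
A hedged sketch of filling the RSS configuration above (the helper name and the particular capabilities enabled are illustrative; the constants and field names come from this header, and SET_FIELD() is assumed from the qed common headers):

static void example_fill_rss(struct eth_vport_rss_config *rss,
			     const u32 *key, u16 num_rxqs)
{
	u16 caps = 0;
	int i;

	rss->rss_mode = ETH_VPORT_RSS_MODE_REGULAR;
	rss->update_rss_key = 1;
	rss->update_rss_ind_table = 1;
	rss->update_rss_capabilities = 1;

	/* Hash plain IPv4 and TCP/IPv4 flows; other capabilities left off. */
	SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, 1);
	SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 1);
	rss->capabilities = cpu_to_le16(caps);

	/* Spread the indirection table round-robin over the RX queues. */
	for (i = 0; i < ETH_RSS_IND_TABLE_ENTRIES_NUM; i++)
		rss->indirection_table[i] = cpu_to_le16(i % num_rxqs);

	for (i = 0; i < ETH_RSS_KEY_SIZE_REGS; i++)
		rss->rss_key[i] = cpu_to_le32(key[i]);
}
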
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
struct eth_vport_rx_mode {
__le16 state;
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
-#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
-#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
__le16 reserved2[3];
};
+/* Command for setting tpa parameters */
struct eth_vport_tpa_param {
- u8 tpa_ipv4_en_flg;
- u8 tpa_ipv6_en_flg;
- u8 tpa_ipv4_tunn_en_flg;
- u8 tpa_ipv6_tunn_en_flg;
- u8 tpa_pkt_split_flg;
- u8 tpa_hdr_data_split_flg;
- u8 tpa_gro_consistent_flg;
- u8 tpa_max_aggs_num;
- u16 tpa_max_size;
- u16 tpa_min_size_to_start;
- u16 tpa_min_size_to_cont;
- u8 max_buff_num;
- u8 reserved;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+
+ u8 tpa_max_aggs_num;
+
+ __le16 tpa_max_size;
+ __le16 tpa_min_size_to_start;
+
+ __le16 tpa_min_size_to_cont;
+ u8 max_buff_num;
+ u8 reserved;
};
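
A rough illustration of how the TPA block might be populated for GRO-style aggregation; the numeric limits are made-up placeholders rather than recommended values:

static void example_fill_tpa(struct eth_vport_tpa_param *tpa, u16 mtu)
{
	tpa->tpa_ipv4_en_flg = 1;
	tpa->tpa_ipv6_en_flg = 1;
	tpa->tpa_gro_consistent_flg = 1;	/* keep aggregations GRO-compatible */
	tpa->tpa_max_aggs_num = 8;		/* illustrative */
	tpa->tpa_max_size = cpu_to_le16(U16_MAX);
	tpa->tpa_min_size_to_start = cpu_to_le16(mtu / 2);
	tpa->tpa_min_size_to_cont = cpu_to_le16(mtu / 2);
	tpa->max_buff_num = 2;			/* illustrative */
}
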
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
struct eth_vport_tx_mode {
__le16 state;
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
-#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
__le16 reserved2[3];
};
+/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
- __le16 rx_queue_id;
- __le16 num_of_pbl_pages;
- __le16 bd_max_bytes;
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 default_rss_queue_flg;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 stats_counter_id;
- u8 pin_context;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- u8 pxp_st_hint;
- __le16 pxp_st_index;
- u8 pmd_mode;
- u8 notify_en;
- u8 toggle_val;
- u8 reserved[7];
- __le16 reserved1;
- struct regpair cqe_pbl_addr;
- struct regpair bd_base;
- struct regpair reserved2;
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+
+ __le16 pxp_st_index;
+ u8 pmd_mode;
+
+ u8 notify_en;
+ u8 toggle_val;
+
+ u8 vf_rx_prod_index;
+
+ u8 reserved[6];
+ __le16 reserved1;
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair reserved2;
};
+/* Ramrod data for rx queue stop ramrod */
struct rx_queue_stop_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[3];
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
};
+/* Ramrod data for rx queue update ramrod */
struct rx_queue_update_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[4];
- u8 reserved1;
- u8 reserved2;
- u8 reserved3;
- __le16 reserved4;
- __le16 reserved5;
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[4];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
struct regpair reserved6;
};
-struct tx_queue_start_ramrod_data {
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 reserved0;
- u8 stats_counter_id;
- __le16 qm_pq_id;
- u8 flags;
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
- u8 pxp_st_hint;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- __le16 pxp_st_index;
- __le16 comp_agg_size;
- __le16 queue_zone_id;
- __le16 test_dup_count;
- __le16 pbl_size;
- __le16 tx_queue_id;
- struct regpair pbl_base_addr;
- struct regpair bd_cons_address;
+/* Ramrod data for rx Add UDP Filter */
+struct rx_udp_filter_data {
+ __le16 action_icid;
+ __le16 vlan_id;
+ u8 ip_type;
+ u8 tenant_id_exists;
+ __le16 reserved1;
+ __le32 ip_dst_addr[4];
+ __le32 ip_src_addr[4];
+ __le16 udp_dst_port;
+ __le16 udp_src_port;
+ __le32 tenant_id;
};
+/* Ramrod data for tx queue start ramrod */
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 reserved0;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ __le16 comp_agg_size;
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ __le16 tx_queue_id;
+
+ struct regpair pbl_base_addr;
+ struct regpair bd_cons_address;
+};
+
+/* Ramrod data for tx queue stop ramrod */
struct tx_queue_stop_ramrod_data {
__le16 reserved[4];
};
+/* Ramrod data for vport filter update ramrod */
struct vport_filter_update_ramrod_data {
- struct eth_filter_cmd_header filter_cmd_hdr;
- struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
};
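
The filter command header and command array shown earlier come together in this ramrod payload; as a sketch, the fragment below builds a single unicast-MAC add on vport 0. The packing of the MAC bytes into the three 16-bit words is an assumption about the firmware convention, not taken from this patch:

static void example_add_ucast_mac(struct vport_filter_update_ramrod_data *p,
				  const u8 *mac)
{
	p->filter_cmd_hdr.rx = 1;
	p->filter_cmd_hdr.tx = 1;
	p->filter_cmd_hdr.cmd_cnt = 1;

	p->filter_cmds[0].type = ETH_FILTER_TYPE_MAC;
	p->filter_cmds[0].vport_id = 0;
	p->filter_cmds[0].action = ETH_FILTER_ACTION_ADD;
	/* Assumed big-endian word packing of the 6-byte MAC. */
	p->filter_cmds[0].mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
	p->filter_cmds[0].mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
	p->filter_cmds[0].mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
}
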
+/* Ramrod data for vport start ramrod */
struct vport_start_ramrod_data {
- u8 vport_id;
- u8 sw_fid;
- __le16 mtu;
- u8 drop_ttl0_en;
- u8 inner_vlan_removal_en;
- struct eth_vport_rx_mode rx_mode;
- struct eth_vport_tx_mode tx_mode;
- struct eth_vport_tpa_param tpa_param;
- __le16 default_vlan;
- u8 tx_switching_en;
- u8 anti_spoofing_en;
- u8 default_vlan_en;
- u8 handle_ptp_pkts;
- u8 silent_vlan_removal_en;
- u8 untagged;
- struct eth_tx_err_vals tx_err_behav;
- u8 zero_placement_offset;
- u8 reserved[7];
-};
-
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 default_vlan;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+
+ u8 default_vlan_en;
+
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ u8 untagged;
+ struct eth_tx_err_vals tx_err_behav;
+
+ u8 zero_placement_offset;
+ u8 ctl_frame_mac_check_en;
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[5];
+};
+
+/* Ramrod data for vport stop ramrod */
struct vport_stop_ramrod_data {
- u8 vport_id;
- u8 reserved[7];
+ u8 vport_id;
+ u8 reserved[7];
};
+/* Ramrod data for vport update ramrod */
struct vport_update_ramrod_data_cmn {
- u8 vport_id;
- u8 update_rx_active_flg;
- u8 rx_active_flg;
- u8 update_tx_active_flg;
- u8 tx_active_flg;
- u8 update_rx_mode_flg;
- u8 update_tx_mode_flg;
- u8 update_approx_mcast_flg;
- u8 update_rss_flg;
- u8 update_inner_vlan_removal_en_flg;
- u8 inner_vlan_removal_en;
- u8 update_tpa_param_flg;
- u8 update_tpa_en_flg;
- u8 update_tx_switching_en_flg;
- u8 tx_switching_en;
- u8 update_anti_spoofing_en_flg;
- u8 anti_spoofing_en;
- u8 update_handle_ptp_pkts;
- u8 handle_ptp_pkts;
- u8 update_default_vlan_en_flg;
- u8 default_vlan_en;
- u8 update_default_vlan_flg;
- __le16 default_vlan;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- u8 silent_vlan_removal_en;
- u8 update_mtu_flg;
- __le16 mtu;
- u8 reserved[2];
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_tx_switching_en_flg;
+
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+
+ u8 default_vlan_en;
+
+ u8 update_default_vlan_flg;
+
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 update_mtu_flg;
+
+ __le16 mtu;
+ u8 reserved[2];
};
struct vport_update_ramrod_mcast {
__le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
};
+/* Ramrod data for vport update ramrod */
struct vport_update_ramrod_data {
- struct vport_update_ramrod_data_cmn common;
- struct eth_vport_rx_mode rx_mode;
- struct eth_vport_tx_mode tx_mode;
- struct eth_vport_tpa_param tpa_param;
- struct vport_update_ramrod_mcast approx_mcast;
- struct eth_vport_rss_config rss_config;
+ struct vport_update_ramrod_data_cmn common;
+
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config;
+};
+
+struct mstorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct rdma_close_func_ramrod_data {
+ u8 cnq_start_offset;
+ u8 num_cnqs;
+ u8 vf_id;
+ u8 vf_valid;
+ u8 reserved[4];
+};
+
+struct rdma_cnq_params {
+ __le16 sb_num;
+ u8 sb_index;
+ u8 num_pbl_pages;
+ __le32 reserved;
+ struct regpair pbl_base_addr;
+ __le16 queue_zone_num;
+ u8 reserved1[6];
+};
+
+struct rdma_create_cq_ramrod_data {
+ struct regpair cq_handle;
+ struct regpair pbl_addr;
+ __le32 max_cqes;
+ __le16 pbl_num_pages;
+ __le16 dpi;
+ u8 is_two_level_pbl;
+ u8 cnq_id;
+ u8 pbl_log_page_size;
+ u8 toggle_bit;
+ __le16 int_timeout;
+ __le16 reserved1;
};
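
A minimal sketch of preparing the create-CQ ramrod data added here, assuming the regpair layout from common_hsi.h (lo/hi little-endian halves); the helper name and parameter list are illustrative:

static void example_fill_create_cq(struct rdma_create_cq_ramrod_data *p,
				   dma_addr_t pbl_addr, u16 pbl_pages,
				   u32 max_cqes, u16 dpi, u8 cnq_id)
{
	p->pbl_addr.lo = cpu_to_le32(lower_32_bits(pbl_addr));
	p->pbl_addr.hi = cpu_to_le32(upper_32_bits(pbl_addr));
	p->pbl_num_pages = cpu_to_le16(pbl_pages);
	p->max_cqes = cpu_to_le32(max_cqes);
	p->dpi = cpu_to_le16(dpi);
	p->cnq_id = cnq_id;
}
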
-#define VF_MAX_STATIC 192 /* In case of K2 */
+struct rdma_deregister_tid_ramrod_data {
+ __le32 itid;
+ __le32 reserved;
+};
-#define MCP_GLOB_PATH_MAX 2
-#define MCP_PORT_MAX 2 /* Global */
-#define MCP_GLOB_PORT_MAX 4 /* Global */
-#define MCP_GLOB_FUNC_MAX 16 /* Global */
+struct rdma_destroy_cq_output_params {
+ __le16 cnq_num;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct rdma_destroy_cq_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum rdma_event_opcode {
+ RDMA_EVENT_UNUSED,
+ RDMA_EVENT_FUNC_INIT,
+ RDMA_EVENT_FUNC_CLOSE,
+ RDMA_EVENT_REGISTER_MR,
+ RDMA_EVENT_DEREGISTER_MR,
+ RDMA_EVENT_CREATE_CQ,
+ RDMA_EVENT_RESIZE_CQ,
+ RDMA_EVENT_DESTROY_CQ,
+ RDMA_EVENT_CREATE_SRQ,
+ RDMA_EVENT_MODIFY_SRQ,
+ RDMA_EVENT_DESTROY_SRQ,
+ MAX_RDMA_EVENT_OPCODE
+};
+
+enum rdma_fw_return_code {
+ RDMA_RETURN_OK = 0,
+ RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
+ RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR,
+ RDMA_RETURN_RESIZE_CQ_ERR,
+ RDMA_RETURN_NIG_DRAIN_REQ,
+ MAX_RDMA_FW_RETURN_CODE
+};
+
+struct rdma_init_func_hdr {
+ u8 cnq_start_offset;
+ u8 num_cnqs;
+ u8 cq_ring_mode;
+ u8 cnp_vlan_priority;
+ __le32 cnp_send_timeout;
+ u8 cnp_dscp;
+ u8 vf_id;
+ u8 vf_valid;
+ u8 reserved[5];
+};
+
+struct rdma_init_func_ramrod_data {
+ struct rdma_init_func_hdr params_header;
+ struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
+};
+
+enum rdma_ramrod_cmd_id {
+ RDMA_RAMROD_UNUSED,
+ RDMA_RAMROD_FUNC_INIT,
+ RDMA_RAMROD_FUNC_CLOSE,
+ RDMA_RAMROD_REGISTER_MR,
+ RDMA_RAMROD_DEREGISTER_MR,
+ RDMA_RAMROD_CREATE_CQ,
+ RDMA_RAMROD_RESIZE_CQ,
+ RDMA_RAMROD_DESTROY_CQ,
+ RDMA_RAMROD_CREATE_SRQ,
+ RDMA_RAMROD_MODIFY_SRQ,
+ RDMA_RAMROD_DESTROY_SRQ,
+ MAX_RDMA_RAMROD_CMD_ID
+};
+
+struct rdma_register_tid_ramrod_data {
+ __le32 flags;
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31
+ u8 flags1;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+ u8 flags2;
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+ u8 key;
+ u8 length_hi;
+ u8 vf_id;
+ u8 vf_valid;
+ __le16 pd;
+ __le32 length_lo;
+ __le32 itid;
+ __le32 reserved2;
+ struct regpair va;
+ struct regpair pbl_base;
+ struct regpair dif_error_addr;
+ struct regpair dif_runt_addr;
+ __le32 reserved3[2];
+};
+
+struct rdma_resize_cq_output_params {
+ __le32 old_cq_cons;
+ __le32 old_cq_prod;
+};
+
+struct rdma_resize_cq_ramrod_data {
+ u8 flags;
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+ u8 pbl_log_page_size;
+ __le16 pbl_num_pages;
+ __le32 max_cqes;
+ struct regpair pbl_addr;
+ struct regpair output_params_addr;
+};
+
+struct rdma_srq_context {
+ struct regpair temp[8];
+};
+
+struct rdma_srq_create_ramrod_data {
+ struct regpair pbl_base_addr;
+ __le16 pages_in_srq_pbl;
+ __le16 pd_id;
+ struct rdma_srq_id srq_id;
+ __le16 page_size;
+ __le16 reserved1;
+ __le32 reserved2;
+ struct regpair producers_addr;
+};
+
+struct rdma_srq_destroy_ramrod_data {
+ struct rdma_srq_id srq_id;
+ __le32 reserved;
+};
+
+struct rdma_srq_modify_ramrod_data {
+ struct rdma_srq_id srq_id;
+ __le32 wqe_limit;
+};
+
+struct ystorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct ystorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 msem_ctx_upd_seq;
+ u8 flags0;
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct mstorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[2];
+};
+
+struct ustorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 reg2;
+ __le32 dif_runt_value;
+ __le32 reg4;
+ __le32 reg5;
+};
+
+struct rdma_task_context {
+ struct ystorm_rdma_task_st_ctx ystorm_st_context;
+ struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct tdif_task_context tdif_context;
+ struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_st_ctx mstorm_st_context;
+ struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
+enum rdma_tid_type {
+ RDMA_TID_REGISTERED_MR,
+ RDMA_TID_FMR,
+ RDMA_TID_MW_TYPE1,
+ RDMA_TID_MW_TYPE2A,
+ MAX_RDMA_TID_TYPE
+};
+
+struct mstorm_rdma_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct tstorm_rdma_conn_ag_ctx {
+ u8 reserved0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct tstorm_rdma_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+ u8 flags2;
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+ u8 flags3;
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+ u8 byte2;
+ __le16 word1;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg1;
+ __le32 reg2;
+};
+
+struct ustorm_rdma_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 conn_dpi;
+ __le16 word1;
+ __le32 cq_cons;
+ __le32 cq_se_prod;
+ __le32 cq_prod;
+ __le32 reg3;
+ __le16 int_timeout;
+ __le16 word3;
+};
+
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+};
+
+struct xstorm_rdma_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+};
+
+struct ystorm_rdma_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct mstorm_roce_conn_st_ctx {
+ struct regpair temp[6];
+};
+
+struct pstorm_roce_conn_st_ctx {
+ struct regpair temp[16];
+};
+
+struct ystorm_roce_conn_st_ctx {
+ struct regpair temp[2];
+};
+
+struct xstorm_roce_conn_st_ctx {
+ struct regpair temp[22];
+};
+
+struct tstorm_roce_conn_st_ctx {
+ struct regpair temp[30];
+};
+
+struct ustorm_roce_conn_st_ctx {
+ struct regpair temp[12];
+};
+
+struct roce_conn_context {
+ struct ystorm_roce_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_roce_conn_st_ctx pstorm_st_context;
+ struct xstorm_roce_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_roce_conn_st_ctx tstorm_st_context;
+ struct mstorm_roce_conn_st_ctx mstorm_st_context;
+ struct ustorm_roce_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
+struct roce_create_qp_req_ramrod_data {
+ __le16 flags;
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
+ u8 max_ord;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 orq_num_pages;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 dst_qp_id;
+ __le32 ack_timeout_val;
+ __le32 initial_psn;
+ __le16 mtu;
+ __le16 pd;
+ __le16 sq_num_pages;
+ __le16 reserved2;
+ struct regpair sq_pbl_addr;
+ struct regpair orq_pbl_addr;
+ __le16 local_mac_addr[3];
+ __le16 remote_mac_addr[3];
+ __le16 vlan_id;
+ __le16 udp_src_port;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+ struct regpair qp_handle_for_cqe;
+ struct regpair qp_handle_for_async;
+ u8 stats_counter_id;
+ u8 reserved3[7];
+ __le32 cq_cid;
+ __le16 physical_queue0;
+ __le16 dpi;
+};
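+
+/*
+ * Fill-in sketch only (the p_ramrod/qp names are hypothetical, not taken
+ * from this patch): every multi-byte member is little-endian and the
+ * regpair members carry DMA addresses, so a caller building this ramrod
+ * would use the usual byte-order helpers and the DMA_REGPAIR_LE() macro
+ * used elsewhere in qed, e.g.
+ *
+ *	p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ *	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
+ *	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
+ */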
+
+struct roce_create_qp_resp_ramrod_data {
+ __le16 flags;
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
+ u8 max_ird;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 irq_num_pages;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 dst_qp_id;
+ u8 stats_counter_id;
+ u8 reserved1;
+ __le16 mtu;
+ __le32 initial_psn;
+ __le16 pd;
+ __le16 rq_num_pages;
+ struct rdma_srq_id srq_id;
+ struct regpair rq_pbl_addr;
+ struct regpair irq_pbl_addr;
+ __le16 local_mac_addr[3];
+ __le16 remote_mac_addr[3];
+ __le16 vlan_id;
+ __le16 udp_src_port;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+ struct regpair qp_handle_for_cqe;
+ struct regpair qp_handle_for_async;
+ __le32 reserved2[2];
+ __le32 cq_cid;
+ __le16 physical_queue0;
+ __le16 dpi;
+};
+
+struct roce_destroy_qp_req_output_params {
+ __le32 num_bound_mw;
+ __le32 reserved;
+};
+
+struct roce_destroy_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+struct roce_destroy_qp_resp_output_params {
+ __le32 num_invalidated_mw;
+ __le32 reserved;
+};
+
+struct roce_destroy_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum roce_event_opcode {
+ ROCE_EVENT_CREATE_QP = 11,
+ ROCE_EVENT_MODIFY_QP,
+ ROCE_EVENT_QUERY_QP,
+ ROCE_EVENT_DESTROY_QP,
+ MAX_ROCE_EVENT_OPCODE
+};
+
+struct roce_modify_qp_req_ramrod_data {
+ __le16 flags;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13
+ u8 fields;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
+ u8 max_ord;
+ u8 traffic_class;
+ u8 hop_limit;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 ack_timeout_val;
+ __le16 mtu;
+ __le16 reserved2;
+ __le32 reserved3[3];
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+};
+
+struct roce_modify_qp_resp_ramrod_data {
+ __le16 flags;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10
+ u8 fields;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
+ u8 max_ird;
+ u8 traffic_class;
+ u8 hop_limit;
+ __le16 p_key;
+ __le32 flow_label;
+ __le16 mtu;
+ __le16 reserved2;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+};
+
+struct roce_query_qp_req_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
+};
+
+struct roce_query_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+struct roce_query_qp_resp_output_params {
+ __le32 psn;
+ __le32 err_flag;
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+};
+
+struct roce_query_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum roce_ramrod_cmd_id {
+ ROCE_RAMROD_CREATE_QP = 11,
+ ROCE_RAMROD_MODIFY_QP,
+ ROCE_RAMROD_QUERY_QP,
+ ROCE_RAMROD_DESTROY_QP,
+ MAX_ROCE_RAMROD_CMD_ID
+};
+
+struct mstorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct mstorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+enum roce_flavor {
+ PLAIN_ROCE, /* RoCE v1 */
+ RROCE_IPV4, /* RoCE v2 (Routable RoCE) over IPv4 */
+ RROCE_IPV6, /* RoCE v2 (Routable RoCE) over IPv6 */
+ MAX_ROCE_FLAVOR
+};
+
+struct tstorm_roce_req_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+ u8 flags3;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 snd_nxt_psn;
+ __le32 snd_max_psn;
+ __le32 orq_prod;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 tx_cqe_error_type;
+ u8 orq_cache_idx;
+ __le16 snd_sq_cons_th;
+ u8 byte4;
+ u8 byte5;
+ __le16 snd_sq_cons;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct tstorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 psn_and_rxmit_id_echo;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 tx_async_error_type;
+ u8 byte3;
+ __le16 rq_cons;
+ u8 byte4;
+ u8 byte5;
+ __le16 rq_prod;
+ __le16 conn_dpi;
+ __le16 irq_cons;
+ __le32 num_invlidated_mw;
+ __le32 reg10;
+};
+
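The _MASK/_SHIFT pairs above only carry meaning together with the flagsN byte they describe. As a hedged sketch (not part of the patch), this is how a reader of one of these contexts could decode a 2-bit counter-flag and its 1-bit enable, using only names defined in this hunk; the helper name and the "pending" interpretation are illustrative, and the driver proper normally goes through its generic GET_FIELD()/SET_FIELD() style helpers instead of open-coded shifts.

#include <linux/types.h>

/* Illustrative only: extract RX_ERROR_CF (2 bits of flags1) and its
 * enable bit (1 bit of flags3) from a tstorm responder context.
 */
static bool example_roce_resp_rx_error_pending(const struct tstorm_roce_resp_conn_ag_ctx *ctx)
{
	u8 cf = (ctx->flags1 >> TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT) &
		TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK;
	u8 cf_en = (ctx->flags3 >> TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT) &
		   TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK;

	return cf_en && cf;
}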
+struct ustorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct ustorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct xstorm_roce_req_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 sq_cmp_cons;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 lsn;
+ __le32 ssn;
+ __le32 snd_una_psn;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+ __le32 orq_cons_th;
+ __le32 orq_cons;
+};
+
+struct xstorm_roce_resp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 irq_prod;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 irq_cons;
+ u8 rxmit_opcode;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 rxmit_psn_and_id;
+ __le32 rxmit_bytes_length;
+ __le32 psn;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 msn_and_syndrome;
+};
+
+struct ystorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct ystorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct ystorm_iscsi_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+struct pstorm_iscsi_tcp_conn_st_ctx {
+ __le32 tcp[32];
+ __le32 iscsi[4];
+};
+
+struct xstorm_iscsi_tcp_conn_st_ctx {
+ __le32 reserved_iscsi[40];
+ __le32 reserved_tcp[4];
+};
+
+struct xstorm_iscsi_conn_ag_ctx {
+ u8 cdu_validation;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+ u8 flags6;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 dummy_dorq_var;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 slow_io_total_data_tx_update;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 hq_scan_next_relevant_ack;
+ __le16 r2tq_prod;
+ __le16 r2tq_cons;
+ __le16 hq_prod;
+ __le16 hq_cons;
+ __le32 remain_seq;
+ __le32 bytes_to_next_pdu;
+ __le32 hq_tcp_seq;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 byte16;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 exp_stat_sn;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
+struct tstorm_iscsi_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+};
+
+struct ustorm_iscsi_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct tstorm_iscsi_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+struct mstorm_iscsi_conn_ag_ctx {
+ u8 reserved;
+ u8 state;
+ u8 flags0;
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct mstorm_iscsi_tcp_conn_st_ctx {
+ __le32 reserved_tcp[20];
+ __le32 reserved_iscsi[8];
+};
+
+struct ustorm_iscsi_conn_st_ctx {
+ __le32 reserved[52];
+};
+
+struct iscsi_conn_context {
+ struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct pb_context xpb2_context;
+ struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct pb_context upb_context;
+ struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
+ struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
+};
+
+struct iscsi_init_ramrod_params {
+ struct iscsi_spe_func_init iscsi_init_spe;
+ struct tcp_init_params tcp_init;
+};
+
+struct ystorm_iscsi_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+#define VF_MAX_STATIC 192
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2
+#define MCP_GLOB_PORT_MAX 4
+#define MCP_GLOB_FUNC_MAX 16
-typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
-#define OFFSIZE_OFFSET_MASK 0x0000ffff
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
/* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
-#define OFFSIZE_SIZE_MASK 0xffff0000
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
-/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
-#define SECTION_OFFSET(_offsize) ((((_offsize & \
- OFFSIZE_OFFSET_MASK) >> \
- OFFSIZE_OFFSET_SHIFT) << 2))
+#define SECTION_OFFSET(_offsize) ((((_offsize & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
-/* QED_SECTION_SIZE is calculating the size in bytes out of offsize */
-#define QED_SECTION_SIZE(_offsize) (((_offsize & \
- OFFSIZE_SIZE_MASK) >> \
- OFFSIZE_SIZE_SHIFT) << 2)
+#define QED_SECTION_SIZE(_offsize) (((_offsize & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
-/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
- * within section.
- */
-#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
- SECTION_OFFSET(_offsize) + \
- (QED_SECTION_SIZE(_offsize) * idx))
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + \
+ (QED_SECTION_SIZE(_offsize) * idx))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
-/* SECTION_OFFSIZE_ADDR returns the GRC addr to the offsize address.
- * Use offsetof, since the OFFSETUP collide with the firmware definition
- */
-#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \
- offsetof(struct \
- mcp_public_data, \
- sections[_section]))
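/*
 * Illustrative userspace sketch (not part of the patch): how the OFFSIZE
 * and SECTION_* macros above decode an "offsize" word.  The low 16 bits
 * hold the offset from the MCP scratchpad and the high 16 bits the
 * per-element size, both counted in dwords, so the macros shift left by 2
 * to get bytes; SECTION_ADDR() then adds idx * element_size to the
 * scratchpad base.  MCP_REG_SCRATCH below is a placeholder value, not the
 * real GRC address.
 */
#include <stdint.h>
#include <stdio.h>

#define MCP_REG_SCRATCH		0xe20000	/* placeholder base */
#define OFFSIZE_OFFSET_SHIFT	0
#define OFFSIZE_OFFSET_MASK	0x0000ffff
#define OFFSIZE_SIZE_SHIFT	16
#define OFFSIZE_SIZE_MASK	0xffff0000

static uint32_t section_offset(uint32_t offsize)
{
	return ((offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2;
}

static uint32_t section_size(uint32_t offsize)
{
	return ((offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2;
}

static uint32_t section_addr(uint32_t offsize, uint32_t idx)
{
	return MCP_REG_SCRATCH + section_offset(offsize) +
	       section_size(offsize) * idx;
}

int main(void)
{
	/* offset 0x100 dwords (0x400 bytes), element size 0x20 dwords (0x80 bytes) */
	uint32_t offsize = (0x20 << 16) | 0x100;

	printf("element 3 lives at 0x%x\n", section_addr(offsize, 3));
	return 0;
}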
/* PHY configuration */
-struct pmm_phy_cfg {
- u32 speed;
-#define PMM_SPEED_AUTONEG 0
-
- u32 pause; /* bitmask */
-#define PMM_PAUSE_NONE 0x0
-#define PMM_PAUSE_AUTONEG 0x1
-#define PMM_PAUSE_RX 0x2
-#define PMM_PAUSE_TX 0x4
-
- u32 adv_speed; /* Default should be the speed_cap_mask */
- u32 loopback_mode;
-#define PMM_LOOPBACK_NONE 0
-#define PMM_LOOPBACK_INT_PHY 1
-#define PMM_LOOPBACK_EXT_PHY 2
-#define PMM_LOOPBACK_EXT 3
-#define PMM_LOOPBACK_MAC 4
-
- /* features */
+struct eth_phy_cfg {
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause;
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed;
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE (0)
+#define ETH_LOOPBACK_INT_PHY (1)
+#define ETH_LOOPBACK_EXT_PHY (2)
+#define ETH_LOOPBACK_EXT (3)
+#define ETH_LOOPBACK_MAC (4)
+
u32 feature_config_flags;
+#define ETH_EEE_MODE_ADV_LPI (1 << 0)
};
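/*
 * Illustrative sketch (not part of the patch): filling the renamed
 * eth_phy_cfg block for an autoneg request with symmetric pause.  The
 * struct below only mirrors the field layout above for the sake of the
 * example; how the block is copied into the mailbox union and which
 * command carries it is driver code outside this header.
 */
#include <stdint.h>
#include <string.h>

struct eth_phy_cfg_sketch {
	uint32_t speed;
	uint32_t pause;
	uint32_t adv_speed;
	uint32_t loopback_mode;
	uint32_t feature_config_flags;
};

static void fill_autoneg_cfg(struct eth_phy_cfg_sketch *cfg,
			     uint32_t speed_cap_mask)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->speed = 0;			/* ETH_SPEED_AUTONEG */
	cfg->pause = 0x1 | 0x2 | 0x4;	/* ETH_PAUSE_AUTONEG | RX | TX */
	cfg->adv_speed = speed_cap_mask;/* advertise the full capability mask */
	cfg->loopback_mode = 0;		/* ETH_LOOPBACK_NONE */
}

int main(void)
{
	struct eth_phy_cfg_sketch cfg;

	fill_autoneg_cfg(&cfg, 0x7f);	/* assumed speed capability mask */
	return (int)cfg.pause;		/* 0x7 */
}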
struct port_mf_cfg {
- u32 dynamic_cfg; /* device control channel */
-#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
-#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
-
- u32 reserved[1];
-};
-
-/* DO NOT add new fields in the middle
- * MUST be synced with struct pmm_stats_map
- */
-struct pmm_stats {
- u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
- u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
- u64 r255;
- u64 r511;
- u64 r1023;
- u64 r1518;
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
- u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
- u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
- u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
- u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
- u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
- u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
- u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
- u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
- u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
- u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
- u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
- u64 t127;
- u64 t255;
- u64 t511;
- u64 t1023;
- u64 t1518;
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
- u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
- u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
- u64 tlpiec;
- u64 tncl;
- u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
- u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
- u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
- u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
- u64 rxpok;
- u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
- u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
- u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
- u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
- u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+ u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+struct eth_stats {
+ u64 r64;
+ u64 r127;
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ u64 rfcs;
+ u64 rxcf;
+ u64 rxpf;
+ u64 rxpp;
+ u64 raln;
+ u64 rfcr;
+ u64 rovr;
+ u64 rjbr;
+ u64 rund;
+ u64 rfrg;
+ u64 t64;
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ u64 txpf;
+ u64 txpp;
+ u64 tlpiec;
+ u64 tncl;
+ u64 rbyte;
+ u64 rxuca;
+ u64 rxmca;
+ u64 rxbca;
+ u64 rxpok;
+ u64 tbyte;
+ u64 txuca;
+ u64 txmca;
+ u64 txbca;
+ u64 txcf;
};
struct brb_stats {
- u64 brb_truncate[8];
- u64 brb_discard[8];
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
};
struct port_stats {
- struct brb_stats brb;
- struct pmm_stats pmm;
+ struct brb_stats brb;
+ struct eth_stats eth;
};
-#define CMT_TEAM0 0
-#define CMT_TEAM1 1
-#define CMT_TEAM_MAX 2
-
struct couple_mode_teaming {
u8 port_cmt[MCP_GLOB_PORT_MAX];
-#define PORT_CMT_IN_TEAM BIT(0)
+#define PORT_CMT_IN_TEAM (1 << 0)
-#define PORT_CMT_PORT_ROLE BIT(1)
-#define PORT_CMT_PORT_INACTIVE (0 << 1)
-#define PORT_CMT_PORT_ACTIVE BIT(1)
+#define PORT_CMT_PORT_ROLE (1 << 1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE (1 << 1)
-#define PORT_CMT_TEAM_MASK BIT(2)
-#define PORT_CMT_TEAM0 (0 << 2)
-#define PORT_CMT_TEAM1 BIT(2)
+#define PORT_CMT_TEAM_MASK (1 << 2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 (1 << 2)
};
-/**************************************
-* LLDP and DCBX HSI structures
-**************************************/
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
-#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
-enum lldp_agent_e {
+enum _lldp_agent {
LLDP_NEAREST_BRIDGE = 0,
LLDP_NEAREST_NON_TPMR_BRIDGE,
LLDP_NEAREST_CUSTOMER_BRIDGE,
@@ -3394,689 +6786,517 @@ enum lldp_agent_e {
struct lldp_config_params_s {
u32 config;
-#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
-#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
-#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
-#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
-#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
- u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
};
struct lldp_status_params_s {
- u32 prefix_seq_num;
- u32 status; /* TBD */
-
- /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
- u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
-
- /* Holds remote Port ID TLV header, subtype and 9B of payload. */
- u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
- u32 suffix_seq_num;
+ u32 prefix_seq_num;
+ u32 status;
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
};
struct dcbx_ets_feature {
u32 flags;
-#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
-#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
-#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
-#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
-#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
- u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC 4
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
- u32 tc_bw_tbl[2];
- u32 tc_tsa_tbl[2];
-#define DCBX_ETS_TSA_STRICT 0
-#define DCBX_ETS_TSA_CBS 1
-#define DCBX_ETS_TSA_ETS 2
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
+#define DCBX_ISCSI_OOO_TC_SHIFT 8
+ u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC (4)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
};
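/*
 * Illustrative sketch (not part of the patch): the MASK/SHIFT pairs used
 * throughout this header are consumed by read-modify-write helpers along
 * the lines shown below, here applied to dcbx_ets_feature.flags.  The
 * same pattern applies to every other packed field in the file; the
 * driver's own accessors follow the same idea.
 */
#include <stdint.h>

#define DCBX_ETS_WILLING_MASK	0x00000002
#define DCBX_ETS_WILLING_SHIFT	1
#define DCBX_ETS_MAX_TCS_MASK	0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT	4

static uint32_t get_field(uint32_t word, uint32_t mask, uint32_t shift)
{
	return (word & mask) >> shift;
}

static uint32_t set_field(uint32_t word, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t flags = 0x41;	/* ETS enabled, 4 traffic classes */
	uint32_t max_tcs = get_field(flags, DCBX_ETS_MAX_TCS_MASK,
				     DCBX_ETS_MAX_TCS_SHIFT);

	flags = set_field(flags, DCBX_ETS_WILLING_MASK,
			  DCBX_ETS_WILLING_SHIFT, 1);
	return (int)max_tcs;	/* 4 */
}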
struct dcbx_app_priority_entry {
u32 entry;
-#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
-#define DCBX_APP_PRI_0 0x01
-#define DCBX_APP_PRI_1 0x02
-#define DCBX_APP_PRI_2 0x04
-#define DCBX_APP_PRI_3 0x08
-#define DCBX_APP_PRI_4 0x10
-#define DCBX_APP_PRI_5 0x20
-#define DCBX_APP_PRI_6 0x40
-#define DCBX_APP_PRI_7 0x80
-#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
-#define DCBX_APP_SF_ETHTYPE 0
-#define DCBX_APP_SF_PORT 1
-#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
-};
-
-/* FW structure in BE */
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
struct dcbx_app_priority_feature {
u32 flags;
-#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
-#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
-#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
-/* Not in use
- * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
- * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
- */
-#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
-#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};
-/* FW structure in BE */
struct dcbx_features {
- /* PG feature */
struct dcbx_ets_feature ets;
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
- /* PFC feature */
- u32 pfc;
-#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
-
-#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
-#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
-#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
-#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
-#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
-#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
-
- /* APP feature */
struct dcbx_app_priority_feature app;
};
struct dcbx_local_params {
u32 config;
-#define DCBX_CONFIG_VERSION_MASK 0x00000003
-#define DCBX_CONFIG_VERSION_SHIFT 0
-#define DCBX_CONFIG_VERSION_DISABLED 0
-#define DCBX_CONFIG_VERSION_IEEE 1
-#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4
- u32 flags;
- struct dcbx_features features;
+ u32 flags;
+ struct dcbx_features features;
};
struct dcbx_mib {
- u32 prefix_seq_num;
- u32 flags;
- struct dcbx_features features;
- u32 suffix_seq_num;
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
};
struct lldp_system_tlvs_buffer_s {
- u16 valid;
- u16 length;
- u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+ u16 valid;
+ u16 length;
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
};
-/**************************************/
-/* */
-/* P U B L I C G L O B A L */
-/* */
-/**************************************/
-struct public_global {
- u32 max_path;
-#define MAX_PATH_BIG_BEAR 2
-#define MAX_PATH_K2 1
- u32 max_ports;
-#define MODE_1P 1
-#define MODE_2P 2
-#define MODE_3P 3
-#define MODE_4P 4
- u32 debug_mb_offset;
- u32 phymod_dbg_mb_offset;
- struct couple_mode_teaming cmt;
- s32 internal_temperature;
- u32 mfw_ver;
- u32 running_bundle_id;
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
};
-/**************************************/
-/* */
-/* P U B L I C P A T H */
-/* */
-/**************************************/
+struct public_global {
+ u32 max_path;
+ u32 max_ports;
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+ u32 mdump_reason;
+};
-/****************************************************************************
-* Shared Memory 2 Region *
-****************************************************************************/
-/* The fw_flr_ack is actually built in the following way: */
-/* 8 bit: PF ack */
-/* 128 bit: VF ack */
-/* 8 bit: ios_dis_ack */
-/* In order to maintain endianity in the mailbox hsi, we want to keep using */
-/* u32. The fw must have the VF right after the PF since this is how it */
-/* access arrays(it expects always the VF to reside after the PF, and that */
-/* makes the calculation much easier for it. ) */
-/* In order to answer both limitations, and keep the struct small, the code */
-/* will abuse the structure defined here to achieve the actual partition */
-/* above */
-/****************************************************************************/
struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
-#define ACCUM_ACK_PF_BASE 0
-#define ACCUM_ACK_PF_SHIFT 0
-
-#define ACCUM_ACK_VF_BASE 8
-#define ACCUM_ACK_VF_SHIFT 3
-
-#define ACCUM_ACK_IOV_DIS_BASE 256
-#define ACCUM_ACK_IOV_DIS_SHIFT 8
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack;
};
struct public_path {
- struct fw_flr_mb flr_mb;
- u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 process_kill;
-#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
-#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
};
-/**************************************/
-/* */
-/* P U B L I C P O R T */
-/* */
-/**************************************/
-
-/****************************************************************************
-* Driver <-> FW Mailbox *
-****************************************************************************/
-
struct public_port {
- u32 validity_map; /* 0x0 (4*2 = 0x8) */
-
- /* validity bits */
-#define MCP_VALIDITY_PCI_CFG 0x00100000
-#define MCP_VALIDITY_MB 0x00200000
-#define MCP_VALIDITY_DEV_INFO 0x00400000
-#define MCP_VALIDITY_RESERVED 0x00000007
-
- /* One licensing bit should be set */
-#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
-#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
-#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
-#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
-
- /* Active MFW */
-#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
-#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
-#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
-#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+ u32 validity_map;
u32 link_status;
-#define LINK_STATUS_LINK_UP \
- 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-
-#define LINK_STATUS_PFC_ENABLED \
- 0x00000100
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
-#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
-#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
-#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
-#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
-#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
-#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-
-#define LINK_STATUS_SFP_TX_FAULT \
- 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-
- u32 link_status1;
- u32 ext_phy_fw_version;
- u32 drv_phy_cfg_addr;
-
- u32 port_stx;
-
- u32 stat_nig_timer;
-
- struct port_mf_cfg port_mf_config;
- struct port_stats stats;
-
- u32 media_type;
-#define MEDIA_UNSPECIFIED 0x0
-#define MEDIA_SFPP_10G_FIBER 0x1
-#define MEDIA_XFP_FIBER 0x2
-#define MEDIA_DA_TWINAX 0x3
-#define MEDIA_BASE_T 0x4
-#define MEDIA_SFP_1G_FIBER 0x5
-#define MEDIA_KR 0xf0
-#define MEDIA_NOT_PRESENT 0xff
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
u32 lfa_status;
-#define LFA_LINK_FLAP_REASON_OFFSET 0
-#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
-#define LFA_NO_REASON (0 << 0)
-#define LFA_LINK_DOWN BIT(0)
-#define LFA_FORCE_INIT BIT(1)
-#define LFA_LOOPBACK_MISMATCH BIT(2)
-#define LFA_SPEED_MISMATCH BIT(3)
-#define LFA_FLOW_CTRL_MISMATCH BIT(4)
-#define LFA_ADV_SPEED_MISMATCH BIT(5)
-#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
-#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
-#define LINK_FLAP_COUNT_OFFSET 16
-#define LINK_FLAP_COUNT_MASK 0x00ff0000
-
- u32 link_change_count;
-
- /* LLDP params */
- struct lldp_config_params_s lldp_config_params[
- LLDP_MAX_LLDP_AGENTS];
- struct lldp_status_params_s lldp_status_params[
- LLDP_MAX_LLDP_AGENTS];
- struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+ u32 link_change_count;
+
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
/* DCBX related MIB */
- struct dcbx_local_params local_admin_dcbx_mib;
- struct dcbx_mib remote_dcbx_mib;
- struct dcbx_mib operational_dcbx_mib;
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
- u32 fc_npiv_nvram_tbl_addr;
- u32 fc_npiv_nvram_tbl_size;
- u32 transceiver_data;
-#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF
-#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001
-};
+ u32 reserved[2];
+ u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-/**************************************/
-/* */
-/* P U B L I C F U N C */
-/* */
-/**************************************/
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
+};
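/*
 * Illustrative sketch (not part of the patch): the speed/duplex portion of
 * public_port.link_status is an enumerated value held in bits 1..4, so
 * decoding it means masking and comparing against the (N << 1) definitions
 * above.  Only a few speeds are handled here for brevity.
 */
#include <stdint.h>

#define LINK_STATUS_LINK_UP			0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK	0x0000001e
#define LINK_STATUS_SPEED_AND_DUPLEX_10G	(3 << 1)
#define LINK_STATUS_SPEED_AND_DUPLEX_100G	(7 << 1)
#define LINK_STATUS_SPEED_AND_DUPLEX_25G	(8 << 1)

static int link_speed_mbps(uint32_t link_status)
{
	if (!(link_status & LINK_STATUS_LINK_UP))
		return 0;

	switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		return 10000;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		return 25000;
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		return 100000;
	default:
		return -1;	/* other speeds omitted in this sketch */
	}
}

int main(void)
{
	return link_speed_mbps(LINK_STATUS_LINK_UP |
			       LINK_STATUS_SPEED_AND_DUPLEX_25G) == 25000 ? 0 : 1;
}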
struct public_func {
- u32 iscsi_boot_signature;
- u32 iscsi_boot_block_offset;
-
- u32 mtu_size;
- u32 c2s_pcp_map_lower;
- u32 c2s_pcp_map_upper;
- u32 c2s_pcp_map_default;
- u32 reserved[4];
-
- u32 config;
-
- /* E/R/I/D */
- /* function 0 of each port cannot be hidden */
-#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
-
-#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+ u32 reserved0[2];
+
+ u32 mtu_size;
+
+ u32 reserved[7];
+
+ u32 config;
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
-#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
- /* MINBW, MAXBW */
- /* value range - 0..100, increments in 1 % */
-#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
-#define FUNC_MF_CFG_MIN_BW_SHIFT 8
-#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
-#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 16
-#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
- u32 status;
-#define FUNC_STATUS_VLINK_DOWN 0x00000001
+ u32 status;
+#define FUNC_STATUS_VLINK_DOWN 0x00000001
- u32 mac_upper; /* MAC */
-#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
-#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
- u32 mac_lower;
-#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+ u32 mac_upper;
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
- u32 fcoe_wwn_port_name_upper;
- u32 fcoe_wwn_port_name_lower;
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
- u32 fcoe_wwn_node_name_upper;
- u32 fcoe_wwn_node_name_lower;
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
- u32 ovlan_stag; /* tags */
-#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_OV_STAG_SHIFT 0
-#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+ u32 ovlan_stag;
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
- u32 pf_allocation; /* vf per pf */
+ u32 pf_allocation;
- u32 preserve_data; /* Will be used bt CCM */
+ u32 preserve_data;
- u32 driver_last_activity_ts;
+ u32 driver_last_activity_ts;
- u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
- u32 drv_id;
-#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
-#define DRV_ID_PDA_COMP_VER_SHIFT 0
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
-#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
-#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
-#define DRV_ID_DRV_TYPE_MASK 0x7f000000
-#define DRV_ID_DRV_TYPE_SHIFT 24
-#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
-#define DRV_ID_DRV_INIT_HW_SHIFT 31
-#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
};
-/**************************************/
-/* */
-/* P U B L I C M B */
-/* */
-/**************************************/
-/* This is the only section that the driver can write to, and each */
-/* Basically each driver request to set feature parameters,
- * will be done using a different command, which will be linked
- * to a specific data structure from the union below.
- * For huge strucuture, the common blank structure should be used.
- */
-
struct mcp_mac {
- u32 mac_upper; /* Upper 16 bits are always zeroes */
- u32 mac_lower;
+ u32 mac_upper;
+ u32 mac_lower;
};
struct mcp_val64 {
- u32 lo;
- u32 hi;
+ u32 lo;
+ u32 hi;
};
struct mcp_file_att {
- u32 nvm_start_addr;
- u32 len;
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
};
#define MCP_DRV_VER_STR_SIZE 16
#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
#define MCP_DRV_NVM_BUF_LEN 32
struct drv_version_stc {
- u32 version;
- u8 name[MCP_DRV_VER_STR_SIZE - 4];
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 reserved;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoc;
+ u32 num_of_logs;
+ u32 valid_logs;
};
union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
- struct mcp_mac wol_mac;
+ u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+ struct mcp_mac wol_mac;
+
+ struct eth_phy_cfg drv_phy_cfg;
- struct pmm_phy_cfg drv_phy_cfg;
+ struct mcp_val64 val64;
- struct mcp_val64 val64; /* For PHY / AVS commands */
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
- u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+ struct mcp_file_att file_att;
- struct mcp_file_att file_att;
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
- u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+ struct drv_version_stc drv_version;
- struct drv_version_stc drv_version;
+ struct lan_stats_stc lan_stats;
+ u64 reserved_stats[11];
+ struct ocbb_data_stc ocbb_info;
+ struct temperature_status_stc temp_info;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
};
struct public_drv_mb {
u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK 0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ 0x10000000
-#define DRV_MSG_CODE_LOAD_DONE 0x11000000
-#define DRV_MSG_CODE_INIT_HW 0x12000000
-#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
-#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
-#define DRV_MSG_CODE_INIT_PHY 0x22000000
- /* Params - FORCE - Reinitialize the link regardless of LFA */
- /* - DONT_CARE - Don't flap the link if up */
-#define DRV_MSG_CODE_LINK_RESET 0x23000000
-
-#define DRV_MSG_CODE_SET_LLDP 0x24000000
-#define DRV_MSG_CODE_SET_DCBX 0x25000000
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-
-#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
-#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
-#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
-#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
-#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
-#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
-#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
-#define DRV_MSG_CODE_MCP_RESET 0x00090000
-#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
-#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
-#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
-#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
-#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
-#define DRV_MSG_CODE_SET_VERSION 0x000f0000
-
-#define DRV_MSG_CODE_BIST_TEST 0x001e0000
-#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
-
-#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+
+#define DRV_MSG_CODE_BIST_TEST 0x001e0000
+#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
- /* UNLOAD_REQ params */
-#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
-#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
-#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
-
- /* UNLOAD_DONE_params */
-#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
-
- /* INIT_PHY params */
-#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
-#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
-
- /* LLDP / DCBX params*/
-#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
-#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
-#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
-#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
-
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
-
-#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
-#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
-#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
-
-#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
-#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
-#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
-#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
-#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
-#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
-#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
-#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
-
-/* configure vf MSIX params*/
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
-
-#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
-#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
-#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
-
-#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
-#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
-#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
-
-#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
-#define DRV_MB_PARAM_BIST_RC_PASSED 1
-#define DRV_MB_PARAM_BIST_RC_FAILED 2
-#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
-
-#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
-#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
u32 fw_mb_header;
-#define FW_MSG_CODE_MASK 0xffff0000
-#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
-#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
-#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
-#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
-#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
-#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
-#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
-#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
-#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
-#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
-#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
-#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
-#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
-#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
-#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
-#define FW_MSG_CODE_FLR_ACK 0x02000000
-#define FW_MSG_CODE_FLR_NACK 0x02100000
-
-#define FW_MSG_CODE_NVM_OK 0x00010000
-#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
-#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
-#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
-#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
-#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
-#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
-#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
-#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
-#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
-#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
-#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
-#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
-#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
-#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
-#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
-#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
-#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
-#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
-#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
-#define FW_MSG_CODE_PHY_OK 0x00110000
-#define FW_MSG_CODE_PHY_ERROR 0x00120000
-#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
-#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
-#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
-#define FW_MSG_CODE_OK 0x00160000
-
-#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
- u32 fw_mb_param;
-
- u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK 0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
-#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_OK 0x00160000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+
u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK 0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
-#define MCP_EVENT_MASK 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
union drv_union_data union_data;
};
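/*
 * Illustrative sketch (not part of the patch): how the 32-bit mailbox
 * headers of public_drv_mb are split.  The upper 16 bits carry the
 * command or response code and the lower 16 bits a rolling sequence
 * number; a firmware reply is matched by comparing the sequence fields.
 * The actual register access lives in the MCP code and is not shown.
 */
#include <stdint.h>
#include <stdbool.h>

#define DRV_MSG_CODE_MASK	0xffff0000
#define DRV_MSG_SEQ_NUMBER_MASK	0x0000ffff
#define FW_MSG_SEQ_NUMBER_MASK	0x0000ffff

static uint32_t build_drv_mb_header(uint32_t cmd, uint16_t seq)
{
	return (cmd & DRV_MSG_CODE_MASK) | (seq & DRV_MSG_SEQ_NUMBER_MASK);
}

static bool fw_reply_matches(uint32_t fw_mb_header, uint32_t drv_mb_header)
{
	return (fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) ==
	       (drv_mb_header & DRV_MSG_SEQ_NUMBER_MASK);
}

int main(void)
{
	uint32_t hdr = build_drv_mb_header(0x000f0000 /* SET_VERSION */, 0x12);
	uint32_t fw = 0x00160000 | 0x12;	/* FW_MSG_CODE_OK, same seq */

	return fw_reply_matches(fw, hdr) ? 0 : 1;
}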
-/* MFW - DRV MB */
-/**********************************************************************
-* Description
-* Incremental Aggregative
-* 8-bit MFW counter per message
-* 8-bit ack-counter per message
-* Capabilities
-* Provides up to 256 aggregative message per type
-* Provides 4 message types in dword
-* Message type pointers to byte offset
-* Backward Compatibility by using sizeof for the counters.
-* No lock requires for 32bit messages
-* Limitations:
-* In case of messages greater than 32bit, a dedicated mechanism(e.g lock)
-* is required to prevent data corruption.
-**********************************************************************/
enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LINK_CHANGE,
MFW_DRV_MSG_FLR_FW_ACK_FAILED,
@@ -4084,37 +7304,33 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LLDP_DATA_UPDATED,
MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
- MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_RESERVED4,
MFW_DRV_MSG_BW_UPDATE,
- MFW_DRV_MSG_S_TAG_UPDATE,
- MFW_DRV_MSG_GET_LAN_STATS,
- MFW_DRV_MSG_GET_FCOE_STATS,
- MFW_DRV_MSG_GET_ISCSI_STATS,
- MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_BW_UPDATE5,
+ MFW_DRV_MSG_BW_UPDATE6,
+ MFW_DRV_MSG_BW_UPDATE7,
+ MFW_DRV_MSG_BW_UPDATE8,
+ MFW_DRV_MSG_BW_UPDATE9,
+ MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_BW_UPDATE11,
MFW_DRV_MSG_MAX
};
-#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
-#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
-#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
-#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
struct public_mfw_mb {
- u32 sup_msgs;
- u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
- u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
};
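/*
 * Illustrative sketch (not part of the patch): the msg[]/ack[] arrays in
 * public_mfw_mb hold one byte per MFW_DRV_MSG_TYPE, four messages per
 * dword, located by the DWORD/OFFSET/MASK macros above.  A message is
 * outstanding while its byte in msg[] differs from the same byte in
 * ack[]; the exact acknowledge handshake lives in the MCP code.
 */
#include <stdint.h>
#include <stdbool.h>

#define MFW_DRV_MSG_DWORD(msg_id)	((msg_id) >> 2)
#define MFW_DRV_MSG_OFFSET(msg_id)	(((msg_id) & 0x3) << 3)
#define MFW_DRV_MSG_MASK(msg_id)	(0xff << MFW_DRV_MSG_OFFSET(msg_id))

static bool mfw_msg_pending(const uint32_t *msg, const uint32_t *ack,
			    unsigned int msg_id)
{
	uint32_t diff = msg[MFW_DRV_MSG_DWORD(msg_id)] ^
			ack[MFW_DRV_MSG_DWORD(msg_id)];

	return (diff & MFW_DRV_MSG_MASK(msg_id)) != 0;
}

int main(void)
{
	uint32_t msg[5] = { 0x00000100 };	/* msg_id 1 bumped by the MFW */
	uint32_t ack[5] = { 0 };

	return mfw_msg_pending(msg, ack, 1) ? 0 : 1;
}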
-/**************************************/
-/* */
-/* P U B L I C D A T A */
-/* */
-/**************************************/
enum public_sections {
- PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
- PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_DRV_MB,
+ PUBLIC_MFW_MB,
PUBLIC_GLOBAL,
PUBLIC_PATH,
PUBLIC_PORT,
@@ -4122,1080 +7338,179 @@ enum public_sections {
PUBLIC_MAX_SECTIONS
};
-struct drv_ver_info_stc {
- u32 ver;
- u8 name[32];
-};
-
struct mcp_public_data {
- /* The sections fields is an array */
- u32 num_sections;
- offsize_t sections[PUBLIC_MAX_SECTIONS];
- struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
- struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
- struct public_global global;
- struct public_path path[MCP_GLOB_PATH_MAX];
- struct public_port port[MCP_GLOB_PORT_MAX];
- struct public_func func[MCP_GLOB_FUNC_MAX];
- struct drv_ver_info_stc drv_info;
+ u32 num_sections;
+ u32 sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
};
struct nvm_cfg_mac_address {
- u32 mac_addr_hi;
-#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
-#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
-
- u32 mac_addr_lo;
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+ u32 mac_addr_lo;
};
-/******************************************
-* nvm_cfg1 structs
-******************************************/
-
struct nvm_cfg1_glob {
- u32 generic_cont0; /* 0x0 */
-#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
-#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
-#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
-#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
-#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
-#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
-#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
-#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
-#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
-#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
-#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
-#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
-#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
-#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
-#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
-#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
-#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
-#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
-#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
-
- u32 engineering_change[3]; /* 0x4 */
-
- u32 manufacturing_id; /* 0x10 */
-
- u32 serial_number[4]; /* 0x14 */
-
- u32 pcie_cfg; /* 0x24 */
-#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
-#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1
-#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
-#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
-#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
-#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
-
- u32 mgmt_traffic; /* 0x28 */
-#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
-#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
-#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0
-#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
-#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
-#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
-
- u32 core_cfg; /* 0x2C */
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
-#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
-#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
-#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
-#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
-#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
-#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
-#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
-#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1
-#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
-
- u32 e_lane_cfg1; /* 0x30 */
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
-
- u32 e_lane_cfg2; /* 0x34 */
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
-#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
-#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
-#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
-#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
-#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
-#define NVM_CFG1_GLOB_NCSI_OFFSET 12
-#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
-#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
-
- u32 f_lane_cfg1; /* 0x38 */
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
-
- u32 f_lane_cfg2; /* 0x3C */
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
-
- u32 eagle_preemphasis; /* 0x40 */
-#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
-
- u32 eagle_driver_current; /* 0x44 */
-#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
-
- u32 falcon_preemphasis; /* 0x48 */
-#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
-
- u32 falcon_driver_current; /* 0x4C */
-#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
-
- u32 pci_id; /* 0x50 */
-#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
-#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
-
- u32 pci_subsys_id; /* 0x54 */
-#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
-#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
-#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
-#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
-
- u32 bar; /* 0x58 */
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
-#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
-#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
-#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
-#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
-#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
-#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
-#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
-#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
-#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
-#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
-#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
-#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
-#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
-#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
-#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
-#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
-#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
-
- u32 eagle_txfir_main; /* 0x5C */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
-
- u32 eagle_txfir_post; /* 0x60 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
-
- u32 falcon_txfir_main; /* 0x64 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
-
- u32 falcon_txfir_post; /* 0x68 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
-
- u32 manufacture_ver; /* 0x6C */
-#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
-#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
-#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
-#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
-#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
-#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
-#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
-#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
-#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
-#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
-
- u32 manufacture_time; /* 0x70 */
-#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
-#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
-#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
-#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
-#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
-#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
-
- u32 led_global_settings; /* 0x74 */
-#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
-#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
-#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
-#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
-#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
-#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
-#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
-#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
-
- u32 generic_cont1; /* 0x78 */
-#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
-#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
-
- u32 mbi_version; /* 0x7C */
-#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
-#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
-#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
-
- u32 mbi_date; /* 0x80 */
-
- u32 misc_sig; /* 0x84 */
-
- /* Define the GPIO mapping to switch i2c mux */
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
- u32 device_capabilities; /* 0x88 */
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
- u32 power_dissipated; /* 0x8C */
- u32 power_consumed; /* 0x90 */
- u32 efi_version; /* 0x94 */
- u32 reserved[42]; /* 0x98 */
+ u32 generic_cont0;
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+ u32 engineering_change[3];
+ u32 manufacturing_id;
+ u32 serial_number[4];
+ u32 pcie_cfg;
+ u32 mgmt_traffic;
+ u32 core_cfg;
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+ u32 e_lane_cfg1;
+ u32 e_lane_cfg2;
+ u32 f_lane_cfg1;
+ u32 f_lane_cfg2;
+ u32 mps10_preemphasis;
+ u32 mps10_driver_current;
+ u32 mps25_preemphasis;
+ u32 mps25_driver_current;
+ u32 pci_id;
+ u32 pci_subsys_id;
+ u32 bar;
+ u32 mps10_txfir_main;
+ u32 mps10_txfir_post;
+ u32 mps25_txfir_main;
+ u32 mps25_txfir_post;
+ u32 manufacture_ver;
+ u32 manufacture_time;
+ u32 led_global_settings;
+ u32 generic_cont1;
+ u32 mbi_version;
+ u32 mbi_date;
+ u32 misc_sig;
+ u32 device_capabilities;
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+ u32 power_dissipated;
+ u32 power_consumed;
+ u32 efi_version;
+ u32 multi_network_modes_capability;
+ u32 reserved[41];
};
struct nvm_cfg1_path {
- u32 reserved[30]; /* 0x0 */
+ u32 reserved[30];
};
struct nvm_cfg1_port {
- u32 reserved__m_relocated_to_option_123; /* 0x0 */
- u32 reserved__m_relocated_to_option_124; /* 0x4 */
- u32 generic_cont0; /* 0x8 */
-#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
-#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
-#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
-#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
-#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
-#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
-#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
-#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
-#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
-#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
-#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
-#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
-#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
-#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
-#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
-#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
-#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
-#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
-#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
-#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
-#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
-#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
-#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
-#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
-#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
- u32 pcie_cfg; /* 0xC */
-#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
-#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
-
- u32 features; /* 0x10 */
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
-
- u32 speed_cap_mask; /* 0x14 */
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
-
- u32 link_settings; /* 0x18 */
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
-
- u32 phy_cfg; /* 0x1C */
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
-#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
-#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
-#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
-#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
-#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
-#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
-#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
-#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
-#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
-
- u32 mgmt_traffic; /* 0x20 */
-#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
-#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
-
- u32 ext_phy; /* 0x24 */
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
-#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
-#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
-
- u32 mba_cfg1; /* 0x28 */
-#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
-#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
-#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
-#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
-#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
-#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
-#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
-#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
-
- u32 mba_cfg2; /* 0x2C */
-#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
-#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
-
- u32 vf_cfg; /* 0x30 */
-#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
-#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
-
- struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
-
- u32 led_port_settings; /* 0x3C */
-#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
-#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
-#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
-#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
-#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
-#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
-
- u32 transceiver_00; /* 0x40 */
-
- /* Define for mapping of transceiver signal module absent */
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
- /* Define the GPIO mux settings to switch i2c mux to this port */
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
-
- u32 reserved[133]; /* 0x44 */
+ u32 reserved__m_relocated_to_option_123;
+ u32 reserved__m_relocated_to_option_124;
+ u32 generic_cont0;
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+ u32 pcie_cfg;
+ u32 features;
+ u32 speed_cap_mask;
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 link_settings;
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+ u32 phy_cfg;
+ u32 mgmt_traffic;
+ u32 ext_phy;
+ u32 mba_cfg1;
+ u32 mba_cfg2;
+ u32 vf_cfg;
+ struct nvm_cfg_mac_address lldp_mac_address;
+ u32 led_port_settings;
+ u32 transceiver_00;
+ u32 device_ids;
+ u32 board_cfg;
+ u32 mnm_10g_cap;
+ u32 mnm_10g_ctrl;
+ u32 mnm_10g_misc;
+ u32 mnm_25g_cap;
+ u32 mnm_25g_ctrl;
+ u32 mnm_25g_misc;
+ u32 mnm_40g_cap;
+ u32 mnm_40g_ctrl;
+ u32 mnm_40g_misc;
+ u32 mnm_50g_cap;
+ u32 mnm_50g_ctrl;
+ u32 mnm_50g_misc;
+ u32 mnm_100g_cap;
+ u32 mnm_100g_ctrl;
+ u32 mnm_100g_misc;
+ u32 reserved[116];
};
struct nvm_cfg1_func {
- struct nvm_cfg_mac_address mac_address; /* 0x0 */
-
- u32 rsrv1; /* 0x8 */
-#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
-
- u32 rsrv2; /* 0xC */
-#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
-
- u32 device_id; /* 0x10 */
-#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
-
- u32 cmn_cfg; /* 0x14 */
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
-#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
-#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
-#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
-#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
-#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
-#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
-#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
-#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
-#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
-#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
-
- u32 pci_cfg; /* 0x18 */
-#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
-#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
-#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
-#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
-#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
-#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
-#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
-#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
-#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
-#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
-#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
-#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
-#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
-#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
-#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
-#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
-#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
-#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
-#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
-#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
-#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
-#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
-#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
-
- struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
-
- struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
- u32 preboot_generic_cfg; /* 0x2C */
- u32 reserved[8]; /* 0x30 */
+ struct nvm_cfg_mac_address mac_address;
+ u32 rsrv1;
+ u32 rsrv2;
+ u32 device_id;
+ u32 cmn_cfg;
+ u32 pci_cfg;
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
+ u32 preboot_generic_cfg;
+ u32 reserved[8];
};
struct nvm_cfg1 {
- struct nvm_cfg1_glob glob; /* 0x0 */
-
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
-
- struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
-
- struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
-};
-
-/******************************************
-* nvm_cfg structs
-******************************************/
-
-enum nvm_cfg_sections {
- NVM_CFG_SECTION_NVM_CFG1,
- NVM_CFG_SECTION_MAX
-};
-
-struct nvm_cfg {
- u32 num_sections;
- u32 sections_offset[NVM_CFG_SECTION_MAX];
- struct nvm_cfg1 cfg1;
-};
-
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_2 2
-#define PORT_3 3
-
-extern struct spad_layout g_spad;
-
-#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
-
-#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
-
-#define TO_OFFSIZE(_offset, _size) \
- (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
- (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
-
-enum spad_sections {
- SPAD_SECTION_TRACE,
- SPAD_SECTION_NVM_CFG,
- SPAD_SECTION_PUBLIC,
- SPAD_SECTION_PRIVATE,
- SPAD_SECTION_MAX
-};
-
-struct spad_layout {
- struct nvm_cfg nvm_cfg;
- struct mcp_public_data public_data;
+ struct nvm_cfg1_glob glob;
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
};
-
-#define CRC_MAGIC_VALUE 0xDEBB20E3
-#define CRC32_POLYNOMIAL 0xEDB88320
-#define NVM_CRC_SIZE (sizeof(u32))
-
-enum nvm_sw_arbitrator {
- NVM_SW_ARB_HOST,
- NVM_SW_ARB_MCP,
- NVM_SW_ARB_UART,
- NVM_SW_ARB_RESERVED
-};
-
-/****************************************************************************
-* Boot Strap Region *
-****************************************************************************/
-struct legacy_bootstrap_region {
- u32 magic_value;
-#define NVM_MAGIC_VALUE 0x669955aa
- u32 sram_start_addr;
- u32 code_len; /* boot code length (in dwords) */
- u32 code_start_addr;
- u32 crc; /* 32-bit CRC */
-};
-
-/****************************************************************************
-* Directories Region *
-****************************************************************************/
-struct nvm_code_entry {
- u32 image_type; /* Image type */
- u32 nvm_start_addr; /* NVM address of the image */
- u32 len; /* Include CRC */
- u32 sram_start_addr;
- u32 sram_run_addr; /* Relevant in case of MIM only */
-};
-
-enum nvm_image_type {
- NVM_TYPE_TIM1 = 0x01,
- NVM_TYPE_TIM2 = 0x02,
- NVM_TYPE_MIM1 = 0x03,
- NVM_TYPE_MIM2 = 0x04,
- NVM_TYPE_MBA = 0x05,
- NVM_TYPE_MODULES_PN = 0x06,
- NVM_TYPE_VPD = 0x07,
- NVM_TYPE_MFW_TRACE1 = 0x08,
- NVM_TYPE_MFW_TRACE2 = 0x09,
- NVM_TYPE_NVM_CFG1 = 0x0a,
- NVM_TYPE_L2B = 0x0b,
- NVM_TYPE_DIR1 = 0x0c,
- NVM_TYPE_EAGLE_FW1 = 0x0d,
- NVM_TYPE_FALCON_FW1 = 0x0e,
- NVM_TYPE_PCIE_FW1 = 0x0f,
- NVM_TYPE_HW_SET = 0x10,
- NVM_TYPE_LIM = 0x11,
- NVM_TYPE_AVS_FW1 = 0x12,
- NVM_TYPE_DIR2 = 0x13,
- NVM_TYPE_CCM = 0x14,
- NVM_TYPE_EAGLE_FW2 = 0x15,
- NVM_TYPE_FALCON_FW2 = 0x16,
- NVM_TYPE_PCIE_FW2 = 0x17,
- NVM_TYPE_AVS_FW2 = 0x18,
-
- NVM_TYPE_MAX,
-};
-
-#define MAX_NVM_DIR_ENTRIES 200
-
-struct nvm_dir {
- s32 seq;
-#define NVM_DIR_NEXT_MFW_MASK 0x00000001
-#define NVM_DIR_SEQ_MASK 0xfffffffe
-#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
-
-#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
-
- u32 num_images;
- u32 rsrv;
- struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
-};
-
-#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
- (_num_images - \
- 1) * sizeof(struct nvm_code_entry) + \
- NVM_CRC_SIZE)
-
-struct nvm_vpd_image {
- u32 format_revision;
-#define VPD_IMAGE_VERSION 1
-
- /* This array length depends on the number of VPD fields */
- u8 vpd_data[1];
-};
-
-/****************************************************************************
-* NVRAM FULL MAP *
-****************************************************************************/
-#define DIR_ID_1 (0)
-#define DIR_ID_2 (1)
-#define MAX_DIR_IDS (2)
-
-#define MFW_BUNDLE_1 (0)
-#define MFW_BUNDLE_2 (1)
-#define MAX_MFW_BUNDLES (2)
-
-#define FLASH_PAGE_SIZE 0x1000
-#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4Kb */
-#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2Mb */
-#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 60Kb */
-
-#define LIM_MAX_SIZE ((2 * \
- FLASH_PAGE_SIZE) - \
- sizeof(struct legacy_bootstrap_region) - \
- NVM_RSV_SIZE)
-#define LIM_OFFSET (NVM_OFFSET(lim_image))
-#define NVM_RSV_SIZE (44)
-#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
- FPGA_MIM_MAX_SIZE)
-#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
- ((idx == \
- NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
-#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
- MIM_MAX_SIZE(is_asic) * 2)
-
-union nvm_dir_union {
- struct nvm_dir dir;
- u8 page[FLASH_PAGE_SIZE];
-};
-
-/* Address
- * +-------------------+ 0x000000
- * | Bootstrap: |
- * | magic_number |
- * | sram_start_addr |
- * | code_len |
- * | code_start_addr |
- * | crc |
- * +-------------------+ 0x000014
- * | rsrv |
- * +-------------------+ 0x000040
- * | LIM |
- * +-------------------+ 0x002000
- * | Dir1 |
- * +-------------------+ 0x003000
- * | Dir2 |
- * +-------------------+ 0x004000
- * | MIM1 |
- * +-------------------+ 0x130000
- * | MIM2 |
- * +-------------------+ 0x25C000
- * | Rest Images: |
- * | TIM1/2 |
- * | MFW_TRACE1/2 |
- * | Eagle/Falcon FW |
- * | PCIE/AVS FW |
- * | MBA/CCM/L2B |
- * | VPD |
- * | optic_modules |
- * | ... |
- * +-------------------+ 0x400000
- */
-struct nvm_image {
-/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
- /* NVM Offset (size) */
- struct legacy_bootstrap_region bootstrap;
- u8 rsrv[NVM_RSV_SIZE];
- u8 lim_image[LIM_MAX_SIZE];
- union nvm_dir_union dir[MAX_MFW_BUNDLES];
-
- /* MIM1_IMAGE 0x004000 (0x12c000) */
- /* MIM2_IMAGE 0x130000 (0x12c000) */
-/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
-}; /* 0x134 */
-
-#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
-
-struct hw_set_info {
- u32 reg_type;
-#define GRC_REG_TYPE 1
-#define PHY_REG_TYPE 2
-#define PCI_REG_TYPE 4
-
- u32 bank_num;
- u32 pf_num;
- u32 operation;
-#define READ_OP 1
-#define WRITE_OP 2
-#define RMW_SET_OP 3
-#define RMW_CLR_OP 4
-
- u32 reg_addr;
- u32 reg_data;
-
- u32 reset_type;
-#define POR_RESET_TYPE BIT(0)
-#define HARD_RESET_TYPE BIT(1)
-#define CORE_RESET_TYPE BIT(2)
-#define MCP_RESET_TYPE BIT(3)
-#define PERSET_ASSERT BIT(4)
-#define PERSET_DEASSERT BIT(5)
-};
-
-struct hw_set_image {
- u32 format_version;
-#define HW_SET_IMAGE_VERSION 1
- u32 no_hw_sets;
-
- /* This array length depends on the no_hw_sets */
- struct hw_set_info hw_sets[1];
-};
-
-int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u8 pf_id, u16 pf_wfq);
-int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
#endif
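
For reference, the NVM_CFG1_*_MASK/_OFFSET pairs above are consumed with the usual mask-and-shift idiom. A minimal sketch, assuming a hypothetical helper and a struct nvm_cfg1_glob pointer named glob (neither appears in this patch):

	/* Illustrative only: read a field using the MASK/OFFSET pairs above */
	static u32 nvm_cfg_get_field(u32 val, u32 mask, u32 offset)
	{
		return (val & mask) >> offset;
	}

	/* e.g. the multi-function mode carried in generic_cont0 */
	u32 mf_mode = nvm_cfg_get_field(glob->generic_cont0,
					NVM_CFG1_GLOB_MF_MODE_MASK,
					NVM_CFG1_GLOB_MF_MODE_OFFSET);
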
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 0ada7fd..2693c30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -446,7 +446,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
idx_cmd,
le32_to_cpu(command->opcode),
le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length),
+ le16_to_cpu(command->length_dw),
le32_to_cpu(command->src_addr_hi),
le32_to_cpu(command->src_addr_lo),
le32_to_cpu(command->dst_addr_hi),
@@ -461,7 +461,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
idx_cmd,
le32_to_cpu(command->opcode),
le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length),
+ le16_to_cpu(command->length_dw),
le32_to_cpu(command->src_addr_hi),
le32_to_cpu(command->src_addr_lo),
le32_to_cpu(command->dst_addr_hi),
@@ -645,7 +645,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- cmd->length = cpu_to_le16((u16)length);
+ cmd->length_dw = cpu_to_le16((u16)length);
qed_dmae_post_command(p_hwfn, p_ptt);
@@ -791,16 +791,16 @@ qed_dmae_host2host(struct qed_hwfn *p_hwfn,
}
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- union qed_qm_pq_params *p_params)
+ enum protocol_type proto, union qed_qm_pq_params *p_params)
{
u16 pq_id = 0;
- if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
- !p_params) {
+ if ((proto == PROTOCOLID_CORE ||
+ proto == PROTOCOLID_ETH ||
+ proto == PROTOCOLID_ISCSI ||
+ proto == PROTOCOLID_ROCE) && !p_params) {
DP_NOTICE(p_hwfn,
- "Protocol %d received NULL PQ params\n",
- proto);
+ "Protocol %d received NULL PQ params\n", proto);
return 0;
}
@@ -808,6 +808,8 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
case PROTOCOLID_CORE:
if (p_params->core.tc == LB_TC)
pq_id = p_hwfn->qm_info.pure_lb_pq;
+ else if (p_params->core.tc == OOO_LB_TC)
+ pq_id = p_hwfn->qm_info.ooo_pq;
else
pq_id = p_hwfn->qm_info.offload_pq;
break;
@@ -817,6 +819,18 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
pq_id += p_hwfn->qm_info.vf_queues_offset +
p_params->eth.vf_id;
break;
+ case PROTOCOLID_ISCSI:
+ if (p_params->iscsi.q_idx == 1)
+ pq_id = p_hwfn->qm_info.pure_ack_pq;
+ break;
+ case PROTOCOLID_ROCE:
+ if (p_params->roce.dcqcn)
+ pq_id = p_params->roce.qpid;
+ else
+ pq_id = p_hwfn->qm_info.offload_pq;
+ if (pq_id > p_hwfn->qm_info.num_pf_rls)
+ pq_id = p_hwfn->qm_info.offload_pq;
+ break;
default:
pq_id = 0;
}
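
For reference, a caller selecting a PQ through the extended union might look like the minimal sketch below; the local variable names and the q_idx value are illustrative assumptions, not taken from this patch (per the switch above, q_idx == 1 maps to the pure-ACK PQ):

	union qed_qm_pq_params pq_params;
	u16 pq_id;

	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.iscsi.q_idx = 1;	/* request the pure-ACK PQ */
	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
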
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 4367363..d015570 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -254,6 +254,10 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
union qed_qm_pq_params {
struct {
+ u8 q_idx;
+ } iscsi;
+
+ struct {
u8 tc;
} core;
@@ -262,11 +266,15 @@ union qed_qm_pq_params {
u8 vf_id;
u8 tc;
} eth;
+
+ struct {
+ u8 dcqcn;
+ u8 qpid; /* roce relative */
+ } roce;
};
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- union qed_qm_pq_params *params);
+ enum protocol_type proto, union qed_qm_pq_params *params);
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index e8a3b9d..23e455f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -31,7 +31,6 @@ enum cminterface {
};
/* general constants */
-#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
@@ -44,28 +43,28 @@ enum cminterface {
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 6250000
+#define QM_WFQ_UPPER_BOUND 62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
#define QM_WFQ_VP_PQ_PF_SHIFT 5
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 4375000
-#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val))
+#define QM_WFQ_MAX_INC_VAL 43750000
+
/* RL constants */
-#define QM_RL_UPPER_BOUND 6250000
+#define QM_RL_UPPER_BOUND 62500000
#define QM_RL_PERIOD 5 /* in us */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+#define QM_RL_MAX_INC_VAL 43750000
#define QM_RL_INC_VAL(rate) max_t(u32, \
- (((rate ? rate : 1000000) \
- * QM_RL_PERIOD) / 8), 1)
-#define QM_RL_MAX_INC_VAL 4375000
+ (u32)(((rate ? rate : \
+ 1000000) * \
+ QM_RL_PERIOD * \
+ 101) / (8 * 100)), 1)
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
-#define EAGLE_WORKAROUND_TC 7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8
#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -80,7 +79,6 @@ enum cminterface {
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS 38
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
-#define BTB_EAGLE_WORKAROUND_BLOCKS 4
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
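
Note that the reworked QM_RL_INC_VAL in the constants hunk above scales the rate-limiter increment by 101/100, i.e. roughly 1% headroom over the old rate * period / 8 value. A worked example using the macro's own fallback rate of 1000000:

	old: (1000000 * 5) / 8               = 625000
	new: (1000000 * 5 * 101) / (8 * 100) = 631250
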
@@ -107,9 +105,9 @@ enum cminterface {
cmd ## _ ## field, \
value)
/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \
- (max_phy_tcs_pr_port) \
- + (tc))
+#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \
+ (max_phys_tcs_per_port) + \
+ (tc))
#define LB_VOQ(port) ( \
MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port) \
@@ -120,8 +118,7 @@ enum cminterface {
: LB_VOQ(port))
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
-static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
- bool pf_rl_en)
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
@@ -130,8 +127,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
(1 << MAX_NUM_VOQS) - 1);
/* write RL period */
STORE_RT_REG(p_hwfn,
- QM_REG_RLPFPERIOD_RT_OFFSET,
- QM_RL_PERIOD_CLK_25M);
+ QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
@@ -144,8 +140,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
}
/* Prepare PF WFQ enable/disable runtime init values */
-static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
- bool pf_wfq_en)
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
/* set credit threshold for QM bypass flow */
@@ -156,8 +151,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
}
/* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
- bool vport_rl_en)
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
@@ -178,8 +172,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
}
/* Prepare VPORT WFQ enable/disable runtime init values */
-static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
- bool vport_wfq_en)
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
@@ -194,8 +187,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
* the specified VOQ
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
- u8 voq,
- u16 cmdq_lines)
+ u8 voq, u16 cmdq_lines)
{
u32 qm_line_crd;
@@ -221,7 +213,7 @@ static void qed_cmdq_lines_rt_init(
u8 max_phys_tcs_per_port,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id;
+ u8 tc, voq, port_id, num_tcs_in_port;
/* clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
@@ -229,22 +221,31 @@ static void qed_cmdq_lines_rt_init(
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (port_params[port_id].active) {
u16 phys_lines, phys_lines_per_tc;
- u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
- /* find #lines to divide between the active
- * physical TCs.
- */
+ /* find #lines to divide between active phys TCs */
phys_lines = port_params[port_id].num_pbf_cmd_lines -
PBF_CMDQ_PURE_LB_LINES;
/* find #lines per active physical TC */
- phys_lines_per_tc = phys_lines / phys_tcs;
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ }
+
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
/* init registers per active TC */
- for (tc = 0; tc < phys_tcs; tc++) {
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) != 1)
+ continue;
+
voq = PHYS_VOQ(port_id, tc,
max_phys_tcs_per_port);
qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
phys_lines_per_tc);
}
+
/* init registers for pure LB TC */
qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
PBF_CMDQ_PURE_LB_LINES);
@@ -259,34 +260,42 @@ static void qed_btb_blocks_rt_init(
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, voq, port_id;
+ u8 tc, voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
u32 temp;
- u8 phys_tcs;
if (!port_params[port_id].active)
continue;
- phys_tcs = port_params[port_id].num_active_phys_tcs;
-
/* subtract headroom blocks */
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
- /* find blocks per physical TC. use factor to avoid
- * floating arithmethic.
- */
+ /* find blocks per physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ }
+
pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
- (phys_tcs * BTB_PURE_LB_FACTOR +
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
BTB_PURE_LB_RATIO);
pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
pure_lb_blocks / BTB_PURE_LB_FACTOR);
- phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
/* init physical TCs */
- for (tc = 0; tc < phys_tcs; tc++) {
- voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) != 1)
+ continue;
+
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
}
@@ -360,10 +369,11 @@ static void qed_tx_pq_map_rt_init(
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- is_vf_pq ? 1 : 0);
+ p_params->pq_params[i].rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+ p_params->pq_params[i].rl_valid ?
+ p_params->pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
p_params->pq_params[i].wrr_group);
@@ -390,25 +400,11 @@ static void qed_tx_pq_map_rt_init(
/* store Tx PQ VF mask to size select register */
for (i = 0; i < num_tx_pq_vf_masks; i++) {
if (tx_pq_vf_mask[i]) {
- if (is_bb_a0) {
- u32 curr_mask = 0, addr;
-
- addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
- if (!p_params->is_first_pf)
- curr_mask = qed_rd(p_hwfn, p_ptt,
- addr);
-
- addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
-
- STORE_RT_REG(p_hwfn, addr,
- curr_mask | tx_pq_vf_mask[i]);
- } else {
- u32 addr;
+ u32 addr;
- addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
- STORE_RT_REG(p_hwfn, addr,
- tx_pq_vf_mask[i]);
- }
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+ STORE_RT_REG(p_hwfn, addr,
+ tx_pq_vf_mask[i]);
}
}
}
@@ -418,8 +414,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u32 num_pf_cids,
- u32 num_tids,
- u32 base_mem_addr_4kb)
+ u32 num_tids, u32 base_mem_addr_4kb)
{
u16 i, pq_id;
@@ -465,15 +460,10 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
(p_params->pf_id % MAX_NUM_PFS_BB);
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
return -1;
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
- inc_val);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
for (i = 0; i < num_tx_pqs; i++) {
u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
@@ -481,19 +471,21 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
OVERWRITE_RT_REG(p_hwfn,
crd_reg_offset + voq * MAX_NUM_PFS_BB,
- QM_WFQ_INIT_CRD(inc_val) |
QM_WFQ_CRD_REG_SIGN_BIT);
}
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+ QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
* Return -1 on error.
*/
-static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
- u8 pf_id,
- u32 pf_rl)
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
@@ -607,9 +599,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 cmd_addr,
- u32 cmd_data_lsb,
- u32 cmd_data_msb)
+ u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
@@ -627,9 +617,7 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
u32 qed_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs)
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@@ -713,8 +701,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
}
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id, u16 pf_wfq)
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
@@ -728,9 +715,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
}
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id,
- u32 pf_rl)
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
@@ -749,8 +734,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS],
- u16 vport_wfq)
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
u8 tc;
@@ -773,9 +757,7 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
}
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id,
- u32 vport_rl)
+ struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
u32 inc_val = QM_RL_INC_VAL(vport_rl);
@@ -795,9 +777,7 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool is_release_cmd,
- bool is_tx_pq,
- u16 start_pq,
- u16 num_pqs)
+ bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
@@ -841,17 +821,15 @@ qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 dest_port)
+ struct qed_ptt *p_ptt, u16 dest_port)
{
qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
- qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool vxlan_enable)
+ struct qed_ptt *p_ptt, bool vxlan_enable)
{
unsigned long reg_val = 0;
u8 shift;
@@ -908,8 +886,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 dest_port)
+ struct qed_ptt *p_ptt, u16 dest_port)
{
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
@@ -918,8 +895,7 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- bool eth_geneve_enable,
- bool ip_geneve_enable)
+ bool eth_geneve_enable, bool ip_geneve_enable)
{
unsigned long reg_val = 0;
u8 shift;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index d358c3b..9866a20 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -543,8 +543,7 @@ void qed_gtt_init(struct qed_hwfn *p_hwfn)
pxp_global_win[i]);
}
-int qed_init_fw_data(struct qed_dev *cdev,
- const u8 *data)
+int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
struct qed_fw_data *fw = cdev->fw_data;
struct bin_buffer_hdr *buf_hdr;
@@ -555,7 +554,11 @@ int qed_init_fw_data(struct qed_dev *cdev,
return -EINVAL;
}
- buf_hdr = (struct bin_buffer_hdr *)data;
+ /* First Dword contains metadata and should be skipped */
+ buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+
+ offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+ fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
fw->init_ops = (union init_op *)(data + offset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 8fba87dd..d121a8b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -575,9 +575,12 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ p_ramrod->vf_rx_prod_index = params->vf_qid;
+ if (params->vf_qid)
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
- return rc;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
@@ -615,7 +618,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
*pp_prod = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_PRODS_OFFSET(abs_l2_queue);
+ MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
@@ -759,9 +762,9 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_tx_cid;
- u8 abs_vport_id;
+ u16 pq_id, abs_tx_q_id = 0;
int rc = -EINVAL;
- u16 pq_id;
+ u8 abs_vport_id;
/* Store information for the stop */
p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
@@ -772,6 +775,10 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
+ if (rc)
+ return rc;
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = cid;
@@ -791,6 +798,7 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->sb_index = p_params->sb_idx;
p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
@@ -1485,51 +1493,51 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
offsetof(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.pmm.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
- p_stats->rx_crc_errors += port_stats.pmm.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
- p_stats->rx_pause_frames += port_stats.pmm.rxpf;
- p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
- p_stats->rx_align_errors += port_stats.pmm.raln;
- p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
- p_stats->rx_oversize_packets += port_stats.pmm.rovr;
- p_stats->rx_jabbers += port_stats.pmm.rjbr;
- p_stats->rx_undersize_packets += port_stats.pmm.rund;
- p_stats->rx_fragments += port_stats.pmm.rfrg;
- p_stats->tx_64_byte_packets += port_stats.pmm.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
- p_stats->tx_pause_frames += port_stats.pmm.txpf;
- p_stats->tx_pfc_frames += port_stats.pmm.txpp;
- p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
- p_stats->tx_total_collisions += port_stats.pmm.tncl;
- p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
- p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
- p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
- p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+ p_stats->rx_64_byte_packets += port_stats.eth.r64;
+ p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
+ p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
+ p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
+ p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
+ p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
+ p_stats->rx_crc_errors += port_stats.eth.rfcs;
+ p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_stats->rx_pause_frames += port_stats.eth.rxpf;
+ p_stats->rx_pfc_frames += port_stats.eth.rxpp;
+ p_stats->rx_align_errors += port_stats.eth.raln;
+ p_stats->rx_carrier_errors += port_stats.eth.rfcr;
+ p_stats->rx_oversize_packets += port_stats.eth.rovr;
+ p_stats->rx_jabbers += port_stats.eth.rjbr;
+ p_stats->rx_undersize_packets += port_stats.eth.rund;
+ p_stats->rx_fragments += port_stats.eth.rfrg;
+ p_stats->tx_64_byte_packets += port_stats.eth.t64;
+ p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
+ p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
+ p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
+ p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
+ p_stats->tx_pause_frames += port_stats.eth.txpf;
+ p_stats->tx_pfc_frames += port_stats.eth.txpp;
+ p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
+ p_stats->tx_total_collisions += port_stats.eth.tncl;
+ p_stats->rx_mac_bytes += port_stats.eth.rbyte;
+ p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_stats->tx_mac_bytes += port_stats.eth.tbyte;
+ p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
p_stats->brb_discards += port_stats.brb.brb_discard[j];
@@ -2158,11 +2166,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif
+#ifdef CONFIG_DCB
+extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
+#endif
+
static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
.iov = &qed_iov_ops_pass,
#endif
+#ifdef CONFIG_DCB
+ .dcb = &qed_dcbnl_ops_pass,
+#endif
.fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops,
.check_mac = &qed_check_mac,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 61cc686..6c4606b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -207,6 +207,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
+ dev_info->rdma_supported =
+ (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
@@ -832,7 +834,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
goto err2;
}
- data = cdev->firmware->data;
+ /* First Dword is used to differentiate between various sources */
+ data = cdev->firmware->data + sizeof(u32);
}
memset(&tunn_info, 0, sizeof(tunn_info));
@@ -900,7 +903,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (IS_PF(cdev)) {
qed_free_stream_mem(cdev);
- qed_sriov_disable(cdev, true);
+ if (IS_QED_ETH_IF(cdev))
+ qed_sriov_disable(cdev, true);
qed_nic_stop(cdev);
qed_slowpath_irq_free(cdev);
@@ -991,8 +995,7 @@ static bool qed_can_link_change(struct qed_dev *cdev)
return true;
}
-static int qed_set_link(struct qed_dev *cdev,
- struct qed_link_params *params)
+static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
struct qed_hwfn *hwfn;
struct qed_mcp_link_params *link_params;
@@ -1032,7 +1035,7 @@ static int qed_set_link(struct qed_dev *cdev,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
if (params->adv_speeds & 0)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
link_params->speed.forced_speed = params->forced_speed;
@@ -1053,19 +1056,19 @@ static int qed_set_link(struct qed_dev *cdev,
if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
switch (params->loopback_mode) {
case QED_LINK_LOOPBACK_INT_PHY:
- link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
+ link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
break;
case QED_LINK_LOOPBACK_EXT_PHY:
- link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
+ link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
break;
case QED_LINK_LOOPBACK_EXT:
- link_params->loopback_mode = PMM_LOOPBACK_EXT;
+ link_params->loopback_mode = ETH_LOOPBACK_EXT;
break;
case QED_LINK_LOOPBACK_MAC:
- link_params->loopback_mode = PMM_LOOPBACK_MAC;
+ link_params->loopback_mode = ETH_LOOPBACK_MAC;
break;
default:
- link_params->loopback_mode = PMM_LOOPBACK_NONE;
+ link_params->loopback_mode = ETH_LOOPBACK_NONE;
break;
}
}
@@ -1184,7 +1187,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
if_link->advertised_caps |= 0;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
if_link->advertised_caps |= 0;
if (link_caps.speed_capabilities &
@@ -1201,7 +1204,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
if_link->supported_caps |= 0;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
if_link->supported_caps |= 0;
if (link.link_up)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 1182361..a240f26 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -531,9 +531,9 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
transceiver_data)));
transceiver_state = GET_FIELD(transceiver_state,
- PMM_TRANSCEIVER_STATE);
+ ETH_TRANSCEIVER_STATE);
- if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+ if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
DP_NOTICE(p_hwfn, "Transceiver is present.\n");
else
DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
@@ -668,14 +668,12 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
qed_link_update(p_hwfn);
}
-int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool b_up)
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct qed_mcp_mb_params mb_params;
union drv_union_data union_data;
- struct pmm_phy_cfg *phy_cfg;
+ struct eth_phy_cfg *phy_cfg;
int rc = 0;
u32 cmd;
@@ -685,9 +683,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
phy_cfg->speed = params->speed.forced_speed;
- phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
- phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
- phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg->adv_speed = params->speed.advertised_speeds;
phy_cfg->loopback_mode = params->loopback_mode;
@@ -773,6 +771,34 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
return size;
}
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_pf)
+{
+ struct public_func shmem_info;
+ int i;
+
+ /* Find first Ethernet interface in port */
+ for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
+ i += p_hwfn->cdev->num_ports_in_engines) {
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID_BY_REL(p_hwfn, i));
+
+ if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+ continue;
+
+ if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
+ FUNC_MF_CFG_PROTOCOL_ETHERNET) {
+ *p_pf = (u8)i;
+ return 0;
+ }
+ }
+
+ DP_NOTICE(p_hwfn,
+ "Failed to find on port an ethernet interface in MF_SI mode\n");
+
+ return -EINVAL;
+}
+
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -951,7 +977,18 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
- *p_proto = QED_PCI_ETH;
+ if (test_bit(QED_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities))
+ *p_proto = QED_PCI_ETH_ROCE;
+ else
+ *p_proto = QED_PCI_ETH;
+ break;
+ case FUNC_MF_CFG_PROTOCOL_ISCSI:
+ *p_proto = QED_PCI_ISCSI;
+ break;
+ case FUNC_MF_CFG_PROTOCOL_ROCE:
+ DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
+ rc = -EINVAL;
break;
default:
rc = -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 6dd59eb..7f319aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -457,4 +457,7 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_link_state *p_link,
u8 min_bw);
+
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_pf);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 3a6c506..aa08ddb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -27,6 +27,35 @@
#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS \
+ 0x580904UL
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK \
+ (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT \
+ 0
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE \
+ (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT \
+ 16
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE \
+ (0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT \
+ 24
+#define CDU_REG_SEGMENT1_PARAMS \
+ 0x580908UL
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK \
+ (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT \
+ 0
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE \
+ (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT \
+ 16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE \
+ (0xff << 24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT \
+ 24
+
#define XSDM_REG_OPERATION_GEN \
0xf80408UL
#define NIG_REG_RX_BRB_OUT_EN \
@@ -167,6 +196,10 @@
0x1800004UL
#define NIG_REG_CM_HDR \
0x500840UL
+#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \
+ 0x50196cUL
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
+ 0x501964UL
#define NCSI_REG_CONFIG \
0x040200UL
#define PBF_REG_INIT \
@@ -219,6 +252,10 @@
0x230000UL
#define PRS_REG_SOFT_RST \
0x1f0000UL
+#define PRS_REG_MSG_INFO \
+ 0x1f0a1cUL
+#define PRS_REG_ROCE_DEST_QP_MAX_PF \
+ 0x1f0430UL
#define PSDM_REG_ENABLE_IN1 \
0xfa0004UL
#define PSEM_REG_ENABLE_IN \
@@ -227,6 +264,8 @@
0x280020UL
#define PSWRQ2_REG_CDUT_P_SIZE \
0x24000cUL
+#define PSWRQ2_REG_ILT_MEMORY \
+ 0x260000UL
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
@@ -460,7 +499,7 @@
#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2)
#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
-#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define NIG_REG_VXLAN_CTRL 0x50105cUL
#define PBF_REG_VXLAN_PORT 0xd80518UL
#define PBF_REG_NGE_PORT 0xd8051cUL
#define PRS_REG_NGE_PORT 0x1f086cUL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index ea4e9ce..a548504 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -63,6 +63,32 @@ union ramrod_data {
struct vport_update_ramrod_data vport_update;
struct vport_filter_update_ramrod_data vport_filter_update;
+ struct rdma_init_func_ramrod_data rdma_init_func;
+ struct rdma_close_func_ramrod_data rdma_close_func;
+ struct rdma_register_tid_ramrod_data rdma_register_tid;
+ struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
+ struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
+ struct roce_create_qp_req_ramrod_data roce_create_qp_req;
+ struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
+ struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
+ struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
+ struct roce_query_qp_req_ramrod_data roce_query_qp_req;
+ struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
+ struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
+ struct rdma_create_cq_ramrod_data rdma_create_cq;
+ struct rdma_resize_cq_ramrod_data rdma_resize_cq;
+ struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
+ struct rdma_srq_create_ramrod_data rdma_create_srq;
+ struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
+ struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+
+ struct iscsi_slow_path_hdr iscsi_empty;
+ struct iscsi_init_ramrod_params iscsi_init;
+ struct iscsi_spe_func_dstry iscsi_destroy;
+ struct iscsi_spe_conn_offload iscsi_conn_offload;
+ struct iscsi_conn_update_ramrod_params iscsi_conn_update;
+ struct iscsi_spe_conn_termination iscsi_conn_terminate;
+
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 67f6ce3..a52f3fc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -308,6 +308,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ u8 page_cnt;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
@@ -332,7 +333,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->path_id = QED_PATH_ID(p_hwfn);
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = cpu_to_le16(0xf);
- p_ramrod->mf_mode = mode;
+
switch (mode) {
case QED_MF_DEFAULT:
case QED_MF_NPAR:
@@ -350,24 +351,41 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
p_hwfn->p_eq->chain.pbl.p_phys_table);
- p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
-
+ page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
+ p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
- p_hwfn->hw_info.personality = PERSONALITY_ETH;
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case QED_PCI_ISCSI:
+ p_ramrod->personality = PERSONALITY_ISCSI;
+ break;
+ case QED_PCI_ETH_ROCE:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unkown personality %d\n",
+ p_hwfn->hw_info.personality);
+ p_ramrod->personality = PERSONALITY_ETH;
+ }
+
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
p_ramrod->num_vfs = (u8) p_iov->total_vfs;
}
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index acac662..ad9bf5c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -343,6 +343,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element),
&p_eq->chain)) {
@@ -416,10 +417,10 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
- struct qed_spq *p_spq = p_hwfn->p_spq;
- struct qed_spq_entry *p_virt = NULL;
- dma_addr_t p_phys = 0;
- unsigned int i = 0;
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_virt = NULL;
+ dma_addr_t p_phys = 0;
+ u32 i, capacity;
INIT_LIST_HEAD(&p_spq->pending);
INIT_LIST_HEAD(&p_spq->completion_pending);
@@ -431,7 +432,8 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
p_virt = p_spq->p_virt;
- for (i = 0; i < p_spq->chain.capacity; i++) {
+ capacity = qed_chain_get_capacity(&p_spq->chain);
+ for (i = 0; i < capacity; i++) {
DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
list_add_tail(&p_virt->list, &p_spq->free_pool);
@@ -459,9 +461,10 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
- struct qed_spq *p_spq = NULL;
- dma_addr_t p_phys = 0;
- struct qed_spq_entry *p_virt = NULL;
+ struct qed_spq_entry *p_virt = NULL;
+ struct qed_spq *p_spq = NULL;
+ dma_addr_t p_phys = 0;
+ u32 capacity;
/* SPQ struct */
p_spq =
@@ -475,6 +478,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_SINGLE,
+ QED_CHAIN_CNT_TYPE_U16,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
&p_spq->chain)) {
@@ -483,11 +487,11 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
}
/* allocate and fill the SPQ elements (incl. ramrod data list) */
+ capacity = qed_chain_get_capacity(&p_spq->chain);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_spq->chain.capacity *
+ capacity *
sizeof(struct qed_spq_entry),
- &p_phys,
- GFP_KERNEL);
+ &p_phys, GFP_KERNEL);
if (!p_virt)
goto spq_allocate_fail;
@@ -507,16 +511,18 @@ spq_allocate_fail:
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
+ u32 capacity;
if (!p_spq)
return;
- if (p_spq->p_virt)
+ if (p_spq->p_virt) {
+ capacity = qed_chain_get_capacity(&p_spq->chain);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_spq->chain.capacity *
+ capacity *
sizeof(struct qed_spq_entry),
- p_spq->p_virt,
- p_spq->p_phys);
+ p_spq->p_virt, p_spq->p_phys);
+ }
qed_chain_free(p_hwfn->cdev, &p_spq->chain);
;
@@ -871,9 +877,9 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80,
- &p_consq->chain)) {
+ 0x80, &p_consq->chain)) {
DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index c325ee8..4d161c75 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -21,18 +21,18 @@
#include "qed_vf.h"
/* IOV ramrods */
-static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
- u32 concrete_vfid, u16 opaque_vfid)
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
struct vf_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ u8 fp_minor;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
- init_data.opaque_fid = opaque_vfid;
+ init_data.opaque_fid = p_vf->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -43,10 +43,39 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.vf_start;
- p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
- p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
+ p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
- p_ramrod->personality = PERSONALITY_ETH;
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case QED_PCI_ETH_ROCE:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
+ p_hwfn->hw_info.personality);
+ return -EINVAL;
+ }
+
+ fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+ if (fp_minor > ETH_HSI_VER_MINOR) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
+ p_vf->abs_vf_id,
+ ETH_HSI_VER_MAJOR,
+ fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+ fp_minor = ETH_HSI_VER_MINOR;
+ }
+
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Starting using HSI %02x.%02x\n",
+ p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -117,6 +146,45 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
return vf;
}
+static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 rx_qid)
+{
+ if (rx_qid >= p_vf->num_rxqs)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+ return rx_qid < p_vf->num_rxqs;
+}
+
+static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 tx_qid)
+{
+ if (tx_qid >= p_vf->num_txqs)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+ return tx_qid < p_vf->num_txqs;
+}
+
+static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 sb_idx)
+{
+ int i;
+
+ for (i = 0; i < p_vf->num_sbs; i++)
+ if (p_vf->igu_sbs[i] == sb_idx)
+ return true;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
+ p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+ return false;
+}
+
int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
int vfid, struct qed_ptt *p_ptt)
{
@@ -293,6 +361,9 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
(vf->abs_vf_id << 8);
vf->vport_id = idx + 1;
+
+ vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
}
}
@@ -598,17 +669,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
/* unpretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
- if (vf->state != VF_STOPPED) {
- DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
- vf->abs_vf_id);
- return -EINVAL;
- }
-
- /* Start VF */
- rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
- if (rc)
- DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
-
vf->state = VF_FREE;
return rc;
@@ -852,7 +912,6 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
struct qed_vf_info *vf = NULL;
- int rc = 0;
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) {
@@ -874,18 +933,8 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
- if (vf->state != VF_STOPPED) {
- /* Stopping the VF */
- rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
-
- if (rc != 0) {
- DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
- rc);
- return rc;
- }
-
- vf->state = VF_STOPPED;
- }
+ /* Forget the VF's acquisition message */
+ memset(&vf->acquire, 0, sizeof(vf->acquire));
/* disabling interrupts and resetting permission table was done during
* vf-close, however, we could get here without going through vf_close
@@ -1116,8 +1165,6 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
- p_vf->num_mac_filters = 0;
- p_vf->num_vlan_filters = 0;
p_vf->configured_features = 0;
/* If VF previously requested less resources, go back to default */
@@ -1130,9 +1177,95 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
p_vf->vf_queues[i].rxq_active = 0;
memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
+static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ int i;
+
+ /* Queue related information */
+ p_resp->num_rxqs = p_vf->num_rxqs;
+ p_resp->num_txqs = p_vf->num_txqs;
+ p_resp->num_sbs = p_vf->num_sbs;
+
+ for (i = 0; i < p_resp->num_sbs; i++) {
+ p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+ p_resp->hw_sbs[i].sb_qid = 0;
+ }
+
+ /* These fields are filled for backward compatibility.
+ * Unused by modern VFs.
+ */
+ for (i = 0; i < p_resp->num_rxqs; i++) {
+ qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&p_resp->hw_qid[i]);
+ p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ }
+
+ /* Filter related information */
+ p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
+ p_req->num_mac_filters);
+ p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
+ p_req->num_vlan_filters);
+
+ /* This isn't really needed/enforced, but some legacy VFs might depend
+ * on the correct filling of this field.
+ */
+ p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
+
+ /* Validate sufficient resources for VF */
+ if (p_resp->num_rxqs < p_req->num_rxqs ||
+ p_resp->num_txqs < p_req->num_txqs ||
+ p_resp->num_sbs < p_req->num_sbs ||
+ p_resp->num_mac_filters < p_req->num_mac_filters ||
+ p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+ p_resp->num_mc_filters < p_req->num_mc_filters) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
+ p_vf->abs_vf_id,
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+ p_req->num_txqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+ return PFVF_STATUS_NO_RESOURCE;
+ }
+
+ return PFVF_STATUS_SUCCESS;
+}
+
+static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
+ struct pfvf_stats_info *p_stats)
+{
+ p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+ p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+ offsetof(struct ustorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+ offsetof(struct pstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ p_stats->tstats.address = 0;
+ p_stats->tstats.len = 0;
+}
+
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
@@ -1141,25 +1274,27 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
- u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+ u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
struct pf_vf_resc *resc = &resp->resc;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
/* Validate FW compatibility */
- if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
- req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
- req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
- req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
DP_INFO(p_hwfn,
- "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
vf->abs_vf_id,
- req->vfdev_info.fw_major,
- req->vfdev_info.fw_minor,
- req->vfdev_info.fw_revision,
- req->vfdev_info.fw_engineering,
- FW_MAJOR_VERSION,
- FW_MINOR_VERSION,
- FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
- vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ /* Write the PF version so that VF would know which version
+ * is supported.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
goto out;
}
@@ -1169,16 +1304,13 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn,
"VF[%d] is running an old driver that doesn't support 100g\n",
vf->abs_vf_id);
- vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
- memset(resp, 0, sizeof(*resp));
+ /* Store the acquire message */
+ memcpy(&vf->acquire, req, sizeof(vf->acquire));
- /* Fill in vf info stuff */
vf->opaque_fid = req->vfdev_info.opaque_fid;
- vf->num_mac_filters = 1;
- vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
vf->vf_bulletin = req->bulletin_addr;
vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
@@ -1194,26 +1326,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->num_hwfns > 1)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
- pfdev_info->stats_info.mstats.address =
- PXP_VF_BAR0_START_MSDM_ZONE_B +
- offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.mstats.len =
- sizeof(struct eth_mstorm_per_queue_stat);
-
- pfdev_info->stats_info.ustats.address =
- PXP_VF_BAR0_START_USDM_ZONE_B +
- offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.ustats.len =
- sizeof(struct eth_ustorm_per_queue_stat);
-
- pfdev_info->stats_info.pstats.address =
- PXP_VF_BAR0_START_PSDM_ZONE_B +
- offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.pstats.len =
- sizeof(struct eth_pstorm_per_queue_stat);
-
- pfdev_info->stats_info.tstats.address = 0;
- pfdev_info->stats_info.tstats.len = 0;
+ qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
@@ -1221,36 +1334,31 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+ pfdev_info->minor_fp_hsi = min_t(u8,
+ ETH_HSI_VER_MINOR,
+ req->vfdev_info.eth_fp_hsi_minor);
pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
pfdev_info->dev_type = p_hwfn->cdev->type;
pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
- resc->num_rxqs = vf->num_rxqs;
- resc->num_txqs = vf->num_txqs;
- resc->num_sbs = vf->num_sbs;
- for (i = 0; i < resc->num_sbs; i++) {
- resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
- resc->hw_sbs[i].sb_qid = 0;
- }
+ /* Fill resources available to VF; Make sure there are enough to
+ * satisfy the VF's request.
+ */
+ vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+ &req->resc_request, resc);
+ if (vfpf_status != PFVF_STATUS_SUCCESS)
+ goto out;
- for (i = 0; i < resc->num_rxqs; i++) {
- qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
- (u16 *)&resc->hw_qid[i]);
- resc->cid[i] = vf->vf_queues[i].fw_cid;
+ /* Start the VF in FW */
+ rc = qed_sp_vf_start(p_hwfn, vf);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_FAILURE;
+ goto out;
}
- resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
- req->resc_request.num_mac_filters);
- resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
- req->resc_request.num_vlan_filters);
-
- /* This isn't really required as VF isn't limited, but some VFs might
- * actually test this value, so need to provide it.
- */
- resc->num_mc_filters = req->resc_request.num_mc_filters;
-
/* Fill agreed size of bulletin board in response */
resp->bulletin_size = vf->bulletin.size;
qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
@@ -1585,10 +1693,6 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
sizeof(struct pfvf_def_resp_tlv), status);
}
-#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
-#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
- (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf, u8 status)
@@ -1606,16 +1710,11 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
/* Update the TLV with the response */
if (status == PFVF_STATUS_SUCCESS) {
- u16 hw_qid = 0;
-
req = &mbx->req_virt->start_rxq;
- qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
- &hw_qid);
-
- p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
- hw_qid * MSTORM_QZONE_SIZE +
- offsetof(struct mstorm_eth_queue_zone,
- rx_producers);
+ p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone,
+ non_trigger.eth_rx_queue_producers) +
+ sizeof(struct eth_rx_prod_data) * req->rx_qid;
}
qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
@@ -1627,13 +1726,19 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
{
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_rxq_tlv *req;
int rc;
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_rxq;
+
+ if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
+ params.vf_qid = req->rx_qid;
params.vport_id = vf->vport_id;
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
@@ -1649,22 +1754,48 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
if (rc) {
status = PFVF_STATUS_FAILURE;
} else {
+ status = PFVF_STATUS_SUCCESS;
vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
+out:
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
}
+static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
+ sizeof(*p_tlv));
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if (status == PFVF_STATUS_SUCCESS) {
+ u16 qid = mbx->req_virt->start_txq.tx_qid;
+
+ p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
+ DQ_DEMS_LEGACY);
+ }
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+}
+
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
- u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
union qed_qm_pq_params pq_params;
- u8 status = PFVF_STATUS_SUCCESS;
struct vfpf_start_txq_tlv *req;
int rc;
@@ -1675,6 +1806,11 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
+
+ if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
params.vport_id = vf->vport_id;
params.sb = req->hw_sb;
@@ -1688,13 +1824,15 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
req->pbl_addr,
req->pbl_size, &pq_params);
- if (rc)
+ if (rc) {
status = PFVF_STATUS_FAILURE;
- else
+ } else {
+ status = PFVF_STATUS_SUCCESS;
vf->vf_queues[req->tx_qid].txq_active = true;
+ }
- qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
- length, status);
+out:
+ qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -2119,6 +2257,16 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
u16 length;
int rc;
+ /* Validate PF can send such a request */
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing vport update\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
memset(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
@@ -2161,15 +2309,12 @@ out:
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
-static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *p_vf,
- struct qed_filter_ucast *p_params)
+static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
{
int i;
- if (p_params->type == QED_FILTER_MAC)
- return 0;
-
/* First remove entries and then add new ones */
if (p_params->opcode == QED_FILTER_REMOVE) {
for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
@@ -2222,6 +2367,80 @@ static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
return 0;
}
+static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ /* If we're in forced-mode, we don't allow any change */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ return 0;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(p_vf->shadow_config.macs[i],
+ p_params->mac)) {
+ memset(p_vf->shadow_config.macs[i], 0,
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "MAC isn't configured\n");
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
+ memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
+ }
+
+ /* List the new MAC address */
+ if (p_params->opcode != QED_FILTER_ADD &&
+ p_params->opcode != QED_FILTER_REPLACE)
+ return 0;
+
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
+ ether_addr_copy(p_vf->shadow_config.macs[i],
+ p_params->mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Added MAC at %d entry in shadow\n", i);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int rc = 0;
+
+ if (p_params->type == QED_FILTER_MAC) {
+ rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+ if (rc)
+ return rc;
+ }
+
+ if (p_params->type == QED_FILTER_VLAN)
+ rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+ return rc;
+}
+
int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
int vfid, struct qed_filter_ucast *params)
{
@@ -2366,11 +2585,27 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc = 0;
qed_iov_vf_cleanup(p_hwfn, p_vf);
+ if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+ /* Stopping the VF */
+ rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+ p_vf->opaque_fid);
+
+ if (rc) {
+ DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ p_vf->state = VF_STOPPED;
+ }
+
qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
- length, PFVF_STATUS_SUCCESS);
+ length, status);
}
static int
@@ -2622,7 +2857,6 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
{
struct qed_iov_vf_mbx *mbx;
struct qed_vf_info *p_vf;
- int i;
p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!p_vf)
@@ -2631,9 +2865,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
mbx = &p_vf->vf_mbx;
/* qed_iov_process_mbx_request */
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
mbx->first_tlv = mbx->req_virt->first_tlv;
@@ -2687,15 +2920,28 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
* support them. Or this may be because someone wrote a crappy
* VF driver and is sending garbage over the channel.
*/
- DP_ERR(p_hwfn,
- "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
- mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
-
- for (i = 0; i < 20; i++) {
+ DP_NOTICE(p_hwfn,
+ "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
+ p_vf->abs_vf_id,
+ mbx->first_tlv.tl.type,
+ mbx->first_tlv.tl.length,
+ mbx->first_tlv.padding, mbx->first_tlv.reply_address);
+
+ /* Try replying in case reply address matches the acquisition's
+ * posted address.
+ */
+ if (p_vf->acquire.first_tlv.reply_address &&
+ (mbx->first_tlv.reply_address ==
+ p_vf->acquire.first_tlv.reply_address)) {
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_NOT_SUPPORTED);
+ } else {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "%x ",
- mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+ "VF[%02x]: Can't respond to TLV - no valid reply address\n",
+ p_vf->abs_vf_id);
}
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index c90b2b6..0dd23e4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -10,6 +10,9 @@
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"
+
+#define QED_ETH_VF_NUM_MAC_FILTERS 1
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)
#ifdef CONFIG_QED_SRIOV
@@ -24,7 +27,6 @@
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
#define QED_MAX_VF_CHAINS_PER_PF 16
-#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
@@ -120,6 +122,8 @@ struct qed_vf_shadow_config {
/* Shadow copy of all guest vlans */
struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+ /* Shadow copy of all configured MACs; Empty if forcing MACs */
+ u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
u8 inner_vlan_removal;
};
@@ -133,6 +137,9 @@ struct qed_vf_info {
struct qed_bulletin bulletin;
dma_addr_t vf_bulletin;
+ /* PF saves a copy of the last VF acquire message */
+ struct vfpf_acquire_tlv acquire;
+
u32 concrete_fid;
u16 opaque_fid;
u16 mtu;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 72e69c0..9819230 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -117,36 +117,64 @@ exit:
}
#define VF_ACQUIRE_THRESH 3
-#define VF_ACQUIRE_MAC_FILTERS 1
+static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+ p_req->num_txqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* humble our request */
+ p_req->num_txqs = p_resp->num_txqs;
+ p_req->num_rxqs = p_resp->num_rxqs;
+ p_req->num_sbs = p_resp->num_sbs;
+ p_req->num_mac_filters = p_resp->num_mac_filters;
+ p_req->num_vlan_filters = p_resp->num_vlan_filters;
+ p_req->num_mc_filters = p_resp->num_mc_filters;
+}
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
- u8 rx_count = 1, tx_count = 1, num_sbs = 1;
- u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
+ struct vf_pf_resc_request *p_resc;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
int rc = 0, attempts = 0;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ p_resc = &req->resc_request;
/* starting filling the request */
req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
- req->resc_request.num_rxqs = rx_count;
- req->resc_request.num_txqs = tx_count;
- req->resc_request.num_sbs = num_sbs;
- req->resc_request.num_mac_filters = num_mac;
- req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
req->vfdev_info.fw_major = FW_MAJOR_VERSION;
req->vfdev_info.fw_minor = FW_MINOR_VERSION;
req->vfdev_info.fw_revision = FW_REVISION_VERSION;
req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+ req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
/* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
@@ -185,21 +213,21 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
resources_acquired = true;
} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
attempts < VF_ACQUIRE_THRESH) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "PF unwilling to fullfill resource request. Try PF recommended amount\n");
-
- /* humble our request */
- req->resc_request.num_txqs = resp->resc.num_txqs;
- req->resc_request.num_rxqs = resp->resc.num_rxqs;
- req->resc_request.num_sbs = resp->resc.num_sbs;
- req->resc_request.num_mac_filters =
- resp->resc.num_mac_filters;
- req->resc_request.num_vlan_filters =
- resp->resc.num_vlan_filters;
+ qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
+ &resp->resc);
/* Clear response buffer */
memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+ } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
+ pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn,
+ "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR,
+ ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
+ return -EINVAL;
} else {
DP_ERR(p_hwfn,
"PF returned error %d to VF acquisition request\n",
@@ -225,6 +253,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
}
+ if (ETH_HSI_VER_MINOR &&
+ (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
+ DP_INFO(p_hwfn,
+ "PF is using older fastpath HSI; %02x.%02x is configured\n",
+ ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
+ }
+
return 0;
}
@@ -405,8 +440,8 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req;
- struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep first tlv */
@@ -424,20 +459,24 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
- resp = &p_iov->pf2vf_reply->default_resp;
+ resp = &p_iov->pf2vf_reply->queue_start;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
if (pp_doorbell) {
- u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
- qed_db_addr(cid, DQ_DEMS_LEGACY);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ tx_queue_id, *pp_doorbell, resp->offset);
}
+exit:
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index b82fda9..b23ce58 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -96,7 +96,9 @@ struct vfpf_acquire_tlv {
u32 driver_version;
u16 opaque_fid; /* ME register value */
u8 os_type; /* VFPF_ACQUIRE_OS_* value */
- u8 padding[5];
+ u8 eth_fp_hsi_major;
+ u8 eth_fp_hsi_minor;
+ u8 padding[3];
} vfdev_info;
struct vf_pf_resc_request resc_request;
@@ -171,7 +173,14 @@ struct pfvf_acquire_resp_tlv {
struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN];
- u8 padding2[2];
+
+ /* It's possible PF had to configure an older fastpath HSI
+ * [in case VF is newer than PF]. This is communicated back
+ * to the VF. It can also be used in case of error due to
+ * non-matching versions to shed light in VF about failure.
+ */
+ u8 major_fp_hsi;
+ u8 minor_fp_hsi;
} pfdev_info;
struct pf_vf_resc {
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 06ff90d..74a4985 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
+qede-$(CONFIG_DCB) += qede_dcbnl.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 47d6b22..1441c8f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -304,6 +304,9 @@ union qede_reload_args {
u16 mtu;
};
+#ifdef CONFIG_DCB
+void qede_set_dcbnl_ops(struct net_device *ndev);
+#endif
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
new file mode 100644
index 0000000..03e8c02
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -0,0 +1,348 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+#include "qede.h"
+
+static u8 qede_dcbnl_getstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getstate(edev->cdev);
+}
+
+static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setstate(edev->cdev, state);
+}
+
+static void qede_dcbnl_getpermhwaddr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type,
+ pgid, bw_pct, up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct,
+ up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpfccfg(edev->cdev, prio, setting);
+}
+
+static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpfccfg(edev->cdev, prio, setting);
+}
+
+static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getcap(edev->cdev, capid, cap);
+}
+
+static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num);
+}
+
+static u8 qede_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getpfcstate(edev->cdev);
+}
+
+static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getapp(edev->cdev, idtype, id);
+}
+
+static u8 qede_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getdcbx(edev->cdev);
+}
+
+static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static u8 qede_dcbnl_setall(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setall(edev->cdev);
+}
+
+static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num);
+}
+
+static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpfcstate(edev->cdev, state);
+}
+
+static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval,
+ u8 up)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up);
+}
+
+static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setdcbx(edev->cdev, state);
+}
+
+static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags);
+}
+
+static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags);
+}
+
+static int qede_dcbnl_peer_getappinfo(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *count)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count);
+}
+
+static int qede_dcbnl_peer_getapptable(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getapptable(edev->cdev, app);
+}
+
+static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
+}
+
+static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_getapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets);
+}
+
+static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
+ .ieee_getpfc = qede_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qede_dcbnl_ieee_setpfc,
+ .ieee_getets = qede_dcbnl_ieee_getets,
+ .ieee_setets = qede_dcbnl_ieee_setets,
+ .ieee_getapp = qede_dcbnl_ieee_getapp,
+ .ieee_setapp = qede_dcbnl_ieee_setapp,
+ .getdcbx = qede_dcbnl_getdcbx,
+ .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
+ .getstate = qede_dcbnl_getstate,
+ .setstate = qede_dcbnl_setstate,
+ .getpermhwaddr = qede_dcbnl_getpermhwaddr,
+ .getpgtccfgtx = qede_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qede_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qede_dcbnl_getpfccfg,
+ .setpfccfg = qede_dcbnl_setpfccfg,
+ .getcap = qede_dcbnl_getcap,
+ .getnumtcs = qede_dcbnl_getnumtcs,
+ .getpfcstate = qede_dcbnl_getpfcstate,
+ .getapp = qede_dcbnl_getapp,
+ .setpgtccfgtx = qede_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qede_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx,
+ .setall = qede_dcbnl_setall,
+ .setnumtcs = qede_dcbnl_setnumtcs,
+ .setpfcstate = qede_dcbnl_setpfcstate,
+ .setapp = qede_dcbnl_setapp,
+ .setdcbx = qede_dcbnl_setdcbx,
+ .setfeatcfg = qede_dcbnl_setfeatcfg,
+ .getfeatcfg = qede_dcbnl_getfeatcfg,
+ .peer_getappinfo = qede_dcbnl_peer_getappinfo,
+ .peer_getapptable = qede_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qede_dcbnl_cee_peer_getpg,
+};
+
+void qede_set_dcbnl_ops(struct net_device *dev)
+{
+ dev->dcbnl_ops = &qede_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ad3cae3..6836d44 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -910,6 +910,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
memset(first_bd, 0, sizeof(*first_bd));
val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
first_bd->data.bd_flags.bitfields = val;
+ val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
+ first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
/* Map skb linear data for DMA and set in the first BD */
mapping = dma_map_single(&edev->pdev->dev, skb->data,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 5733d18..423168b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -579,8 +579,6 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Fill the parsing flags & params according to the requested offload */
if (xmit_type & XMIT_L4_CSUM) {
- u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
-
/* We don't re-calculate IP checksum as it is already done by
* the upper stack
*/
@@ -590,14 +588,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (xmit_type & XMIT_ENC) {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- } else {
- /* In cases when OS doesn't indicate for inner offloads
- * when packet is tunnelled, we need to override the HW
- * tunnel configuration so that packets are treated as
- * regular non tunnelled packets and no inner offloads
- * are done by the hardware.
- */
- first_bd->data.bitfields |= cpu_to_le16(temp);
+ first_bd->data.bitfields |=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
}
/* If the packet is IPv6 with extension header, indicate that
@@ -655,6 +647,10 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
tx_data_bd = (struct eth_tx_bd *)third_bd;
data_split = true;
}
+ } else {
+ first_bd->data.bitfields |=
+ (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
}
/* Handle fragmented skb */
@@ -2505,6 +2501,10 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+#ifdef CONFIG_DCB
+ qede_set_dcbnl_ops(edev->ndev);
+#endif
+
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
@@ -2823,6 +2823,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR,
+ QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(struct eth_rx_bd),
&rxq->rx_bd_ring);
@@ -2834,6 +2835,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(union eth_rx_cqe),
&rxq->rx_comp_ring);
@@ -2885,9 +2887,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
NUM_TX_BDS_MAX,
- sizeof(*p_virt),
- &txq->tx_pbl);
+ sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 867caf6..5349284 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -362,8 +362,6 @@ static void ravb_emac_init(struct net_device *ndev)
ravb_write(ndev,
(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
- ravb_write(ndev, 1, MPR);
-
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
@@ -402,7 +400,8 @@ static int ravb_dmac_init(struct net_device *ndev)
#endif
/* Set AVB RX */
- ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
+ ravb_write(ndev,
+ RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
/* Set FIFO size */
ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
@@ -2111,8 +2110,7 @@ static int ravb_runtime_nop(struct device *dev)
}
static const struct dev_pm_ops ravb_dev_pm_ops = {
- .runtime_suspend = ravb_runtime_nop,
- .runtime_resume = ravb_runtime_nop,
+ SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};
#define RAVB_PM_OPS (&ravb_dev_pm_ops)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index cec147d..8f06a66 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -40,7 +40,7 @@ config DWMAC_GENERIC
config DWMAC_IPQ806X
tristate "QCA IPQ806x DWMAC support"
default ARCH_QCOM
- depends on OF
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
select MFD_SYSCON
help
Support for QCA IPQ806X DWMAC Ethernet.
@@ -53,7 +53,7 @@ config DWMAC_IPQ806X
config DWMAC_LPC18XX
tristate "NXP LPC18xx/43xx DWMAC support"
default ARCH_LPC18XX
- depends on OF
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for NXP LPC18xx/43xx DWMAC Ethernet.
@@ -61,7 +61,7 @@ config DWMAC_LPC18XX
config DWMAC_MESON
tristate "Amlogic Meson dwmac support"
default ARCH_MESON
- depends on OF
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
help
Support for Ethernet controller on Amlogic Meson SoCs.
@@ -72,7 +72,7 @@ config DWMAC_MESON
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
- depends on OF
+ depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
select MFD_SYSCON
help
Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -83,7 +83,7 @@ config DWMAC_ROCKCHIP
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_SOCFPGA
- depends on OF
+ depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
select MFD_SYSCON
help
Support for ethernet controller on Altera SOCFPGA
@@ -95,7 +95,7 @@ config DWMAC_SOCFPGA
config DWMAC_STI
tristate "STi GMAC support"
default ARCH_STI
- depends on OF
+ depends on OF && (ARCH_STI || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for ethernet controller on STi SOCs.
@@ -107,7 +107,7 @@ config DWMAC_STI
config DWMAC_SUNXI
tristate "Allwinner GMAC support"
default ARCH_SUNXI
- depends on OF
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
---help---
Support for Allwinner A20/A31 GMAC ethernet controllers.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e6bb0ec..f2a4cd6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -364,7 +364,6 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
}
struct cpsw_priv {
- spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
struct napi_struct napi_rx;
@@ -2124,7 +2123,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
}
priv_sl2 = netdev_priv(ndev);
- spin_lock_init(&priv_sl2->lock);
priv_sl2->data = *data;
priv_sl2->pdev = pdev;
priv_sl2->ndev = ndev;
@@ -2243,7 +2241,6 @@ static int cpsw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
- spin_lock_init(&priv->lock);
priv->pdev = pdev;
priv->ndev = ndev;
priv->dev = &ndev->dev;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 4f6255c..37ab46c 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1154,7 +1154,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
if (err < 0)
goto err_register;
- priv->xfer_wq = create_workqueue(netdev_name(ndev));
+ priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);
if (!priv->xfer_wq) {
err = -ENOMEM;
goto err_wq;
@@ -1233,7 +1233,6 @@ int w5100_remove(struct device *dev)
flush_work(&priv->setrx_work);
flush_work(&priv->restart_work);
- flush_workqueue(priv->xfer_wq);
destroy_workqueue(priv->xfer_wq);
unregister_netdev(ndev);
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 86c331b..9006877 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1187,8 +1187,9 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->force_reset = false;
adapter->open_guard = false;
- adapter->txrx_wq = create_workqueue(DRV_NAME "/txrx");
- adapter->control_wq = create_workqueue(DRV_NAME "/control");
+ adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
+ WQ_MEM_RECLAIM, 0);
INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
INIT_WORK(&adapter->raise_intr_rxdata_task,
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index c270c5a..467fb8b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -173,6 +173,7 @@ struct rndis_device {
/* Interface */
struct rndis_message;
+struct netvsc_device;
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct hv_device *device,
@@ -189,8 +190,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct vmbus_channel *channel,
u16 vlan_tci);
void netvsc_channel_cb(void *context);
-int rndis_filter_open(struct hv_device *dev);
-int rndis_filter_close(struct hv_device *dev);
+int rndis_filter_open(struct netvsc_device *nvdev);
+int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
void *additional_info);
void rndis_filter_device_remove(struct hv_device *dev);
@@ -200,7 +201,7 @@ int rndis_filter_receive(struct hv_device *dev,
struct vmbus_channel *channel);
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
-int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
+int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
@@ -743,6 +744,18 @@ struct netvsc_device {
atomic_t vf_use_cnt;
};
+static inline struct netvsc_device *
+net_device_to_netvsc_device(struct net_device *ndev)
+{
+ return ((struct net_device_context *)netdev_priv(ndev))->nvdev;
+}
+
+static inline struct netvsc_device *
+hv_device_to_netvsc_device(struct hv_device *device)
+{
+ return net_device_to_netvsc_device(hv_get_drvdata(device));
+}
+
/* NdisInitialize message */
struct rndis_initialize_request {
u32 req_id;
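The two inline helpers added to hyperv_net.h above are what allow the netvsc.c, netvsc_drv.c and rndis_filter.c hunks that follow to drop the repeated hv_get_drvdata()/netdev_priv() chains, and they pair with the rndis_filter_open()/rndis_filter_close() prototypes that now take a struct netvsc_device. A minimal usage sketch (the function name here is hypothetical; the helper and prototype come from this patch):

	static int example_open(struct hv_device *device)
	{
		struct netvsc_device *nvdev = hv_device_to_netvsc_device(device);

		/* new prototype: operate on the netvsc_device directly */
		return rndis_filter_open(nvdev);
	}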
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 719cb35..6909c32 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -95,9 +95,7 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
- struct net_device *ndev = hv_get_drvdata(device);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_device = net_device_ctx->nvdev;
+ struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
if (net_device && net_device->destroy)
net_device = NULL;
@@ -107,9 +105,7 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
- struct net_device *ndev = hv_get_drvdata(device);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_device = net_device_ctx->nvdev;
+ struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
if (!net_device)
goto get_in_err;
@@ -128,8 +124,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
struct nvsp_message *revoke_packet;
int ret = 0;
struct net_device *ndev = hv_get_drvdata(device);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_device = net_device_ctx->nvdev;
+ struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
/*
* If we got a section count, it means we received a
@@ -249,7 +244,6 @@ static int netvsc_destroy_buf(struct hv_device *device)
static int netvsc_init_buf(struct hv_device *device)
{
int ret = 0;
- unsigned long t;
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
struct net_device *ndev;
@@ -310,9 +304,7 @@ static int netvsc_init_buf(struct hv_device *device)
goto cleanup;
}
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- BUG_ON(t == 0);
-
+ wait_for_completion(&net_device->channel_init_wait);
/* Check the response */
if (init_packet->msg.v1_msg.
@@ -395,8 +387,7 @@ static int netvsc_init_buf(struct hv_device *device)
goto cleanup;
}
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- BUG_ON(t == 0);
+ wait_for_completion(&net_device->channel_init_wait);
/* Check the response */
if (init_packet->msg.v1_msg.
@@ -450,7 +441,6 @@ static int negotiate_nvsp_ver(struct hv_device *device,
{
struct net_device *ndev = hv_get_drvdata(device);
int ret;
- unsigned long t;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
@@ -467,10 +457,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
if (ret != 0)
return ret;
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
-
- if (t == 0)
- return -ETIMEDOUT;
+ wait_for_completion(&net_device->channel_init_wait);
if (init_packet->msg.init_msg.init_complete.status !=
NVSP_STAT_SUCCESS)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 6a69b5c..787a202 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -98,16 +98,14 @@ static void netvsc_set_multicast_list(struct net_device *net)
static int netvsc_open(struct net_device *net)
{
- struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *device_obj = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
struct rndis_device *rdev;
int ret = 0;
netif_carrier_off(net);
/* Open up the device */
- ret = rndis_filter_open(device_obj);
+ ret = rndis_filter_open(nvdev);
if (ret != 0) {
netdev_err(net, "unable to open device (ret %d).\n", ret);
return ret;
@@ -125,7 +123,6 @@ static int netvsc_open(struct net_device *net)
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *device_obj = net_device_ctx->device_ctx;
struct netvsc_device *nvdev = net_device_ctx->nvdev;
int ret;
u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
@@ -135,7 +132,7 @@ static int netvsc_close(struct net_device *net)
/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
cancel_work_sync(&net_device_ctx->work);
- ret = rndis_filter_close(device_obj);
+ ret = rndis_filter_close(nvdev);
if (ret != 0) {
netdev_err(net, "unable to close device (ret %d).\n", ret);
return ret;
@@ -701,7 +698,6 @@ int netvsc_recv_callback(struct hv_device *device_obj,
}
vf_injection_done:
- net_device_ctx = netdev_priv(net);
rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
/* Allocate a skb - TODO direct I/O to pages? */
@@ -986,8 +982,6 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
- struct net_device_context *ndevctx = netdev_priv(ndev);
- struct hv_device *hdev = ndevctx->device_ctx;
struct sockaddr *addr = p;
char save_adr[ETH_ALEN];
unsigned char save_aatype;
@@ -1000,7 +994,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
if (err != 0)
return err;
- err = rndis_filter_set_device_mac(hdev, addr->sa_data);
+ err = rndis_filter_set_device_mac(ndev, addr->sa_data);
if (err != 0) {
/* roll back to saved MAC */
memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
@@ -1248,7 +1242,7 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
/*
* Open the device before switching data path.
*/
- rndis_filter_open(net_device_ctx->device_ctx);
+ rndis_filter_open(netvsc_dev);
/*
* notify the host to switch the data path.
@@ -1303,7 +1297,7 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
udelay(50);
netvsc_switch_datapath(ndev, false);
netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
- rndis_filter_close(net_device_ctx->device_ctx);
+ rndis_filter_close(netvsc_dev);
netif_carrier_on(ndev);
/*
* Notify peers.
@@ -1500,6 +1494,10 @@ static int netvsc_netdev_event(struct notifier_block *this,
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ /* Avoid Vlan dev with same MAC registering as VF */
+ if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+ return NOTIFY_DONE;
+
switch (event) {
case NETDEV_REGISTER:
return netvsc_register_vf(event_dev);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 97c292b..8e830f7 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -466,7 +466,6 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
struct rndis_query_request *query;
struct rndis_query_complete *query_complete;
int ret = 0;
- unsigned long t;
if (!result)
return -EINVAL;
@@ -503,11 +502,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ wait_for_completion(&request->wait_event);
/* Copy the response back */
query_complete = &request->response_msg.msg.query_complete;
@@ -543,11 +538,9 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
#define NWADR_STR "NetworkAddress"
#define NWADR_STRLEN 14
-int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
+int rndis_filter_set_device_mac(struct net_device *ndev, char *mac)
{
- struct net_device *ndev = hv_get_drvdata(hdev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
struct rndis_device *rdev = nvdev->extension;
struct rndis_request *request;
struct rndis_set_request *set;
@@ -558,7 +551,6 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
u32 extlen = sizeof(struct rndis_config_parameter_info) +
2*NWADR_STRLEN + 4*ETH_ALEN;
int ret;
- unsigned long t;
request = get_rndis_request(rdev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
@@ -599,21 +591,13 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- netdev_err(ndev, "timeout before we got a set response...\n");
- /*
- * can't put_rndis_request, since we may still receive a
- * send-completion.
- */
- return -EBUSY;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
- set_complete->status);
- ret = -EINVAL;
- }
+ wait_for_completion(&request->wait_event);
+
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
}
cleanup:
@@ -622,12 +606,10 @@ cleanup:
}
static int
-rndis_filter_set_offload_params(struct hv_device *hdev,
+rndis_filter_set_offload_params(struct net_device *ndev,
struct ndis_offload_params *req_offloads)
{
- struct net_device *ndev = hv_get_drvdata(hdev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
struct rndis_device *rdev = nvdev->extension;
struct rndis_request *request;
struct rndis_set_request *set;
@@ -635,7 +617,6 @@ rndis_filter_set_offload_params(struct hv_device *hdev,
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_offload_params);
int ret;
- unsigned long t;
u32 vsp_version = nvdev->nvsp_version;
if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
@@ -669,20 +650,12 @@ rndis_filter_set_offload_params(struct hv_device *hdev,
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- netdev_err(ndev, "timeout before we got aOFFLOAD set response...\n");
- /* can't put_rndis_request, since we may still receive a
- * send-completion.
- */
- return -EBUSY;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
- set_complete->status);
- ret = -EINVAL;
- }
+ wait_for_completion(&request->wait_event);
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
}
cleanup:
@@ -710,7 +683,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
u32 *itab;
u8 *keyp;
int i, ret;
- unsigned long t;
request = get_rndis_request(
rdev, RNDIS_MSG_SET,
@@ -753,20 +725,12 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- netdev_err(ndev, "timeout before we got a set response...\n");
- /* can't put_rndis_request, since we may still receive a
- * send-completion.
- */
- return -ETIMEDOUT;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
- set_complete->status);
- ret = -EINVAL;
- }
+ wait_for_completion(&request->wait_event);
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
}
cleanup:
@@ -795,8 +759,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
struct rndis_set_complete *set_complete;
u32 status;
int ret;
- unsigned long t;
- struct net_device *ndev = dev->ndev;
request = get_rndis_request(dev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
@@ -819,26 +781,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ wait_for_completion(&request->wait_event);
- if (t == 0) {
- netdev_err(ndev,
- "timeout before we got a set response...\n");
- ret = -ETIMEDOUT;
- /*
- * We can't deallocate the request since we may still receive a
- * send completion for it.
- */
- goto exit;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- status = set_complete->status;
- }
+ set_complete = &request->response_msg.msg.set_complete;
+ status = set_complete->status;
cleanup:
if (request)
put_rndis_request(dev, request);
-exit:
return ret;
}
@@ -850,9 +800,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
struct rndis_initialize_complete *init_complete;
u32 status;
int ret;
- unsigned long t;
- struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev);
request = get_rndis_request(dev, RNDIS_MSG_INIT,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -875,12 +823,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
goto cleanup;
}
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
-
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ wait_for_completion(&request->wait_event);
init_complete = &request->response_msg.msg.init_complete;
status = init_complete->status;
@@ -977,8 +920,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
struct net_device *ndev =
hv_get_drvdata(new_sc->primary_channel->device_obj);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvscdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
int ret;
unsigned long flags;
@@ -1014,7 +956,6 @@ int rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info = additional_info;
struct ndis_offload_params offloads;
struct nvsp_message *init_packet;
- unsigned long t;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
@@ -1088,7 +1029,7 @@ int rndis_filter_device_add(struct hv_device *dev,
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
- ret = rndis_filter_set_offload_params(dev, &offloads);
+ ret = rndis_filter_set_offload_params(net, &offloads);
if (ret)
goto err_dev_remv;
@@ -1157,11 +1098,8 @@ int rndis_filter_device_add(struct hv_device *dev,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
goto out;
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ wait_for_completion(&net_device->channel_init_wait);
+
if (init_packet->msg.v5_msg.subchn_comp.status !=
NVSP_STAT_SUCCESS) {
ret = -ENODEV;
@@ -1196,21 +1134,14 @@ err_dev_remv:
void rndis_filter_device_remove(struct hv_device *dev)
{
- struct net_device *ndev = hv_get_drvdata(dev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_dev = net_device_ctx->nvdev;
+ struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev);
struct rndis_device *rndis_dev = net_dev->extension;
- unsigned long t;
/* If not all subchannel offers are complete, wait for them until
* completion to avoid race.
*/
- while (net_dev->num_sc_offered > 0) {
- t = wait_for_completion_timeout(&net_dev->channel_init_wait,
- 10 * HZ);
- if (t == 0)
- WARN(1, "Netvsc: Waiting for sub-channel processing");
- }
+ if (net_dev->num_sc_offered > 0)
+ wait_for_completion(&net_dev->channel_init_wait);
/* Halt and release the rndis device */
rndis_filter_halt_device(rndis_dev);
@@ -1222,27 +1153,19 @@ void rndis_filter_device_remove(struct hv_device *dev)
}
-int rndis_filter_open(struct hv_device *dev)
+int rndis_filter_open(struct netvsc_device *nvdev)
{
- struct net_device *ndev = hv_get_drvdata(dev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_device = net_device_ctx->nvdev;
-
- if (!net_device)
+ if (!nvdev)
return -EINVAL;
- if (atomic_inc_return(&net_device->open_cnt) != 1)
+ if (atomic_inc_return(&nvdev->open_cnt) != 1)
return 0;
- return rndis_filter_open_device(net_device->extension);
+ return rndis_filter_open_device(nvdev->extension);
}
-int rndis_filter_close(struct hv_device *dev)
+int rndis_filter_close(struct netvsc_device *nvdev)
{
- struct net_device *ndev = hv_get_drvdata(dev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
-
if (!nvdev)
return -EINVAL;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1c4d395..18b4e8c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -80,13 +80,6 @@ static void ipvlan_port_destroy(struct net_device *dev)
kfree_rcu(port, rcu);
}
-/* ipvlan network devices have devices nesting below it and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key ipvlan_netdev_xmit_lock_key;
-static struct lock_class_key ipvlan_netdev_addr_lock_key;
-
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
@@ -96,19 +89,6 @@ static struct lock_class_key ipvlan_netdev_addr_lock_key;
#define IPVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
-static void ipvlan_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key);
-}
-
-static void ipvlan_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL);
-}
-
static int ipvlan_init(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -123,7 +103,7 @@ static int ipvlan_init(struct net_device *dev)
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
- ipvlan_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
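The ipvlan change above is the first of several in this series (macvlan, ppp, team and vrf follow) that drop most of the per-driver lockdep boilerplate in favour of the new netdev_lockdep_set_classes() helper. A rough sketch of what a converted ndo_init() looks like (hypothetical driver name; compare ppp_dev_init() further down):

	static int example_stacked_dev_init(struct net_device *dev)
	{
		/* one call now covers the addr_list_lock, per-queue _xmit_lock
		 * and qdisc busylock classes the drivers used to set by hand
		 */
		netdev_lockdep_set_classes(dev);
		return 0;
	}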
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index a400288..6255973 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -169,10 +169,9 @@ static void loopback_setup(struct net_device *dev)
dev->flags = IFF_LOOPBACK;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
netif_keep_dst(dev);
- dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
+ dev->hw_features = NETIF_F_GSO_SOFTWARE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
- | NETIF_F_ALL_TSO
- | NETIF_F_UFO
+ | NETIF_F_GSO_SOFTWARE
| NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
| NETIF_F_SCTP_CRC
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cb01023..cd9b538 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -49,6 +49,7 @@ struct macvlan_port {
bool passthru;
int count;
struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE];
+ DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
};
struct macvlan_source_entry {
@@ -305,11 +306,14 @@ static void macvlan_process_broadcast(struct work_struct *w)
rcu_read_unlock();
+ if (src)
+ dev_put(src->dev);
kfree_skb(skb);
}
}
static void macvlan_broadcast_enqueue(struct macvlan_port *port,
+ const struct macvlan_dev *src,
struct sk_buff *skb)
{
struct sk_buff *nskb;
@@ -319,8 +323,12 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
if (!nskb)
goto err;
+ MACVLAN_SKB_CB(nskb)->src = src;
+
spin_lock(&port->bc_queue.lock);
if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
+ if (src)
+ dev_hold(src->dev);
__skb_queue_tail(&port->bc_queue, nskb);
err = 0;
}
@@ -412,6 +420,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
port = macvlan_port_get_rcu(skb->dev);
if (is_multicast_ether_addr(eth->h_dest)) {
+ unsigned int hash;
+
skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
if (!skb)
return RX_HANDLER_CONSUMED;
@@ -429,8 +439,9 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
goto out;
}
- MACVLAN_SKB_CB(skb)->src = src;
- macvlan_broadcast_enqueue(port, skb);
+ hash = mc_hash(NULL, eth->h_dest);
+ if (test_bit(hash, port->mc_filter))
+ macvlan_broadcast_enqueue(port, src, skb);
return RX_HANDLER_PASS;
}
@@ -716,12 +727,12 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
}
}
-static void macvlan_set_mac_lists(struct net_device *dev)
+static void macvlan_compute_filter(unsigned long *mc_filter,
+ struct net_device *dev,
+ struct macvlan_dev *vlan)
{
- struct macvlan_dev *vlan = netdev_priv(dev);
-
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
- bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
+ bitmap_fill(mc_filter, MACVLAN_MC_FILTER_SZ);
} else {
struct netdev_hw_addr *ha;
DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
@@ -733,10 +744,33 @@ static void macvlan_set_mac_lists(struct net_device *dev)
__set_bit(mc_hash(vlan, dev->broadcast), filter);
- bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
+ bitmap_copy(mc_filter, filter, MACVLAN_MC_FILTER_SZ);
}
+}
+
+static void macvlan_set_mac_lists(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ macvlan_compute_filter(vlan->mc_filter, dev, vlan);
+
dev_uc_sync(vlan->lowerdev, dev);
dev_mc_sync(vlan->lowerdev, dev);
+
+ /* This is slightly inaccurate as we're including the subscription
+ * list of vlan->lowerdev too.
+ *
+ * Bug alert: This only works if everyone has the same broadcast
+ * address as lowerdev. As soon as someone changes theirs this
+ * will break.
+ *
+ * However, this is already broken as when you change your broadcast
+ * address we don't get called.
+ *
+ * The solution is to maintain a list of broadcast addresses like
+ * we do for uc/mc, if you care.
+ */
+ macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL);
}
static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
@@ -754,7 +788,6 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
*/
-static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;
#define ALWAYS_ON_FEATURES \
@@ -775,20 +808,12 @@ static int macvlan_get_nest_level(struct net_device *dev)
return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
}
-static void macvlan_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &macvlan_netdev_xmit_lock_key);
-}
-
static void macvlan_set_lockdep_class(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
lockdep_set_class_and_subclass(&dev->addr_list_lock,
&macvlan_netdev_addr_lock_key,
macvlan_get_nest_level(dev));
- netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}
static int macvlan_init(struct net_device *dev)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 8dedafa..17953ab 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1312,10 +1312,9 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
return stats64;
}
-static struct lock_class_key ppp_tx_busylock;
static int ppp_dev_init(struct net_device *dev)
{
- dev->qdisc_tx_busylock = &ppp_tx_busylock;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2ace126..0a1bb83 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1574,23 +1574,6 @@ static const struct team_option team_options[] = {
},
};
-static struct lock_class_key team_netdev_xmit_lock_key;
-static struct lock_class_key team_netdev_addr_lock_key;
-static struct lock_class_key team_tx_busylock_key;
-
-static void team_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
-}
-
-static void team_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
- dev->qdisc_tx_busylock = &team_tx_busylock_key;
-}
static int team_init(struct net_device *dev)
{
@@ -1626,7 +1609,7 @@ static int team_init(struct net_device *dev)
goto err_options_register;
netif_carrier_off(dev);
- team_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
return 0;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 3f9f6ed..161c25e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1742,7 +1742,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
pkt_len -= CRC_SIZE;
rx_data += sizeof(struct rx_desc);
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
+ skb = napi_alloc_skb(&tp->napi, pkt_len);
if (!skb) {
stats->rx_dropped++;
goto find_next_rx;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e0638e5..192f321 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1780,6 +1780,7 @@ static int virtnet_probe(struct virtio_device *vdev)
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
+ int mtu;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -1896,6 +1897,14 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
+ mtu = virtio_cread16(vdev,
+ offsetof(struct virtio_net_config,
+ mtu));
+ if (virtnet_change_mtu(dev, mtu))
+ __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
+ }
+
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
@@ -2067,6 +2076,7 @@ static unsigned int features[] = {
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
VIRTIO_NET_F_CTRL_MAC_ADDR,
VIRTIO_F_ANY_LAYOUT,
+ VIRTIO_NET_F_MTU,
};
static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index dff0884..b4d7469 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -35,6 +35,7 @@
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
+#include <net/fib_rules.h>
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -42,9 +43,14 @@
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
+#define FIB_RULE_PREF 1000 /* default preference for FIB rules */
+static bool add_fib_rules = true;
+
struct net_vrf {
struct rtable __rcu *rth;
+ struct rtable __rcu *rth_local;
struct rt6_info __rcu *rt6;
+ struct rt6_info __rcu *rt6_local;
u32 tb_id;
};
@@ -54,9 +60,20 @@ struct pcpu_dstats {
u64 tx_drps;
u64 rx_pkts;
u64 rx_bytes;
+ u64 rx_drps;
struct u64_stats_sync syncp;
};
+static void vrf_rx_stats(struct net_device *dev, int len)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ dstats->rx_pkts++;
+ dstats->rx_bytes += len;
+ u64_stats_update_end(&dstats->syncp);
+}
+
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
vrf_dev->stats.tx_errors++;
@@ -91,6 +108,34 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
return stats;
}
+/* Local traffic destined to local address. Reinsert the packet to rx
+ * path, similar to loopback handling.
+ */
+static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct dst_entry *dst)
+{
+ int len = skb->len;
+
+ skb_orphan(skb);
+
+ skb_dst_set(skb, dst);
+ skb_dst_force(skb);
+
+ /* set pkt_type to avoid skb hitting packet taps twice -
+ * once on Tx and again in Rx processing
+ */
+ skb->pkt_type = PACKET_LOOPBACK;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ vrf_rx_stats(dev, len);
+ else
+ this_cpu_inc(dev->dstats->rx_drps);
+
+ return NETDEV_TX_OK;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
struct net_device *dev)
@@ -117,8 +162,51 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
goto err;
skb_dst_drop(skb);
+
+ /* if dst.dev is loopback or the VRF device again this is locally
+ * originated traffic destined to a local address. Short circuit
+ * to Rx path using our local dst
+ */
+ if (dst->dev == net->loopback_dev || dst->dev == dev) {
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct rt6_info *rt6_local;
+
+ /* release looked up dst and use cached local dst */
+ dst_release(dst);
+
+ rcu_read_lock();
+
+ rt6_local = rcu_dereference(vrf->rt6_local);
+ if (unlikely(!rt6_local)) {
+ rcu_read_unlock();
+ goto err;
+ }
+
+ /* Ordering issue: cached local dst is created on newlink
+ * before the IPv6 initialization. Using the local dst
+ * requires rt6i_idev to be set so make sure it is.
+ */
+ if (unlikely(!rt6_local->rt6i_idev)) {
+ rt6_local->rt6i_idev = in6_dev_get(dev);
+ if (!rt6_local->rt6i_idev) {
+ rcu_read_unlock();
+ goto err;
+ }
+ }
+
+ dst = &rt6_local->dst;
+ dst_hold(dst);
+
+ rcu_read_unlock();
+
+ return vrf_local_xmit(skb, dev, &rt6_local->dst);
+ }
+
skb_dst_set(skb, dst);
+ /* strip the ethernet header added for pass through VRF device */
+ __skb_pull(skb, skb_network_offset(skb));
+
ret = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
dev->stats.tx_errors++;
@@ -139,29 +227,6 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
}
#endif
-static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
- struct net_device *vrf_dev)
-{
- struct rtable *rt;
- int err = 1;
-
- rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
- if (IS_ERR(rt))
- goto out;
-
- /* TO-DO: what about broadcast ? */
- if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
- ip_rt_put(rt);
- goto out;
- }
-
- skb_dst_drop(skb);
- skb_dst_set(skb, &rt->dst);
- err = 0;
-out:
- return err;
-}
-
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
struct net_device *vrf_dev)
{
@@ -176,9 +241,51 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
FLOWI_FLAG_SKIP_NH_OIF,
.daddr = ip4h->daddr,
};
+ struct net *net = dev_net(vrf_dev);
+ struct rtable *rt;
- if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
+ rt = ip_route_output_flow(net, &fl4, NULL);
+ if (IS_ERR(rt))
+ goto err;
+
+ if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ ip_rt_put(rt);
goto err;
+ }
+
+ skb_dst_drop(skb);
+
+ /* if dst.dev is loopback or the VRF device again this is locally
+ * originated traffic destined to a local address. Short circuit
+ * to Rx path using our local dst
+ */
+ if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
+ struct net_vrf *vrf = netdev_priv(vrf_dev);
+ struct rtable *rth_local;
+ struct dst_entry *dst = NULL;
+
+ ip_rt_put(rt);
+
+ rcu_read_lock();
+
+ rth_local = rcu_dereference(vrf->rth_local);
+ if (likely(rth_local)) {
+ dst = &rth_local->dst;
+ dst_hold(dst);
+ }
+
+ rcu_read_unlock();
+
+ if (unlikely(!dst))
+ goto err;
+
+ return vrf_local_xmit(skb, vrf_dev, dst);
+ }
+
+ skb_dst_set(skb, &rt->dst);
+
+ /* strip the ethernet header added for pass through VRF device */
+ __skb_pull(skb, skb_network_offset(skb));
if (!ip4h->saddr) {
ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
@@ -200,9 +307,6 @@ err:
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
- /* strip the ethernet header added for pass through VRF device */
- __skb_pull(skb, skb_network_offset(skb));
-
switch (skb->protocol) {
case htons(ETH_P_IP):
return vrf_process_v4_outbound(skb, dev);
@@ -277,27 +381,42 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
static void vrf_rt6_release(struct net_vrf *vrf)
{
struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
+ struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
- rcu_assign_pointer(vrf->rt6, NULL);
+ RCU_INIT_POINTER(vrf->rt6, NULL);
+ RCU_INIT_POINTER(vrf->rt6_local, NULL);
+ synchronize_rcu();
if (rt6)
dst_release(&rt6->dst);
+
+ if (rt6_local) {
+ if (rt6_local->rt6i_idev)
+ in6_dev_put(rt6_local->rt6i_idev);
+
+ dst_release(&rt6_local->dst);
+ }
}
static int vrf_rt6_create(struct net_device *dev)
{
+ int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
struct net_vrf *vrf = netdev_priv(dev);
struct net *net = dev_net(dev);
struct fib6_table *rt6i_table;
- struct rt6_info *rt6;
+ struct rt6_info *rt6, *rt6_local;
int rc = -ENOMEM;
+ /* IPv6 can be CONFIG enabled and then disabled runtime */
+ if (!ipv6_mod_enabled())
+ return 0;
+
rt6i_table = fib6_new_table(net, vrf->tb_id);
if (!rt6i_table)
goto out;
- rt6 = ip6_dst_alloc(net, dev,
- DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
+ /* create a dst for routing packets out a VRF device */
+ rt6 = ip6_dst_alloc(net, dev, flags);
if (!rt6)
goto out;
@@ -305,7 +424,25 @@ static int vrf_rt6_create(struct net_device *dev)
rt6->rt6i_table = rt6i_table;
rt6->dst.output = vrf_output6;
+
+ /* create a dst for local routing - packets sent locally
+ * to local address via the VRF device as a loopback
+ */
+ rt6_local = ip6_dst_alloc(net, dev, flags);
+ if (!rt6_local) {
+ dst_release(&rt6->dst);
+ goto out;
+ }
+
+ dst_hold(&rt6_local->dst);
+
+ rt6_local->rt6i_idev = in6_dev_get(dev);
+ rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
+ rt6_local->rt6i_table = rt6i_table;
+ rt6_local->dst.input = ip6_input;
+
rcu_assign_pointer(vrf->rt6, rt6);
+ rcu_assign_pointer(vrf->rt6_local, rt6_local);
rc = 0;
out:
@@ -384,29 +521,48 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
static void vrf_rtable_release(struct net_vrf *vrf)
{
struct rtable *rth = rtnl_dereference(vrf->rth);
+ struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
- rcu_assign_pointer(vrf->rth, NULL);
+ RCU_INIT_POINTER(vrf->rth, NULL);
+ RCU_INIT_POINTER(vrf->rth_local, NULL);
+ synchronize_rcu();
if (rth)
dst_release(&rth->dst);
+
+ if (rth_local)
+ dst_release(&rth_local->dst);
}
static int vrf_rtable_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct rtable *rth;
+ struct rtable *rth, *rth_local;
if (!fib_new_table(dev_net(dev), vrf->tb_id))
return -ENOMEM;
+ /* create a dst for routing packets out through a VRF device */
rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
if (!rth)
return -ENOMEM;
+ /* create a dst for local ingress routing - packets sent locally
+ * to local address via the VRF device as a loopback
+ */
+ rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
+ if (!rth_local) {
+ dst_release(&rth->dst);
+ return -ENOMEM;
+ }
+
rth->dst.output = vrf_output;
rth->rt_table_id = vrf->tb_id;
+ rth_local->rt_table_id = vrf->tb_id;
+
rcu_assign_pointer(vrf->rth, rth);
+ rcu_assign_pointer(vrf->rth_local, rth_local);
return 0;
}
@@ -504,6 +660,12 @@ static int vrf_dev_init(struct net_device *dev)
dev->flags = IFF_MASTER | IFF_NOARP;
+ /* MTU is irrelevant for VRF device; set to 64k similar to lo */
+ dev->mtu = 64 * 1024;
+
+ /* similarly, oper state is irrelevant; set to up to avoid confusion */
+ dev->operstate = IF_OPER_UP;
+ netdev_lockdep_set_classes(dev);
return 0;
out_rth:
@@ -626,6 +788,16 @@ out:
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
struct sk_buff *skb)
{
+ /* loopback traffic; do not push through packet taps again.
+ * Reset pkt_type for upper layers to process skb
+ */
+ if (skb->pkt_type == PACKET_LOOPBACK) {
+ skb->dev = vrf_dev;
+ skb->skb_iif = vrf_dev->ifindex;
+ skb->pkt_type = PACKET_HOST;
+ goto out;
+ }
+
/* if packet is NDISC keep the ingress interface */
if (!ipv6_ndisc_frame(skb)) {
skb->dev = vrf_dev;
@@ -638,6 +810,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
}
+out:
return skb;
}
@@ -655,10 +828,19 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ /* loopback traffic; do not push through packet taps again.
+ * Reset pkt_type for upper layers to process skb
+ */
+ if (skb->pkt_type == PACKET_LOOPBACK) {
+ skb->pkt_type = PACKET_HOST;
+ goto out;
+ }
+
skb_push(skb, skb->mac_len);
dev_queue_xmit_nit(skb, vrf_dev);
skb_pull(skb, skb->mac_len);
+out:
return skb;
}
@@ -723,6 +905,94 @@ static const struct ethtool_ops vrf_ethtool_ops = {
.get_drvinfo = vrf_get_drvinfo,
};
+static inline size_t vrf_fib_rule_nl_size(void)
+{
+ size_t sz;
+
+ sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
+ sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */
+ sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */
+
+ return sz;
+}
+
+static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+{
+ struct fib_rule_hdr *frh;
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ int err;
+
+ if (family == AF_INET6 && !ipv6_mod_enabled())
+ return 0;
+
+ skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
+ if (!nlh)
+ goto nla_put_failure;
+
+ /* rule only needs to appear once */
+ nlh->nlmsg_flags &= NLM_F_EXCL;
+
+ frh = nlmsg_data(nlh);
+ memset(frh, 0, sizeof(*frh));
+ frh->family = family;
+ frh->action = FR_ACT_TO_TBL;
+
+ if (nla_put_u32(skb, FRA_L3MDEV, 1))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+
+ /* fib_nl_{new,del}rule handling looks for net from skb->sk */
+ skb->sk = dev_net(dev)->rtnl;
+ if (add_it) {
+ err = fib_nl_newrule(skb, nlh);
+ if (err == -EEXIST)
+ err = 0;
+ } else {
+ err = fib_nl_delrule(skb, nlh);
+ if (err == -ENOENT)
+ err = 0;
+ }
+ nlmsg_free(skb);
+
+ return err;
+
+nla_put_failure:
+ nlmsg_free(skb);
+
+ return -EMSGSIZE;
+}
+
+static int vrf_add_fib_rules(const struct net_device *dev)
+{
+ int err;
+
+ err = vrf_fib_rule(dev, AF_INET, true);
+ if (err < 0)
+ goto out_err;
+
+ err = vrf_fib_rule(dev, AF_INET6, true);
+ if (err < 0)
+ goto ipv6_err;
+
+ return 0;
+
+ipv6_err:
+ vrf_fib_rule(dev, AF_INET, false);
+
+out_err:
+ netdev_err(dev, "Failed to add FIB rules.\n");
+ return err;
+}
+
static void vrf_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -763,6 +1033,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net_vrf *vrf = netdev_priv(dev);
+ int err;
if (!data || !data[IFLA_VRF_TABLE])
return -EINVAL;
@@ -771,7 +1042,21 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_L3MDEV_MASTER;
- return register_netdevice(dev);
+ err = register_netdevice(dev);
+ if (err)
+ goto out;
+
+ if (add_fib_rules) {
+ err = vrf_add_fib_rules(dev);
+ if (err) {
+ unregister_netdevice(dev);
+ goto out;
+ }
+ add_fib_rules = false;
+ }
+
+out:
+ return err;
}
static size_t vrf_nl_getsize(const struct net_device *dev)
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index a2fdd15..9e314b7 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -280,6 +280,17 @@ config DSCC4
To compile this driver as a module, choose M here: the
module will be called dscc4.
+config FSL_UCC_HDLC
+ tristate "Freescale QUICC Engine HDLC support"
+ depends on HDLC
+ depends on QUICC_ENGINE
+ help
+ Driver for Freescale QUICC Engine HDLC controller. The driver
+ supports HDLC in NMSI and TDM mode.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fsl_ucc_hdlc.
+
config DSCC4_PCISYNC
bool "Etinc PCISYNC features"
depends on DSCC4
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index c135ef4..25fec40 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_WANXL) += wanxl.o
obj-$(CONFIG_PCI200SYN) += pci200syn.o
obj-$(CONFIG_PC300TOO) += pc300too.o
obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
+obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o
clean-files := wanxlfw.inc
$(obj)/wanxl.o: $(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
new file mode 100644
index 0000000..19174ac
--- /dev/null
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -0,0 +1,1192 @@
+/* Freescale QUICC Engine HDLC Device Driver
+ *
+ * Copyright 2016 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/hdlc.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <soc/fsl/qe/qe_tdm.h>
+#include <uapi/linux/if_arp.h>
+
+#include "fsl_ucc_hdlc.h"
+
+#define DRV_DESC "Freescale QE UCC HDLC Driver"
+#define DRV_NAME "ucc_hdlc"
+
+#define TDM_PPPOHT_SLIC_MAXIN
+#define BROKEN_FRAME_INFO
+
+static struct ucc_tdm_info utdm_primary_info = {
+ .uf_info = {
+ .tsa = 0,
+ .cdp = 0,
+ .cds = 1,
+ .ctsp = 1,
+ .ctss = 1,
+ .revd = 0,
+ .urfs = 256,
+ .utfs = 256,
+ .urfet = 128,
+ .urfset = 192,
+ .utfet = 128,
+ .utftt = 0x40,
+ .ufpt = 256,
+ .mode = UCC_FAST_PROTOCOL_MODE_HDLC,
+ .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+ .tenc = UCC_FAST_TX_ENCODING_NRZ,
+ .renc = UCC_FAST_RX_ENCODING_NRZ,
+ .tcrc = UCC_FAST_16_BIT_CRC,
+ .synl = UCC_FAST_SYNC_LEN_NOT_USED,
+ },
+
+ .si_info = {
+#ifdef TDM_PPPOHT_SLIC_MAXIN
+ .simr_rfsd = 1,
+ .simr_tfsd = 2,
+#else
+ .simr_rfsd = 0,
+ .simr_tfsd = 0,
+#endif
+ .simr_crt = 0,
+ .simr_sl = 0,
+ .simr_ce = 1,
+ .simr_fe = 1,
+ .simr_gm = 0,
+ },
+};
+
+static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+
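+/* One-time hardware setup: bring up the UCC fast block, allocate the Rx/Tx
+ * BD rings and data buffers in coherent DMA memory, allocate the HDLC
+ * parameter RAM in MURAM and seed every Rx BD as empty and every Tx BD as
+ * idle.
+ */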
+static int uhdlc_init(struct ucc_hdlc_private *priv)
+{
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast_info *uf_info;
+ u32 cecr_subblock;
+ u16 bd_status;
+ int ret, i;
+ void *bd_buffer;
+ dma_addr_t bd_dma_addr;
+ u32 riptr;
+ u32 tiptr;
+ u32 gumr;
+
+ ut_info = priv->ut_info;
+ uf_info = &ut_info->uf_info;
+
+ if (priv->tsa) {
+ uf_info->tsa = 1;
+ uf_info->ctsp = 1;
+ }
+ uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
+ UCC_HDLC_UCCE_TXB) << 16);
+
+ ret = ucc_fast_init(uf_info, &priv->uccf);
+ if (ret) {
+ dev_err(priv->dev, "Failed to init uccf\n");
+ return ret;
+ }
+
+ priv->uf_regs = priv->uccf->uf_regs;
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* Loopback mode */
+ if (priv->loopback) {
+ dev_info(priv->dev, "Loopback Mode\n");
+ gumr = ioread32be(&priv->uf_regs->gumr);
+ gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
+ UCC_FAST_GUMR_TCI);
+ gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
+ iowrite32be(gumr, &priv->uf_regs->gumr);
+ }
+
+ /* Initialize SI */
+ if (priv->tsa)
+ ucc_tdm_init(priv->utdm, priv->ut_info);
+
+ /* Write to QE CECR, UCCx channel to Stop Transmission */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ /* Set UPSMR normal mode (needs fixing) */
+ iowrite32be(0, &priv->uf_regs->upsmr);
+
+ priv->rx_ring_size = RX_BD_RING_LEN;
+ priv->tx_ring_size = TX_BD_RING_LEN;
+ /* Alloc Rx BD */
+ priv->rx_bd_base = dma_alloc_coherent(priv->dev,
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
+ &priv->dma_rx_bd, GFP_KERNEL);
+
+ if (!priv->rx_bd_base) {
+ dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
+ ret = -ENOMEM;
+ goto rxbd_alloc_error;
+ }
+
+ /* Alloc Tx BD */
+ priv->tx_bd_base = dma_alloc_coherent(priv->dev,
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
+ &priv->dma_tx_bd, GFP_KERNEL);
+
+ if (!priv->tx_bd_base) {
+ dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
+ ret = -ENOMEM;
+ goto txbd_alloc_error;
+ }
+
+ /* Alloc parameter ram for ucc hdlc */
+ priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
+ ALIGNMENT_OF_UCC_HDLC_PRAM);
+
+ if (priv->ucc_pram_offset < 0) {
+ dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter\n");
+ ret = -ENOMEM;
+ goto pram_alloc_error;
+ }
+
+ priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff) {
+ ret = -ENOMEM;
+ goto rx_skb_alloc_error;
+ }
+
+ priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff) {
+ ret = -ENOMEM;
+ goto tx_skb_alloc_error;
+ }
+
+ priv->skb_curtx = 0;
+ priv->skb_dirtytx = 0;
+ priv->curtx_bd = priv->tx_bd_base;
+ priv->dirty_tx = priv->tx_bd_base;
+ priv->currx_bd = priv->rx_bd_base;
+ priv->currx_bdnum = 0;
+
+ /* init parameter base */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+
+ priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
+ qe_muram_addr(priv->ucc_pram_offset);
+
+ /* Zero out parameter ram */
+ memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
+
+ /* Alloc riptr, tiptr */
+ riptr = qe_muram_alloc(32, 32);
+ if (riptr < 0) {
+ dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
+ ret = -ENOMEM;
+ goto riptr_alloc_error;
+ }
+
+ tiptr = qe_muram_alloc(32, 32);
+ if (tiptr < 0) {
+ dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
+ ret = -ENOMEM;
+ goto tiptr_alloc_error;
+ }
+
+ /* Set RIPTR, TIPTR */
+ iowrite16be(riptr, &priv->ucc_pram->riptr);
+ iowrite16be(tiptr, &priv->ucc_pram->tiptr);
+
+ /* Set MRBLR */
+ iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
+
+ /* Set RBASE, TBASE */
+ iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
+ iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
+
+ /* Set RSTATE, TSTATE */
+ iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
+ iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
+
+ /* Set C_MASK, C_PRES for 16bit CRC */
+ iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
+ iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
+
+ iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
+ iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
+ iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
+ iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
+
+ /* Get BD buffer */
+ bd_buffer = dma_alloc_coherent(priv->dev,
+ (RX_BD_RING_LEN + TX_BD_RING_LEN) *
+ MAX_RX_BUF_LENGTH,
+ &bd_dma_addr, GFP_KERNEL);
+
+ if (!bd_buffer) {
+ dev_err(priv->dev, "Could not allocate buffer descriptors\n");
+ ret = -ENOMEM;
+ goto bd_alloc_error;
+ }
+
+ memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
+ * MAX_RX_BUF_LENGTH);
+
+ priv->rx_buffer = bd_buffer;
+ priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+ priv->dma_rx_addr = bd_dma_addr;
+ priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+ for (i = 0; i < RX_BD_RING_LEN; i++) {
+ if (i < (RX_BD_RING_LEN - 1))
+ bd_status = R_E_S | R_I_S;
+ else
+ bd_status = R_E_S | R_I_S | R_W_S;
+
+ iowrite16be(bd_status, &priv->rx_bd_base[i].status);
+ iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->rx_bd_base[i].buf);
+ }
+
+ for (i = 0; i < TX_BD_RING_LEN; i++) {
+ if (i < (TX_BD_RING_LEN - 1))
+ bd_status = T_I_S | T_TC_S;
+ else
+ bd_status = T_I_S | T_TC_S | T_W_S;
+
+ iowrite16be(bd_status, &priv->tx_bd_base[i].status);
+ iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->tx_bd_base[i].buf);
+ }
+
+ return 0;
+
+bd_alloc_error:
+ qe_muram_free(tiptr);
+tiptr_alloc_error:
+ qe_muram_free(riptr);
+riptr_alloc_error:
+ kfree(priv->tx_skbuff);
+tx_skb_alloc_error:
+ kfree(priv->rx_skbuff);
+rx_skb_alloc_error:
+ qe_muram_free(priv->ucc_pram_offset);
+pram_alloc_error:
+ dma_free_coherent(priv->dev,
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->tx_bd_base, priv->dma_tx_bd);
+txbd_alloc_error:
+ dma_free_coherent(priv->dev,
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->rx_bd_base, priv->dma_rx_bd);
+rxbd_alloc_error:
+ ucc_fast_free(priv->uccf);
+
+ return ret;
+}
+
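+/* Transmit path: prepend the HDLC head for raw HDLC, copy the skb payload
+ * into the coherent Tx buffer backing the current BD, then hand the BD to
+ * the QE by setting its Ready bit; hdlc_tx_done() reclaims it from NAPI.
+ */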
+static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
+ struct qe_bd __iomem *bd;
+ u16 bd_status;
+ unsigned long flags;
+ u8 *send_buf;
+ int i;
+ u16 *proto_head;
+
+ switch (dev->type) {
+ case ARPHRD_RAWHDLC:
+ if (skb_headroom(skb) < HDLC_HEAD_LEN) {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ netdev_err(dev, "Not enough space for hdlc head\n");
+ return NETDEV_TX_OK;
+ }
+
+ skb_push(skb, HDLC_HEAD_LEN);
+
+ proto_head = (u16 *)skb->data;
+ *proto_head = htons(DEFAULT_HDLC_HEAD);
+
+ dev->stats.tx_bytes += skb->len;
+ break;
+
+ case ARPHRD_PPP:
+ proto_head = (u16 *)skb->data;
+ if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ netdev_err(dev, "Wrong ppp header\n");
+ return NETDEV_TX_OK;
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ break;
+
+ default:
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pr_info("Tx data skb->len:%d ", skb->len);
+ send_buf = (u8 *)skb->data;
+ pr_info("\nTransmitted data:\n");
+ for (i = 0; i < 16; i++) {
+ if (i == skb->len)
+ pr_info("++++");
+ else
+ pr_info("%02x\n", send_buf[i]);
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Start from the next BD that should be filled */
+ bd = priv->curtx_bd;
+ bd_status = ioread16be(&bd->status);
+ /* Save the skb pointer so we can free it later */
+ priv->tx_skbuff[priv->skb_curtx] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ priv->skb_curtx =
+ (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
+
+ /* copy skb data to tx buffer for sdma processing */
+ memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
+ skb->data, skb->len);
+
+ /* set bd status and length */
+ bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
+
+ iowrite16be(bd_status, &bd->status);
+ iowrite16be(skb->len, &bd->length);
+
+ /* Move to next BD in the ring */
+ if (!(bd_status & T_W_S))
+ bd += 1;
+ else
+ bd = priv->tx_bd_base;
+
+ if (bd == priv->dirty_tx) {
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+ }
+
+ priv->curtx_bd = bd;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static int hdlc_tx_done(struct ucc_hdlc_private *priv)
+{
+ /* Start from the next BD that should be filled */
+ struct net_device *dev = priv->ndev;
+ struct qe_bd *bd; /* BD pointer */
+ u16 bd_status;
+
+ bd = priv->dirty_tx;
+ bd_status = ioread16be(&bd->status);
+
+ /* Normal processing. */
+ while ((bd_status & T_R_S) == 0) {
+ struct sk_buff *skb;
+
+ /* BD contains already transmitted buffer. */
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
+ skb = priv->tx_skbuff[priv->skb_dirtytx];
+ if (!skb)
+ break;
+ pr_info("TxBD: %x\n", bd_status);
+ dev->stats.tx_packets++;
+ memset(priv->tx_buffer +
+ (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
+ 0, skb->len);
+ dev_kfree_skb_irq(skb);
+
+ priv->tx_skbuff[priv->skb_dirtytx] = NULL;
+ priv->skb_dirtytx =
+ (priv->skb_dirtytx +
+ 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ /* Advance the confirmation BD pointer */
+ if (!(bd_status & T_W_S))
+ bd += 1;
+ else
+ bd = priv->tx_bd_base;
+ bd_status = ioread16be(&bd->status);
+ }
+ priv->dirty_tx = bd;
+
+ return 0;
+}
+
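+/* NAPI receive: walk the Rx BD ring until an empty BD or the budget is hit,
+ * copy each completed frame into a fresh skb, strip the HDLC head/CRC as
+ * needed and push it up the stack, then mark the BD empty again.
+ */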
+static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
+{
+ struct net_device *dev = priv->ndev;
+ struct sk_buff *skb;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct qe_bd *bd;
+ u32 bd_status;
+ u16 length, howmany = 0;
+ u8 *bdbuffer;
+ int i;
+ static int entry;
+
+ bd = priv->currx_bd;
+ bd_status = ioread16be(&bd->status);
+
+ /* while there are received buffers and BD is full (~R_E) */
+ while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
+ if (bd_status & R_OV_S)
+ dev->stats.rx_over_errors++;
+ if (bd_status & R_CR_S) {
+#ifdef BROKEN_FRAME_INFO
+ pr_info("Broken Frame with RxBD: %x\n", bd_status);
+#endif
+ dev->stats.rx_crc_errors++;
+ dev->stats.rx_dropped++;
+ goto recycle;
+ }
+ bdbuffer = priv->rx_buffer +
+ (priv->currx_bdnum * MAX_RX_BUF_LENGTH);
+ length = ioread16be(&bd->length);
+
+ pr_info("Received data length:%d", length);
+ pr_info("while entry times:%d", entry++);
+
+ pr_info("\nReceived data:\n");
+ for (i = 0; (i < 16); i++) {
+ if (i == length)
+ pr_info("++++");
+ else
+ pr_info("%02x\n", bdbuffer[i]);
+ }
+
+ switch (dev->type) {
+ case ARPHRD_RAWHDLC:
+ bdbuffer += HDLC_HEAD_LEN;
+ length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
+
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_put(skb, length);
+ skb->len = length;
+ skb->dev = dev;
+ memcpy(skb->data, bdbuffer, length);
+ break;
+
+ case ARPHRD_PPP:
+ length -= HDLC_CRC_SIZE;
+
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_put(skb, length);
+ skb->len = length;
+ skb->dev = dev;
+ memcpy(skb->data, bdbuffer, length);
+ break;
+ }
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ howmany++;
+ if (hdlc->proto)
+ skb->protocol = hdlc_type_trans(skb, dev);
+ pr_info("skb->protocol:%x\n", skb->protocol);
+ netif_receive_skb(skb);
+
+recycle:
+ iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
+
+ /* update to point at the next bd */
+ if (bd_status & R_W_S) {
+ priv->currx_bdnum = 0;
+ bd = priv->rx_bd_base;
+ } else {
+ if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
+ priv->currx_bdnum += 1;
+ else
+ priv->currx_bdnum = RX_BD_RING_LEN - 1;
+
+ bd += 1;
+ }
+
+ bd_status = ioread16be(&bd->status);
+ }
+
+ priv->currx_bd = bd;
+ return howmany;
+}
+
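+/* NAPI poll: reclaim completed Tx BDs, process up to @budget Rx frames and
+ * re-enable the UCC Rx/Tx event interrupts once the ring has been drained.
+ */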
+static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
+{
+ struct ucc_hdlc_private *priv = container_of(napi,
+ struct ucc_hdlc_private,
+ napi);
+ int howmany;
+
+ /* Tx event processing */
+ spin_lock(&priv->lock);
+ hdlc_tx_done(priv);
+ spin_unlock(&priv->lock);
+
+ howmany = 0;
+ howmany += hdlc_rx_done(priv, budget - howmany);
+
+ if (howmany < budget) {
+ napi_complete(napi);
+ qe_setbits32(priv->uccf->p_uccm,
+ (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
+ }
+
+ return howmany;
+}
+
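+/* Top half: acknowledge the pending UCC events, mask further Rx/Tx events
+ * and let NAPI do the ring processing; error events are only counted here.
+ */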
+static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
+{
+ struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
+ struct net_device *dev = priv->ndev;
+ struct ucc_fast_private *uccf;
+ struct ucc_tdm_info *ut_info;
+ u32 ucce;
+ u32 uccm;
+
+ ut_info = priv->ut_info;
+ uccf = priv->uccf;
+
+ ucce = ioread32be(uccf->p_ucce);
+ uccm = ioread32be(uccf->p_uccm);
+ ucce &= uccm;
+ iowrite32be(ucce, uccf->p_ucce);
+ pr_info("irq ucce:%x\n", ucce);
+ if (!ucce)
+ return IRQ_NONE;
+
+ if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
+ if (napi_schedule_prep(&priv->napi)) {
+ uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
+ << 16);
+ iowrite32be(uccm, uccf->p_uccm);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ /* Errors and other events */
+ if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
+ dev->stats.rx_errors++;
+ if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
+ dev->stats.tx_errors++;
+
+ return IRQ_HANDLED;
+}
+
+static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(te1_settings);
+ te1_settings line;
+ struct ucc_hdlc_private *priv = netdev_priv(dev);
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ ifr->ifr_settings.type = IF_IFACE_E1;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ line.clock_type = priv->clocking;
+ line.clock_rate = 0;
+ line.loopback = 0;
+
+ if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+static int uhdlc_open(struct net_device *dev)
+{
+ u32 cecr_subblock;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct ucc_hdlc_private *priv = hdlc->priv;
+ struct ucc_tdm *utdm = priv->utdm;
+
+ if (priv->hdlc_busy != 1) {
+ if (request_irq(priv->ut_info->uf_info.irq,
+ ucc_hdlc_irq_handler, 0, "hdlc", priv))
+ return -ENODEV;
+
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* Enable the TDM port */
+ if (priv->tsa)
+ utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+
+ priv->hdlc_busy = 1;
+ netif_device_attach(priv->ndev);
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+ hdlc_open(dev);
+ }
+
+ return 0;
+}
+
+static void uhdlc_memclean(struct ucc_hdlc_private *priv)
+{
+ qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
+ qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
+
+ if (priv->rx_bd_base) {
+ dma_free_coherent(priv->dev,
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->rx_bd_base, priv->dma_rx_bd);
+
+ priv->rx_bd_base = NULL;
+ priv->dma_rx_bd = 0;
+ }
+
+ if (priv->tx_bd_base) {
+ dma_free_coherent(priv->dev,
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->tx_bd_base, priv->dma_tx_bd);
+
+ priv->tx_bd_base = NULL;
+ priv->dma_tx_bd = 0;
+ }
+
+ if (priv->ucc_pram) {
+ qe_muram_free(priv->ucc_pram_offset);
+ priv->ucc_pram = NULL;
+ priv->ucc_pram_offset = 0;
+ }
+
+ kfree(priv->rx_skbuff);
+ priv->rx_skbuff = NULL;
+
+ kfree(priv->tx_skbuff);
+ priv->tx_skbuff = NULL;
+
+ if (priv->uf_regs) {
+ iounmap(priv->uf_regs);
+ priv->uf_regs = NULL;
+ }
+
+ if (priv->uccf) {
+ ucc_fast_free(priv->uccf);
+ priv->uccf = NULL;
+ }
+
+ if (priv->rx_buffer) {
+ dma_free_coherent(priv->dev,
+ RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
+ priv->rx_buffer, priv->dma_rx_addr);
+ priv->rx_buffer = NULL;
+ priv->dma_rx_addr = 0;
+ }
+
+ if (priv->tx_buffer) {
+ dma_free_coherent(priv->dev,
+ TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
+ priv->tx_buffer, priv->dma_tx_addr);
+ priv->tx_buffer = NULL;
+ priv->dma_tx_addr = 0;
+ }
+}
+
+static int uhdlc_close(struct net_device *dev)
+{
+ struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
+ struct ucc_tdm *utdm = priv->utdm;
+ u32 cecr_subblock;
+
+ napi_disable(&priv->napi);
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ if (priv->tsa)
+ utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
+
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ free_irq(priv->ut_info->uf_info.irq, priv);
+ netif_stop_queue(dev);
+ priv->hdlc_busy = 0;
+
+ return 0;
+}
+
+static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
+
+ if (encoding != ENCODING_NRZ &&
+ encoding != ENCODING_NRZI)
+ return -EINVAL;
+
+ if (parity != PARITY_NONE &&
+ parity != PARITY_CRC32_PR1_CCITT &&
+ parity != PARITY_CRC16_PR1_CCITT)
+ return -EINVAL;
+
+ priv->encoding = encoding;
+ priv->parity = parity;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
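+/* Suspend/resume support: the QE block can lose state across a deep sleep,
+ * so the CMX clock routing and the HDLC parameter RAM are saved to system
+ * memory on suspend and written back (with the BD rings rebuilt) on resume.
+ */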
+static void store_clk_config(struct ucc_hdlc_private *priv)
+{
+ struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+
+ /* store si clk */
+ priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
+ priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
+
+ /* store si sync */
+ priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
+
+ /* store ucc clk */
+ memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
+}
+
+static void resume_clk_config(struct ucc_hdlc_private *priv)
+{
+ struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+
+ memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
+
+ iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
+ iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
+
+ iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
+}
+
+static int uhdlc_suspend(struct device *dev)
+{
+ struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast __iomem *uf_regs;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!netif_running(priv->ndev))
+ return 0;
+
+ netif_device_detach(priv->ndev);
+ napi_disable(&priv->napi);
+
+ ut_info = priv->ut_info;
+ uf_regs = priv->uf_regs;
+
+ /* backup gumr guemr*/
+ priv->gumr = ioread32be(&uf_regs->gumr);
+ priv->guemr = ioread8(&uf_regs->guemr);
+
+ priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
+ GFP_KERNEL);
+ if (!priv->ucc_pram_bak)
+ return -ENOMEM;
+
+ /* backup HDLC parameter */
+ memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
+ sizeof(struct ucc_hdlc_param));
+
+ /* store the clk configuration */
+ store_clk_config(priv);
+
+ /* save power */
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ dev_dbg(dev, "ucc hdlc suspend\n");
+ return 0;
+}
+
+static int uhdlc_resume(struct device *dev)
+{
+ struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
+ struct ucc_tdm *utdm = priv->utdm;
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_fast_private *uccf;
+ struct ucc_fast_info *uf_info;
+ int ret, i;
+ u32 cecr_subblock;
+ u16 bd_status;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!netif_running(priv->ndev))
+ return 0;
+
+ ut_info = priv->ut_info;
+ uf_info = &ut_info->uf_info;
+ uf_regs = priv->uf_regs;
+ uccf = priv->uccf;
+
+ /* restore gumr guemr */
+ iowrite8(priv->guemr, &uf_regs->guemr);
+ iowrite32be(priv->gumr, &uf_regs->gumr);
+
+ /* Set Virtual Fifo registers */
+ iowrite16be(uf_info->urfs, &uf_regs->urfs);
+ iowrite16be(uf_info->urfet, &uf_regs->urfet);
+ iowrite16be(uf_info->urfset, &uf_regs->urfset);
+ iowrite16be(uf_info->utfs, &uf_regs->utfs);
+ iowrite16be(uf_info->utfet, &uf_regs->utfet);
+ iowrite16be(uf_info->utftt, &uf_regs->utftt);
+ /* utfb, urfb are offsets from MURAM base */
+ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
+ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
+
+ /* Rx Tx and sync clock routing */
+ resume_clk_config(priv);
+
+ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+ iowrite32be(0xffffffff, &uf_regs->ucce);
+
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* rebuild SIRAM */
+ if (priv->tsa)
+ ucc_tdm_init(priv->utdm, priv->ut_info);
+
+ /* Write to QE CECR, UCCx channel to Stop Transmission */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ /* Set UPSMR normal mode */
+ iowrite32be(0, &uf_regs->upsmr);
+
+ /* init parameter base */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+
+ priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
+ qe_muram_addr(priv->ucc_pram_offset);
+
+ /* restore ucc parameter */
+ memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
+ sizeof(struct ucc_hdlc_param));
+ kfree(priv->ucc_pram_bak);
+
+ /* rebuild BD entry */
+ for (i = 0; i < RX_BD_RING_LEN; i++) {
+ if (i < (RX_BD_RING_LEN - 1))
+ bd_status = R_E_S | R_I_S;
+ else
+ bd_status = R_E_S | R_I_S | R_W_S;
+
+ iowrite16be(bd_status, &priv->rx_bd_base[i].status);
+ iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->rx_bd_base[i].buf);
+ }
+
+ for (i = 0; i < TX_BD_RING_LEN; i++) {
+ if (i < (TX_BD_RING_LEN - 1))
+ bd_status = T_I_S | T_TC_S;
+ else
+ bd_status = T_I_S | T_TC_S | T_W_S;
+
+ iowrite16be(bd_status, &priv->tx_bd_base[i].status);
+ iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->tx_bd_base[i].buf);
+ }
+
+ /* if hdlc is busy enable TX and RX */
+ if (priv->hdlc_busy == 1) {
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* Enable the TDM port */
+ if (priv->tsa)
+ utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+ }
+
+ napi_enable(&priv->napi);
+ netif_device_attach(priv->ndev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops uhdlc_pm_ops = {
+ .suspend = uhdlc_suspend,
+ .resume = uhdlc_resume,
+ .freeze = uhdlc_suspend,
+ .thaw = uhdlc_resume,
+};
+
+#define HDLC_PM_OPS (&uhdlc_pm_ops)
+
+#else
+
+#define HDLC_PM_OPS NULL
+
+#endif
+static const struct net_device_ops uhdlc_ops = {
+ .ndo_open = uhdlc_open,
+ .ndo_stop = uhdlc_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = uhdlc_ioctl,
+};
+
+static int ucc_hdlc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ucc_hdlc_private *uhdlc_priv = NULL;
+ struct ucc_tdm_info *ut_info;
+ struct ucc_tdm *utdm;
+ struct resource res;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+ int ucc_num;
+ const char *sprop;
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32_index(np, "cell-index", 0, &val);
+ if (ret) {
+ dev_err(&pdev->dev, "Invalid ucc property\n");
+ return -ENODEV;
+ }
+
+ ucc_num = val - 1;
+ if ((ucc_num > 3) || (ucc_num < 0)) {
+ dev_err(&pdev->dev, "Invalid UCC num\n");
+ return -EINVAL;
+ }
+
+ memcpy(&utdm_info[ucc_num], &utdm_primary_info,
+ sizeof(utdm_primary_info));
+
+ ut_info = &utdm_info[ucc_num];
+ ut_info->uf_info.ucc_num = ucc_num;
+
+ sprop = of_get_property(np, "rx-clock-name", NULL);
+ if (sprop) {
+ ut_info->uf_info.rx_clock = qe_clock_source(sprop);
+ if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
+ (ut_info->uf_info.rx_clock > QE_CLK24)) {
+ dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
+ return -EINVAL;
+ }
+
+ sprop = of_get_property(np, "tx-clock-name", NULL);
+ if (sprop) {
+ ut_info->uf_info.tx_clock = qe_clock_source(sprop);
+ if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
+ (ut_info->uf_info.tx_clock > QE_CLK24)) {
+ dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
+ return -EINVAL;
+ }
+
+ /* use the same clock when work in loopback */
+ if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock)
+ qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return -EINVAL;
+
+ ut_info->uf_info.regs = res.start;
+ ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
+
+ uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
+ if (!uhdlc_priv) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "No mem to alloc hdlc private data\n");
+ goto err_alloc_priv;
+ }
+
+ dev_set_drvdata(&pdev->dev, uhdlc_priv);
+ uhdlc_priv->dev = &pdev->dev;
+ uhdlc_priv->ut_info = ut_info;
+
+ if (of_get_property(np, "fsl,tdm-interface", NULL))
+ uhdlc_priv->tsa = 1;
+
+ if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
+ uhdlc_priv->loopback = 1;
+
+ if (uhdlc_priv->tsa == 1) {
+ utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
+ if (!utdm) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
+ goto err_alloc_utdm;
+ }
+ uhdlc_priv->utdm = utdm;
+ ret = ucc_of_parse_tdm(np, utdm, ut_info);
+ if (ret)
+ goto err_miss_tsa_property;
+ }
+
+ ret = uhdlc_init(uhdlc_priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init uhdlc\n");
+ goto err_hdlc_init;
+ }
+
+ dev = alloc_hdlcdev(uhdlc_priv);
+ if (!dev) {
+ ret = -ENOMEM;
+ pr_err("ucc_hdlc: unable to allocate memory\n");
+ goto err_hdlc_init;
+ }
+
+ uhdlc_priv->ndev = dev;
+ hdlc = dev_to_hdlc(dev);
+ dev->tx_queue_len = 16;
+ dev->netdev_ops = &uhdlc_ops;
+ hdlc->attach = ucc_hdlc_attach;
+ hdlc->xmit = ucc_hdlc_tx;
+ netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
+ if (register_hdlc_device(dev)) {
+ ret = -ENOBUFS;
+ pr_err("ucc_hdlc: unable to register hdlc device\n");
+ free_netdev(dev);
+ goto err_hdlc_init;
+ }
+
+ return 0;
+
+err_hdlc_init:
+err_miss_tsa_property:
+ if (uhdlc_priv->tsa)
+ kfree(utdm);
+err_alloc_utdm:
+ kfree(uhdlc_priv);
+err_alloc_priv:
+ return ret;
+}
+
+static int ucc_hdlc_remove(struct platform_device *pdev)
+{
+ struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
+
+ uhdlc_memclean(priv);
+
+ if (priv->utdm->si_regs) {
+ iounmap(priv->utdm->si_regs);
+ priv->utdm->si_regs = NULL;
+ }
+
+ if (priv->utdm->siram) {
+ iounmap(priv->utdm->siram);
+ priv->utdm->siram = NULL;
+ }
+ kfree(priv);
+
+ dev_info(&pdev->dev, "UCC based hdlc module removed\n");
+
+ return 0;
+}
+
+static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
+ {
+ .compatible = "fsl,ucc-hdlc",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
+
+static struct platform_driver ucc_hdlc_driver = {
+ .probe = ucc_hdlc_probe,
+ .remove = ucc_hdlc_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .pm = HDLC_PM_OPS,
+ .of_match_table = fsl_ucc_hdlc_of_match,
+ },
+};
+
+static int __init ucc_hdlc_init(void)
+{
+ return platform_driver_register(&ucc_hdlc_driver);
+}
+
+static void __exit ucc_hdlc_exit(void)
+{
+ platform_driver_unregister(&ucc_hdlc_driver);
+}
+
+module_init(ucc_hdlc_init);
+module_exit(ucc_hdlc_exit);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
new file mode 100644
index 0000000..881ecde
--- /dev/null
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -0,0 +1,147 @@
+/* Freescale QUICC Engine HDLC Device Driver
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _UCC_HDLC_H_
+#define _UCC_HDLC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+/* UCC HDLC event register */
+#define UCCE_HDLC_RX_EVENTS \
+(UCC_HDLC_UCCE_RXF | UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_BSY)
+#define UCCE_HDLC_TX_EVENTS (UCC_HDLC_UCCE_TXB | UCC_HDLC_UCCE_TXE)
+
+struct ucc_hdlc_param {
+ __be16 riptr;
+ __be16 tiptr;
+ __be16 res0;
+ __be16 mrblr;
+ __be32 rstate;
+ __be32 rbase;
+ __be16 rbdstat;
+ __be16 rbdlen;
+ __be32 rdptr;
+ __be32 tstate;
+ __be32 tbase;
+ __be16 tbdstat;
+ __be16 tbdlen;
+ __be32 tdptr;
+ __be32 rbptr;
+ __be32 tbptr;
+ __be32 rcrc;
+ __be32 res1;
+ __be32 tcrc;
+ __be32 res2;
+ __be32 res3;
+ __be32 c_mask;
+ __be32 c_pres;
+ __be16 disfc;
+ __be16 crcec;
+ __be16 abtsc;
+ __be16 nmarc;
+ __be32 max_cnt;
+ __be16 mflr;
+ __be16 rfthr;
+ __be16 rfcnt;
+ __be16 hmask;
+ __be16 haddr1;
+ __be16 haddr2;
+ __be16 haddr3;
+ __be16 haddr4;
+ __be16 ts_tmp;
+ __be16 tmp_mb;
+};
+
+struct ucc_hdlc_private {
+ struct ucc_tdm *utdm;
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast_private *uccf;
+ struct device *dev;
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct ucc_fast __iomem *uf_regs; /* UCC Fast registers */
+ struct ucc_hdlc_param __iomem *ucc_pram;
+ u16 tsa;
+ bool hdlc_busy;
+ bool loopback;
+
+ u8 *tx_buffer;
+ u8 *rx_buffer;
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_rx_addr;
+
+ struct qe_bd *tx_bd_base;
+ struct qe_bd *rx_bd_base;
+ dma_addr_t dma_tx_bd;
+ dma_addr_t dma_rx_bd;
+ struct qe_bd *curtx_bd;
+ struct qe_bd *currx_bd;
+ struct qe_bd *dirty_tx;
+ u16 currx_bdnum;
+
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ u16 skb_curtx;
+ u16 skb_currx;
+ unsigned short skb_dirtytx;
+
+ unsigned short tx_ring_size;
+ unsigned short rx_ring_size;
+ u32 ucc_pram_offset;
+
+ unsigned short encoding;
+ unsigned short parity;
+ u32 clocking;
+ spinlock_t lock; /* lock for Tx BD and Tx buffer */
+#ifdef CONFIG_PM
+ struct ucc_hdlc_param *ucc_pram_bak;
+ u32 gumr;
+ u8 guemr;
+ u32 cmxsi1cr_l, cmxsi1cr_h;
+ u32 cmxsi1syr;
+ u32 cmxucr[4];
+#endif
+};
+
+#define TX_BD_RING_LEN 0x10
+#define RX_BD_RING_LEN 0x20
+#define RX_CLEAN_MAX 0x10
+#define NUM_OF_BUF 4
+#define MAX_RX_BUF_LENGTH (48 * 0x20)
+#define MAX_FRAME_LENGTH (MAX_RX_BUF_LENGTH + 8)
+#define ALIGNMENT_OF_UCC_HDLC_PRAM 64
+#define SI_BANK_SIZE 128
+#define MAX_HDLC_NUM 4
+#define HDLC_HEAD_LEN 2
+#define HDLC_CRC_SIZE 2
+#define TX_RING_MOD_MASK(size) (size - 1)
+#define RX_RING_MOD_MASK(size) (size - 1)
+
+#define HDLC_HEAD_MASK 0x0000
+#define DEFAULT_HDLC_HEAD 0xff44
+#define DEFAULT_ADDR_MASK 0x00ff
+#define DEFAULT_HDLC_ADDR 0x00ff
+
+#define BMR_GBL 0x20000000
+#define BMR_BIG_ENDIAN 0x10000000
+#define CRC_16BIT_MASK 0x0000F0B8
+#define CRC_16BIT_PRES 0x0000FFFF
+#define DEFAULT_RFTHR 1
+
+#define DEFAULT_PPP_HEAD 0xff03
+
+#endif
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index 20978f2..73a2e08 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -22,7 +22,7 @@ config UCC_SLOW
config UCC_FAST
bool
- default y if UCC_GETH
+ default y if UCC_GETH || QE_TDM
help
This option provides qe_lib support to UCC fast
protocols: HDLC, Ethernet, ATM, transparent
@@ -31,6 +31,10 @@ config UCC
bool
default y if UCC_FAST || UCC_SLOW
+config QE_TDM
+ bool
+ default y if FSL_UCC_HDLC
+
config QE_USB
bool
default y if USB_FSL_QE
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
index ffac541..2031d38 100644
--- a/drivers/soc/fsl/qe/Makefile
+++ b/drivers/soc/fsl/qe/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_CPM) += qe_common.o
obj-$(CONFIG_UCC) += ucc.o
obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
obj-$(CONFIG_UCC_FAST) += ucc_fast.o
+obj-$(CONFIG_QE_TDM) += qe_tdm.o
obj-$(CONFIG_QE_USB) += usb.o
obj-$(CONFIG_QE_GPIO) += gpio.o
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 709fc63..7026507 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -239,6 +239,12 @@ enum qe_clock qe_clock_source(const char *source)
if (strcasecmp(source, "none") == 0)
return QE_CLK_NONE;
+ if (strcmp(source, "tsync_pin") == 0)
+ return QE_TSYNC_PIN;
+
+ if (strcmp(source, "rsync_pin") == 0)
+ return QE_RSYNC_PIN;
+
if (strncasecmp(source, "brg", 3) == 0) {
i = simple_strtoul(source + 3, NULL, 10);
if ((i >= 1) && (i <= 16))
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
new file mode 100644
index 0000000..5e48b14
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * Description:
+ * QE TDM API Set - TDM specific routines implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <soc/fsl/qe/qe_tdm.h>
+
+static int set_tdm_framer(const char *tdm_framer_type)
+{
+ if (strcmp(tdm_framer_type, "e1") == 0)
+ return TDM_FRAMER_E1;
+ else if (strcmp(tdm_framer_type, "t1") == 0)
+ return TDM_FRAMER_T1;
+ else
+ return -EINVAL;
+}
+
+static void set_si_param(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si_mode_info *si_info = &ut_info->si_info;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK) {
+ si_info->simr_crt = 1;
+ si_info->simr_rfsd = 0;
+ }
+}
+
+int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
+ struct ucc_tdm_info *ut_info)
+{
+ const char *sprop;
+ int ret = 0;
+ u32 val;
+ struct resource *res;
+ struct device_node *np2;
+ static int siram_init_flag;
+ struct platform_device *pdev;
+
+ sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.rx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.rx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.rx_sync > QE_RSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ sprop = of_get_property(np, "fsl,tx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.tx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.tx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.tx_sync > QE_TSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "fsl,tx-timeslot-mask", 0, &val);
+ if (ret) {
+ pr_err("QE-TDM: Invalid tx-timeslot-mask property\n");
+ return -EINVAL;
+ }
+ utdm->tx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,rx-timeslot-mask", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: Invalid rx-timeslot-mask property\n");
+ return ret;
+ }
+ utdm->rx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,tdm-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No fsl,tdm-id property for this UCC\n");
+ return ret;
+ }
+ utdm->tdm_port = val;
+ ut_info->uf_info.tdm_num = utdm->tdm_port;
+
+ if (of_get_property(np, "fsl,tdm-internal-loopback", NULL))
+ utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
+ else
+ utdm->tdm_mode = TDM_NORMAL;
+
+ sprop = of_get_property(np, "fsl,tdm-framer-type", NULL);
+ if (!sprop) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No tdm-framer-type property for UCC\n");
+ return ret;
+ }
+ ret = set_tdm_framer(sprop);
+ if (ret < 0)
+ return -EINVAL;
+ utdm->tdm_framer_type = ret;
+
+ ret = of_property_read_u32_index(np, "fsl,siram-entry-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No siram entry id for UCC\n");
+ return ret;
+ }
+ utdm->siram_entry_id = val;
+
+ set_si_param(utdm, ut_info);
+
+ np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
+ if (!np2)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(np2);
+ if (!pdev) {
+ pr_err("%s: failed to lookup pdev\n", np2->name);
+ of_node_put(np2);
+ return -EINVAL;
+ }
+
+ of_node_put(np2);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(utdm->si_regs)) {
+ ret = PTR_ERR(utdm->si_regs);
+ goto err_miss_siram_property;
+ }
+
+ np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
+ if (!np2) {
+ ret = -EINVAL;
+ goto err_miss_siram_property;
+ }
+
+ pdev = of_find_device_by_node(np2);
+ if (!pdev) {
+ ret = -EINVAL;
+ pr_err("%s: failed to lookup pdev\n", np2->name);
+ of_node_put(np2);
+ goto err_miss_siram_property;
+ }
+
+ of_node_put(np2);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ utdm->siram = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(utdm->siram)) {
+ ret = PTR_ERR(utdm->siram);
+ goto err_miss_siram_property;
+ }
+
+ if (siram_init_flag == 0) {
+ memset_io(utdm->siram, 0, res->end - res->start + 1);
+ siram_init_flag = 1;
+ }
+
+ return ret;
+
+err_miss_siram_property:
+ devm_iounmap(&pdev->dev, utdm->si_regs);
+ return ret;
+}
+
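+/* Program the SI RAM for this TDM port: one entry per timeslot, routed to
+ * the UCC (valid) or left idle (closed) according to the Rx/Tx timeslot
+ * masks, with the last entry flagged, then configure the matching SIxMR
+ * register.
+ */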
+void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si1 __iomem *si_regs;
+ u16 __iomem *siram;
+ u16 siram_entry_valid;
+ u16 siram_entry_closed;
+ u16 ucc_num;
+ u8 csel;
+ u16 sixmr;
+ u16 tdm_port;
+ u32 siram_entry_id;
+ u32 mask;
+ int i;
+
+ si_regs = utdm->si_regs;
+ siram = utdm->siram;
+ ucc_num = ut_info->uf_info.ucc_num;
+ tdm_port = utdm->tdm_port;
+ siram_entry_id = utdm->siram_entry_id;
+
+ if (utdm->tdm_framer_type == TDM_FRAMER_T1)
+ utdm->num_of_ts = 24;
+ if (utdm->tdm_framer_type == TDM_FRAMER_E1)
+ utdm->num_of_ts = 32;
+
+ /* set siram table */
+ csel = (ucc_num < 4) ? ucc_num + 9 : ucc_num - 3;
+
+ siram_entry_valid = SIR_CSEL(csel) | SIR_BYTE | SIR_CNT(0);
+ siram_entry_closed = SIR_IDLE | SIR_BYTE | SIR_CNT(0);
+
+ for (i = 0; i < utdm->num_of_ts; i++) {
+ mask = 0x01 << i;
+
+ if (utdm->tx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + i]);
+
+ if (utdm->rx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ }
+
+ setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+ setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+
+ /* Set SIxMR register */
+ sixmr = SIMR_SAD(siram_entry_id);
+
+ sixmr &= ~SIMR_SDM_MASK;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK)
+ sixmr |= SIMR_SDM_INTERNAL_LOOPBACK;
+ else
+ sixmr |= SIMR_SDM_NORMAL;
+
+ sixmr |= SIMR_RFSD(ut_info->si_info.simr_rfsd) |
+ SIMR_TFSD(ut_info->si_info.simr_tfsd);
+
+ if (ut_info->si_info.simr_crt)
+ sixmr |= SIMR_CRT;
+ if (ut_info->si_info.simr_sl)
+ sixmr |= SIMR_SL;
+ if (ut_info->si_info.simr_ce)
+ sixmr |= SIMR_CE;
+ if (ut_info->si_info.simr_fe)
+ sixmr |= SIMR_FE;
+ if (ut_info->si_info.simr_gm)
+ sixmr |= SIMR_GM;
+
+ switch (tdm_port) {
+ case 0:
+ iowrite16be(sixmr, &si_regs->sixmr1[0]);
+ break;
+ case 1:
+ iowrite16be(sixmr, &si_regs->sixmr1[1]);
+ break;
+ case 2:
+ iowrite16be(sixmr, &si_regs->sixmr1[2]);
+ break;
+ case 3:
+ iowrite16be(sixmr, &si_regs->sixmr1[3]);
+ break;
+ default:
+ pr_err("QE-TDM: can not find tdm sixmr reg\n");
+ break;
+ }
+}
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index b59d335..c646d87 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -25,6 +25,12 @@
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/ucc.h>
+#define UCC_TDM_NUM 8
+#define RX_SYNC_SHIFT_BASE 30
+#define TX_SYNC_SHIFT_BASE 14
+#define RX_CLK_SHIFT_BASE 28
+#define TX_CLK_SHIFT_BASE 12
+
int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
{
unsigned long flags;
@@ -210,3 +216,447 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
return 0;
}
+
+static int ucc_get_tdm_common_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ /*
+ * for TDM[0, 1, 2, 3], TX and RX use common
+ * clock source BRG3,4 and CLK1,2
+ * for TDM[4, 5, 6, 7], TX and RX use common
+ * clock source BRG12,13 and CLK23,24
+ */
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG3:
+ clock_bits = 1;
+ break;
+ case QE_BRG4:
+ clock_bits = 2;
+ break;
+ case QE_CLK1:
+ clock_bits = 4;
+ break;
+ case QE_CLK2:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG12:
+ clock_bits = 1;
+ break;
+ case QE_BRG13:
+ clock_bits = 2;
+ break;
+ case QE_CLK23:
+ clock_bits = 4;
+ break;
+ case QE_CLK24:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_rx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK3:
+ clock_bits = 6;
+ break;
+ case QE_CLK8:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK5:
+ clock_bits = 6;
+ break;
+ case QE_CLK10:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK7:
+ clock_bits = 6;
+ break;
+ case QE_CLK12:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK9:
+ clock_bits = 6;
+ break;
+ case QE_CLK14:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK11:
+ clock_bits = 6;
+ break;
+ case QE_CLK16:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK13:
+ clock_bits = 6;
+ break;
+ case QE_CLK18:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK15:
+ clock_bits = 6;
+ break;
+ case QE_CLK20:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK17:
+ clock_bits = 6;
+ break;
+ case QE_CLK22:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_tx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK4:
+ clock_bits = 6;
+ break;
+ case QE_CLK9:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK6:
+ clock_bits = 6;
+ break;
+ case QE_CLK11:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK8:
+ clock_bits = 6;
+ break;
+ case QE_CLK13:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK10:
+ clock_bits = 6;
+ break;
+ case QE_CLK15:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK12:
+ clock_bits = 6;
+ break;
+ case QE_CLK17:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK14:
+ clock_bits = 6;
+ break;
+ case QE_CLK19:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK16:
+ clock_bits = 6;
+ break;
+ case QE_CLK21:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK18:
+ clock_bits = 6;
+ break;
+ case QE_CLK3:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+/* tdm_num: TDM A-H port num is 0-7 */
+static int ucc_get_tdm_rxtx_clk(enum comm_dir mode, u32 tdm_num,
+ enum qe_clock clock)
+{
+ int clock_bits;
+
+ clock_bits = ucc_get_tdm_common_clk(tdm_num, clock);
+ if (clock_bits > 0)
+ return clock_bits;
+ if (mode == COMM_DIR_RX)
+ clock_bits = ucc_get_tdm_rx_clk(tdm_num, clock);
+ if (mode == COMM_DIR_TX)
+ clock_bits = ucc_get_tdm_tx_clk(tdm_num, clock);
+ return clock_bits;
+}
+
+static u32 ucc_get_tdm_clk_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_CLK_SHIFT_BASE : TX_CLK_SHIFT_BASE;
+ if (tdm_num < 4)
+ shift -= tdm_num * 4;
+ else
+ shift -= (tdm_num - 4) * 4;
+
+ return shift;
+}
+
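+/* Route the Rx or Tx clock of a TDM port: translate the qe_clock source to
+ * its CMX encoding and write it into the per-TDM field of CMXSI1CR_L/H.
+ */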
+int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int clock_bits;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+ __be32 __iomem *cmxs1cr;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num >= UCC_TDM_NUM)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ clock_bits = ucc_get_tdm_rxtx_clk(mode, tdm_num, clock);
+ if (clock_bits < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_clk_shift(mode, tdm_num);
+
+ cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
+ &qe_mux_reg->cmxsi1cr_h;
+
+ qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
+
+static int ucc_get_tdm_sync_source(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source = -EINVAL;
+
+ if (mode == COMM_DIR_RX && clock == QE_RSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+ if (mode == COMM_DIR_TX && clock == QE_TSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG10:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG11:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG14:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG15:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return source;
+}
+
+static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
+ shift -= tdm_num * 2;
+
+ return shift;
+}
+
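+/* Route the Rx or Tx frame sync of a TDM port by programming its two-bit
+ * source field in the CMXSI1SYR register.
+ */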
+int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source;
+ u32 shift;
+ struct qe_mux *qe_mux_reg;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num >= UCC_TDM_NUM)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ source = ucc_get_tdm_sync_source(tdm_num, clock, mode);
+ if (source < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_sync_shift(mode, tdm_num);
+
+ qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
+ QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ source << shift);
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
index a768931..83d8d16 100644
--- a/drivers/soc/fsl/qe/ucc_fast.c
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -327,6 +327,42 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
ucc_fast_free(uccf);
return -EINVAL;
}
+ } else {
+ /* tdm Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Rx sync clock routing */
+ if ((uf_info->rx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX sync clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx sync clock routing */
+ if ((uf_info->tx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX sync clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
}
/* Set interrupt mask register at UCC level. */
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f744eeb..1d3e45f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -301,6 +301,32 @@ static bool vhost_can_busy_poll(struct vhost_dev *dev,
!vhost_has_work(dev);
}
+static void vhost_net_disable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ if (!vq->private_data)
+ return;
+ vhost_poll_stop(poll);
+}
+
+static int vhost_net_enable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ struct socket *sock;
+
+ sock = vq->private_data;
+ if (!sock)
+ return 0;
+
+ return vhost_poll_start(poll, sock->file);
+}
+
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
@@ -613,6 +639,7 @@ static void handle_rx(struct vhost_net *net)
if (!sock)
goto out;
vhost_disable_notify(&net->dev, vq);
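+ /* Stop polling the socket while the ring is processed; polling is
+ * re-enabled once the ring runs dry, while error and over-budget
+ * paths leave it off until the next kick or queued work.
+ */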
+ vhost_net_disable_vq(net, vq);
vhost_hlen = nvq->vhost_hlen;
sock_hlen = nvq->sock_hlen;
@@ -629,7 +656,7 @@ static void handle_rx(struct vhost_net *net)
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
- break;
+ goto out;
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
@@ -648,7 +675,7 @@ static void handle_rx(struct vhost_net *net)
}
/* Nothing new? Wait for eventfd to tell us
* they refilled. */
- break;
+ goto out;
}
/* We don't need to be notified again. */
iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
@@ -676,7 +703,7 @@ static void handle_rx(struct vhost_net *net)
&fixup) != sizeof(hdr)) {
vq_err(vq, "Unable to write vnet_hdr "
"at addr %p\n", vq->iov->iov_base);
- break;
+ goto out;
}
} else {
/* Header came from socket; we'll need to patch
@@ -692,7 +719,7 @@ static void handle_rx(struct vhost_net *net)
&fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
- break;
+ goto out;
}
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
headcount);
@@ -701,9 +728,10 @@ static void handle_rx(struct vhost_net *net)
total_len += vhost_len;
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
- break;
+ goto out;
}
}
+ vhost_net_enable_vq(net, vq);
out:
mutex_unlock(&vq->mutex);
}
@@ -782,32 +810,6 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return 0;
}
-static void vhost_net_disable_vq(struct vhost_net *n,
- struct vhost_virtqueue *vq)
-{
- struct vhost_net_virtqueue *nvq =
- container_of(vq, struct vhost_net_virtqueue, vq);
- struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- if (!vq->private_data)
- return;
- vhost_poll_stop(poll);
-}
-
-static int vhost_net_enable_vq(struct vhost_net *n,
- struct vhost_virtqueue *vq)
-{
- struct vhost_net_virtqueue *nvq =
- container_of(vq, struct vhost_net_virtqueue, vq);
- struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- struct socket *sock;
-
- sock = vq->private_data;
- if (!sock)
- return 0;
-
- return vhost_poll_start(poll, sock->file);
-}
-
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 288fac5..4d4bb49 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -543,6 +543,11 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);
struct fwnode_handle;
+static inline bool acpi_dev_found(const char *hid)
+{
+ return false;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
@@ -654,6 +659,14 @@ static inline bool acpi_driver_match_device(struct device *dev,
return false;
}
+static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
+ const u8 *uuid,
+ int rev, int func,
+ union acpi_object *argv4)
+{
+ return NULL;
+}
+
static inline int acpi_device_uevent_modalias(struct device *dev,
struct kobj_uevent_env *env)
{
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5c91b0b..c6dbcd8 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -283,6 +283,8 @@ struct tcp6_timewait_sock {
};
#if IS_ENABLED(CONFIG_IPV6)
+bool ipv6_mod_enabled(void);
+
static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
{
return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
@@ -326,6 +328,11 @@ static inline int inet_v6_ipv6only(const struct sock *sk)
#define ipv6_only_sock(sk) 0
#define ipv6_sk_rxinfo(sk) 0
+static inline bool ipv6_mod_enabled(void)
+{
+ return false;
+}
+
static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
{
return NULL;
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index aa7b240..9c6c8ef 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -53,8 +53,9 @@ enum {
* headers in software.
*/
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
+ NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
+ NETIF_F_GSO_SCTP_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -128,6 +129,7 @@ enum {
#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID)
#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL)
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
+#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
@@ -166,7 +168,8 @@ enum {
NETIF_F_FSO)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \
+ NETIF_F_GSO_SCTP)
/*
* If one device supports one of these features, then enable them
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f45929c..4f234b1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1594,7 +1594,8 @@ enum netdev_priv_flags {
* @phydev: Physical device may attach itself
* for hardware timestamping
*
- * @qdisc_tx_busylock: XXX: need comments on this one
+ * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
+ * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
@@ -1862,6 +1863,7 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
+ struct lock_class_key *qdisc_running_key;
bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1944,6 +1946,23 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
f(dev, &dev->_tx[i], arg);
}
+#define netdev_lockdep_set_classes(dev) \
+{ \
+ static struct lock_class_key qdisc_tx_busylock_key; \
+ static struct lock_class_key qdisc_running_key; \
+ static struct lock_class_key qdisc_xmit_lock_key; \
+ static struct lock_class_key dev_addr_list_lock_key; \
+ unsigned int i; \
+ \
+ (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
+ (dev)->qdisc_running_key = &qdisc_running_key; \
+ lockdep_set_class(&(dev)->addr_list_lock, \
+ &dev_addr_list_lock_key); \
+ for (i = 0; i < (dev)->num_tx_queues; i++) \
+ lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
+ &qdisc_xmit_lock_key); \
+}
+
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv);
@@ -4012,6 +4031,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
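
net_gso_ok() depends on every SKB_GSO_* flag being exactly the matching NETIF_F_GSO_* feature bit shifted down by NETIF_F_GSO_SHIFT, and the hunk above extends that compile-time check to the new SCTP bit. A standalone sketch of the same invariant-checking idea, with illustrative values and names rather than the kernel's:

#include <stdio.h>

/* Illustrative stand-ins for the two enumerations that must stay in sync. */
#define DEMO_GSO_SHIFT          16
#define DEMO_F_GSO_SCTP         (1u << (15 + DEMO_GSO_SHIFT))  /* feature bit  */
#define DEMO_SKB_GSO_SCTP       (1u << 15)                      /* skb gso_type */

/* Compile-time guard: adding a feature bit without the matching gso_type
 * bit (or vice versa) breaks the build instead of silently mis-translating
 * features at runtime.
 */
_Static_assert(DEMO_SKB_GSO_SCTP == (DEMO_F_GSO_SCTP >> DEMO_GSO_SHIFT),
               "gso_type bit out of sync with feature bit");

static int demo_gso_ok(unsigned int features, unsigned int gso_type)
{
        unsigned int feature = gso_type << DEMO_GSO_SHIFT;

        return (features & feature) == feature;
}

int main(void)
{
        printf("SCTP GSO supported: %d\n",
               demo_gso_ok(DEMO_F_GSO_SCTP, DEMO_SKB_GSO_SCTP));
        return 0;
}
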
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h
new file mode 100644
index 0000000..69d279c
--- /dev/null
+++ b/include/linux/platform_data/b53.h
@@ -0,0 +1,33 @@
+/*
+ * B53 platform data
+ *
+ * Copyright (C) 2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_H
+#define __B53_H
+
+#include <linux/kernel.h>
+
+struct b53_platform_data {
+ u32 chip_id;
+ u16 enabled_ports;
+
+ /* only used by MMAP'd driver */
+ unsigned big_endian:1;
+ void __iomem *regs;
+};
+
+#endif
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 3f14c7e..40c0ada 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -12,10 +12,21 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
#define X_FINAL_CLEANUP_AGG_INT 1
+#define NUM_OF_GLOBAL_QUEUES 128
+
+/* Queue Zone sizes in bytes */
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 0
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 8
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 7
-#define FW_REVISION_VERSION 3
+#define FW_MINOR_VERSION 10
+#define FW_REVISION_VERSION 5
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -97,45 +108,86 @@
#define DQ_XCM_AGG_VAL_SEL_REG6 7
/* XCM agg val selection */
-#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD2
-#define DQ_XCM_ETH_TX_BD_CONS_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_CORE_TX_BD_CONS_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ETH_TX_BD_PROD_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_TX_BD_PROD_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_SPQ_PROD_CMD \
- DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* UCM agg val selection (HW) */
+#define DQ_UCM_AGG_VAL_SEL_WORD0 0
+#define DQ_UCM_AGG_VAL_SEL_WORD1 1
+#define DQ_UCM_AGG_VAL_SEL_WORD2 2
+#define DQ_UCM_AGG_VAL_SEL_WORD3 3
+#define DQ_UCM_AGG_VAL_SEL_REG0 4
+#define DQ_UCM_AGG_VAL_SEL_REG1 5
+#define DQ_UCM_AGG_VAL_SEL_REG2 6
+#define DQ_UCM_AGG_VAL_SEL_REG3 7
+
+/* UCM agg val selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2
+#define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3
+#define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0
+#define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2
+
+/* TCM agg val selection (HW) */
+#define DQ_TCM_AGG_VAL_SEL_WORD0 0
+#define DQ_TCM_AGG_VAL_SEL_WORD1 1
+#define DQ_TCM_AGG_VAL_SEL_WORD2 2
+#define DQ_TCM_AGG_VAL_SEL_WORD3 3
+#define DQ_TCM_AGG_VAL_SEL_REG1 4
+#define DQ_TCM_AGG_VAL_SEL_REG2 5
+#define DQ_TCM_AGG_VAL_SEL_REG6 6
+#define DQ_TCM_AGG_VAL_SEL_REG9 7
+
+/* TCM agg val selection (FW) */
+#define DQ_TCM_L2B_BD_PROD_CMD \
+ DQ_TCM_AGG_VAL_SEL_WORD1
+#define DQ_TCM_ROCE_RQ_PROD_CMD \
+ DQ_TCM_AGG_VAL_SEL_WORD0
/* XCM agg counter flag selection */
-#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
-#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
-#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
-#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
-#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
-#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
-#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
-#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
/* XCM agg counter flag selection */
-#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
- DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/* UCM agg counter flag selection (HW) */
+#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_UCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_UCM_AGG_FLG_SHIFT_CF3 2
+#define DQ_UCM_AGG_FLG_SHIFT_CF4 3
+#define DQ_UCM_AGG_FLG_SHIFT_CF5 4
+#define DQ_UCM_AGG_FLG_SHIFT_CF6 5
+#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6
+#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
+
+/* UCM agg counter flag selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+#define DQ_REGION_SHIFT (12)
+
+/* DPM */
+#define DQ_DPM_WQE_BUFF_SIZE (320)
+
+/* Conn type ranges */
+#define DQ_CONN_TYPE_RANGE_SHIFT (4)
/*****************/
/* QM CONSTANTS */
@@ -282,8 +334,6 @@
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
@@ -342,6 +392,9 @@
#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
@@ -379,6 +432,38 @@ struct async_data {
u8 fw_debug_param;
};
+struct coalescing_timeset {
+ u8 value;
+#define COALESCING_TIMESET_TIMESET_MASK 0x7F
+#define COALESCING_TIMESET_TIMESET_SHIFT 0
+#define COALESCING_TIMESET_VALID_MASK 0x1
+#define COALESCING_TIMESET_VALID_SHIFT 7
+};
+
+struct common_prs_pf_msg_info {
+ __le32 value;
+#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1
+#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1
+#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3
+#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF
+#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4
+};
+
+struct common_queue_zone {
+ __le16 ring_drv_data_consumer;
+ __le16 reserved;
+};
+
+struct eth_rx_prod_data {
+ __le16 bd_prod;
+ __le16 cqe_prod;
+};
+
struct regpair {
__le32 lo;
__le32 hi;
@@ -388,11 +473,23 @@ struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
+struct malicious_vf_eqe_data {
+ u8 vf_id;
+ u8 err_id;
+ __le16 reserved[3];
+};
+
+struct initial_cleanup_eqe_data {
+ u8 vf_id;
+ u8 reserved[7];
+};
+
/* Event Data Union */
union event_ring_data {
- u8 bytes[8];
- struct vf_pf_channel_eqe_data vf_pf_channel;
- struct async_data async_info;
+ u8 bytes[8];
+ struct vf_pf_channel_eqe_data vf_pf_channel;
+ struct malicious_vf_eqe_data malicious_vf;
+ struct initial_cleanup_eqe_data vf_init_cleanup;
};
/* Event Ring Entry */
@@ -420,9 +517,9 @@ enum mf_mode {
/* Per-protocol connection types */
enum protocol_type {
- PROTOCOLID_RESERVED1,
+ PROTOCOLID_ISCSI,
PROTOCOLID_RESERVED2,
- PROTOCOLID_RESERVED3,
+ PROTOCOLID_ROCE,
PROTOCOLID_CORE,
PROTOCOLID_ETH,
PROTOCOLID_RESERVED4,
@@ -433,6 +530,16 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+struct ustorm_eth_queue_zone {
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[3];
+};
+
+struct ustorm_queue_zone {
+ struct ustorm_eth_queue_zone eth;
+ struct common_queue_zone common;
+};
+
/* status block structure */
struct cau_pi_entry {
u32 prod;
@@ -588,7 +695,10 @@ struct parsing_and_err_flags {
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
-/* Concrete Function ID. */
+struct pb_context {
+ __le32 crc[4];
+};
+
struct pxp_concrete_fid {
__le16 fid;
#define PXP_CONCRETE_FID_PFID_MASK 0xF
@@ -655,6 +765,72 @@ struct pxp_ptt_entry {
};
/* RSS hash type */
+struct rdif_task_context {
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
+ u8 flags0;
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
+ u8 partial_dif_data[7];
+ __le16 partial_crc_value;
+ __le16 partial_checksum_value;
+ __le32 offset_in_io;
+ __le16 flags1;
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
+ __le16 state;
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+ __le32 reserved2;
+};
+
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
@@ -683,19 +859,122 @@ struct status_block {
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
-struct tunnel_parsing_flags {
- u8 flags;
-#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
-#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+struct tdif_task_context {
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
+ __le16 partial_crc_valueB;
+ __le16 partial_checksum_valueB;
+ __le16 stateB;
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+ u8 reserved1;
+ u8 flags0;
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+ __le32 flags1;
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
+#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offset_in_iob;
+ __le16 partial_crc_value_a;
+ __le16 partial_checksum_valuea_;
+ __le32 offset_in_ioa;
+ u8 partial_dif_data_a[8];
+ u8 partial_dif_data_b[8];
+};
+
+struct timers_context {
+ __le32 logical_client0;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
+ __le32 logical_client1;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+ __le32 logical_client2;
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 30
+ __le32 host_expiration_fields;
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
};
#endif /* __COMMON_HSI__ */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 092cb0c..b5ebc69 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -12,6 +12,8 @@
/********************/
/* ETH FW CONSTANTS */
/********************/
+#define ETH_HSI_VER_MAJOR 3
+#define ETH_HSI_VER_MINOR 0
#define ETH_CACHE_LINE_SIZE 64
#define ETH_MAX_RAMROD_PER_CON 8
@@ -57,19 +59,6 @@
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
-/* Queue Zone sizes */
-#define TSTORM_QZONE_SIZE 0
-#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone)
-#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone)
-#define XSTORM_QZONE_SIZE 0
-#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone)
-#define PSTORM_QZONE_SIZE 0
-
-/* Interrupt coalescing TimeSet */
-struct coalescing_timeset {
- u8 timeset;
- u8 valid;
-};
struct eth_tx_1st_bd_flags {
u8 bitfields;
@@ -97,12 +86,12 @@ struct eth_tx_data_1st_bd {
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};
/* The parsing information data for the second tx bd of a given packet. */
@@ -136,28 +125,51 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
+struct eth_fast_path_cqe_fw_debug {
+ u8 reserved0;
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/* tunneling parsing flags */
+struct eth_tunnel_parsing_flags {
+ u8 flags;
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+};
+
/* Regular ETH Rx FP CQE. */
struct eth_fast_path_rx_reg_cqe {
- u8 type;
- u8 bitfields;
+ u8 type;
+ u8 bitfields;
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
- __le16 pkt_len;
- struct parsing_and_err_flags pars_flags;
- __le16 vlan_tag;
- __le32 rss_hash;
- __le16 len_on_first_bd;
- u8 placement_offset;
- struct tunnel_parsing_flags tunnel_pars_flags;
- u8 bd_num;
- u8 reserved[7];
- u32 fw_debug;
- u8 reserved1[3];
- u8 flags;
+ __le16 pkt_len;
+ struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag;
+ __le32 rss_hash;
+ __le16 len_on_first_bd;
+ u8 placement_offset;
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
+ u8 bd_num;
+ u8 reserved[7];
+ struct eth_fast_path_cqe_fw_debug fw_debug;
+ u8 reserved1[3];
+ u8 flags;
#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
@@ -207,11 +219,11 @@ struct eth_fast_path_rx_tpa_start_cqe {
__le32 rss_hash;
__le16 len_on_first_bd;
u8 placement_offset;
- struct tunnel_parsing_flags tunnel_pars_flags;
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index;
u8 header_len;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
- u32 fw_debug;
+ struct eth_fast_path_cqe_fw_debug fw_debug;
};
/* The L4 pseudo checksum mode for Ethernet */
@@ -264,12 +276,25 @@ enum eth_rx_cqe_type {
MAX_ETH_RX_CQE_TYPE
};
-/* ETH Rx producers data */
-struct eth_rx_prod_data {
- __le16 bd_prod;
- __le16 cqe_prod;
- __le16 reserved;
- __le16 reserved1;
+enum eth_rx_tunn_type {
+ ETH_RX_NO_TUNN,
+ ETH_RX_TUNN_GENEVE,
+ ETH_RX_TUNN_GRE,
+ ETH_RX_TUNN_VXLAN,
+ MAX_ETH_RX_TUNN_TYPE
+};
+
+/* Aggregation end reason. */
+enum eth_tpa_end_reason {
+ ETH_AGG_END_UNUSED,
+ ETH_AGG_END_SP_UPDATE,
+ ETH_AGG_END_MAX_LEN,
+ ETH_AGG_END_LAST_SEG,
+ ETH_AGG_END_TIMEOUT,
+ ETH_AGG_END_NOT_CONSISTENT,
+ ETH_AGG_END_OUT_OF_ORDER,
+ ETH_AGG_END_NON_TPA_SEG,
+ MAX_ETH_TPA_END_REASON
};
/* The first tx bd of a given packet */
@@ -337,21 +362,18 @@ union eth_tx_bd_types {
};
/* Mstorm Queue Zone */
-struct mstorm_eth_queue_zone {
- struct eth_rx_prod_data rx_producers;
- __le32 reserved[2];
-};
-
-/* Ustorm Queue Zone */
-struct ustorm_eth_queue_zone {
- struct coalescing_timeset int_coalescing_timeset;
- __le16 reserved[3];
+enum eth_tx_tunn_type {
+ ETH_TX_TUNN_GENEVE,
+ ETH_TX_TUNN_TTAG,
+ ETH_TX_TUNN_GRE,
+ ETH_TX_TUNN_VXLAN,
+ MAX_ETH_TX_TUNN_TYPE
};
/* Ystorm Queue Zone */
-struct ystorm_eth_queue_zone {
- struct coalescing_timeset int_coalescing_timeset;
- __le16 reserved[3];
+struct xstorm_eth_queue_zone {
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[7];
};
/* ETH doorbell data */
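
The eth_tx_data_1st_bd hunk earlier in this file's diff redefines the 16-bit bitfields word of the first TX buffer descriptor: bit 0 is the tunnel flag, bit 1 stays reserved, and bits 2..15 now carry the packet length. A standalone sketch of packing and unpacking that word with the masks shown above; little-endian conversion is omitted and the helper names are local to the example.

#include <stdio.h>
#include <stdint.h>

#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK       0x1
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT      0
#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK         0x3FFF
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT        2

/* Pack the first-BD bitfields word: tunnel flag in bit 0,
 * 14-bit packet length in bits 2..15 (bit 1 left reserved/zero).
 */
static uint16_t pack_1st_bd_bitfields(int tunn, uint16_t pkt_len)
{
        uint16_t w = 0;

        w |= (tunn & ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK)
             << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
        w |= (pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
             << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
        return w;
}

int main(void)
{
        uint16_t w = pack_1st_bd_bitfields(1, 1514);

        printf("bitfields=0x%04x pkt_len=%u tunn=%u\n", w,
               (w >> ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT) &
               ETH_TX_DATA_1ST_BD_PKT_LEN_MASK,
               w & ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK);
        return 0;
}
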
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
new file mode 100644
index 0000000..b3c0feb
--- /dev/null
+++ b/include/linux/qed/iscsi_common.h
@@ -0,0 +1,1439 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ISCSI_COMMON__
+#define __ISCSI_COMMON__
+/**********************/
+/* ISCSI FW CONSTANTS */
+/**********************/
+
+/* iSCSI HSI constants */
+#define ISCSI_DEFAULT_MTU (1500)
+
+/* Current iSCSI HSI version number composed of two fields (16 bit) */
+#define ISCSI_HSI_MAJOR_VERSION (0)
+#define ISCSI_HSI_MINOR_VERSION (0)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_SLOW_PATH_LAYER_CODE (6)
+
+/* CQE completion status */
+#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
+#define ISCSI_EQE_RST_CONN_RCVD (0x1)
+
+/* iSCSI parameter defaults */
+#define ISCSI_DEFAULT_HEADER_DIGEST (0)
+#define ISCSI_DEFAULT_DATA_DIGEST (0)
+#define ISCSI_DEFAULT_INITIAL_R2T (1)
+#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
+#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
+#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
+#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
+
+/* iSCSI parameter limits */
+#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
+#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
+#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
+#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
+
+/* iSCSI reserved params */
+#define ISCSI_ITT_ALL_ONES (0xffffffff)
+#define ISCSI_TTT_ALL_ONES (0xffffffff)
+
+#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
+#define ISCSI_OPTION_2_ON_CHIP_TCP 2
+
+#define ISCSI_INITIATOR_MODE 0
+#define ISCSI_TARGET_MODE 1
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0)
+#define ISCSI_OPCODE_NOP_OUT ( \
+ ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40)
+#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1)
+#define ISCSI_OPCODE_SCSI_CMD ( \
+ ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40)
+#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2)
+#define ISCSI_OPCODE_TMF_REQUEST ( \
+ ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3)
+#define ISCSI_OPCODE_LOGIN_REQUEST ( \
+ ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4)
+#define ISCSI_OPCODE_TEXT_REQUEST ( \
+ ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_DATA_OUT (5)
+#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6)
+#define ISCSI_OPCODE_LOGOUT_REQUEST ( \
+ ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40)
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_NOP_IN (0x20)
+#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
+#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
+#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
+#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
+#define ISCSI_OPCODE_DATA_IN (0x25)
+#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
+#define ISCSI_OPCODE_R2T (0x31)
+#define ISCSI_OPCODE_ASYNC_MSG (0x32)
+#define ISCSI_OPCODE_REJECT (0x3f)
+
+/* iSCSI stages */
+#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
+#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
+#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
+
+/* iSCSI CQE errors */
+#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
+
+struct cqe_error_bitmap {
+ u8 cqe_error_status_bits;
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
+#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
+#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
+};
+
+union cqe_error_status {
+ u8 error_status;
+ struct cqe_error_bitmap error_bits;
+};
+
+struct data_hdr {
+ __le32 data[12];
+};
+
+struct iscsi_async_msg_hdr {
+ __le16 reserved0;
+ u8 flags_attr;
+#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
+#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
+#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 all_ones;
+ __le32 reserved1;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le16 param1_rsrv;
+ u8 async_vcode;
+ u8 async_event;
+ __le16 param3_rsrv;
+ __le16 param2_rsrv;
+ __le32 reserved7;
+};
+
+struct iscsi_sge {
+ struct regpair sge_addr;
+ __le16 sge_len;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct iscsi_cached_sge_ctx {
+ struct iscsi_sge sge;
+ struct regpair reserved;
+ __le32 dsgl_curr_offset[2];
+};
+
+struct iscsi_cmd_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_CMD_HDR_READ_MASK 0x1
+#define ISCSI_CMD_HDR_READ_SHIFT 6
+#define ISCSI_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 expected_transfer_length;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 cdb[4];
+};
+
+struct iscsi_common_hdr {
+ u8 hdr_status;
+ u8 hdr_response;
+ u8 hdr_flags;
+ u8 hdr_first_byte;
+#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
+#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
+#define ISCSI_COMMON_HDR_IMM_MASK 0x1
+#define ISCSI_COMMON_HDR_IMM_SHIFT 6
+#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
+#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
+ __le32 hdr_second_dword;
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
+ __le32 lun_reserved[4];
+ __le32 data[6];
+};
+
+struct iscsi_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le32 initial_ack;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2
+ u8 pbl_page_size_log;
+ u8 pbe_page_size_log;
+ u8 default_cq;
+ __le32 stat_sn;
+};
+
+struct iscsi_slow_path_hdr {
+ u8 op_code;
+ u8 flags;
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
+};
+
+struct iscsi_conn_update_ramrod_params {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ u8 flags;
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4
+ u8 reserved0[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 exp_stat_sn;
+};
+
+struct iscsi_ext_cdb_cmd_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 expected_transfer_length;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ struct iscsi_sge cdb_sge;
+};
+
+struct iscsi_login_req_hdr {
+ u8 version_min;
+ u8 version_max;
+ u8 flags_attr;
+#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
+ __le32 isid_TABC;
+ __le16 tsih;
+ __le16 isid_d;
+ __le32 itt;
+ __le16 reserved1;
+ __le16 cid;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 reserved2[4];
+};
+
+struct iscsi_logout_req_hdr {
+ __le16 reserved0;
+ u8 reason_code;
+ u8 opcode;
+ __le32 reserved1;
+ __le32 reserved2[2];
+ __le32 itt;
+ __le16 reserved3;
+ __le16 cid;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 reserved4[4];
+};
+
+struct iscsi_data_out_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
+ u8 opcode;
+ __le32 reserved2;
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 reserved3;
+ __le32 exp_stat_sn;
+ __le32 reserved4;
+ __le32 data_sn;
+ __le32 buffer_offset;
+ __le32 reserved5;
+};
+
+struct iscsi_data_in_hdr {
+ u8 status_rsvd;
+ u8 reserved1;
+ u8 flags;
+#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
+#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
+#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
+#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
+#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
+#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
+#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
+ u8 opcode;
+ __le32 reserved2;
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 data_sn;
+ __le32 buffer_offset;
+ __le32 residual_count;
+};
+
+struct iscsi_r2t_hdr {
+ u8 reserved0[3];
+ u8 opcode;
+ __le32 reserved2;
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 r2t_sn;
+ __le32 buffer_offset;
+ __le32 desired_data_trns_len;
+};
+
+struct iscsi_nop_out_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
+ u8 opcode;
+ __le32 reserved2;
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 reserved5;
+ __le32 reserved6;
+};
+
+struct iscsi_nop_in_hdr {
+ __le16 reserved0;
+ u8 flags_attr;
+#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 reserved5;
+ __le32 reserved6;
+ __le32 reserved7;
+};
+
+struct iscsi_login_response_hdr {
+ u8 version_active;
+ u8 version_max;
+ u8 flags_attr;
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ __le32 isid_TABC;
+ __le16 tsih;
+ __le16 isid_d;
+ __le32 itt;
+ __le32 reserved1;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le16 reserved2;
+ u8 status_detail;
+ u8 status_class;
+ __le32 reserved4[2];
+};
+
+struct iscsi_logout_response_hdr {
+ u8 reserved1;
+ u8 response;
+ u8 flags;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ __le32 reserved2[2];
+ __le32 itt;
+ __le32 reserved3;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 reserved4;
+ __le16 time2retain;
+ __le16 time2wait;
+ __le32 reserved5[1];
+};
+
+struct iscsi_text_request_hdr {
+ __le16 reserved0;
+ u8 flags_attr;
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
+#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 reserved4[4];
+};
+
+struct iscsi_text_response_hdr {
+ __le16 reserved1;
+ u8 flags;
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 ttt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 reserved4[3];
+};
+
+struct iscsi_tmf_request_hdr {
+ __le16 reserved0;
+ u8 function;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 rtt;
+ __le32 cmd_sn;
+ __le32 exp_stat_sn;
+ __le32 ref_cmd_sn;
+ __le32 exp_data_sn;
+ __le32 reserved4[2];
+};
+
+struct iscsi_tmf_response_hdr {
+ u8 reserved2;
+ u8 hdr_response;
+ u8 hdr_flags;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair reserved0;
+ __le32 itt;
+ __le32 rtt;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 reserved4[3];
+};
+
+struct iscsi_response_hdr {
+ u8 hdr_status;
+ u8 hdr_response;
+ u8 hdr_flags;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 snack_tag;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 exp_data_sn;
+ __le32 bi_residual_count;
+ __le32 residual_count;
+};
+
+struct iscsi_reject_hdr {
+ u8 reserved4;
+ u8 hdr_reason;
+ u8 hdr_flags;
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le32 data_sn;
+ __le32 reserved3[2];
+};
+
+union iscsi_task_hdr {
+ struct iscsi_common_hdr common;
+ struct data_hdr data;
+ struct iscsi_cmd_hdr cmd;
+ struct iscsi_ext_cdb_cmd_hdr ext_cdb_cmd;
+ struct iscsi_login_req_hdr login_req;
+ struct iscsi_logout_req_hdr logout_req;
+ struct iscsi_data_out_hdr data_out;
+ struct iscsi_data_in_hdr data_in;
+ struct iscsi_r2t_hdr r2t;
+ struct iscsi_nop_out_hdr nop_out;
+ struct iscsi_nop_in_hdr nop_in;
+ struct iscsi_login_response_hdr login_response;
+ struct iscsi_logout_response_hdr logout_response;
+ struct iscsi_text_request_hdr text_request;
+ struct iscsi_text_response_hdr text_response;
+ struct iscsi_tmf_request_hdr tmf_request;
+ struct iscsi_tmf_response_hdr tmf_response;
+ struct iscsi_response_hdr response;
+ struct iscsi_reject_hdr reject;
+ struct iscsi_async_msg_hdr async_msg;
+};
+
+struct iscsi_cqe_common {
+ __le16 conn_id;
+ u8 cqe_type;
+ union cqe_error_status error_bitmap;
+ __le32 reserved[3];
+ union iscsi_task_hdr iscsi_hdr;
+};
+
+struct iscsi_cqe_solicited {
+ __le16 conn_id;
+ u8 cqe_type;
+ union cqe_error_status error_bitmap;
+ __le16 itid;
+ u8 task_type;
+ u8 fw_dbg_field;
+ __le32 reserved1[2];
+ union iscsi_task_hdr iscsi_hdr;
+};
+
+struct iscsi_cqe_unsolicited {
+ __le16 conn_id;
+ u8 cqe_type;
+ union cqe_error_status error_bitmap;
+ __le16 reserved0;
+ u8 reserved1;
+ u8 unsol_cqe_type;
+ struct regpair rqe_opaque;
+ union iscsi_task_hdr iscsi_hdr;
+};
+
+union iscsi_cqe {
+ struct iscsi_cqe_common cqe_common;
+ struct iscsi_cqe_solicited cqe_solicited;
+ struct iscsi_cqe_unsolicited cqe_unsolicited;
+};
+
+enum iscsi_cqes_type {
+ ISCSI_CQE_TYPE_SOLICITED = 1,
+ ISCSI_CQE_TYPE_UNSOLICITED,
+ ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE
+ ,
+ ISCSI_CQE_TYPE_TASK_CLEANUP,
+ ISCSI_CQE_TYPE_DUMMY,
+ MAX_ISCSI_CQES_TYPE
+};
+
+enum iscsi_cqe_unsolicited_type {
+ ISCSI_CQE_UNSOLICITED_NONE,
+ ISCSI_CQE_UNSOLICITED_SINGLE,
+ ISCSI_CQE_UNSOLICITED_FIRST,
+ ISCSI_CQE_UNSOLICITED_MIDDLE,
+ ISCSI_CQE_UNSOLICITED_LAST,
+ MAX_ISCSI_CQE_UNSOLICITED_TYPE
+};
+
+struct iscsi_virt_sgl_ctx {
+ struct regpair sgl_base;
+ struct regpair dsgl_base;
+ __le32 sgl_initial_offset;
+ __le32 dsgl_initial_offset;
+ __le32 dsgl_curr_offset[2];
+};
+
+struct iscsi_sgl_var_params {
+ u8 sgl_ptr;
+ u8 dsgl_ptr;
+ __le16 sge_offset;
+ __le16 dsge_offset;
+};
+
+struct iscsi_phys_sgl_ctx {
+ struct regpair sgl_base;
+ struct regpair dsgl_base;
+ u8 sgl_size;
+ u8 dsgl_size;
+ __le16 reserved;
+ struct iscsi_sgl_var_params var_params[2];
+};
+
+union iscsi_data_desc_ctx {
+ struct iscsi_virt_sgl_ctx virt_sgl;
+ struct iscsi_phys_sgl_ctx phys_sgl;
+ struct iscsi_cached_sge_ctx cached_sge;
+};
+
+struct iscsi_debug_modes {
+ u8 flags;
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
+#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3
+#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6
+};
+
+struct iscsi_dif_flags {
+ u8 flags;
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
+};
+
+enum iscsi_eqe_opcode {
+ ISCSI_EVENT_TYPE_INIT_FUNC = 0,
+ ISCSI_EVENT_TYPE_DESTROY_FUNC,
+ ISCSI_EVENT_TYPE_OFFLOAD_CONN,
+ ISCSI_EVENT_TYPE_UPDATE_CONN,
+ ISCSI_EVENT_TYPE_CLEAR_SQ,
+ ISCSI_EVENT_TYPE_TERMINATE_CONN,
+ ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
+ ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
+ RESERVED8,
+ RESERVED9,
+ ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
+ ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
+ ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
+ ISCSI_EVENT_TYPE_ASYN_SYN_RCVD,
+ ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME,
+ ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT,
+ ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT,
+ ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
+ ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
+ ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
+ ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
+ MAX_ISCSI_EQE_OPCODE
+};
+
+enum iscsi_error_types {
+ ISCSI_STATUS_NONE = 0,
+ ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
+ ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
+ ISCSI_CONN_ERROR_TASK_NOT_VALID,
+ ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
+ ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
+ ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
+ ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
+ ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
+ ISCSI_CONN_ERROR_DATA_OVERRUN,
+ ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
+ ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR,
+ ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
+ ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION,
+ ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
+ ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
+ ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
+ ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
+ ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
+ ISCSI_ERROR_UNKNOWN,
+ MAX_ISCSI_ERROR_TYPES
+};
+
+struct iscsi_mflags {
+ u8 mflags;
+#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1
+#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0
+#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1
+#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
+#define ISCSI_MFLAGS_RESERVED_MASK 0x3F
+#define ISCSI_MFLAGS_RESERVED_SHIFT 2
+};
+
+struct iscsi_sgl {
+ struct regpair sgl_addr;
+ __le16 updated_sge_size;
+ __le16 updated_sge_offset;
+ __le32 byte_offset;
+};
+
+union iscsi_mstorm_sgl {
+ struct iscsi_sgl sgl_struct;
+ struct iscsi_sge single_sge;
+};
+
+enum iscsi_ramrod_cmd_id {
+ ISCSI_RAMROD_CMD_ID_UNUSED = 0,
+ ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
+ ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2,
+ ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
+ ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
+ ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
+ ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ MAX_ISCSI_RAMROD_CMD_ID
+};
+
+struct iscsi_reg1 {
+ __le32 reg1_map;
+#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7
+#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
+#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 3
+};
+
+union iscsi_seq_num {
+ __le16 data_sn;
+ __le16 r2t_sn;
+};
+
+struct iscsi_spe_conn_offload {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ struct iscsi_conn_offload_params iscsi;
+ struct tcp_offload_params tcp;
+};
+
+struct iscsi_spe_conn_offload_option2 {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ struct iscsi_conn_offload_params iscsi;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+struct iscsi_spe_conn_termination {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ u8 abortive;
+ u8 reserved0[7];
+ struct regpair queue_cnts_addr;
+ struct regpair query_params_addr;
+};
+
+struct iscsi_spe_func_dstry {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct iscsi_spe_func_init {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 half_way_close_timeout;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 ll2_rx_queue_id;
+ u8 ooo_enable;
+ struct iscsi_debug_modes debug_mode;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 reserved3;
+ __le32 reserved4;
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+};
+
+struct ystorm_iscsi_task_state {
+ union iscsi_data_desc_ctx sgl_ctx_union;
+ __le32 buffer_offset[2];
+ __le16 bytes_nxt_dif;
+ __le16 rxmit_bytes_nxt_dif;
+ union iscsi_seq_num seq_num_union;
+ u8 dif_bytes_leftover;
+ u8 rxmit_dif_bytes_leftover;
+ __le16 reuse_count;
+ struct iscsi_dif_flags dif_flags;
+ u8 local_comp;
+ __le32 exp_r2t_sn;
+ __le32 sgl_offset[2];
+};
+
+struct ystorm_iscsi_task_st_ctx {
+ struct ystorm_iscsi_task_state state;
+ union iscsi_task_hdr pdu_hdr;
+};
+
+struct ystorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 TTT;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct mstorm_iscsi_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct ustorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 state;
+ __le16 icid;
+ u8 flags0;
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ u8 next_tid_valid;
+ u8 byte3;
+ __le16 word1;
+ __le16 next_tid;
+ __le16 word3;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
+
+struct mstorm_iscsi_task_st_ctx {
+ union iscsi_mstorm_sgl sgl_union;
+ struct iscsi_dif_flags dif_flags;
+ struct iscsi_mflags flags;
+ u8 sgl_size;
+ u8 host_sge_index;
+ __le16 dix_cur_sge_offset;
+ __le16 dix_cur_sge_size;
+ __le32 data_offset_rtid;
+ u8 dif_offset;
+ u8 dix_sgl_size;
+ u8 dix_sge_index;
+ u8 task_type;
+ struct regpair sense_db;
+ struct regpair dix_sgl_cur_sge;
+ __le32 rem_task_size;
+ __le16 reuse_count;
+ __le16 dif_data_residue;
+ u8 reserved0[4];
+ __le32 reserved1[1];
+};
+
+struct ustorm_iscsi_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair lun;
+ struct iscsi_reg1 reg1;
+ u8 flags2;
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
+ u8 reserved2;
+ __le16 reserved3;
+ __le32 reserved4;
+ __le32 reserved5;
+ __le32 reserved6;
+ __le32 reserved7;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
+ u8 flags;
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
+ u8 cq_rss_number;
+};
+
+struct iscsi_task_context {
+ struct ystorm_iscsi_task_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct tdif_task_context tdif_context;
+ struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct mstorm_iscsi_task_st_ctx mstorm_st_context;
+ struct ustorm_iscsi_task_st_ctx ustorm_st_context;
+ struct rdif_task_context rdif_context;
+};
+
+enum iscsi_task_type {
+ ISCSI_TASK_TYPE_INITIATOR_WRITE,
+ ISCSI_TASK_TYPE_INITIATOR_READ,
+ ISCSI_TASK_TYPE_MIDPATH,
+ ISCSI_TASK_TYPE_UNSOLIC,
+ ISCSI_TASK_TYPE_EXCHCLEANUP,
+ ISCSI_TASK_TYPE_IRRELEVANT,
+ ISCSI_TASK_TYPE_TARGET_WRITE,
+ ISCSI_TASK_TYPE_TARGET_READ,
+ ISCSI_TASK_TYPE_TARGET_RESPONSE,
+ ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+ MAX_ISCSI_TASK_TYPE
+};
+
+union iscsi_ttt_txlen_union {
+ __le32 desired_tx_len;
+ __le32 ttt;
+};
+
+struct iscsi_uhqe {
+ __le32 reg1;
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
+#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
+#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
+#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
+#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
+#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
+#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
+#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
+ __le32 reg2;
+#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
+#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
+#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
+};
+
+struct iscsi_wqe_field {
+ __le32 contlen_cdbsize_field;
+#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF
+#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF
+#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
+};
+
+union iscsi_wqe_field_union {
+ struct iscsi_wqe_field cont_field;
+ __le32 prev_tid;
+};
+
+struct iscsi_wqe {
+ __le16 task_id;
+ u8 flags;
+#define ISCSI_WQE_WQE_TYPE_MASK 0x7
+#define ISCSI_WQE_WQE_TYPE_SHIFT 0
+#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7
+#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3
+#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1
+#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
+#define ISCSI_WQE_RESPONSE_MASK 0x1
+#define ISCSI_WQE_RESPONSE_SHIFT 7
+ struct iscsi_dif_flags prot_flags;
+ union iscsi_wqe_field_union cont_prevtid_union;
+};
+
+enum iscsi_wqe_type {
+ ISCSI_WQE_TYPE_NORMAL,
+ ISCSI_WQE_TYPE_TASK_CLEANUP,
+ ISCSI_WQE_TYPE_MIDDLE_PATH,
+ ISCSI_WQE_TYPE_LOGIN,
+ ISCSI_WQE_TYPE_FIRST_R2T_CONT,
+ ISCSI_WQE_TYPE_NONFIRST_R2T_CONT,
+ ISCSI_WQE_TYPE_RESPONSE,
+ MAX_ISCSI_WQE_TYPE
+};
+
+struct iscsi_xhqe {
+ union iscsi_ttt_txlen_union ttt_or_txlen;
+ __le32 exp_stat_sn;
+ struct iscsi_dif_flags prot_flags;
+ u8 total_ahs_length;
+ u8 opcode;
+ u8 flags;
+#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7
+#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
+#define ISCSI_XHQE_FINAL_MASK 0x1
+#define ISCSI_XHQE_FINAL_SHIFT 3
+#define ISCSI_XHQE_SUPER_IO_MASK 0x1
+#define ISCSI_XHQE_SUPER_IO_SHIFT 4
+#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 5
+#define ISCSI_XHQE_RESERVED_MASK 0x3
+#define ISCSI_XHQE_RESERVED_SHIFT 6
+ union iscsi_seq_num seq_num_union;
+ __le16 reserved1;
+};
+
+struct mstorm_iscsi_stats_drv {
+ struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+};
+
+struct ooo_opaque {
+ __le32 cid;
+ u8 drop_isle;
+ u8 drop_size;
+ u8 ooo_opcode;
+ u8 ooo_isle;
+};
+
+struct pstorm_iscsi_stats_drv {
+ struct regpair iscsi_tx_bytes_cnt;
+ struct regpair iscsi_tx_packet_cnt;
+};
+
+struct tstorm_iscsi_stats_drv {
+ struct regpair iscsi_rx_bytes_cnt;
+ struct regpair iscsi_rx_packet_cnt;
+ struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+ __le32 iscsi_cmdq_threshold_cnt;
+ __le32 iscsi_rq_threshold_cnt;
+ __le32 iscsi_immq_threshold_cnt;
+};
+
+struct ustorm_iscsi_stats_drv {
+ struct regpair iscsi_rx_data_pdu_cnt;
+ struct regpair iscsi_rx_r2t_pdu_cnt;
+ struct regpair iscsi_rx_total_pdu_cnt;
+};
+
+struct xstorm_iscsi_stats_drv {
+ struct regpair iscsi_tx_go_to_slow_start_event_cnt;
+ struct regpair iscsi_tx_fast_retransmit_event_cnt;
+};
+
+struct ystorm_iscsi_stats_drv {
+ struct regpair iscsi_tx_data_pdu_cnt;
+ struct regpair iscsi_tx_r2t_pdu_cnt;
+ struct regpair iscsi_tx_total_pdu_cnt;
+};
+
+struct iscsi_db_data {
+ u8 params;
+#define ISCSI_DB_DATA_DEST_MASK 0x3
+#define ISCSI_DB_DATA_DEST_SHIFT 0
+#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
+#define ISCSI_DB_DATA_RESERVED_MASK 0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT 5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 sq_prod;
+};
+
+struct tstorm_iscsi_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+ u8 flags2;
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+ u8 flags3;
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+ u8 byte2;
+ __le16 word1;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg1;
+ __le32 reg2;
+};
+
+#endif /* __ISCSI_COMMON__ */
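The HSI structures above pack several firmware fields into single bytes/words through paired _MASK/_SHIFT defines. As a hedged illustration of how a driver typically reads and writes such a field (SET_FLD/GET_FLD are illustrative helper names, not the driver's own macros; kernel context is assumed):

#define SET_FLD(var, name, val) \
	((var) = ((var) & ~((name##_MASK) << (name##_SHIFT))) | \
		 (((val) & (name##_MASK)) << (name##_SHIFT)))
#define GET_FLD(var, name) \
	(((var) >> (name##_SHIFT)) & (name##_MASK))

/* Example: build the SQ doorbell payload laid out in iscsi_db_data above.
 * The field values chosen here are placeholders, not prescribed settings.
 */
static void example_fill_sq_doorbell(struct iscsi_db_data *db, u16 sq_prod)
{
	db->params = 0;
	SET_FLD(db->params, ISCSI_DB_DATA_DEST, 0);
	SET_FLD(db->params, ISCSI_DB_DATA_AGG_CMD, 0);
	db->agg_flags = 0;
	db->sq_prod = cpu_to_le16(sq_prod);
}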
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 5f8fcaa..7e441bd 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -25,10 +25,9 @@
} while (0)
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
-#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
enum qed_chain_mode {
/* Each Page contains a next pointer at its end */
@@ -47,16 +46,56 @@ enum qed_chain_use_mode {
QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
};
+enum qed_chain_cnt_type {
+ /* The chain's size/prod/cons are kept in 16-bit variables */
+ QED_CHAIN_CNT_TYPE_U16,
+
+ /* The chain's size/prod/cons are kept in 32-bit variables */
+ QED_CHAIN_CNT_TYPE_U32,
+};
+
struct qed_chain_next {
struct regpair next_phys;
void *next_virt;
};
+struct qed_chain_pbl_u16 {
+ u16 prod_page_idx;
+ u16 cons_page_idx;
+};
+
+struct qed_chain_pbl_u32 {
+ u32 prod_page_idx;
+ u32 cons_page_idx;
+};
+
struct qed_chain_pbl {
+ /* Base address of a pre-allocated buffer for pbl */
dma_addr_t p_phys_table;
void *p_virt_table;
- u16 prod_page_idx;
- u16 cons_page_idx;
+
+ /* Table for keeping the virtual addresses of the chain pages,
+ * corresponding to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
+
+ /* Index to current used page by producer/consumer */
+ union {
+ struct qed_chain_pbl_u16 pbl16;
+ struct qed_chain_pbl_u32 pbl32;
+ } u;
+};
+
+struct qed_chain_u16 {
+ /* Cyclic index of next element to produce/consume */
+ u16 prod_idx;
+ u16 cons_idx;
+};
+
+struct qed_chain_u32 {
+ /* Cyclic index of next element to produce/consume */
+ u32 prod_idx;
+ u32 cons_idx;
};
struct qed_chain {
@@ -64,13 +103,25 @@ struct qed_chain {
dma_addr_t p_phys_addr;
void *p_prod_elem;
void *p_cons_elem;
- u16 page_cnt;
+
enum qed_chain_mode mode;
enum qed_chain_use_mode intended_use; /* used to produce/consume */
- u16 capacity; /*< number of _usable_ elements */
- u16 size; /* number of elements */
- u16 prod_idx;
- u16 cons_idx;
+ enum qed_chain_cnt_type cnt_type;
+
+ union {
+ struct qed_chain_u16 chain16;
+ struct qed_chain_u32 chain32;
+ } u;
+
+ u32 page_cnt;
+
+ /* Number of elements - capacity is for usable elements only,
+ * while size will contain total number of elements [for entire chain].
+ */
+ u32 capacity;
+ u32 size;
+
+ /* Elements information for fast calculations */
u16 elem_per_page;
u16 elem_per_page_mask;
u16 elem_unusable;
@@ -96,66 +147,69 @@ struct qed_chain {
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
+
/* Accessors */
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
- return p_chain->prod_idx;
+ return p_chain->u.chain16.prod_idx;
}
static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
- return p_chain->cons_idx;
+ return p_chain->u.chain16.cons_idx;
+}
+
+static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+{
+ return p_chain->u.chain32.cons_idx;
}
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
u16 used;
- /* we don't need to trancate upon assignmet, as we assign u32->u16 */
- used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
- (u32)p_chain->cons_idx;
+ used = (u16) (((u32)0x10000 +
+ (u32)p_chain->u.chain16.prod_idx) -
+ (u32)p_chain->u.chain16.cons_idx);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->prod_idx / p_chain->elem_per_page -
- p_chain->cons_idx / p_chain->elem_per_page;
+ used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
- return p_chain->capacity - used;
+ return (u16)(p_chain->capacity - used);
}
-static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
- return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
-}
+ u32 used;
-static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
-{
- return qed_chain_get_elem_left(p_chain) == 0;
-}
+ used = (u32) (((u64)0x100000000ULL +
+ (u64)p_chain->u.chain32.prod_idx) -
+ (u64)p_chain->u.chain32.cons_idx);
+ if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
-static inline u16 qed_chain_get_elem_per_page(
- struct qed_chain *p_chain)
-{
- return p_chain->elem_per_page;
+ return p_chain->capacity - used;
}
-static inline u16 qed_chain_get_usable_per_page(
- struct qed_chain *p_chain)
+static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
return p_chain->usable_per_page;
}
-static inline u16 qed_chain_get_unusable_per_page(
- struct qed_chain *p_chain)
+static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
return p_chain->elem_unusable;
}
-static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
- return p_chain->size;
+ return p_chain->page_cnt;
}
-static inline dma_addr_t
-qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
return p_chain->pbl.p_phys_table;
}
@@ -172,65 +226,63 @@ qed_chain_get_pbl_phys(struct qed_chain *p_chain)
*/
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
- void **p_next_elem,
- u16 *idx_to_inc,
- u16 *page_to_inc)
+ void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
+ struct qed_chain_next *p_next = NULL;
+ u32 page_index = 0;
switch (p_chain->mode) {
case QED_CHAIN_MODE_NEXT_PTR:
- {
- struct qed_chain_next *p_next = *p_next_elem;
+ p_next = *p_next_elem;
*p_next_elem = p_next->next_virt;
- *idx_to_inc += p_chain->elem_unusable;
+ if (is_chain_u16(p_chain))
+ *(u16 *)idx_to_inc += p_chain->elem_unusable;
+ else
+ *(u32 *)idx_to_inc += p_chain->elem_unusable;
break;
- }
case QED_CHAIN_MODE_SINGLE:
*p_next_elem = p_chain->p_virt_addr;
break;
case QED_CHAIN_MODE_PBL:
- /* It is assumed pages are sequential, next element needs
- * to change only when passing going back to first from last.
- */
- if (++(*page_to_inc) == p_chain->page_cnt) {
- *page_to_inc = 0;
- *p_next_elem = p_chain->p_virt_addr;
+ if (is_chain_u16(p_chain)) {
+ if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
+ *(u16 *)page_to_inc = 0;
+ page_index = *(u16 *)page_to_inc;
+ } else {
+ if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
+ *(u32 *)page_to_inc = 0;
+ page_index = *(u32 *)page_to_inc;
}
+ *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
}
}
#define is_unusable_idx(p, idx) \
- (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+ (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_idx_u32(p, idx) \
+ (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+#define is_unusable_next_idx(p, idx) \
+ ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
+ (p)->usable_per_page)
-#define is_unusable_next_idx(p, idx) \
- ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
+#define is_unusable_next_idx_u32(p, idx) \
+ ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
+ (p)->usable_per_page)
-#define test_ans_skip(p, idx) \
+#define test_and_skip(p, idx) \
do { \
- if (is_unusable_idx(p, idx)) { \
- (p)->idx += (p)->elem_unusable; \
+ if (is_chain_u16(p)) { \
+ if (is_unusable_idx(p, idx)) \
+ (p)->u.chain16.idx += (p)->elem_unusable; \
+ } else { \
+ if (is_unusable_idx_u32(p, idx)) \
+ (p)->u.chain32.idx += (p)->elem_unusable; \
} \
} while (0)
/**
- * @brief qed_chain_return_multi_produced -
- *
- * A chain in which the driver "Produces" elements should use this API
- * to indicate previous produced elements are now consumed.
- *
- * @param p_chain
- * @param num
- */
-static inline void
-qed_chain_return_multi_produced(struct qed_chain *p_chain,
- u16 num)
-{
- p_chain->cons_idx += num;
- test_ans_skip(p_chain, cons_idx);
-}
-
-/**
* @brief qed_chain_return_produced -
*
* A chain in which the driver "Produces" elements should use this API
@@ -240,8 +292,11 @@ qed_chain_return_multi_produced(struct qed_chain *p_chain,
*/
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
- p_chain->cons_idx++;
- test_ans_skip(p_chain, cons_idx);
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx++;
+ else
+ p_chain->u.chain32.cons_idx++;
+ test_and_skip(p_chain, cons_idx);
}
/**
@@ -257,21 +312,33 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
*/
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
- void *ret = NULL;
-
- if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
- p_chain->next_page_mask) {
- qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
- &p_chain->prod_idx,
- &p_chain->pbl.prod_page_idx);
+ void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain16.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+ qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain16.prod_idx++;
+ } else {
+ if ((p_chain->u.chain32.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain32.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+ qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain32.prod_idx++;
}
- ret = p_chain->p_prod_elem;
- p_chain->prod_idx++;
+ p_ret = p_chain->p_prod_elem;
p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
p_chain->elem_size);
- return ret;
+ return p_ret;
}
/**
@@ -282,9 +349,9 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
* @param p_chain
* @param num
*
- * @return u16, number of unusable BDs
+ * @return number of unusable BDs
*/
-static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
return p_chain->capacity;
}
@@ -297,11 +364,13 @@ static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
*
* @param p_chain
*/
-static inline void
-qed_chain_recycle_consumed(struct qed_chain *p_chain)
+static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
- test_ans_skip(p_chain, prod_idx);
- p_chain->prod_idx++;
+ test_and_skip(p_chain, prod_idx);
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx++;
+ else
+ p_chain->u.chain32.prod_idx++;
}
/**
@@ -316,21 +385,33 @@ qed_chain_recycle_consumed(struct qed_chain *p_chain)
*/
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
- void *ret = NULL;
-
- if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
- p_chain->next_page_mask) {
+ void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain16.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+ qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain16.cons_idx++;
+ } else {
+ if ((p_chain->u.chain32.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain32.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
- &p_chain->cons_idx,
- &p_chain->pbl.cons_page_idx);
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain32.cons_idx++;
}
- ret = p_chain->p_cons_elem;
- p_chain->cons_idx++;
+ p_ret = p_chain->p_cons_elem;
p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
p_chain->elem_size);
- return ret;
+ return p_ret;
}
/**
@@ -340,16 +421,33 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
- int i;
-
- p_chain->prod_idx = 0;
- p_chain->cons_idx = 0;
- p_chain->p_cons_elem = p_chain->p_virt_addr;
- p_chain->p_prod_elem = p_chain->p_virt_addr;
+ u32 i;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->u.chain16.prod_idx = 0;
+ p_chain->u.chain16.cons_idx = 0;
+ } else {
+ p_chain->u.chain32.prod_idx = 0;
+ p_chain->u.chain32.cons_idx = 0;
+ }
+ p_chain->p_cons_elem = p_chain->p_virt_addr;
+ p_chain->p_prod_elem = p_chain->p_virt_addr;
if (p_chain->mode == QED_CHAIN_MODE_PBL) {
- p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1;
- p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1;
+ /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+ * indices, to avoid unnecessary page advancing on the first
+ * call to qed_chain_produce/consume. Instead, the indices
+ * will be advanced to page_cnt and then will be wrapped to 0.
+ */
+ u32 reset_val = p_chain->page_cnt - 1;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+ } else {
+ p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
+ p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+ }
}
switch (p_chain->intended_use) {
@@ -377,168 +475,184 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
* @param intended_use
* @param mode
*/
-static inline void qed_chain_init(struct qed_chain *p_chain,
- void *p_virt_addr,
- dma_addr_t p_phys_addr,
- u16 page_cnt,
- u8 elem_size,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode)
+static inline void qed_chain_init_params(struct qed_chain *p_chain,
+ u32 page_cnt,
+ u8 elem_size,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ enum qed_chain_cnt_type cnt_type)
{
/* chain fixed parameters */
- p_chain->p_virt_addr = p_virt_addr;
- p_chain->p_phys_addr = p_phys_addr;
+ p_chain->p_virt_addr = NULL;
+ p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size;
- p_chain->page_cnt = page_cnt;
+ p_chain->intended_use = intended_use;
p_chain->mode = mode;
+ p_chain->cnt_type = cnt_type;
- p_chain->intended_use = intended_use;
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
- p_chain->usable_per_page =
- USABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->capacity = p_chain->usable_per_page * page_cnt;
- p_chain->size = p_chain->elem_per_page * page_cnt;
+ p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
-
p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
-
p_chain->next_page_mask = (p_chain->usable_per_page &
p_chain->elem_per_page_mask);
- if (mode == QED_CHAIN_MODE_NEXT_PTR) {
- struct qed_chain_next *p_next;
- u16 i;
-
- for (i = 0; i < page_cnt - 1; i++) {
- /* Increment mem_phy to the next page. */
- p_phys_addr += QED_CHAIN_PAGE_SIZE;
-
- /* Initialize the physical address of the next page. */
- p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
- elem_size *
- p_chain->
- usable_per_page);
-
- p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
- p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);
-
- /* Initialize the virtual address of the next page. */
- p_next->next_virt = (void *)((u8 *)p_virt_addr +
- QED_CHAIN_PAGE_SIZE);
-
- /* Move to the next page. */
- p_virt_addr = p_next->next_virt;
- }
-
- /* Last page's next should point to beginning of the chain */
- p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
- elem_size *
- p_chain->usable_per_page);
+ p_chain->page_cnt = page_cnt;
+ p_chain->capacity = p_chain->usable_per_page * page_cnt;
+ p_chain->size = p_chain->elem_per_page * page_cnt;
- p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr);
- p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr);
- p_next->next_virt = p_chain->p_virt_addr;
- }
- qed_chain_reset(p_chain);
+ p_chain->pbl.p_phys_table = 0;
+ p_chain->pbl.p_virt_table = NULL;
+ p_chain->pbl.pp_virt_addr_tbl = NULL;
}
/**
- * @brief qed_chain_pbl_init - Initalizes a basic pbl chain
- * struct
+ * @brief qed_chain_init_mem -
+ *
+ * Initializes a basic chain struct with its chain buffers
+ *
* @param p_chain
* @param p_virt_addr virtual address of allocated buffer's beginning
* @param p_phys_addr physical address of allocated buffer's beginning
- * @param page_cnt number of pages in the allocated buffer
- * @param elem_size size of each element in the chain
- * @param use_mode
- * @param p_phys_pbl pointer to a pre-allocated side table
- * which will hold physical page addresses.
- * @param p_virt_pbl pointer to a pre allocated side table
- * which will hold virtual page addresses.
+ *
*/
-static inline void
-qed_chain_pbl_init(struct qed_chain *p_chain,
- void *p_virt_addr,
- dma_addr_t p_phys_addr,
- u16 page_cnt,
- u8 elem_size,
- enum qed_chain_use_mode use_mode,
- dma_addr_t p_phys_pbl,
- dma_addr_t *p_virt_pbl)
+static inline void qed_chain_init_mem(struct qed_chain *p_chain,
+ void *p_virt_addr, dma_addr_t p_phys_addr)
{
- dma_addr_t *p_pbl_dma = p_virt_pbl;
- int i;
-
- qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
- elem_size, use_mode, QED_CHAIN_MODE_PBL);
+ p_chain->p_virt_addr = p_virt_addr;
+ p_chain->p_phys_addr = p_phys_addr;
+}
+/**
+ * @brief qed_chain_init_pbl_mem -
+ *
+ * Initializes a basic chain struct with its pbl buffers
+ *
+ * @param p_chain
+ * @param p_virt_pbl pointer to a pre allocated side table which will hold
+ * virtual page addresses.
+ * @param p_phys_pbl pointer to a pre-allocated side table which will hold
+ * physical page addresses.
+ * @param pp_virt_addr_tbl
+ * pointer to a pre-allocated side table which will hold
+ * the virtual addresses of the chain pages.
+ *
+ */
+static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
+ void *p_virt_pbl,
+ dma_addr_t p_phys_pbl,
+ void **pp_virt_addr_tbl)
+{
p_chain->pbl.p_phys_table = p_phys_pbl;
p_chain->pbl.p_virt_table = p_virt_pbl;
-
- /* Fill the PBL with physical addresses*/
- for (i = 0; i < page_cnt; i++) {
- *p_pbl_dma = p_phys_addr;
- p_phys_addr += QED_CHAIN_PAGE_SIZE;
- p_pbl_dma++;
- }
+ p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
/**
- * @brief qed_chain_set_prod - sets the prod to the given
- * value
+ * @brief qed_chain_init_next_ptr_elem -
+ *
+ * Initializes a next pointer element
+ *
+ * @param p_chain
+ * @param p_virt_curr virtual address of a chain page of which the next
+ * pointer element is initialized
+ * @param p_virt_next virtual address of the next chain page
+ * @param p_phys_next physical address of the next chain page
*
- * @param prod_idx
- * @param p_prod_elem
*/
-static inline void qed_chain_set_prod(struct qed_chain *p_chain,
- u16 prod_idx,
- void *p_prod_elem)
+static inline void
+qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
+ void *p_virt_curr,
+ void *p_virt_next, dma_addr_t p_phys_next)
{
- p_chain->prod_idx = prod_idx;
- p_chain->p_prod_elem = p_prod_elem;
+ struct qed_chain_next *p_next;
+ u32 size;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
+
+ DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
+
+ p_next->next_virt = p_virt_next;
}
/**
- * @brief qed_chain_get_elem -
+ * @brief qed_chain_get_last_elem -
*
- * get a pointer to an element represented by absolute idx
+ * Returns a pointer to the last element of the chain
*
* @param p_chain
- * @assumption p_chain->size is a power of 2
*
- * @return void*, a pointer to next element
+ * @return void*
*/
-static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
- u16 idx)
+static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
- void *ret = NULL;
-
- if (idx >= p_chain->size)
- return NULL;
+ struct qed_chain_next *p_next = NULL;
+ void *p_virt_addr = NULL;
+ u32 size, last_page_idx;
- ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
+ if (!p_chain->p_virt_addr)
+ goto out;
- return ret;
+ switch (p_chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_virt_addr = p_chain->p_virt_addr;
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
+ while (p_next->next_virt != p_chain->p_virt_addr) {
+ p_virt_addr = p_next->next_virt;
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+ size);
+ }
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ p_virt_addr = p_chain->p_virt_addr;
+ break;
+ case QED_CHAIN_MODE_PBL:
+ last_page_idx = p_chain->page_cnt - 1;
+ p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ break;
+ }
+ /* p_virt_addr points at this stage to the last page of the chain */
+ size = p_chain->elem_size * (p_chain->usable_per_page - 1);
+ p_virt_addr = (u8 *)p_virt_addr + size;
+out:
+ return p_virt_addr;
}
/**
- * @brief qed_chain_sge_inc_cons_prod
+ * @brief qed_chain_set_prod - sets the prod to the given value
*
- * for sge chains, producer isn't increased serially, the ring
- * is expected to be full at all times. Once elements are
- * consumed, they are immediately produced.
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+ u32 prod_idx, void *p_prod_elem)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx = (u16) prod_idx;
+ else
+ p_chain->u.chain32.prod_idx = prod_idx;
+ p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief qed_chain_pbl_zero_mem - set chain memory to 0
*
* @param p_chain
- * @param cnt
- *
- * @return inline void
*/
-static inline void
-qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
- u16 cnt)
+static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
- p_chain->prod_idx += cnt;
- p_chain->cons_idx += cnt;
+ u32 i, page_cnt;
+
+ if (p_chain->mode != QED_CHAIN_MODE_PBL)
+ return;
+
+ page_cnt = qed_chain_get_page_cnt(p_chain);
+
+ for (i = 0; i < page_cnt; i++)
+ memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
+ QED_CHAIN_PAGE_SIZE);
}
#endif
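The chain rework keeps the fast-path API shape while hiding the u16/u32 counter choice behind cnt_type. A minimal usage sketch for a 16-bit chain, assuming the chain has already been set up through the core's allocation path (kernel context assumed, error handling elided):

/* Hedged sketch only - not lifted from the driver's fast path. */
static void example_chain_fill_and_drain(struct qed_chain *p_chain,
					 const void *src)
{
	void *elem;

	/* Make sure a usable element is available before producing. */
	if (!qed_chain_get_elem_left(p_chain))
		return;

	/* Producer side: get the current element, fill it; prod_idx advances. */
	elem = qed_chain_produce(p_chain);
	memcpy(elem, src, p_chain->elem_size);

	/* ... hardware consumes the element and reports completion ... */

	/* Consumer side: fetch the completed element; cons_idx advances. */
	elem = qed_chain_consume(p_chain);
}

For chains created with QED_CHAIN_CNT_TYPE_U32 the same produce/consume calls apply, but the index accessors switch to the _u32 variants (qed_chain_get_cons_idx_u32(), qed_chain_get_elem_left_u32()).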
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 6ae8cb4..71d523b 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -113,6 +113,7 @@ struct qed_queue_start_common_params {
u8 vport_id;
u16 sb;
u16 sb_idx;
+ u16 vf_qid;
};
struct qed_tunn_params {
@@ -127,11 +128,73 @@ struct qed_eth_cb_ops {
void (*force_mac) (void *dev, u8 *mac);
};
+#ifdef CONFIG_DCB
+/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration
+ * of dcbnl_rtnl_ops structure.
+ */
+struct qed_eth_dcbnl_ops {
+ /* IEEE 802.1Qaz std */
+ int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
+ int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
+ int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
+ int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets);
+ int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets);
+ int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc);
+ int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app);
+ int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app);
+
+ /* CEE std */
+ u8 (*getstate)(struct qed_dev *cdev);
+ u8 (*setstate)(struct qed_dev *cdev, u8 state);
+ void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map);
+ void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
+ void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map);
+ void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct);
+ void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting);
+ void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting);
+ u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap);
+ int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num);
+ u8 (*getpfcstate)(struct qed_dev *cdev);
+ int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id);
+ u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags);
+
+ /* DCBX configuration */
+ u8 (*getdcbx)(struct qed_dev *cdev);
+ void (*setpgtccfgtx)(struct qed_dev *cdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
+ void (*setpgtccfgrx)(struct qed_dev *cdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map);
+ void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
+ void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct);
+ u8 (*setall)(struct qed_dev *cdev);
+ int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num);
+ void (*setpfcstate)(struct qed_dev *cdev, u8 state);
+ int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up);
+ u8 (*setdcbx)(struct qed_dev *cdev, u8 state);
+ u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags);
+
+ /* Peer apps */
+ int (*peer_getappinfo)(struct qed_dev *cdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count);
+ int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table);
+
+ /* CEE peer */
+ int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc);
+ int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg);
+};
+#endif
+
struct qed_eth_ops {
const struct qed_common_ops *common;
#ifdef CONFIG_QED_SRIOV
const struct qed_iov_hv_ops *iov;
#endif
+#ifdef CONFIG_DCB
+ const struct qed_eth_dcbnl_ops *dcb;
+#endif
int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_eth_info *info);
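The new dcb ops table is meant to bridge the kernel's dcbnl callbacks and qed's DCBX state. A hedged sketch of how the Ethernet driver on top could forward one callback (the qede_dev/netdev_priv layout is assumed here, not shown in this patch):

#ifdef CONFIG_DCB
/* Illustrative glue only; the real wrapper set may differ. */
static int example_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
	struct qede_dev *edev = netdev_priv(ndev);

	return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
}
#endif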
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 4c29439..e1d5122 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,96 @@ enum dcbx_protocol_type {
DCBX_MAX_PROTOCOL_TYPE
};
+#ifdef CONFIG_DCB
+#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
+#define QED_LLDP_PORT_ID_STAT_LEN 4
+#define QED_DCBX_MAX_APP_PROTOCOL 32
+#define QED_MAX_PFC_PRIORITIES 8
+#define QED_DCBX_DSCP_SIZE 64
+
+struct qed_dcbx_lldp_remote {
+ u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
+ bool enable_rx;
+ bool enable_tx;
+ u32 tx_interval;
+ u32 max_credit;
+};
+
+struct qed_dcbx_lldp_local {
+ u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
+};
+
+struct qed_dcbx_app_prio {
+ u8 roce;
+ u8 roce_v2;
+ u8 fcoe;
+ u8 iscsi;
+ u8 eth;
+};
+
+struct qed_dbcx_pfc_params {
+ bool willing;
+ bool enabled;
+ u8 prio[QED_MAX_PFC_PRIORITIES];
+ u8 max_tc;
+};
+
+struct qed_app_entry {
+ bool ethtype;
+ bool enabled;
+ u8 prio;
+ u16 proto_id;
+ enum dcbx_protocol_type proto_type;
+};
+
+struct qed_dcbx_params {
+ struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
+ u16 num_app_entries;
+ bool app_willing;
+ bool app_valid;
+ bool app_error;
+ bool ets_willing;
+ bool ets_enabled;
+ bool ets_cbs;
+ bool valid;
+ u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
+ u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
+ u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
+ struct qed_dbcx_pfc_params pfc;
+ u8 max_ets_tc;
+};
+
+struct qed_dcbx_admin_params {
+ struct qed_dcbx_params params;
+ bool valid;
+};
+
+struct qed_dcbx_remote_params {
+ struct qed_dcbx_params params;
+ bool valid;
+};
+
+struct qed_dcbx_operational_params {
+ struct qed_dcbx_app_prio app_prio;
+ struct qed_dcbx_params params;
+ bool valid;
+ bool enabled;
+ bool ieee;
+ bool cee;
+ u32 err;
+};
+
+struct qed_dcbx_get {
+ struct qed_dcbx_operational_params operational;
+ struct qed_dcbx_lldp_remote lldp_remote;
+ struct qed_dcbx_lldp_local lldp_local;
+ struct qed_dcbx_remote_params remote;
+ struct qed_dcbx_admin_params local;
+};
+#endif
+
enum qed_led_mode {
QED_LED_MODE_OFF,
QED_LED_MODE_ON,
@@ -58,8 +148,70 @@ struct qed_eth_pf_params {
u16 num_cons;
};
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
+struct qed_iscsi_pf_params {
+ u64 glbl_q_params_addr;
+ u64 bdq_pbl_base_addr[2];
+ u32 max_cwnd;
+ u16 cq_num_entries;
+ u16 cmdq_num_entries;
+ u16 dup_ack_threshold;
+ u16 tx_sws_timer;
+ u16 min_rto;
+ u16 min_rto_rt;
+ u16 max_rto;
+
+ /* The following parameters are used during HW-init
+ * and these parameters need to be passed as arguments
+ * to update_pf_params routine invoked before slowpath start
+ */
+ u16 num_cons;
+ u16 num_tasks;
+
+ /* The following parameters are used during protocol-init */
+ u16 half_way_close_timeout;
+ u16 bdq_xoff_threshold[2];
+ u16 bdq_xon_threshold[2];
+ u16 cmdq_xoff_threshold;
+ u16 cmdq_xon_threshold;
+ u16 rq_buffer_size;
+
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 log_page_size;
+ u8 rqe_log_size;
+ u8 max_fin_rt;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u8 ooo_enable;
+
+ u8 is_target;
+ u8 bdq_pbl_num_entries[2];
+};
+
+struct qed_rdma_pf_params {
+ /* Supplied to QED during resource allocation (may affect the ILT and
+ * the doorbell BAR).
+ */
+ u32 min_dpis; /* number of requested DPIs */
+ u32 num_mrs; /* number of requested memory regions */
+ u32 num_qps; /* number of requested Queue Pairs */
+ u32 num_srqs; /* number of requested SRQ */
+ u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
+ u8 gl_pi; /* protocol index */
+
+ /* Will allocate rate limiters to be used with QPs */
+ u8 enable_dcqcn;
+};
+
struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
+ struct qed_iscsi_pf_params iscsi_pf_params;
+ struct qed_rdma_pf_params rdma_pf_params;
};
enum qed_int_mode {
@@ -100,6 +252,8 @@ struct qed_dev_info {
/* MFW version */
u32 mfw_rev;
+ bool rdma_supported;
+
u32 flash_size;
u8 mf_mode;
bool tx_switching;
@@ -111,6 +265,7 @@ enum qed_sb_type {
enum qed_protocol {
QED_PROTOCOL_ETH,
+ QED_PROTOCOL_ISCSI,
};
struct qed_link_params {
@@ -325,7 +480,8 @@ struct qed_common_ops {
int (*chain_alloc)(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
- u16 num_elems,
+ enum qed_chain_cnt_type cnt_type,
+ u32 num_elems,
size_t elem_size,
struct qed_chain *p_chain);
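With the extra cnt_type argument, existing L2 callers keep 16-bit counters while storage/RDMA queues can request 32-bit ones. A hedged caller sketch (num_elems, elem_size and the ops pointer are placeholders):

	struct qed_chain chain;
	int rc;

	rc = qed_ops->common->chain_alloc(cdev,
					  QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					  QED_CHAIN_MODE_PBL,
					  QED_CHAIN_CNT_TYPE_U32,
					  num_elems, elem_size, &chain);
	if (rc)
		return rc;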
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
new file mode 100644
index 0000000..187991c
--- /dev/null
+++ b/include/linux/qed/rdma_common.h
@@ -0,0 +1,44 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __RDMA_COMMON__
+#define __RDMA_COMMON__
+/************************/
+/* RDMA FW CONSTANTS */
+/************************/
+
+#define RDMA_RESERVED_LKEY (0)
+#define RDMA_RING_PAGE_SIZE (0x1000)
+
+#define RDMA_MAX_SGE_PER_SQ_WQE (4)
+#define RDMA_MAX_SGE_PER_RQ_WQE (4)
+
+#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF)
+
+#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
+#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
+
+#define RDMA_MAX_CQS (64 * 1024)
+#define RDMA_MAX_TIDS (128 * 1024 - 1)
+#define RDMA_MAX_PDS (64 * 1024)
+
+#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+
+#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
+
+struct rdma_srq_id {
+ __le16 srq_idx;
+ __le16 opaque_fid;
+};
+
+struct rdma_srq_producers {
+ __le32 sge_prod;
+ __le32 wqe_prod;
+};
+
+#endif /* __RDMA_COMMON__ */
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
new file mode 100644
index 0000000..2eeaf3d
--- /dev/null
+++ b/include/linux/qed/roce_common.h
@@ -0,0 +1,17 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ROCE_COMMON__
+#define __ROCE_COMMON__
+
+#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
+#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+
+#define ROCE_MAX_QPS (32 * 1024)
+
+#endif /* __ROCE_COMMON__ */
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
new file mode 100644
index 0000000..3b8e1ef
--- /dev/null
+++ b/include/linux/qed/storage_common.h
@@ -0,0 +1,91 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __STORAGE_COMMON__
+#define __STORAGE_COMMON__
+
+#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
+#define BDQ_NUM_RESOURCES (4)
+
+#define BDQ_ID_RQ (0)
+#define BDQ_ID_IMM_DATA (1)
+#define BDQ_NUM_IDS (2)
+
+#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
+
+struct scsi_bd {
+ struct regpair address;
+ struct regpair opaque;
+};
+
+struct scsi_bdq_ram_drv_data {
+ __le16 external_producer;
+ __le16 reserved0[3];
+};
+
+struct scsi_drv_cmdq {
+ __le16 cmdq_cons;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct scsi_init_func_params {
+ __le16 num_tasks;
+ u8 log_page_size;
+ u8 debug_mode;
+ u8 reserved2[12];
+};
+
+struct scsi_init_func_queues {
+ struct regpair glbl_q_params_addr;
+ __le16 rq_buffer_size;
+ __le16 cq_num_entries;
+ __le16 cmdq_num_entries;
+ u8 bdq_resource_id;
+ u8 q_validity;
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
+#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
+#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
+ u8 num_queues;
+ u8 queue_relative_offset;
+ u8 cq_sb_pi;
+ u8 cmdq_sb_pi;
+ __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
+ __le16 reserved0;
+ u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+ struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
+ __le16 bdq_xoff_threshold[BDQ_NUM_IDS];
+ __le16 bdq_xon_threshold[BDQ_NUM_IDS];
+ __le16 cmdq_xoff_threshold;
+ __le16 cmdq_xon_threshold;
+ __le32 reserved1;
+};
+
+struct scsi_ram_per_bdq_resource_drv_data {
+ struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
+};
+
+struct scsi_sge {
+ struct regpair sge_addr;
+ __le16 sge_len;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct scsi_terminate_extra_params {
+ __le16 unsolicited_cq_count;
+ __le16 cmdq_count;
+ u8 reserved[4];
+};
+
+#endif /* __STORAGE_COMMON__ */
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
new file mode 100644
index 0000000..accba0e
--- /dev/null
+++ b/include/linux/qed/tcp_common.h
@@ -0,0 +1,226 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __TCP_COMMON__
+#define __TCP_COMMON__
+
+#define TCP_INVALID_TIMEOUT_VAL -1
+
+enum tcp_connect_mode {
+ TCP_CONNECT_ACTIVE,
+ TCP_CONNECT_PASSIVE,
+ MAX_TCP_CONNECT_MODE
+};
+
+struct tcp_init_params {
+ __le32 max_cwnd;
+ __le16 dup_ack_threshold;
+ __le16 tx_sws_timer;
+ __le16 min_rto;
+ __le16 min_rto_rt;
+ __le16 max_rto;
+ u8 maxfinrt;
+ u8 reserved[1];
+};
+
+enum tcp_ip_version {
+ TCP_IPV4,
+ TCP_IPV6,
+ MAX_TCP_IP_VERSION
+};
+
+struct tcp_offload_params {
+ __le16 local_mac_addr_lo;
+ __le16 local_mac_addr_mid;
+ __le16 local_mac_addr_hi;
+ __le16 remote_mac_addr_lo;
+ __le16 remote_mac_addr_mid;
+ __le16 remote_mac_addr_hi;
+ __le16 vlan_id;
+ u8 flags;
+#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
+#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
+ u8 ip_version;
+ __le32 remote_ip[4];
+ __le32 local_ip[4];
+ __le32 flow_label;
+ u8 ttl;
+ u8 tos_or_tc;
+ __le16 remote_port;
+ __le16 local_port;
+ __le16 mss;
+ u8 rcv_wnd_scale;
+ u8 connect_mode;
+ __le16 srtt;
+ __le32 cwnd;
+ __le32 ss_thresh;
+ __le16 reserved1;
+ u8 ka_max_probe_cnt;
+ u8 dup_ack_theshold;
+ __le32 rcv_next;
+ __le32 snd_una;
+ __le32 snd_next;
+ __le32 snd_max;
+ __le32 snd_wnd;
+ __le32 rcv_wnd;
+ __le32 snd_wl1;
+ __le32 ts_time;
+ __le32 ts_recent;
+ __le32 ts_recent_age;
+ __le32 total_rt;
+ __le32 ka_timeout_delta;
+ __le32 rt_timeout_delta;
+ u8 dup_ack_cnt;
+ u8 snd_wnd_probe_cnt;
+ u8 ka_probe_cnt;
+ u8 rt_cnt;
+ __le16 rtt_var;
+ __le16 reserved2;
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 initial_rcv_wnd;
+ u8 snd_wnd_scale;
+ u8 ack_frequency;
+ __le16 da_timeout_value;
+ __le32 ts_ticks_per_second;
+};
+
+struct tcp_offload_params_opt2 {
+ __le16 local_mac_addr_lo;
+ __le16 local_mac_addr_mid;
+ __le16 local_mac_addr_hi;
+ __le16 remote_mac_addr_lo;
+ __le16 remote_mac_addr_mid;
+ __le16 remote_mac_addr_hi;
+ __le16 vlan_id;
+ u8 flags;
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
+ u8 ip_version;
+ __le32 remote_ip[4];
+ __le32 local_ip[4];
+ __le32 flow_label;
+ u8 ttl;
+ u8 tos_or_tc;
+ __le16 remote_port;
+ __le16 local_port;
+ __le16 mss;
+ u8 rcv_wnd_scale;
+ u8 connect_mode;
+ __le16 syn_ip_payload_length;
+ __le32 syn_phy_addr_lo;
+ __le32 syn_phy_addr_hi;
+ __le32 reserved1[22];
+};
+
+enum tcp_seg_placement_event {
+ TCP_EVENT_ADD_PEN,
+ TCP_EVENT_ADD_NEW_ISLE,
+ TCP_EVENT_ADD_ISLE_RIGHT,
+ TCP_EVENT_ADD_ISLE_LEFT,
+ TCP_EVENT_JOIN,
+ TCP_EVENT_NOP,
+ MAX_TCP_SEG_PLACEMENT_EVENT
+};
+
+struct tcp_update_params {
+ __le16 flags;
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
+#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
+#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
+#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
+ __le16 remote_mac_addr_lo;
+ __le16 remote_mac_addr_mid;
+ __le16 remote_mac_addr_hi;
+ __le16 mss;
+ u8 ttl;
+ u8 tos_or_tc;
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 flow_label;
+ __le32 initial_rcv_wnd;
+ u8 ka_max_probe_cnt;
+ u8 reserved1[7];
+};
+
+struct tcp_upload_params {
+ __le32 rcv_next;
+ __le32 snd_una;
+ __le32 snd_next;
+ __le32 snd_max;
+ __le32 snd_wnd;
+ __le32 rcv_wnd;
+ __le32 snd_wl1;
+ __le32 cwnd;
+ __le32 ss_thresh;
+ __le16 srtt;
+ __le16 rtt_var;
+ __le32 ts_time;
+ __le32 ts_recent;
+ __le32 ts_recent_age;
+ __le32 total_rt;
+ __le32 ka_timeout_delta;
+ __le32 rt_timeout_delta;
+ u8 dup_ack_cnt;
+ u8 snd_wnd_probe_cnt;
+ u8 ka_probe_cnt;
+ u8 rt_cnt;
+ __le32 reserved;
+};
+
+#endif /* __TCP_COMMON__ */
diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h
index a53915c..1e8f216 100644
--- a/include/linux/rxrpc.h
+++ b/include/linux/rxrpc.h
@@ -40,16 +40,18 @@ struct sockaddr_rxrpc {
/*
* RxRPC control messages
+ * - If neither abort nor accept is specified, the message is a data message.
* - terminal messages mean that a user call ID tag can be recycled
+ * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg()
*/
-#define RXRPC_USER_CALL_ID 1 /* user call ID specifier */
-#define RXRPC_ABORT 2 /* abort request / notification [terminal] */
-#define RXRPC_ACK 3 /* [Server] RPC op final ACK received [terminal] */
-#define RXRPC_NET_ERROR 5 /* network error received [terminal] */
-#define RXRPC_BUSY 6 /* server busy received [terminal] */
-#define RXRPC_LOCAL_ERROR 7 /* local error generated [terminal] */
-#define RXRPC_NEW_CALL 8 /* [Server] new incoming call notification */
-#define RXRPC_ACCEPT 9 /* [Server] accept request */
+#define RXRPC_USER_CALL_ID 1 /* sr: user call ID specifier */
+#define RXRPC_ABORT 2 /* sr: abort request / notification [terminal] */
+#define RXRPC_ACK 3 /* -r: [Service] RPC op final ACK received [terminal] */
+#define RXRPC_NET_ERROR 5 /* -r: network error received [terminal] */
+#define RXRPC_BUSY 6 /* -r: server busy received [terminal] */
+#define RXRPC_LOCAL_ERROR 7 /* -r: local error generated [terminal] */
+#define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */
+#define RXRPC_ACCEPT 9 /* s-: [Service] accept request */
/*
* RxRPC security levels
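
The new s/r annotations say which control messages may be attached to sendmsg() and which only appear from recvmsg(). A hedged sketch of the send side, attaching RXRPC_USER_CALL_ID as a cmsg on an AF_RXRPC socket; the SOL_RXRPC value and the surrounding socket setup are assumptions, and error handling is omitted:

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC		272	/* assumed value from linux/socket.h */
#endif
#define RXRPC_USER_CALL_ID	1	/* from the header above */

/* Tag outgoing data with a user call ID; a sketch only.  The fd is
 * assumed to be an AF_RXRPC socket already bound/connected by the caller. */
static ssize_t rxrpc_send_data(int fd, unsigned long call_id,
			       const void *buf, size_t len)
{
	char control[CMSG_SPACE(sizeof(unsigned long))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;	/* "sr": valid on sendmsg() */
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	return sendmsg(fd, &msg, 0);
}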
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ee38a41..dc0fca7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -301,6 +301,11 @@ struct sk_buff;
#endif
extern int sysctl_max_skb_frags;
+/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
+ * segment using its current segmentation instead.
+ */
+#define GSO_BY_FRAGS 0xFFFF
+
typedef struct skb_frag_struct skb_frag_t;
struct skb_frag_struct {
@@ -482,6 +487,8 @@ enum {
SKB_GSO_PARTIAL = 1 << 13,
SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
+
+ SKB_GSO_SCTP = 1 << 15,
};
#if BITS_PER_LONG > 32
@@ -2987,6 +2994,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
+bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
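
GSO_BY_FRAGS and skb_gso_validate_mtu() work together: normally every segment is headers plus gso_size bytes, but with GSO_BY_FRAGS each skb on the frag list forms its own segment. A small userspace model of that decision; it simplifies away the header-size bookkeeping the kernel does through skb_gso_network_seglen():

#include <stdbool.h>
#include <stddef.h>

#define GSO_BY_FRAGS 0xFFFF

/* Model of the check skb_gso_validate_mtu() performs (see the
 * net/core/skbuff.c hunk later in this patch). */
static bool gso_fits_mtu(unsigned int hlen, unsigned int gso_size,
			 const unsigned int *frag_len, size_t nr_frags,
			 unsigned int mtu)
{
	size_t i;

	if (gso_size != GSO_BY_FRAGS)
		return hlen + gso_size <= mtu;

	for (i = 0; i < nr_frags; i++)
		if (hlen + frag_len[i] > mtu)
			return false;

	return true;
}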
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9a9a8ed..db218a1 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -2,8 +2,8 @@
#define __NET_ACT_API_H
/*
- * Public police action API for classifiers/qdiscs
- */
+ * Public action API for classifiers/qdiscs
+*/
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
@@ -76,6 +76,16 @@ static inline void tcf_lastuse_update(struct tcf_t *tm)
if (tm->lastuse != now)
tm->lastuse = now;
+ if (unlikely(!tm->firstuse))
+ tm->firstuse = now;
+}
+
+static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
+{
+ dtm->install = jiffies_to_clock_t(jiffies - stm->install);
+ dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
+ dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
+ dtm->expires = jiffies_to_clock_t(stm->expires);
}
struct tc_action {
@@ -97,7 +107,8 @@ struct tc_action_ops {
char kind[IFNAMSIZ];
__u32 type; /* TBD to match kind */
struct module *owner;
- int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);
+ int (*act)(struct sk_buff *, const struct tc_action *,
+ struct tcf_result *);
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
void (*cleanup)(struct tc_action *, int bind);
int (*lookup)(struct net *, struct tc_action *, u32);
@@ -115,8 +126,8 @@ struct tc_action_net {
};
static inline
-int tc_action_net_init(struct tc_action_net *tn, const struct tc_action_ops *ops,
- unsigned int mask)
+int tc_action_net_init(struct tc_action_net *tn,
+ const struct tc_action_ops *ops, unsigned int mask)
{
int err = 0;
@@ -159,7 +170,8 @@ static inline int tcf_hash_release(struct tc_action *a, bool bind)
}
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
-int tcf_unregister_action(struct tc_action_ops *a, struct pernet_operations *ops);
+int tcf_unregister_action(struct tc_action_ops *a,
+ struct pernet_operations *ops);
int tcf_action_destroy(struct list_head *actions, int bind);
int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
struct tcf_result *res);
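
tcf_tm_dump() converts the stored jiffies timestamps into ages expressed in clock_t ticks before they reach user space. A compile-only model of that conversion; the HZ and USER_HZ values are chosen purely for illustration:

#include <stdint.h>

#define HZ	250	/* assumed kernel tick rate for this model */
#define USER_HZ	100	/* clock_t ticks exposed to user space */

/* Model of jiffies_to_clock_t(): scale kernel ticks to USER_HZ ticks. */
static uint64_t jiffies_to_clock_t(uint64_t j)
{
	return j * USER_HZ / HZ;
}

struct tcf_t_model {
	uint64_t install;
	uint64_t lastuse;
	uint64_t firstuse;
	uint64_t expires;
};

/* Mirror of tcf_tm_dump(): stored values are jiffies timestamps, the
 * dumped values are ages ("how long ago") in clock_t units. */
static void tm_dump(struct tcf_t_model *dtm, const struct tcf_t_model *stm,
		    uint64_t jiffies_now)
{
	dtm->install  = jiffies_to_clock_t(jiffies_now - stm->install);
	dtm->lastuse  = jiffies_to_clock_t(jiffies_now - stm->lastuse);
	dtm->firstuse = jiffies_to_clock_t(jiffies_now - stm->firstuse);
	dtm->expires  = jiffies_to_clock_t(stm->expires);
}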
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 17c3d37..20b3087 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -26,6 +26,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_TRAILER,
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_BRCM,
+ DSA_TAG_LAST, /* MUST BE LAST */
};
#define DSA_MAX_SWITCHES 4
@@ -58,12 +59,11 @@ struct dsa_chip_data {
struct device_node *port_dn[DSA_MAX_PORTS];
/*
- * An array (with nr_chips elements) of which element [a]
- * indicates which port on this switch should be used to
- * send packets to that are destined for switch a. Can be
- * NULL if there is only one switch chip.
+ * An array of which element [a] indicates which port on this
+ * switch should be used to send packets to that are destined
+ * for switch a. Can be NULL if there is only one switch chip.
*/
- s8 *rtable;
+ s8 rtable[DSA_MAX_SWITCHES];
};
struct dsa_platform_data {
@@ -85,6 +85,17 @@ struct dsa_platform_data {
struct packet_type;
struct dsa_switch_tree {
+ struct list_head list;
+
+ /* Tree identifier */
+ u32 tree;
+
+ /* Number of switches attached to this tree */
+ struct kref refcount;
+
+ /* Has this tree been applied to the hardware? */
+ bool applied;
+
/*
* Configuration data for the platform device that owns
* this dsa switch tree instance.
@@ -100,12 +111,12 @@ struct dsa_switch_tree {
struct net_device *dev,
struct packet_type *pt,
struct net_device *orig_dev);
- enum dsa_tag_protocol tag_protocol;
/*
* Original copy of the master netdev ethtool_ops
*/
struct ethtool_ops master_ethtool_ops;
+ const struct ethtool_ops *master_orig_ethtool_ops;
/*
* The switch and port to which the CPU is attached.
@@ -117,6 +128,17 @@ struct dsa_switch_tree {
* Data for the individual switch chips.
*/
struct dsa_switch *ds[DSA_MAX_SWITCHES];
+
+ /*
+ * Tagging protocol operations for adding and removing an
+ * encapsulation tag.
+ */
+ const struct dsa_device_ops *tag_ops;
+};
+
+struct dsa_port {
+ struct net_device *netdev;
+ struct device_node *dn;
};
struct dsa_switch {
@@ -144,6 +166,13 @@ struct dsa_switch {
*/
struct dsa_switch_driver *drv;
+ /*
+ * An array of which element [a] indicates which port on this
+ * switch should be used to send packets to that are destined
+ * for switch a. Can be NULL if there is only one switch chip.
+ */
+ s8 rtable[DSA_MAX_SWITCHES];
+
#ifdef CONFIG_NET_DSA_HWMON
/*
* Hardware monitoring information
@@ -153,13 +182,19 @@ struct dsa_switch {
#endif
/*
+ * The lower device this switch uses to talk to the host
+ */
+ struct net_device *master_netdev;
+
+ /*
* Slave mii_bus and devices for the individual ports.
*/
u32 dsa_port_mask;
+ u32 cpu_port_mask;
u32 enabled_port_mask;
u32 phys_mii_mask;
+ struct dsa_port ports[DSA_MAX_PORTS];
struct mii_bus *slave_mii_bus;
- struct net_device *ports[DSA_MAX_PORTS];
};
static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
@@ -174,7 +209,7 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
- return ds->enabled_port_mask & (1 << p) && ds->ports[p];
+ return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev;
}
static inline u8 dsa_upstream_port(struct dsa_switch *ds)
@@ -190,7 +225,7 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
if (dst->cpu_switch == ds->index)
return dst->cpu_port;
else
- return ds->cd->rtable[dst->cpu_switch];
+ return ds->rtable[dst->cpu_switch];
}
struct switchdev_trans;
@@ -344,4 +379,7 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
{
return dst->rcv != NULL;
}
+
+void dsa_unregister_switch(struct dsa_switch *ds);
+int dsa_register_switch(struct dsa_switch *ds, struct device_node *np);
#endif
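
With the routing table moved from dsa_chip_data into the switch itself, dsa_upstream_port() now answers "which port leads towards the CPU" from ds->rtable. A small model of that lookup, with types and names simplified:

#define DSA_MAX_SWITCHES 4

struct switch_model {
	int index;				/* this switch's index in the tree */
	signed char rtable[DSA_MAX_SWITCHES];	/* port towards switch [a] */
};

/* Model of dsa_upstream_port(): on the switch that hosts the CPU port,
 * frames for the host leave through that port; on every other switch
 * they leave through the DSA port that leads towards the CPU switch. */
static int upstream_port(const struct switch_model *ds,
			 int cpu_switch, int cpu_port)
{
	if (cpu_switch == ds->index)
		return cpu_port;
	return ds->rtable[cpu_switch];
}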
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 59160de..456e4a6 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -17,7 +17,8 @@ struct fib_rule {
u32 flags;
u32 table;
u8 action;
- /* 3 bytes hole, try to use */
+ u8 l3mdev;
+ /* 2 bytes hole, try to use */
u32 target;
__be64 tun_id;
struct fib_rule __rcu *ctarget;
@@ -36,6 +37,7 @@ struct fib_lookup_arg {
void *lookup_ptr;
void *result;
struct fib_rule *rule;
+ u32 table;
int flags;
#define FIB_LOOKUP_NOREF 1
#define FIB_LOOKUP_IGNORE_LINKSTATE 2
@@ -89,7 +91,8 @@ struct fib_rules_ops {
[FRA_TABLE] = { .type = NLA_U32 }, \
[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
[FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
- [FRA_GOTO] = { .type = NLA_U32 }
+ [FRA_GOTO] = { .type = NLA_U32 }, \
+ [FRA_L3MDEV] = { .type = NLA_U8 }
static inline void fib_rule_get(struct fib_rule *rule)
{
@@ -102,6 +105,20 @@ static inline void fib_rule_put(struct fib_rule *rule)
kfree_rcu(rule, rcu);
}
+#ifdef CONFIG_NET_L3_MASTER_DEV
+static inline u32 fib_rule_get_table(struct fib_rule *rule,
+ struct fib_lookup_arg *arg)
+{
+ return rule->l3mdev ? arg->table : rule->table;
+}
+#else
+static inline u32 fib_rule_get_table(struct fib_rule *rule,
+ struct fib_lookup_arg *arg)
+{
+ return rule->table;
+}
+#endif
+
static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
{
if (nla[FRA_TABLE])
@@ -117,4 +134,7 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
+
+int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh);
+int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh);
#endif
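
fib_rule_get_table() is the crux of the FRA_L3MDEV change: an l3mdev rule carries no table of its own, so the table to search comes from the VRF device via the lookup argument. A minimal model, assuming the caller has already resolved the device's table (with iproute2 such a rule would typically be installed as something like "ip rule add l3mdev pref 1000", though that command form is an assumption here):

#include <stdint.h>

struct rule_model {
	uint8_t	 l3mdev;	/* FRA_L3MDEV: 1 = defer to the device's table */
	uint32_t table;		/* FRA_TABLE: fixed table otherwise */
};

/* Model of fib_rule_get_table(): an l3mdev rule has no table of its
 * own; the table comes from the VRF device that owns the flow and is
 * handed in through the fib_lookup_arg. */
static uint32_t rule_table(const struct rule_model *rule, uint32_t l3mdev_table)
{
	return rule->l3mdev ? l3mdev_table : rule->table;
}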
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 610cd39..231e121 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -33,10 +33,12 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);
-int gnet_stats_copy_basic(struct gnet_dump *d,
+int gnet_stats_copy_basic(const seqcount_t *running,
+ struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+void __gnet_stats_copy_basic(const seqcount_t *running,
+ struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
@@ -52,13 +54,15 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock, struct nlattr *opt);
+ spinlock_t *stats_lock,
+ seqcount_t *running, struct nlattr *opt);
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est64 *rate_est);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock, struct nlattr *opt);
+ spinlock_t *stats_lock,
+ seqcount_t *running, struct nlattr *opt);
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est64 *rate_est);
#endif
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 374388d..34f33eb 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -11,6 +11,8 @@
#ifndef _NET_L3MDEV_H_
#define _NET_L3MDEV_H_
+#include <net/fib_rules.h>
+
/**
* struct l3mdev_ops - l3mdev operations
*
@@ -41,6 +43,9 @@ struct l3mdev_ops {
#ifdef CONFIG_NET_L3_MASTER_DEV
+int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
+ struct fib_lookup_arg *arg);
+
int l3mdev_master_ifindex_rcu(const struct net_device *dev);
static inline int l3mdev_master_ifindex(struct net_device *dev)
{
@@ -236,6 +241,13 @@ struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
{
return skb;
}
+
+static inline
+int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
+ struct fib_lookup_arg *arg)
+{
+ return 1;
+}
#endif
#endif /* _NET_L3MDEV_H_ */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 62d5531..9f35819 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -29,13 +29,6 @@ enum qdisc_state_t {
__QDISC_STATE_THROTTLED,
};
-/*
- * following bits are only changed while qdisc lock is held
- */
-enum qdisc___state_t {
- __QDISC___STATE_RUNNING = 1,
-};
-
struct qdisc_size_table {
struct rcu_head rcu;
struct list_head list;
@@ -70,30 +63,23 @@ struct Qdisc {
struct list_head list;
u32 handle;
u32 parent;
- int (*reshape_fail)(struct sk_buff *skb,
- struct Qdisc *q);
-
void *u32_node;
- /* This field is deprecated, but it is still used by CBQ
- * and it will live until better solution will be invented.
- */
- struct Qdisc *__parent;
struct netdev_queue *dev_queue;
struct gnet_stats_rate_est64 rate_est;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct Qdisc *next_sched;
- struct sk_buff *gso_skb;
/*
* For performance sake on SMP, we put highly modified fields at the end
*/
+ struct Qdisc *next_sched ____cacheline_aligned_in_smp;
+ struct sk_buff *gso_skb;
unsigned long state;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
- unsigned int __state;
+ seqcount_t running;
struct gnet_stats_queue qstats;
struct rcu_head rcu_head;
int padded;
@@ -104,20 +90,24 @@ struct Qdisc {
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
- return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
+ return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc_is_running(qdisc))
return false;
- qdisc->__state |= __QDISC___STATE_RUNNING;
+ /* Variant of write_seqcount_begin() telling lockdep a trylock
+ * was attempted.
+ */
+ raw_write_seqcount_begin(&qdisc->running);
+ seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
return true;
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
- qdisc->__state &= ~__QDISC___STATE_RUNNING;
+ write_seqcount_end(&qdisc->running);
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -189,7 +179,6 @@ struct Qdisc_ops {
int (*enqueue)(struct sk_buff *, struct Qdisc *);
struct sk_buff * (*dequeue)(struct Qdisc *);
struct sk_buff * (*peek)(struct Qdisc *);
- unsigned int (*drop)(struct Qdisc *);
int (*init)(struct Qdisc *, struct nlattr *arg);
void (*reset)(struct Qdisc *);
@@ -322,6 +311,14 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
return qdisc_lock(root);
}
+static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+{
+ struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+ ASSERT_RTNL();
+ return &root->running;
+}
+
static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
@@ -665,22 +662,6 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
return __qdisc_queue_drop_head(sch, &sch->q);
}
-static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
- struct sk_buff_head *list)
-{
- struct sk_buff *skb = __skb_dequeue_tail(list);
-
- if (likely(skb != NULL))
- qdisc_qstats_backlog_dec(sch, skb);
-
- return skb;
-}
-
-static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
-{
- return __qdisc_dequeue_tail(sch, &sch->q);
-}
-
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
return skb_peek(&sch->q);
@@ -751,25 +732,6 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
return old;
}
-static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
- struct sk_buff_head *list)
-{
- struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
-
- if (likely(skb != NULL)) {
- unsigned int len = qdisc_pkt_len(skb);
- kfree_skb(skb);
- return len;
- }
-
- return 0;
-}
-
-static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
-{
- return __qdisc_queue_drop(sch, &sch->q);
-}
-
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
kfree_skb(skb);
@@ -778,22 +740,6 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_DROP;
}
-static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
-{
- qdisc_qstats_drop(sch);
-
-#ifdef CONFIG_NET_CLS_ACT
- if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
- goto drop;
-
- return NET_XMIT_SUCCESS;
-
-drop:
-#endif
- kfree_skb(skb);
- return NET_XMIT_DROP;
-}
-
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
long it will take to send a packet given its size.
*/
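
The qdisc __state bit is replaced by a seqcount used as a busy flag: the count is odd while the qdisc is being run, so qdisc_run_begin() is effectively a trylock. A userspace model of the idea; it deliberately leaves out the memory barriers and lockdep annotations that raw_write_seqcount_begin() and seqcount_acquire() provide in the kernel:

#include <stdbool.h>

struct run_seq {
	unsigned int seq;
};

/* "Running" is simply "the sequence count is odd". */
static bool q_is_running(const struct run_seq *s)
{
	return s->seq & 1;
}

static bool q_run_begin(struct run_seq *s)	/* trylock semantics */
{
	if (q_is_running(s))
		return false;
	s->seq++;		/* now odd: owned */
	return true;
}

static void q_run_end(struct run_seq *s)
{
	s->seq++;		/* back to even: released */
}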
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index b392ac8..632e205 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -186,6 +186,10 @@ void sctp_assocs_proc_exit(struct net *net);
int sctp_remaddr_proc_init(struct net *net);
void sctp_remaddr_proc_exit(struct net *net);
+/*
+ * sctp/offload.c
+ */
+int sctp_offload_init(void);
/*
* Module global variables
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 16b013a..83c5ec5 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -566,6 +566,9 @@ struct sctp_chunk {
/* This points to the sk_buff containing the actual data. */
struct sk_buff *skb;
+ /* In case of GSO packets, this will store the head one */
+ struct sk_buff *head_skb;
+
/* These are the SCTP headers by reverse order in a packet.
* Note that some of these may happen more than once. In that
* case, we point at the "current" one, whatever that means
@@ -696,6 +699,8 @@ struct sctp_packet {
size_t overhead;
/* This is the total size of all chunks INCLUDING padding. */
size_t size;
+ /* This is the maximum size this packet may have */
+ size_t max_size;
/* The packet is destined for this transport address.
* The function we finally use to pass down to the next lower
diff --git a/include/net/tc_act/tc_defact.h b/include/net/tc_act/tc_defact.h
index 9763dcb..ab9b5d6 100644
--- a/include/net/tc_act/tc_defact.h
+++ b/include/net/tc_act/tc_defact.h
@@ -5,8 +5,8 @@
struct tcf_defact {
struct tcf_common common;
- u32 tcfd_datalen;
- void *tcfd_defdata;
+ u32 tcfd_datalen;
+ void *tcfd_defdata;
};
#define to_defact(a) \
container_of(a->priv, struct tcf_defact, common)
diff --git a/include/net/udp.h b/include/net/udp.h
index ae07f37..8894d71 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -160,8 +160,8 @@ void udp_set_csum(bool nocheck, struct sk_buff *skb,
static inline void udp_csum_pull_header(struct sk_buff *skb)
{
- if (skb->ip_summed == CHECKSUM_NONE)
- skb->csum = csum_partial(udp_hdr(skb), sizeof(struct udphdr),
+ if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
+ skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
skb->csum);
skb_pull_rcsum(skb, sizeof(struct udphdr));
UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
diff --git a/include/soc/fsl/qe/immap_qe.h b/include/soc/fsl/qe/immap_qe.h
index bedbff8..c76ef30 100644
--- a/include/soc/fsl/qe/immap_qe.h
+++ b/include/soc/fsl/qe/immap_qe.h
@@ -159,10 +159,7 @@ struct spi {
/* SI */
struct si1 {
- __be16 siamr1; /* SI1 TDMA mode register */
- __be16 sibmr1; /* SI1 TDMB mode register */
- __be16 sicmr1; /* SI1 TDMC mode register */
- __be16 sidmr1; /* SI1 TDMD mode register */
+ __be16 sixmr1[4]; /* SI1 TDMx (x = A B C D) mode register */
u8 siglmr1_h; /* SI1 global mode register high */
u8 res0[0x1];
u8 sicmdr1_h; /* SI1 command register high */
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index 33b29ea..70339d7 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -80,6 +80,8 @@ enum qe_clock {
QE_CLK22, /* Clock 22 */
QE_CLK23, /* Clock 23 */
QE_CLK24, /* Clock 24 */
+ QE_RSYNC_PIN, /* RSYNC from pin */
+ QE_TSYNC_PIN, /* TSYNC from pin */
QE_CLK_DUMMY
};
@@ -242,6 +244,22 @@ static inline int qe_alive_during_sleep(void)
#define qe_muram_addr cpm_muram_addr
#define qe_muram_offset cpm_muram_offset
+#define qe_setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define qe_clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
+
+#define qe_setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define qe_clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
+
+#define qe_setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define qe_clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
+
+#define qe_clrsetbits32(addr, clear, set) \
+ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
+#define qe_clrsetbits16(addr, clear, set) \
+ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
+#define qe_clrsetbits8(addr, clear, set) \
+ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
+
/* Structure that defines QE firmware binary files.
*
* See Documentation/powerpc/qe_firmware.txt for a description of these
@@ -639,6 +657,7 @@ struct ucc_slow_pram {
#define UCC_SLOW_GUMR_L_MODE_QMC 0x00000002
/* General UCC FAST Mode Register */
+#define UCC_FAST_GUMR_LOOPBACK 0x40000000
#define UCC_FAST_GUMR_TCI 0x20000000
#define UCC_FAST_GUMR_TRX 0x10000000
#define UCC_FAST_GUMR_TTX 0x08000000
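
The new qe_clrsetbits*() macros fold a read-modify-write of a big-endian register into one statement. A runnable userspace model of the same arithmetic, using a made-up SIxMR value and the SDM field encoding from the new qe_tdm.h header later in this series:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of qe_clrsetbits16(): clear one field and set
 * another in a single read-modify-write expression. */
static uint16_t clrsetbits16(uint16_t reg, uint16_t clear, uint16_t set)
{
	return (reg & ~clear) | set;
}

int main(void)
{
	uint16_t simr = 0x0c34;		/* hypothetical SIxMR value */

	/* Switch the SDM field (0x0c00) to internal loopback (0x0800). */
	simr = clrsetbits16(simr, 0x0c00, 0x0800);
	printf("0x%04x\n", simr);	/* prints 0x0834 */
	return 0;
}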
diff --git a/include/soc/fsl/qe/qe_tdm.h b/include/soc/fsl/qe/qe_tdm.h
new file mode 100644
index 0000000..a1664b6
--- /dev/null
+++ b/include/soc/fsl/qe/qe_tdm.h
@@ -0,0 +1,94 @@
+/*
+ * Internal header file for QE TDM mode routines.
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version
+ */
+
+#ifndef _QE_TDM_H_
+#define _QE_TDM_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+/* SI RAM entries */
+#define SIR_LAST 0x0001
+#define SIR_BYTE 0x0002
+#define SIR_CNT(x) ((x) << 2)
+#define SIR_CSEL(x) ((x) << 5)
+#define SIR_SGS 0x0200
+#define SIR_SWTR 0x4000
+#define SIR_MCC 0x8000
+#define SIR_IDLE 0
+
+/* SIxMR fields */
+#define SIMR_SAD(x) ((x) << 12)
+#define SIMR_SDM_NORMAL 0x0000
+#define SIMR_SDM_INTERNAL_LOOPBACK 0x0800
+#define SIMR_SDM_MASK 0x0c00
+#define SIMR_CRT 0x0040
+#define SIMR_SL 0x0020
+#define SIMR_CE 0x0010
+#define SIMR_FE 0x0008
+#define SIMR_GM 0x0004
+#define SIMR_TFSD(n) (n)
+#define SIMR_RFSD(n) ((n) << 8)
+
+enum tdm_ts_t {
+ TDM_TX_TS,
+ TDM_RX_TS
+};
+
+enum tdm_framer_t {
+ TDM_FRAMER_T1,
+ TDM_FRAMER_E1
+};
+
+enum tdm_mode_t {
+ TDM_INTERNAL_LOOPBACK,
+ TDM_NORMAL
+};
+
+struct si_mode_info {
+ u8 simr_rfsd;
+ u8 simr_tfsd;
+ u8 simr_crt;
+ u8 simr_sl;
+ u8 simr_ce;
+ u8 simr_fe;
+ u8 simr_gm;
+};
+
+struct ucc_tdm_info {
+ struct ucc_fast_info uf_info;
+ struct si_mode_info si_info;
+};
+
+struct ucc_tdm {
+ u16 tdm_port; /* port for this tdm:TDMA,TDMB */
+ u32 siram_entry_id;
+ u16 __iomem *siram;
+ struct si1 __iomem *si_regs;
+ enum tdm_framer_t tdm_framer_type;
+ enum tdm_mode_t tdm_mode;
+ u8 num_of_ts; /* the number of timeslots in this tdm frame */
+ u32 tx_ts_mask; /* tx time slot mask */
+ u32 rx_ts_mask; /* rx time slot mask */
+};
+
+int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
+ struct ucc_tdm_info *ut_info);
+void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info);
+#endif
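
The SIR_* macros build 16-bit SI RAM routing entries out of a channel select, a count, and flag bits. A small sketch of composing one entry; whether the count field is stored as N or N-1 is an assumption of this example, not something the header states:

#include <stdint.h>
#include <stdio.h>

/* Mirrors of the SI RAM macros defined in qe_tdm.h above. */
#define SIR_LAST	0x0001
#define SIR_BYTE	0x0002
#define SIR_CNT(x)	((x) << 2)
#define SIR_CSEL(x)	((x) << 5)

int main(void)
{
	/* Hypothetical entry: channel select 1, 8 time slots (assumed to
	 * be encoded as count - 1), last entry of the routing table. */
	uint16_t entry = SIR_CSEL(1) | SIR_CNT(8 - 1) | SIR_LAST;

	printf("SI RAM entry = 0x%04x\n", entry);	/* prints 0x003d */
	return 0;
}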
diff --git a/include/soc/fsl/qe/ucc.h b/include/soc/fsl/qe/ucc.h
index 894f14c..6bbbb59 100644
--- a/include/soc/fsl/qe/ucc.h
+++ b/include/soc/fsl/qe/ucc.h
@@ -41,6 +41,10 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num);
int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
enum comm_dir mode);
+int ucc_set_tdm_rxtx_clk(unsigned int tdm_num, enum qe_clock clock,
+ enum comm_dir mode);
+int ucc_set_tdm_rxtx_sync(unsigned int tdm_num, enum qe_clock clock,
+ enum comm_dir mode);
int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask);
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index df8ea79..3ee9e7c 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -21,19 +21,37 @@
#include <soc/fsl/qe/ucc.h>
-/* Receive BD's status */
+/* Receive BD's status and length*/
#define R_E 0x80000000 /* buffer empty */
#define R_W 0x20000000 /* wrap bit */
#define R_I 0x10000000 /* interrupt on reception */
#define R_L 0x08000000 /* last */
#define R_F 0x04000000 /* first */
-/* transmit BD's status */
+/* transmit BD's status and length*/
#define T_R 0x80000000 /* ready bit */
#define T_W 0x20000000 /* wrap bit */
#define T_I 0x10000000 /* interrupt on completion */
#define T_L 0x08000000 /* last */
+/* Receive BD's status */
+#define R_E_S 0x8000 /* buffer empty */
+#define R_W_S 0x2000 /* wrap bit */
+#define R_I_S 0x1000 /* interrupt on reception */
+#define R_L_S 0x0800 /* last */
+#define R_F_S 0x0400 /* first */
+#define R_CM_S 0x0200 /* continuous mode */
+#define R_CR_S 0x0004 /* crc */
+#define R_OV_S 0x0002 /* overrun */
+
+/* transmit BD's status */
+#define T_R_S 0x8000 /* ready bit */
+#define T_W_S 0x2000 /* wrap bit */
+#define T_I_S 0x1000 /* interrupt on completion */
+#define T_L_S 0x0800 /* last */
+#define T_TC_S 0x0400 /* crc */
+#define T_TM_S 0x0200 /* continuous mode */
+
/* Rx Data buffer must be 4 bytes aligned in most cases */
#define UCC_FAST_RX_ALIGN 4
#define UCC_FAST_MRBLR_ALIGNMENT 4
@@ -118,9 +136,12 @@ enum ucc_fast_transparent_tcrc {
/* Fast UCC initialization structure */
struct ucc_fast_info {
int ucc_num;
+ int tdm_num;
enum qe_clock rx_clock;
enum qe_clock tx_clock;
- u32 regs;
+ enum qe_clock rx_sync;
+ enum qe_clock tx_sync;
+ resource_size_t regs;
int irq;
u32 uccm_mask;
int bd_mem_part;
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 620c8a5..14404b3 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -50,6 +50,7 @@ enum {
FRA_FWMASK, /* mask for netfilter mark */
FRA_OIFNAME,
FRA_PAD,
+ FRA_L3MDEV, /* iif or oif is l3mdev goto its table */
__FRA_MAX
};
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index f4297c8..5702e93 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -115,8 +115,8 @@ struct tc_police {
__u32 mtu;
struct tc_ratespec rate;
struct tc_ratespec peakrate;
- int refcnt;
- int bindcnt;
+ int refcnt;
+ int bindcnt;
__u32 capab;
};
@@ -124,10 +124,11 @@ struct tcf_t {
__u64 install;
__u64 lastuse;
__u64 expires;
+ __u64 firstuse;
};
struct tc_cnt {
- int refcnt;
+ int refcnt;
int bindcnt;
};
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index ec32293..0da0e3a 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -35,6 +35,7 @@
#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
+#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice */
#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -73,6 +74,8 @@ struct virtio_net_config {
* Legal values are between 1 and 0x8000
*/
__u16 max_virtqueue_pairs;
+ /* Default maximum transmit unit advice */
+ __u16 mtu;
} __attribute__((packed));
/*
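
VIRTIO_NET_F_MTU adds an advisory mtu field to the device config; a driver may only look at it when the feature was negotiated. A compile-only model of that rule (the 1500-byte fallback is just the usual Ethernet default, not something this header mandates):

#include <stdint.h>

#define VIRTIO_NET_F_MTU 3

struct virtio_net_cfg_model {
	uint16_t mtu;		/* valid only with VIRTIO_NET_F_MTU */
};

/* The mtu field in config space is advice and may only be read if
 * VIRTIO_NET_F_MTU was negotiated. */
static uint16_t initial_mtu(uint64_t features,
			    const struct virtio_net_cfg_model *cfg)
{
	if (features & (1ULL << VIRTIO_NET_F_MTU))
		return cfg->mtu;
	return 1500;		/* fall back to the Ethernet default */
}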
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 780089d..d020299 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -627,20 +627,9 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
return err < 0 ? NET_XMIT_DROP : err;
}
-static struct lock_class_key bt_tx_busylock;
-static struct lock_class_key bt_netdev_xmit_lock_key;
-
-static void bt_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &bt_netdev_xmit_lock_key);
-}
-
static int bt_dev_init(struct net_device *dev)
{
- netdev_for_each_tx_queue(dev, bt_set_lockdep_class_one, NULL);
- dev->qdisc_tx_busylock = &bt_tx_busylock;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 904ff43..c43c9d2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -139,6 +139,7 @@
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/sctp.h>
+#include <linux/crash_dump.h>
#include "net-sysfs.h"
@@ -2249,11 +2250,12 @@ EXPORT_SYMBOL(netif_set_real_num_rx_queues);
*/
int netif_get_num_default_rss_queues(void)
{
- return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
+ return is_kdump_kernel() ?
+ 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
-static inline void __netif_reschedule(struct Qdisc *q)
+static void __netif_reschedule(struct Qdisc *q)
{
struct softnet_data *sd;
unsigned long flags;
@@ -3075,7 +3077,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
- * This permits __QDISC___STATE_RUNNING owner to get the lock more
+ * This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
contended = qdisc_is_running(q);
@@ -3898,22 +3900,14 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
root_lock = qdisc_lock(q);
- if (spin_trylock(root_lock)) {
- smp_mb__before_atomic();
- clear_bit(__QDISC_STATE_SCHED,
- &q->state);
- qdisc_run(q);
- spin_unlock(root_lock);
- } else {
- if (!test_bit(__QDISC_STATE_DEACTIVATED,
- &q->state)) {
- __netif_reschedule(q);
- } else {
- smp_mb__before_atomic();
- clear_bit(__QDISC_STATE_SCHED,
- &q->state);
- }
- }
+ spin_lock(root_lock);
+ /* We need to make sure head->next_sched is read
+ * before clearing __QDISC_STATE_SCHED
+ */
+ smp_mb__before_atomic();
+ clear_bit(__QDISC_STATE_SCHED, &q->state);
+ qdisc_run(q);
+ spin_unlock(root_lock);
}
}
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f403481..9774898 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -89,6 +89,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
[NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
[NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial",
+ [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation",
[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
[NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 840aceb..98298b1 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -173,7 +173,8 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
EXPORT_SYMBOL_GPL(fib_rules_unregister);
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
- struct flowi *fl, int flags)
+ struct flowi *fl, int flags,
+ struct fib_lookup_arg *arg)
{
int ret = 0;
@@ -189,6 +190,9 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
goto out;
+ if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
+ goto out;
+
ret = ops->match(rule, fl, flags);
out:
return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -204,7 +208,7 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
- if (!fib_rule_match(rule, ops, fl, flags))
+ if (!fib_rule_match(rule, ops, fl, flags, arg))
continue;
if (rule->action == FR_ACT_GOTO) {
@@ -265,7 +269,7 @@ errout:
return err;
}
-static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
+int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct fib_rule_hdr *frh = nlmsg_data(nlh);
@@ -336,6 +340,14 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
if (tb[FRA_TUN_ID])
rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
+ if (tb[FRA_L3MDEV]) {
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
+ if (rule->l3mdev != 1)
+#endif
+ goto errout_free;
+ }
+
rule->action = frh->action;
rule->flags = frh->flags;
rule->table = frh_get_table(frh, tb);
@@ -371,6 +383,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
} else if (rule->action == FR_ACT_GOTO)
goto errout_free;
+ if (rule->l3mdev && rule->table)
+ goto errout_free;
+
err = ops->configure(rule, skb, frh, tb);
if (err < 0)
goto errout_free;
@@ -424,8 +439,9 @@ errout:
rules_ops_put(ops);
return err;
}
+EXPORT_SYMBOL_GPL(fib_nl_newrule);
-static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
+int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct fib_rule_hdr *frh = nlmsg_data(nlh);
@@ -483,6 +499,10 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
(rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
continue;
+ if (tb[FRA_L3MDEV] &&
+ (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
+ continue;
+
if (!ops->compare(rule, frh, tb))
continue;
@@ -536,6 +556,7 @@ errout:
rules_ops_put(ops);
return err;
}
+EXPORT_SYMBOL_GPL(fib_nl_delrule);
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
struct fib_rule *rule)
@@ -607,7 +628,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
(rule->target &&
nla_put_u32(skb, FRA_GOTO, rule->target)) ||
(rule->tun_id &&
- nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)))
+ nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
+ (rule->l3mdev &&
+ nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)))
goto nla_put_failure;
if (rule->suppress_ifgroup != -1) {
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 4573d81..cad8e79 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -84,6 +84,7 @@ struct gen_estimator
struct gnet_stats_basic_packed *bstats;
struct gnet_stats_rate_est64 *rate_est;
spinlock_t *stats_lock;
+ seqcount_t *running;
int ewma_log;
u32 last_packets;
unsigned long avpps;
@@ -121,26 +122,28 @@ static void est_timer(unsigned long arg)
unsigned long rate;
u64 brate;
- spin_lock(e->stats_lock);
+ if (e->stats_lock)
+ spin_lock(e->stats_lock);
read_lock(&est_lock);
if (e->bstats == NULL)
goto skip;
- __gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);
+ __gnet_stats_copy_basic(e->running, &b, e->cpu_bstats, e->bstats);
brate = (b.bytes - e->last_bytes)<<(7 - idx);
e->last_bytes = b.bytes;
e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
- e->rate_est->bps = (e->avbps+0xF)>>5;
+ WRITE_ONCE(e->rate_est->bps, (e->avbps + 0xF) >> 5);
rate = b.packets - e->last_packets;
rate <<= (7 - idx);
e->last_packets = b.packets;
e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
- e->rate_est->pps = (e->avpps + 0xF) >> 5;
+ WRITE_ONCE(e->rate_est->pps, (e->avpps + 0xF) >> 5);
skip:
read_unlock(&est_lock);
- spin_unlock(e->stats_lock);
+ if (e->stats_lock)
+ spin_unlock(e->stats_lock);
}
if (!list_empty(&elist[idx].list))
@@ -194,6 +197,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
* @cpu_bstats: bstats per cpu
* @rate_est: rate estimator statistics
* @stats_lock: statistics lock
+ * @running: qdisc running seqcount
* @opt: rate estimator configuration TLV
*
* Creates a new rate estimator with &bstats as source and &rate_est
@@ -209,6 +213,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock,
+ seqcount_t *running,
struct nlattr *opt)
{
struct gen_estimator *est;
@@ -226,12 +231,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
if (est == NULL)
return -ENOBUFS;
- __gnet_stats_copy_basic(&b, cpu_bstats, bstats);
+ __gnet_stats_copy_basic(running, &b, cpu_bstats, bstats);
idx = parm->interval + 2;
est->bstats = bstats;
est->rate_est = rate_est;
est->stats_lock = stats_lock;
+ est->running = running;
est->ewma_log = parm->ewma_log;
est->last_bytes = b.bytes;
est->avbps = rate_est->bps<<5;
@@ -291,6 +297,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
* @cpu_bstats: bstats per cpu
* @rate_est: rate estimator statistics
* @stats_lock: statistics lock
+ * @running: qdisc running seqcount (might be NULL)
* @opt: rate estimator configuration TLV
*
* Replaces the configuration of a rate estimator by calling
@@ -301,10 +308,11 @@ EXPORT_SYMBOL(gen_kill_estimator);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock, struct nlattr *opt)
+ spinlock_t *stats_lock,
+ seqcount_t *running, struct nlattr *opt)
{
gen_kill_estimator(bstats, rate_est);
- return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
+ return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);
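
est_timer() keeps the rate as an exponentially weighted moving average in 5-bit fixed point: the byte delta is scaled by << (7 - idx) (the timer period is (HZ/4) << idx jiffies, so this performs both the divide by the period and the << 5), folded in with the ewma_log shift, and the final >> 5 undoes the fixed point. A runnable model of one estimator fed a constant load; the idx and ewma_log values are arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int idx = 3;		/* timer period is (HZ / 4) << idx jiffies */
	unsigned int ewma_log = 2;	/* smoothing factor from the config TLV */
	uint64_t last_bytes = 0, avbps = 0, bytes = 0;
	int i;

	for (i = 0; i < 8; i++) {
		uint64_t brate;

		bytes += 125000;	/* pretend 125 kB arrived this period */

		/* Byte delta scaled to bytes-per-second << 5 fixed point
		 * (the period is 2^idx / 4 seconds, so << (7 - idx) does
		 * both the divide and the << 5). */
		brate = (bytes - last_bytes) << (7 - idx);
		last_bytes = bytes;

		/* Exponentially weighted moving average. */
		avbps += (brate >> ewma_log) - (avbps >> ewma_log);

		/* The >> 5 undoes the fixed point, as in est_timer(). */
		printf("period %d: ~%llu bytes/sec\n", i,
		       (unsigned long long)((avbps + 0xF) >> 5));
	}
	return 0;
}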
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index be873e4..508e051 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -32,10 +32,11 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
return 0;
nla_put_failure:
+ if (d->lock)
+ spin_unlock_bh(d->lock);
kfree(d->xstats);
d->xstats = NULL;
d->xstats_len = 0;
- spin_unlock_bh(d->lock);
return -1;
}
@@ -66,15 +67,16 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
{
memset(d, 0, sizeof(*d));
- spin_lock_bh(lock);
- d->lock = lock;
if (type)
d->tail = (struct nlattr *)skb_tail_pointer(skb);
d->skb = skb;
d->compat_tc_stats = tc_stats_type;
d->compat_xstats = xstats_type;
d->padattr = padattr;
-
+ if (lock) {
+ d->lock = lock;
+ spin_lock_bh(lock);
+ }
if (d->tail)
return gnet_stats_copy(d, type, NULL, 0, padattr);
@@ -128,21 +130,29 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
}
void
-__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+__gnet_stats_copy_basic(const seqcount_t *running,
+ struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
{
+ unsigned int seq;
+
if (cpu) {
__gnet_stats_copy_basic_cpu(bstats, cpu);
- } else {
+ return;
+ }
+ do {
+ if (running)
+ seq = read_seqcount_begin(running);
bstats->bytes = b->bytes;
bstats->packets = b->packets;
- }
+ } while (running && read_seqcount_retry(running, seq));
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);
/**
* gnet_stats_copy_basic - copy basic statistics into statistic TLV
+ * @running: seqcount_t pointer
* @d: dumping handle
* @cpu: copy statistic per cpu
* @b: basic statistics
@@ -154,13 +164,14 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
* if the room in the socket buffer was not sufficient.
*/
int
-gnet_stats_copy_basic(struct gnet_dump *d,
+gnet_stats_copy_basic(const seqcount_t *running,
+ struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
{
struct gnet_stats_basic_packed bstats = {0};
- __gnet_stats_copy_basic(&bstats, cpu, b);
+ __gnet_stats_copy_basic(running, &bstats, cpu, b);
if (d->compat_tc_stats) {
d->tc_stats.bytes = bstats.bytes;
@@ -330,8 +341,9 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
return 0;
err_out:
+ if (d->lock)
+ spin_unlock_bh(d->lock);
d->xstats_len = 0;
- spin_unlock_bh(d->lock);
return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);
@@ -365,10 +377,11 @@ gnet_stats_finish_copy(struct gnet_dump *d)
return -1;
}
+ if (d->lock)
+ spin_unlock_bh(d->lock);
kfree(d->xstats);
d->xstats = NULL;
d->xstats_len = 0;
- spin_unlock_bh(d->lock);
return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f2b77e5..e7ec6d3 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -49,6 +49,7 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
+#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
@@ -3116,9 +3117,13 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
int hsize;
int size;
- len = head_skb->len - offset;
- if (len > mss)
- len = mss;
+ if (unlikely(mss == GSO_BY_FRAGS)) {
+ len = list_skb->len;
+ } else {
+ len = head_skb->len - offset;
+ if (len > mss)
+ len = mss;
+ }
hsize = skb_headlen(head_skb) - offset;
if (hsize < 0)
@@ -3438,6 +3443,7 @@ done:
NAPI_GRO_CB(skb)->same_flow = 1;
return 0;
}
+EXPORT_SYMBOL_GPL(skb_gro_receive);
void __init skb_init(void)
{
@@ -4378,6 +4384,8 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
thlen += inner_tcp_hdrlen(skb);
} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
thlen = tcp_hdrlen(skb);
+ } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) {
+ thlen = sizeof(struct sctphdr);
}
/* UFO sets gso_size to the size of the fragmentation
* payload, i.e. the size of the L4 (UDP) header is already
@@ -4387,6 +4395,38 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+/**
+ * skb_gso_validate_mtu - Return whether an skb, once segmented, fits a given MTU
+ *
+ * @skb: GSO skb
+ * @mtu: MTU to validate against
+ *
+ * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
+ * once split.
+ */
+bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const struct sk_buff *iter;
+ unsigned int hlen;
+
+ hlen = skb_gso_network_seglen(skb);
+
+ if (shinfo->gso_size != GSO_BY_FRAGS)
+ return hlen <= mtu;
+
+ /* Undo this so we can re-use header sizes */
+ hlen -= GSO_BY_FRAGS;
+
+ skb_walk_frags(skb, iter) {
+ if (hlen + skb_headlen(iter) > mtu)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
+
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
if (skb_cow(skb, skb_headroom(skb)) < 0) {
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index da06ed1..8af4ded 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,6 +1,6 @@
# the core
obj-$(CONFIG_NET_DSA) += dsa_core.o
-dsa_core-y += dsa.o slave.o
+dsa_core-y += dsa.o slave.o dsa2.o
# tagging formats
dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index eff5dfc..766d2a5 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -29,6 +29,33 @@
char dsa_driver_version[] = "0.1";
+static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ /* Just return the original SKB */
+ return skb;
+}
+
+static const struct dsa_device_ops none_ops = {
+ .xmit = dsa_slave_notag_xmit,
+ .rcv = NULL,
+};
+
+const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
+#ifdef CONFIG_NET_DSA_TAG_DSA
+ [DSA_TAG_PROTO_DSA] = &dsa_netdev_ops,
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+ [DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+ [DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops,
+#endif
+#ifdef CONFIG_NET_DSA_TAG_BRCM
+ [DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
+#endif
+ [DSA_TAG_PROTO_NONE] = &none_ops,
+};
/* switch driver registration ***********************************************/
static DEFINE_MUTEX(dsa_switch_drivers_mutex);
@@ -180,41 +207,100 @@ __ATTRIBUTE_GROUPS(dsa_hwmon);
#endif /* CONFIG_NET_DSA_HWMON */
/* basic switch operations **************************************************/
-static int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct net_device *master)
+int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
+ struct device_node *port_dn, int port)
{
- struct dsa_chip_data *cd = ds->cd;
- struct device_node *port_dn;
struct phy_device *phydev;
- int ret, port, mode;
+ int ret, mode;
+
+ if (of_phy_is_fixed_link(port_dn)) {
+ ret = of_phy_register_fixed_link(port_dn);
+ if (ret) {
+ dev_err(dev, "failed to register fixed PHY\n");
+ return ret;
+ }
+ phydev = of_phy_find_device(port_dn);
+
+ mode = of_get_phy_mode(port_dn);
+ if (mode < 0)
+ mode = PHY_INTERFACE_MODE_NA;
+ phydev->interface = mode;
+
+ genphy_config_init(phydev);
+ genphy_read_status(phydev);
+ if (ds->drv->adjust_link)
+ ds->drv->adjust_link(ds, port, phydev);
+ }
+
+ return 0;
+}
+
+static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev)
+{
+ struct device_node *port_dn;
+ int ret, port;
for (port = 0; port < DSA_MAX_PORTS; port++) {
if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
continue;
- port_dn = cd->port_dn[port];
- if (of_phy_is_fixed_link(port_dn)) {
- ret = of_phy_register_fixed_link(port_dn);
- if (ret) {
- netdev_err(master,
- "failed to register fixed PHY\n");
- return ret;
- }
- phydev = of_phy_find_device(port_dn);
+ port_dn = ds->ports[port].dn;
+ ret = dsa_cpu_dsa_setup(ds, dev, port_dn, port);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
- mode = of_get_phy_mode(port_dn);
- if (mode < 0)
- mode = PHY_INTERFACE_MODE_NA;
- phydev->interface = mode;
+const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol)
+{
+ const struct dsa_device_ops *ops;
+
+ if (tag_protocol >= DSA_TAG_LAST)
+ return ERR_PTR(-EINVAL);
+ ops = dsa_device_ops[tag_protocol];
+
+ if (!ops)
+ return ERR_PTR(-ENOPROTOOPT);
+
+ return ops;
+}
+
+int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds)
+{
+ struct net_device *master;
+ struct ethtool_ops *cpu_ops;
+
+ master = ds->dst->master_netdev;
+ if (ds->master_netdev)
+ master = ds->master_netdev;
+
+ cpu_ops = devm_kzalloc(ds->dev, sizeof(*cpu_ops), GFP_KERNEL);
+ if (!cpu_ops)
+ return -ENOMEM;
+
+ memcpy(&ds->dst->master_ethtool_ops, master->ethtool_ops,
+ sizeof(struct ethtool_ops));
+ ds->dst->master_orig_ethtool_ops = master->ethtool_ops;
+ memcpy(cpu_ops, &ds->dst->master_ethtool_ops,
+ sizeof(struct ethtool_ops));
+ dsa_cpu_port_ethtool_init(cpu_ops);
+ master->ethtool_ops = cpu_ops;
- genphy_config_init(phydev);
- genphy_read_status(phydev);
- if (ds->drv->adjust_link)
- ds->drv->adjust_link(ds, port, phydev);
- }
- }
return 0;
}
+void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds)
+{
+ struct net_device *master;
+
+ master = ds->dst->master_netdev;
+ if (ds->master_netdev)
+ master = ds->master_netdev;
+
+ master->ethtool_ops = ds->dst->master_orig_ethtool_ops;
+}
+
static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
{
struct dsa_switch_driver *drv = ds->drv;
@@ -243,6 +329,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
}
dst->cpu_switch = index;
dst->cpu_port = i;
+ ds->cpu_port_mask |= 1 << i;
} else if (!strcmp(name, "dsa")) {
ds->dsa_port_mask |= 1 << i;
} else {
@@ -267,37 +354,17 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
* switch.
*/
if (dst->cpu_switch == index) {
- switch (drv->tag_protocol) {
-#ifdef CONFIG_NET_DSA_TAG_DSA
- case DSA_TAG_PROTO_DSA:
- dst->rcv = dsa_netdev_ops.rcv;
- break;
-#endif
-#ifdef CONFIG_NET_DSA_TAG_EDSA
- case DSA_TAG_PROTO_EDSA:
- dst->rcv = edsa_netdev_ops.rcv;
- break;
-#endif
-#ifdef CONFIG_NET_DSA_TAG_TRAILER
- case DSA_TAG_PROTO_TRAILER:
- dst->rcv = trailer_netdev_ops.rcv;
- break;
-#endif
-#ifdef CONFIG_NET_DSA_TAG_BRCM
- case DSA_TAG_PROTO_BRCM:
- dst->rcv = brcm_netdev_ops.rcv;
- break;
-#endif
- case DSA_TAG_PROTO_NONE:
- break;
- default:
- ret = -ENOPROTOOPT;
+ dst->tag_ops = dsa_resolve_tag_protocol(drv->tag_protocol);
+ if (IS_ERR(dst->tag_ops)) {
+ ret = PTR_ERR(dst->tag_ops);
goto out;
}
- dst->tag_protocol = drv->tag_protocol;
+ dst->rcv = dst->tag_ops->rcv;
}
+ memcpy(ds->rtable, cd->rtable, sizeof(ds->rtable));
+
/*
* Do basic register setup.
*/
@@ -309,22 +376,25 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
if (ret < 0)
goto out;
- ds->slave_mii_bus = devm_mdiobus_alloc(parent);
- if (ds->slave_mii_bus == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- dsa_slave_mii_bus_init(ds);
-
- ret = mdiobus_register(ds->slave_mii_bus);
- if (ret < 0)
- goto out;
+ if (!ds->slave_mii_bus && drv->phy_read) {
+ ds->slave_mii_bus = devm_mdiobus_alloc(parent);
+ if (!ds->slave_mii_bus) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ dsa_slave_mii_bus_init(ds);
+ ret = mdiobus_register(ds->slave_mii_bus);
+ if (ret < 0)
+ goto out;
+ }
/*
* Create network devices for physical switch ports.
*/
for (i = 0; i < DSA_MAX_PORTS; i++) {
+ ds->ports[i].dn = cd->port_dn[i];
+
if (!(ds->enabled_port_mask & (1 << i)))
continue;
@@ -337,13 +407,17 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
}
/* Perform configuration of the CPU and DSA ports */
- ret = dsa_cpu_dsa_setup(ds, dst->master_netdev);
+ ret = dsa_cpu_dsa_setups(ds, parent);
if (ret < 0) {
netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n",
index);
ret = 0;
}
+ ret = dsa_cpu_port_ethtool_setup(ds);
+ if (ret)
+ return ret;
+
#ifdef CONFIG_NET_DSA_HWMON
/* If the switch provides a temperature sensor,
* register with hardware monitoring subsystem.
@@ -420,11 +494,21 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
return ds;
}
-static void dsa_switch_destroy(struct dsa_switch *ds)
+void dsa_cpu_dsa_destroy(struct device_node *port_dn)
{
- struct device_node *port_dn;
struct phy_device *phydev;
- struct dsa_chip_data *cd = ds->cd;
+
+ if (of_phy_is_fixed_link(port_dn)) {
+ phydev = of_phy_find_device(port_dn);
+ if (phydev) {
+ phy_device_free(phydev);
+ fixed_phy_unregister(phydev);
+ }
+ }
+}
+
+static void dsa_switch_destroy(struct dsa_switch *ds)
+{
int port;
#ifdef CONFIG_NET_DSA_HWMON
@@ -437,26 +521,25 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
if (!(ds->enabled_port_mask & (1 << port)))
continue;
- if (!ds->ports[port])
+ if (!ds->ports[port].netdev)
continue;
- dsa_slave_destroy(ds->ports[port]);
+ dsa_slave_destroy(ds->ports[port].netdev);
}
- /* Remove any fixed link PHYs */
+ /* Disable configuration of the CPU and DSA ports */
for (port = 0; port < DSA_MAX_PORTS; port++) {
- port_dn = cd->port_dn[port];
- if (of_phy_is_fixed_link(port_dn)) {
- phydev = of_phy_find_device(port_dn);
- if (phydev) {
- phy_device_free(phydev);
- of_node_put(port_dn);
- fixed_phy_unregister(phydev);
- }
- }
+ if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
+ continue;
+ dsa_cpu_dsa_destroy(ds->ports[port].dn);
+
+ /* Clearing a bit which is not set does no harm */
+ ds->cpu_port_mask |= ~(1 << port);
+ ds->dsa_port_mask |= ~(1 << port);
}
- mdiobus_unregister(ds->slave_mii_bus);
+ if (ds->slave_mii_bus && ds->drv->phy_read)
+ mdiobus_unregister(ds->slave_mii_bus);
}
#ifdef CONFIG_PM_SLEEP
@@ -469,7 +552,7 @@ static int dsa_switch_suspend(struct dsa_switch *ds)
if (!dsa_is_port_initialized(ds, i))
continue;
- ret = dsa_slave_suspend(ds->ports[i]);
+ ret = dsa_slave_suspend(ds->ports[i].netdev);
if (ret)
return ret;
}
@@ -495,7 +578,7 @@ static int dsa_switch_resume(struct dsa_switch *ds)
if (!dsa_is_port_initialized(ds, i))
continue;
- ret = dsa_slave_resume(ds->ports[i]);
+ ret = dsa_slave_resume(ds->ports[i].netdev);
if (ret)
return ret;
}
@@ -587,17 +670,6 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
if (link_sw_addr >= pd->nr_chips)
return -EINVAL;
- /* First time routing table allocation */
- if (!cd->rtable) {
- cd->rtable = kmalloc_array(pd->nr_chips, sizeof(s8),
- GFP_KERNEL);
- if (!cd->rtable)
- return -ENOMEM;
-
- /* default to no valid uplink/downlink */
- memset(cd->rtable, -1, pd->nr_chips * sizeof(s8));
- }
-
cd->rtable[link_sw_addr] = port_index;
return 0;
@@ -639,7 +711,6 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
kfree(pd->chip[i].port_names[port_index]);
port_index++;
}
- kfree(pd->chip[i].rtable);
/* Drop our reference to the MDIO bus device */
if (pd->chip[i].host_dev)
@@ -931,6 +1002,8 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
dsa_switch_destroy(ds);
}
+ dsa_cpu_port_ethtool_restore(dst->ds[0]);
+
dev_put(dst->master_netdev);
}
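
dsa_resolve_tag_protocol() replaces the old switch statement with a table indexed by tag protocol, where entries for tagging code that was not built in stay NULL. A runnable model of the same lookup-table pattern; the tag names and the error-reporting style are illustrative:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

enum tag_proto { TAG_NONE, TAG_DSA, TAG_EDSA, TAG_LAST };

struct tag_ops_model {
	const char *name;
};

static const struct tag_ops_model none_ops = { "none" };
static const struct tag_ops_model dsa_ops  = { "dsa" };

/* Table indexed by protocol; entries whose tagger is not built in stay
 * NULL, mirroring the #ifdef'd initialisers in dsa_device_ops[]. */
static const struct tag_ops_model *tag_table[TAG_LAST] = {
	[TAG_NONE] = &none_ops,
	[TAG_DSA]  = &dsa_ops,
	/* TAG_EDSA deliberately left out: "not compiled in" */
};

static const struct tag_ops_model *resolve(int proto, int *err)
{
	if (proto < 0 || proto >= TAG_LAST) {
		*err = -EINVAL;
		return NULL;
	}
	if (!tag_table[proto]) {
		*err = -ENOPROTOOPT;
		return NULL;
	}
	*err = 0;
	return tag_table[proto];
}

int main(void)
{
	int err;
	const struct tag_ops_model *ops = resolve(TAG_EDSA, &err);

	printf("resolve(EDSA): %s, err=%d\n",
	       ops ? ops->name : "unsupported", err);
	return 0;
}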
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
new file mode 100644
index 0000000..83b95fc
--- /dev/null
+++ b/net/dsa/dsa2.c
@@ -0,0 +1,690 @@
+/*
+ * net/dsa/dsa2.c - Hardware switch handling, binding version 2
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <net/dsa.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include "dsa_priv.h"
+
+static LIST_HEAD(dsa_switch_trees);
+static DEFINE_MUTEX(dsa2_mutex);
+
+static struct dsa_switch_tree *dsa_get_dst(u32 tree)
+{
+ struct dsa_switch_tree *dst;
+
+ list_for_each_entry(dst, &dsa_switch_trees, list)
+ if (dst->tree == tree)
+ return dst;
+ return NULL;
+}
+
+static void dsa_free_dst(struct kref *ref)
+{
+ struct dsa_switch_tree *dst = container_of(ref, struct dsa_switch_tree,
+ refcount);
+
+ list_del(&dst->list);
+ kfree(dst);
+}
+
+static void dsa_put_dst(struct dsa_switch_tree *dst)
+{
+ kref_put(&dst->refcount, dsa_free_dst);
+}
+
+static struct dsa_switch_tree *dsa_add_dst(u32 tree)
+{
+ struct dsa_switch_tree *dst;
+
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return NULL;
+ dst->tree = tree;
+ dst->cpu_switch = -1;
+ INIT_LIST_HEAD(&dst->list);
+ list_add_tail(&dsa_switch_trees, &dst->list);
+ kref_init(&dst->refcount);
+
+ return dst;
+}
+
+static void dsa_dst_add_ds(struct dsa_switch_tree *dst,
+ struct dsa_switch *ds, u32 index)
+{
+ kref_get(&dst->refcount);
+ dst->ds[index] = ds;
+}
+
+static void dsa_dst_del_ds(struct dsa_switch_tree *dst,
+ struct dsa_switch *ds, u32 index)
+{
+ dst->ds[index] = NULL;
+ kref_put(&dst->refcount, dsa_free_dst);
+}
+
+static bool dsa_port_is_dsa(struct device_node *port)
+{
+ const char *name;
+
+ name = of_get_property(port, "label", NULL);
+ if (!name)
+ return false;
+
+ if (!strcmp(name, "dsa"))
+ return true;
+
+ return false;
+}
+
+static bool dsa_port_is_cpu(struct device_node *port)
+{
+ const char *name;
+
+ name = of_get_property(port, "label", NULL);
+ if (!name)
+ return false;
+
+ if (!strcmp(name, "cpu"))
+ return true;
+
+ return false;
+}
+
+static bool dsa_ds_find_port(struct dsa_switch *ds,
+ struct device_node *port)
+{
+ u32 index;
+
+ for (index = 0; index < DSA_MAX_PORTS; index++)
+ if (ds->ports[index].dn == port)
+ return true;
+ return false;
+}
+
+static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
+ struct device_node *port)
+{
+ struct dsa_switch *ds;
+ u32 index;
+
+ for (index = 0; index < DSA_MAX_SWITCHES; index++) {
+ ds = dst->ds[index];
+ if (!ds)
+ continue;
+
+ if (dsa_ds_find_port(ds, port))
+ return ds;
+ }
+
+ return NULL;
+}
+
+static int dsa_port_complete(struct dsa_switch_tree *dst,
+ struct dsa_switch *src_ds,
+ struct device_node *port,
+ u32 src_port)
+{
+ struct device_node *link;
+ int index;
+ struct dsa_switch *dst_ds;
+
+ for (index = 0;; index++) {
+ link = of_parse_phandle(port, "link", index);
+ if (!link)
+ break;
+
+ dst_ds = dsa_dst_find_port(dst, link);
+ of_node_put(link);
+
+ if (!dst_ds)
+ return 1;
+
+ src_ds->rtable[dst_ds->index] = src_port;
+ }
+
+ return 0;
+}
+
+/* A switch is complete if all the DSA ports phandles point to ports
+ * known in the tree. A return value of 1 means the tree is not
+ * complete. This is not an error condition. A value of 0 is
+ * success.
+ */
+static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds)
+{
+ struct device_node *port;
+ u32 index;
+ int err;
+
+ for (index = 0; index < DSA_MAX_PORTS; index++) {
+ port = ds->ports[index].dn;
+ if (!port)
+ continue;
+
+ if (!dsa_port_is_dsa(port))
+ continue;
+
+ err = dsa_port_complete(dst, ds, port, index);
+ if (err != 0)
+ return err;
+
+ ds->dsa_port_mask |= BIT(index);
+ }
+
+ return 0;
+}
+
+/* A tree is complete if all the DSA ports phandles point to ports
+ * known in the tree. A return value of 1 means the tree is not
+ * complete. This is not an error condition. A value of 0 is
+ * success.
+ */
+static int dsa_dst_complete(struct dsa_switch_tree *dst)
+{
+ struct dsa_switch *ds;
+ u32 index;
+ int err;
+
+ for (index = 0; index < DSA_MAX_SWITCHES; index++) {
+ ds = dst->ds[index];
+ if (!ds)
+ continue;
+
+ err = dsa_ds_complete(dst, ds);
+ if (err != 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dsa_dsa_port_apply(struct device_node *port, u32 index,
+ struct dsa_switch *ds)
+{
+ int err;
+
+ err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
+ if (err) {
+ dev_warn(ds->dev, "Failed to setup dsa port %d: %d\n",
+ index, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void dsa_dsa_port_unapply(struct device_node *port, u32 index,
+ struct dsa_switch *ds)
+{
+ dsa_cpu_dsa_destroy(port);
+}
+
+static int dsa_cpu_port_apply(struct device_node *port, u32 index,
+ struct dsa_switch *ds)
+{
+ int err;
+
+ err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
+ if (err) {
+ dev_warn(ds->dev, "Failed to setup cpu port %d: %d\n",
+ index, err);
+ return err;
+ }
+
+ ds->cpu_port_mask |= BIT(index);
+
+ return 0;
+}
+
+static void dsa_cpu_port_unapply(struct device_node *port, u32 index,
+ struct dsa_switch *ds)
+{
+ dsa_cpu_dsa_destroy(port);
+ ds->cpu_port_mask &= ~BIT(index);
+
+}
+
+static int dsa_user_port_apply(struct device_node *port, u32 index,
+ struct dsa_switch *ds)
+{
+ const char *name;
+ int err;
+
+ name = of_get_property(port, "label", NULL);
+
+ err = dsa_slave_create(ds, ds->dev, index, name);
+ if (err) {
+ dev_warn(ds->dev, "Failed to create slave %d: %d\n",
+ index, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void dsa_user_port_unapply(struct device_node *port, u32 index,
+ struct dsa_switch *ds)
+{
+ if (ds->ports[index].netdev) {
+ dsa_slave_destroy(ds->ports[index].netdev);
+ ds->ports[index].netdev = NULL;
+ ds->enabled_port_mask &= ~(1 << index);
+ }
+}
+
+static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
+{
+ struct device_node *port;
+ u32 index;
+ int err;
+
+ /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
+ * driver and before drv->setup() has run, since the switch drivers and
+ * the slave MDIO bus driver rely on these values for probing PHY
+ * devices or not
+ */
+ ds->phys_mii_mask = ds->enabled_port_mask;
+
+ err = ds->drv->setup(ds);
+ if (err < 0)
+ return err;
+
+ err = ds->drv->set_addr(ds, dst->master_netdev->dev_addr);
+ if (err < 0)
+ return err;
+
+ if (!ds->slave_mii_bus && ds->drv->phy_read) {
+ ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ if (!ds->slave_mii_bus)
+ return -ENOMEM;
+
+ dsa_slave_mii_bus_init(ds);
+
+ err = mdiobus_register(ds->slave_mii_bus);
+ if (err < 0)
+ return err;
+ }
+
+ for (index = 0; index < DSA_MAX_PORTS; index++) {
+ port = ds->ports[index].dn;
+ if (!port)
+ continue;
+
+ if (dsa_port_is_dsa(port)) {
+ err = dsa_dsa_port_apply(port, index, ds);
+ if (err)
+ return err;
+ continue;
+ }
+
+ if (dsa_port_is_cpu(port)) {
+ err = dsa_cpu_port_apply(port, index, ds);
+ if (err)
+ return err;
+ continue;
+ }
+
+ err = dsa_user_port_apply(port, index, ds);
+ if (err)
+ continue;
+ }
+
+ return 0;
+}
+
+static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
+{
+ struct device_node *port;
+ u32 index;
+
+ for (index = 0; index < DSA_MAX_PORTS; index++) {
+ port = ds->ports[index].dn;
+ if (!port)
+ continue;
+
+ if (dsa_port_is_dsa(port)) {
+ dsa_dsa_port_unapply(port, index, ds);
+ continue;
+ }
+
+ if (dsa_port_is_cpu(port)) {
+ dsa_cpu_port_unapply(port, index, ds);
+ continue;
+ }
+
+ dsa_user_port_unapply(port, index, ds);
+ }
+
+ if (ds->slave_mii_bus && ds->drv->phy_read)
+ mdiobus_unregister(ds->slave_mii_bus);
+}
+
+static int dsa_dst_apply(struct dsa_switch_tree *dst)
+{
+ struct dsa_switch *ds;
+ u32 index;
+ int err;
+
+ for (index = 0; index < DSA_MAX_SWITCHES; index++) {
+ ds = dst->ds[index];
+ if (!ds)
+ continue;
+
+ err = dsa_ds_apply(dst, ds);
+ if (err)
+ return err;
+ }
+
+ err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
+ if (err)
+ return err;
+
+ /* If we use a tagging format that doesn't have an ethertype
+ * field, make sure that all packets from this point on get
+ * sent to the tag format's receive function.
+ */
+ wmb();
+ dst->master_netdev->dsa_ptr = (void *)dst;
+ dst->applied = true;
+
+ return 0;
+}
+
+static void dsa_dst_unapply(struct dsa_switch_tree *dst)
+{
+ struct dsa_switch *ds;
+ u32 index;
+
+ if (!dst->applied)
+ return;
+
+ dst->master_netdev->dsa_ptr = NULL;
+
+ /* If we used a tagging format that doesn't have an ethertype
+ * field, make sure that all packets from this point get sent
+ * without the tag and go through the regular receive path.
+ */
+ wmb();
+
+ for (index = 0; index < DSA_MAX_SWITCHES; index++) {
+ ds = dst->ds[index];
+ if (!ds)
+ continue;
+
+ dsa_ds_unapply(dst, ds);
+ }
+
+ dsa_cpu_port_ethtool_restore(dst->ds[0]);
+
+ pr_info("DSA: tree %d unapplied\n", dst->tree);
+ dst->applied = false;
+}
+
+static int dsa_cpu_parse(struct device_node *port, u32 index,
+ struct dsa_switch_tree *dst,
+ struct dsa_switch *ds)
+{
+ struct net_device *ethernet_dev;
+ struct device_node *ethernet;
+
+ ethernet = of_parse_phandle(port, "ethernet", 0);
+ if (!ethernet)
+ return -EINVAL;
+
+ ethernet_dev = of_find_net_device_by_node(ethernet);
+ if (!ethernet_dev)
+ return -EPROBE_DEFER;
+
+ if (!ds->master_netdev)
+ ds->master_netdev = ethernet_dev;
+
+ if (!dst->master_netdev)
+ dst->master_netdev = ethernet_dev;
+
+ if (dst->cpu_switch == -1) {
+ dst->cpu_switch = ds->index;
+ dst->cpu_port = index;
+ }
+
+ dst->tag_ops = dsa_resolve_tag_protocol(ds->drv->tag_protocol);
+ if (IS_ERR(dst->tag_ops)) {
+ dev_warn(ds->dev, "No tagger for this switch\n");
+ return PTR_ERR(dst->tag_ops);
+ }
+
+ dst->rcv = dst->tag_ops->rcv;
+
+ return 0;
+}
+
+static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds)
+{
+ struct device_node *port;
+ u32 index;
+ int err;
+
+ for (index = 0; index < DSA_MAX_PORTS; index++) {
+ port = ds->ports[index].dn;
+ if (!port)
+ continue;
+
+ if (dsa_port_is_cpu(port)) {
+ err = dsa_cpu_parse(port, index, dst, ds);
+ if (err)
+ return err;
+ }
+ }
+
+ pr_info("DSA: switch %d %d parsed\n", dst->tree, ds->index);
+
+ return 0;
+}
+
+static int dsa_dst_parse(struct dsa_switch_tree *dst)
+{
+ struct dsa_switch *ds;
+ u32 index;
+ int err;
+
+ for (index = 0; index < DSA_MAX_SWITCHES; index++) {
+ ds = dst->ds[index];
+ if (!ds)
+ continue;
+
+ err = dsa_ds_parse(dst, ds);
+ if (err)
+ return err;
+ }
+
+ if (!dst->master_netdev) {
+ pr_warn("Tree has no master device\n");
+ return -EINVAL;
+ }
+
+ pr_info("DSA: tree %d parsed\n", dst->tree);
+
+ return 0;
+}
+
+static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
+{
+ struct device_node *port;
+ int err;
+ u32 reg;
+
+ for_each_available_child_of_node(ports, port) {
+ err = of_property_read_u32(port, "reg", &reg);
+ if (err)
+ return err;
+
+ if (reg >= DSA_MAX_PORTS)
+ return -EINVAL;
+
+ ds->ports[reg].dn = port;
+
+ /* Initialize enabled_port_mask now for drv->setup()
+ * to have access to a correct value, just like what
+ * net/dsa/dsa.c::dsa_switch_setup_one does.
+ */
+ if (!dsa_port_is_cpu(port))
+ ds->enabled_port_mask |= 1 << reg;
+ }
+
+ return 0;
+}
+
+static int dsa_parse_member(struct device_node *np, u32 *tree, u32 *index)
+{
+ int err;
+
+ *tree = *index = 0;
+
+ err = of_property_read_u32_index(np, "dsa,member", 0, tree);
+ if (err) {
+ /* Does not exist, but it is optional */
+ if (err == -EINVAL)
+ return 0;
+ return err;
+ }
+
+ err = of_property_read_u32_index(np, "dsa,member", 1, index);
+ if (err)
+ return err;
+
+ if (*index >= DSA_MAX_SWITCHES)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct device_node *dsa_get_ports(struct dsa_switch *ds,
+ struct device_node *np)
+{
+ struct device_node *ports;
+
+ ports = of_get_child_by_name(np, "ports");
+ if (!ports) {
+ dev_err(ds->dev, "no ports child node found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ports;
+}
+
+static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
+{
+ struct device_node *ports = dsa_get_ports(ds, np);
+ struct dsa_switch_tree *dst;
+ u32 tree, index;
+ int err;
+
+ err = dsa_parse_member(np, &tree, &index);
+ if (err)
+ return err;
+
+ if (IS_ERR(ports))
+ return PTR_ERR(ports);
+
+ err = dsa_parse_ports_dn(ports, ds);
+ if (err)
+ return err;
+
+ dst = dsa_get_dst(tree);
+ if (!dst) {
+ dst = dsa_add_dst(tree);
+ if (!dst)
+ return -ENOMEM;
+ }
+
+ if (dst->ds[index]) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ ds->dst = dst;
+ ds->index = index;
+ dsa_dst_add_ds(dst, ds, index);
+
+ err = dsa_dst_complete(dst);
+ if (err < 0)
+ goto out_del_dst;
+
+ if (err == 1) {
+ /* Not all switches registered yet */
+ err = 0;
+ goto out;
+ }
+
+ if (dst->applied) {
+ pr_info("DSA: Disjoint trees?\n");
+ return -EINVAL;
+ }
+
+ err = dsa_dst_parse(dst);
+ if (err)
+ goto out_del_dst;
+
+ err = dsa_dst_apply(dst);
+ if (err) {
+ dsa_dst_unapply(dst);
+ goto out_del_dst;
+ }
+
+ dsa_put_dst(dst);
+ return 0;
+
+out_del_dst:
+ dsa_dst_del_ds(dst, ds, ds->index);
+out:
+ dsa_put_dst(dst);
+
+ return err;
+}
+
+int dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
+{
+ int err;
+
+ mutex_lock(&dsa2_mutex);
+ err = _dsa_register_switch(ds, np);
+ mutex_unlock(&dsa2_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dsa_register_switch);
+
+static void _dsa_unregister_switch(struct dsa_switch *ds)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+
+ dsa_dst_unapply(dst);
+
+ dsa_dst_del_ds(dst, ds, ds->index);
+}
+
+void dsa_unregister_switch(struct dsa_switch *ds)
+{
+ mutex_lock(&dsa2_mutex);
+ _dsa_unregister_switch(ds);
+ mutex_unlock(&dsa2_mutex);
+}
+EXPORT_SYMBOL_GPL(dsa_unregister_switch);
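
For orientation, a hedged sketch of how a hypothetical MDIO-attached switch driver would hook into this new registration path. Only dsa_register_switch() and dsa_unregister_switch() come from this file; foo_switch_ops, foo_switch_probe() and foo_switch_remove() are illustrative names, not part of the patch.

/* Hypothetical driver-side usage; illustrative assumptions only. */
static int foo_switch_probe(struct mdio_device *mdiodev)
{
	struct dsa_switch *ds;

	ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	ds->dev = &mdiodev->dev;
	ds->drv = &foo_switch_ops;
	dev_set_drvdata(&mdiodev->dev, ds);

	/* Parses "dsa,member" and the "ports" node, joins or creates the
	 * switch tree, and may return -EPROBE_DEFER until the master
	 * Ethernet device named by the CPU port's "ethernet" phandle
	 * has appeared.
	 */
	return dsa_register_switch(ds, mdiodev->dev.of_node);
}

static void foo_switch_remove(struct mdio_device *mdiodev)
{
	struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);

	dsa_unregister_switch(ds);
}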
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index dfa3377..00077a9 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -50,12 +50,19 @@ struct dsa_slave_priv {
/* dsa.c */
extern char dsa_driver_version[];
+int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
+ struct device_node *port_dn, int port);
+void dsa_cpu_dsa_destroy(struct device_node *port_dn);
+const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
+int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds);
+void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds);
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
+void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops);
int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
- int port, char *name);
+ int port, const char *name);
void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
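
dsa_cpu_port_ethtool_setup() and dsa_cpu_port_ethtool_restore() are only declared here; their bodies live in net/dsa/dsa.c, outside this hunk. Judging by the ethtool-ops juggling removed from dsa_slave_create() further down and the new dsa_cpu_port_ethtool_init() helper, a plausible sketch of the setup side is shown below; treat it as an assumption, not the actual implementation.

/* Plausible sketch only; the real body lives in net/dsa/dsa.c. */
int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct net_device *master = dst->master_netdev;
	struct ethtool_ops *cpu_ops;

	cpu_ops = devm_kzalloc(ds->dev, sizeof(*cpu_ops), GFP_KERNEL);
	if (!cpu_ops)
		return -ENOMEM;

	/* Keep a copy of the master's original ops for restore, then
	 * overlay the CPU port statistics callbacks on a private copy.
	 */
	memcpy(&dst->master_ethtool_ops, master->ethtool_ops,
	       sizeof(struct ethtool_ops));
	memcpy(cpu_ops, &dst->master_ethtool_ops, sizeof(*cpu_ops));
	dsa_cpu_port_ethtool_init(cpu_ops);
	master->ethtool_ops = cpu_ops;

	return 0;
}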
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 152436c..7236eb2 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -49,8 +49,8 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
ds->slave_mii_bus->name = "dsa slave smi";
ds->slave_mii_bus->read = dsa_slave_phy_read;
ds->slave_mii_bus->write = dsa_slave_phy_write;
- snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
- ds->index, ds->cd->sw_addr);
+ snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
+ ds->dst->tree, ds->index);
ds->slave_mii_bus->parent = ds->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
@@ -522,14 +522,6 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- /* Just return the original SKB */
- return skb;
-}
-
-
/* ethtool operations *******************************************************/
static int
dsa_slave_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -615,7 +607,7 @@ static int dsa_slave_get_eeprom_len(struct net_device *dev)
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- if (ds->cd->eeprom_len)
+ if (ds->cd && ds->cd->eeprom_len)
return ds->cd->eeprom_len;
if (ds->drv->get_eeprom_len)
@@ -873,6 +865,13 @@ static void dsa_slave_poll_controller(struct net_device *dev)
}
#endif
+void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops)
+{
+ ops->get_sset_count = dsa_cpu_port_get_sset_count;
+ ops->get_ethtool_stats = dsa_cpu_port_get_ethtool_stats;
+ ops->get_strings = dsa_cpu_port_get_strings;
+}
+
static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_settings = dsa_slave_get_settings,
.set_settings = dsa_slave_set_settings,
@@ -893,8 +892,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_eee = dsa_slave_get_eee,
};
-static struct ethtool_ops dsa_cpu_port_ethtool_ops;
-
static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_open = dsa_slave_open,
.ndo_stop = dsa_slave_close,
@@ -999,13 +996,12 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
struct net_device *slave_dev)
{
struct dsa_switch *ds = p->parent;
- struct dsa_chip_data *cd = ds->cd;
struct device_node *phy_dn, *port_dn;
bool phy_is_fixed = false;
u32 phy_flags = 0;
int mode, ret;
- port_dn = cd->port_dn[p->port];
+ port_dn = ds->ports[p->port].dn;
mode = of_get_phy_mode(port_dn);
if (mode < 0)
mode = PHY_INTERFACE_MODE_NA;
@@ -1109,14 +1105,18 @@ int dsa_slave_resume(struct net_device *slave_dev)
}
int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
- int port, char *name)
+ int port, const char *name)
{
- struct net_device *master = ds->dst->master_netdev;
struct dsa_switch_tree *dst = ds->dst;
+ struct net_device *master;
struct net_device *slave_dev;
struct dsa_slave_priv *p;
int ret;
+ master = ds->dst->master_netdev;
+ if (ds->master_netdev)
+ master = ds->master_netdev;
+
slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name,
NET_NAME_UNKNOWN, ether_setup);
if (slave_dev == NULL)
@@ -1124,19 +1124,6 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
slave_dev->features = master->vlan_features;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
- if (master->ethtool_ops != &dsa_cpu_port_ethtool_ops) {
- memcpy(&dst->master_ethtool_ops, master->ethtool_ops,
- sizeof(struct ethtool_ops));
- memcpy(&dsa_cpu_port_ethtool_ops, &dst->master_ethtool_ops,
- sizeof(struct ethtool_ops));
- dsa_cpu_port_ethtool_ops.get_sset_count =
- dsa_cpu_port_get_sset_count;
- dsa_cpu_port_ethtool_ops.get_ethtool_stats =
- dsa_cpu_port_get_ethtool_stats;
- dsa_cpu_port_ethtool_ops.get_strings =
- dsa_cpu_port_get_strings;
- master->ethtool_ops = &dsa_cpu_port_ethtool_ops;
- }
eth_hw_addr_inherit(slave_dev, master);
slave_dev->priv_flags |= IFF_NO_QUEUE;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
@@ -1147,49 +1134,24 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
NULL);
SET_NETDEV_DEV(slave_dev, parent);
- slave_dev->dev.of_node = ds->cd->port_dn[port];
+ slave_dev->dev.of_node = ds->ports[port].dn;
slave_dev->vlan_features = master->vlan_features;
p = netdev_priv(slave_dev);
p->parent = ds;
p->port = port;
-
- switch (ds->dst->tag_protocol) {
-#ifdef CONFIG_NET_DSA_TAG_DSA
- case DSA_TAG_PROTO_DSA:
- p->xmit = dsa_netdev_ops.xmit;
- break;
-#endif
-#ifdef CONFIG_NET_DSA_TAG_EDSA
- case DSA_TAG_PROTO_EDSA:
- p->xmit = edsa_netdev_ops.xmit;
- break;
-#endif
-#ifdef CONFIG_NET_DSA_TAG_TRAILER
- case DSA_TAG_PROTO_TRAILER:
- p->xmit = trailer_netdev_ops.xmit;
- break;
-#endif
-#ifdef CONFIG_NET_DSA_TAG_BRCM
- case DSA_TAG_PROTO_BRCM:
- p->xmit = brcm_netdev_ops.xmit;
- break;
-#endif
- default:
- p->xmit = dsa_slave_notag_xmit;
- break;
- }
+ p->xmit = dst->tag_ops->xmit;
p->old_pause = -1;
p->old_link = -1;
p->old_duplex = -1;
- ds->ports[port] = slave_dev;
+ ds->ports[port].netdev = slave_dev;
ret = register_netdev(slave_dev);
if (ret) {
netdev_err(master, "error %d registering interface %s\n",
ret, slave_dev->name);
- ds->ports[port] = NULL;
+ ds->ports[port].netdev = NULL;
free_netdev(slave_dev);
return ret;
}
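
With the per-protocol switch statement gone, dsa_slave_create() now takes its xmit hook straight from dst->tag_ops, which dsa_cpu_parse() resolved through dsa_resolve_tag_protocol(). For context, one tagger's ops table has roughly the shape below; the initializer is reconstructed from tag_brcm.c and is not part of this diff.

/* Reconstructed for illustration from net/dsa/tag_brcm.c, not part of
 * this patch: dsa_resolve_tag_protocol(DSA_TAG_PROTO_BRCM) is expected
 * to return a table like this, with .xmit used by the slave transmit
 * path and .rcv by the master receive hook.
 */
const struct dsa_device_ops brcm_netdev_ops = {
	.xmit	= brcm_tag_xmit,
	.rcv	= brcm_tag_rcv,
};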
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index e2aadb7..21bffde 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -127,7 +127,7 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
/* Validate port against switch setup, either the port is totally */
- if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
+ if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
goto out_drop;
/* Remove Broadcom tag and update checksum */
@@ -140,7 +140,7 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
skb_push(skb, ETH_HLEN);
skb->pkt_type = PACKET_HOST;
- skb->dev = ds->ports[source_port];
+ skb->dev = ds->ports[source_port].netdev;
skb->protocol = eth_type_trans(skb, skb->dev);
skb->dev->stats.rx_packets++;
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index aa780e4..bce79ff 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -107,10 +107,14 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
* Check that the source device exists and that the source
* port is a registered DSA port.
*/
- if (source_device >= dst->pd->nr_chips)
+ if (source_device >= DSA_MAX_SWITCHES)
goto out_drop;
+
ds = dst->ds[source_device];
- if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
+ if (!ds)
+ goto out_drop;
+
+ if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
goto out_drop;
/*
@@ -159,7 +163,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
2 * ETH_ALEN);
}
- skb->dev = ds->ports[source_port];
+ skb->dev = ds->ports[source_port].netdev;
skb_push(skb, ETH_HLEN);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 2288c80..6c1720e 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -120,10 +120,14 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
* Check that the source device exists and that the source
* port is a registered DSA port.
*/
- if (source_device >= dst->pd->nr_chips)
+ if (source_device >= DSA_MAX_SWITCHES)
goto out_drop;
+
ds = dst->ds[source_device];
- if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
+ if (!ds)
+ goto out_drop;
+
+ if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
goto out_drop;
/*
@@ -178,7 +182,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
2 * ETH_ALEN);
}
- skb->dev = ds->ports[source_port];
+ skb->dev = ds->ports[source_port].netdev;
skb_push(skb, ETH_HLEN);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index b6ca089..5e3903e 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -82,12 +82,12 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
goto out_drop;
source_port = trailer[1] & 7;
- if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
+ if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
goto out_drop;
pskb_trim_rcsum(skb, skb->len - 4);
- skb->dev = ds->ports[source_port];
+ skb->dev = ds->ports[source_port].netdev;
skb_push(skb, ETH_HLEN);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index dd085db..4e2b308 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -58,21 +58,10 @@ static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
};
-static struct lock_class_key lowpan_tx_busylock;
-static struct lock_class_key lowpan_netdev_xmit_lock_key;
-
-static void lowpan_set_lockdep_class_one(struct net_device *ldev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &lowpan_netdev_xmit_lock_key);
-}
-
static int lowpan_dev_init(struct net_device *ldev)
{
- netdev_for_each_tx_queue(ldev, lowpan_set_lockdep_class_one, NULL);
- ldev->qdisc_tx_busylock = &lowpan_tx_busylock;
+ netdev_lockdep_set_classes(ldev);
+
return 0;
}
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index f2bda9e..6e9ea69 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -76,6 +76,7 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
{
int err = -EAGAIN;
struct fib_table *tbl;
+ u32 tb_id;
switch (rule->action) {
case FR_ACT_TO_TBL:
@@ -94,7 +95,8 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
rcu_read_lock();
- tbl = fib_get_table(rule->fr_net, rule->table);
+ tb_id = fib_rule_get_table(rule, arg);
+ tbl = fib_get_table(rule->fr_net, tb_id);
if (tbl)
err = fib_table_lookup(tbl, &flp->u.ip4,
(struct fib_result *)arg->result,
@@ -180,7 +182,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
if (err)
goto errout;
- if (rule->table == RT_TABLE_UNSPEC) {
+ if (rule->table == RT_TABLE_UNSPEC && !rule->l3mdev) {
if (rule->action == FR_ACT_TO_TBL) {
struct fib_table *table;
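
Both address families now resolve the table through fib_rule_get_table() so that a rule marked l3mdev takes its table id from the matched L3 master device rather than from rule->table. The helper is defined in include/net/fib_rules.h and not shown in this diff; it is expected to amount to something like the sketch below.

/* Sketch of the helper assumed by this hunk (actual definition in
 * include/net/fib_rules.h): an l3mdev rule carries no fixed table, so
 * use the table id that l3mdev_fib_rule_match() stored in the lookup
 * argument.
 */
static inline u32 fib_rule_get_table(struct fib_rule *rule,
				     struct fib_lookup_arg *arg)
{
	return rule->l3mdev ? arg->table : rule->table;
}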
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 5f9207c..321d57f 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -129,6 +129,36 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+ switch (guehdr->version) {
+ case 0: /* Full GUE header present */
+ break;
+
+ case 1: {
+		/* Direct encapsulation of IPv4 or IPv6 */
+
+ int prot;
+
+ switch (((struct iphdr *)guehdr)->version) {
+ case 4:
+ prot = IPPROTO_IPIP;
+ break;
+ case 6:
+ prot = IPPROTO_IPV6;
+ break;
+ default:
+ goto drop;
+ }
+
+ if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
+ goto drop;
+
+ return -prot;
+ }
+
+ default: /* Undefined version */
+ goto drop;
+ }
+
optlen = guehdr->hlen << 2;
len += optlen;
@@ -289,6 +319,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
int flush = 1;
struct fou *fou = fou_from_sock(sk);
struct gro_remcsum grc;
+ u8 proto;
skb_gro_remcsum_init(&grc);
@@ -302,6 +333,25 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
goto out;
}
+ switch (guehdr->version) {
+ case 0:
+ break;
+ case 1:
+ switch (((struct iphdr *)guehdr)->version) {
+ case 4:
+ proto = IPPROTO_IPIP;
+ break;
+ case 6:
+ proto = IPPROTO_IPV6;
+ break;
+ default:
+ goto out;
+ }
+ goto next_proto;
+ default:
+ goto out;
+ }
+
optlen = guehdr->hlen << 2;
len += optlen;
@@ -370,6 +420,10 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
}
}
+ proto = guehdr->proto_ctype;
+
+next_proto:
+
/* We can clear the encap_mark for GUE as we are essentially doing
* one of two possible things. We are either adding an L4 tunnel
 * header to the outer L3 tunnel header, or we are simply
@@ -383,7 +437,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
- ops = rcu_dereference(offloads[guehdr->proto_ctype]);
+ ops = rcu_dereference(offloads[proto]);
if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
goto out_unlock;
@@ -404,13 +458,30 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
const struct net_offload **offloads;
struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
const struct net_offload *ops;
- unsigned int guehlen;
+ unsigned int guehlen = 0;
u8 proto;
int err = -ENOENT;
- proto = guehdr->proto_ctype;
-
- guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
+ switch (guehdr->version) {
+ case 0:
+ proto = guehdr->proto_ctype;
+ guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
+ break;
+ case 1:
+ switch (((struct iphdr *)guehdr)->version) {
+ case 4:
+ proto = IPPROTO_IPIP;
+ break;
+ case 6:
+ proto = IPPROTO_IPV6;
+ break;
+ default:
+ return err;
+ }
+ break;
+ default:
+ return err;
+ }
rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
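
The version-1 handling repeats the same inner-IP-version to protocol mapping in gue_udp_recv(), gue_gro_receive() and gue_gro_complete(). Purely as an illustration of that mapping, it could be captured in a helper like the one below; this is not part of the patch.

/* Illustrative only, not part of this patch: for GUE version 1 the
 * "GUE header" is actually the inner IP header, so the inner version
 * nibble selects the tunnel protocol.
 */
static int gue_v1_inner_proto(const struct guehdr *guehdr)
{
	switch (((const struct iphdr *)guehdr)->version) {
	case 4:
		return IPPROTO_IPIP;
	case 6:
		return IPPROTO_IPV6;
	default:
		return -EINVAL;
	}
}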
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 3a88b0c..b5e9317 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -355,7 +355,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
{
struct inet_frag_queue *q;
- if (frag_mem_limit(nf) > nf->high_thresh) {
+ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
inet_frag_schedule_worker(f);
return NULL;
}
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index cbfb180..9f0a7b9 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -54,7 +54,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
if (skb->ignore_df)
return false;
- if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
return false;
return true;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 124bf0a..cbac493 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -225,7 +225,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
/* common case: locally created skb or seglen is <= mtu */
if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
- skb_gso_network_seglen(skb) <= mtu)
+ skb_gso_validate_mtu(skb, mtu))
return ip_finish_output2(net, sk, skb);
/* Slowpath - GSO segment length is exceeding the dst MTU.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d6c8f4cd0..89dd8d8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5159,6 +5159,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, int syn_inerr)
{
struct tcp_sock *tp = tcp_sk(sk);
+ bool rst_seq_match = false;
/* RFC1323: H1. Apply PAWS check first. */
if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
@@ -5195,13 +5196,32 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
/* Step 2: check RST bit */
if (th->rst) {
- /* RFC 5961 3.2 :
- * If sequence number exactly matches RCV.NXT, then
+ /* RFC 5961 3.2 (extend to match against SACK too if available):
+ * If seq num matches RCV.NXT or the right-most SACK block,
+ * then
* RESET the connection
* else
* Send a challenge ACK
*/
- if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
+ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+ rst_seq_match = true;
+ } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
+ struct tcp_sack_block *sp = &tp->selective_acks[0];
+ int max_sack = sp[0].end_seq;
+ int this_sack;
+
+ for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;
+ ++this_sack) {
+ max_sack = after(sp[this_sack].end_seq,
+ max_sack) ?
+ sp[this_sack].end_seq : max_sack;
+ }
+
+ if (TCP_SKB_CB(skb)->seq == max_sack)
+ rst_seq_match = true;
+ }
+
+ if (rst_seq_match)
tcp_reset(sk);
else
tcp_send_challenge_ack(sk, skb);
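
The RFC 5961 check is extended so that a RST whose sequence number equals the right-most SACK edge also resets the connection, since that is the highest sequence the peer could legitimately have learned. The inline loop above computes that edge; expressed as a stand-alone helper (an illustrative refactoring, not part of the patch) it would read:

/* Illustrative helper mirroring the inline loop in
 * tcp_validate_incoming(): highest end_seq among the SACK blocks the
 * receiver is currently advertising.
 */
static u32 tcp_rightmost_sack_edge(const struct tcp_sock *tp)
{
	const struct tcp_sack_block *sp = tp->selective_acks;
	u32 max_sack = sp[0].end_seq;
	int i;

	for (i = 1; i < tp->rx_opt.num_sacks; i++)
		if (after(sp[i].end_seq, max_sack))
			max_sack = sp[i].end_seq;

	return max_sack;
}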
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index bfa86f0..2076c21 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -92,6 +92,12 @@ MODULE_PARM_DESC(disable_ipv6, "Disable IPv6 on all interfaces");
module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444);
MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces");
+bool ipv6_mod_enabled(void)
+{
+ return disable_ipv6_mod == 0;
+}
+EXPORT_SYMBOL_GPL(ipv6_mod_enabled);
+
static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
{
const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
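
ipv6_mod_enabled() gives other kernel code a direct way to ask whether IPv6 was registered or booted with ipv6.disable=1, instead of probing for it indirectly. A minimal illustrative caller follows; the function name and fallback policy are assumptions of this sketch, not taken from the patch.

/* Illustrative caller only: pick an address family for a kernel
 * socket, falling back to IPv4 when IPv6 has been disabled.
 */
static int example_create_sock(struct net *net, struct socket **res)
{
	int family = ipv6_mod_enabled() ? PF_INET6 : PF_INET;

	return sock_create_kern(net, family, SOCK_DGRAM, IPPROTO_UDP, res);
}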
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index ed33abf..5857c1f 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -67,6 +67,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
struct net *net = rule->fr_net;
pol_lookup_t lookup = arg->lookup_ptr;
int err = 0;
+ u32 tb_id;
switch (rule->action) {
case FR_ACT_TO_TBL:
@@ -86,7 +87,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
goto discard_pkt;
}
- table = fib6_get_table(net, rule->table);
+ tb_id = fib_rule_get_table(rule, arg);
+ table = fib6_get_table(net, tb_id);
if (!table) {
err = -EAGAIN;
goto out;
@@ -199,7 +201,7 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct net *net = sock_net(skb->sk);
struct fib6_rule *rule6 = (struct fib6_rule *) rule;
- if (rule->action == FR_ACT_TO_TBL) {
+ if (rule->action == FR_ACT_TO_TBL && !rule->l3mdev) {
if (rule->table == RT6_TABLE_UNSPEC)
goto errout;
diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
index d08fd2d..e0170f6 100644
--- a/net/ipv6/ila/ila.h
+++ b/net/ipv6/ila/ila.h
@@ -109,7 +109,8 @@ static inline bool ila_csum_neutral_set(struct ila_identifier ident)
return !!(ident.csum_neutral);
}
-void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p);
+void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
+ bool set_csum_neutral);
void ila_init_saved_csum(struct ila_params *p);
diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c
index 0e94042..b3d00be 100644
--- a/net/ipv6/ila/ila_common.c
+++ b/net/ipv6/ila/ila_common.c
@@ -103,7 +103,8 @@ static void ila_csum_adjust_transport(struct sk_buff *skb,
iaddr->loc = p->locator;
}
-void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
+ bool set_csum_neutral)
{
struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
@@ -114,7 +115,8 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
* is a locator being translated to a SIR address.
* Perform (receiver) checksum-neutral translation.
*/
- ila_csum_do_neutral(iaddr, p);
+ if (!set_csum_neutral)
+ ila_csum_do_neutral(iaddr, p);
} else {
switch (p->csum_mode) {
case ILA_CSUM_ADJUST_TRANSPORT:
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 1dfb641..c8314c6 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -26,7 +26,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
if (skb->protocol != htons(ETH_P_IPV6))
goto drop;
- ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+ ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate), true);
return dst->lwtstate->orig_output(net, sk, skb);
@@ -42,7 +42,7 @@ static int ila_input(struct sk_buff *skb)
if (skb->protocol != htons(ETH_P_IPV6))
goto drop;
- ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+ ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate), false);
return dst->lwtstate->orig_input(skb);
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index a90e572..e6eca5f 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -210,14 +210,14 @@ static void ila_free_cb(void *ptr, void *arg)
}
}
-static int ila_xlat_addr(struct sk_buff *skb);
+static int ila_xlat_addr(struct sk_buff *skb, bool set_csum_neutral);
static unsigned int
ila_nf_input(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- ila_xlat_addr(skb);
+ ila_xlat_addr(skb, false);
return NF_ACCEPT;
}
@@ -597,7 +597,7 @@ static struct pernet_operations ila_net_ops = {
.size = sizeof(struct ila_net),
};
-static int ila_xlat_addr(struct sk_buff *skb)
+static int ila_xlat_addr(struct sk_buff *skb, bool set_csum_neutral)
{
struct ila_map *ila;
struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -616,7 +616,7 @@ static int ila_xlat_addr(struct sk_buff *skb)
ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
if (ila)
- ila_update_ipv6_locator(skb, &ila->xp.ip);
+ ila_update_ipv6_locator(skb, &ila->xp.ip, set_csum_neutral);
rcu_read_unlock();
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 94611e4..aacfb4b 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -323,6 +323,7 @@ int ip6_input(struct sk_buff *skb)
dev_net(skb->dev), NULL, skb, skb->dev, NULL,
ip6_input_finish);
}
+EXPORT_SYMBOL_GPL(ip6_input);
int ip6_mc_input(struct sk_buff *skb)
{
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 635b8d3..fd32175 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -368,7 +368,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
if (skb->ignore_df)
return false;
- if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
return false;
return true;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 0a5a255..d9f2bd6 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -825,9 +825,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
u8 protocol = IPPROTO_IPV6;
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
- if (skb->protocol != htons(ETH_P_IPV6))
- goto tx_error;
-
if (tos == 1)
tos = ipv6_get_dsfield(iph6);
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index e253c26..57fc5a4 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -67,7 +67,6 @@ static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
return net_generic(net, l2tp_eth_net_id);
}
-static struct lock_class_key l2tp_eth_tx_busylock;
static int l2tp_eth_dev_init(struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
@@ -75,7 +74,8 @@ static int l2tp_eth_dev_init(struct net_device *dev)
priv->dev = dev;
eth_hw_addr_random(dev);
eth_broadcast_addr(dev->broadcast);
- dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
+ netdev_lockdep_set_classes(dev);
+
return 0;
}
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index 6651a78..7da9780 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -10,6 +10,7 @@
*/
#include <linux/netdevice.h>
+#include <net/fib_rules.h>
#include <net/l3mdev.h>
/**
@@ -160,3 +161,40 @@ int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4)
return rc;
}
EXPORT_SYMBOL_GPL(l3mdev_get_saddr);
+
+/**
+ * l3mdev_fib_rule_match - Determine if flowi references an
+ * L3 master device
+ * @net: network namespace for device index lookup
+ * @fl: flow struct
+ * @arg: FIB lookup argument; arg->table receives the device's table id
+ */
+
+int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
+ struct fib_lookup_arg *arg)
+{
+ struct net_device *dev;
+ int rc = 0;
+
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, fl->flowi_oif);
+ if (dev && netif_is_l3_master(dev) &&
+ dev->l3mdev_ops->l3mdev_fib_table) {
+ arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
+ rc = 1;
+ goto out;
+ }
+
+ dev = dev_get_by_index_rcu(net, fl->flowi_iif);
+ if (dev && netif_is_l3_master(dev) &&
+ dev->l3mdev_ops->l3mdev_fib_table) {
+ arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
+ rc = 1;
+ goto out;
+ }
+
+out:
+ rcu_read_unlock();
+
+ return rc;
+}
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 0b80a71..7a4aa34 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -91,7 +91,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
if (skb->len <= mtu)
return false;
- if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
return false;
return true;
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 604df6f..515131f 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -137,7 +137,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
cfg.est.ewma_log = info->ewma_log;
ret = gen_new_estimator(&est->bstats, NULL, &est->rstats,
- &est->lock, &cfg.opt);
+ &est->lock, NULL, &cfg.opt);
if (ret < 0)
goto err2;
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index e68ef9c..3cfd6cc 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -8,20 +8,6 @@
#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long))
-struct netlink_ring {
- void **pg_vec;
- unsigned int head;
- unsigned int frames_per_block;
- unsigned int frame_size;
- unsigned int frame_max;
-
- unsigned int pg_vec_order;
- unsigned int pg_vec_pages;
- unsigned int pg_vec_len;
-
- atomic_t pending;
-};
-
struct netlink_sock {
/* struct sock has to be the first member of netlink_sock */
struct sock sk;
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 2ee48e4..434e04c 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -195,7 +195,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
}
vport->dev = alloc_netdev(sizeof(struct internal_dev),
- parms->name, NET_NAME_UNKNOWN, do_setup);
+ parms->name, NET_NAME_USER, do_setup);
if (!vport->dev) {
err = -ENOMEM;
goto error_free_vport;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index e45e94c..38512a2 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
@@ -137,33 +139,33 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
lock_sock(&rx->sk);
- if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
+ if (rx->sk.sk_state != RXRPC_UNBOUND) {
ret = -EINVAL;
goto error_unlock;
}
memcpy(&rx->srx, srx, sizeof(rx->srx));
- /* Find or create a local transport endpoint to use */
local = rxrpc_lookup_local(&rx->srx);
if (IS_ERR(local)) {
ret = PTR_ERR(local);
goto error_unlock;
}
- rx->local = local;
- if (srx->srx_service) {
+ if (rx->srx.srx_service) {
write_lock_bh(&local->services_lock);
list_for_each_entry(prx, &local->services, listen_link) {
- if (prx->srx.srx_service == srx->srx_service)
+ if (prx->srx.srx_service == rx->srx.srx_service)
goto service_in_use;
}
+ rx->local = local;
list_add_tail(&rx->listen_link, &local->services);
write_unlock_bh(&local->services_lock);
rx->sk.sk_state = RXRPC_SERVER_BOUND;
} else {
+ rx->local = local;
rx->sk.sk_state = RXRPC_CLIENT_BOUND;
}
@@ -172,8 +174,9 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
return 0;
service_in_use:
- ret = -EADDRINUSE;
write_unlock_bh(&local->services_lock);
+ rxrpc_put_local(local);
+ ret = -EADDRINUSE;
error_unlock:
release_sock(&rx->sk);
error:
@@ -195,11 +198,11 @@ static int rxrpc_listen(struct socket *sock, int backlog)
lock_sock(&rx->sk);
switch (rx->sk.sk_state) {
- case RXRPC_UNCONNECTED:
+ case RXRPC_UNBOUND:
ret = -EADDRNOTAVAIL;
break;
+ case RXRPC_CLIENT_UNBOUND:
case RXRPC_CLIENT_BOUND:
- case RXRPC_CLIENT_CONNECTED:
default:
ret = -EBUSY;
break;
@@ -219,20 +222,18 @@ static int rxrpc_listen(struct socket *sock, int backlog)
/*
* find a transport by address
*/
-static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
- struct sockaddr *addr,
- int addr_len, int flags,
- gfp_t gfp)
+struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_sock *rx,
+ struct sockaddr *addr,
+ int addr_len, int flags,
+ gfp_t gfp)
{
struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
struct rxrpc_transport *trans;
- struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct rxrpc_peer *peer;
_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
ASSERT(rx->local != NULL);
- ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);
if (rx->srx.transport_type != srx->transport_type)
return ERR_PTR(-ESOCKTNOSUPPORT);
@@ -254,7 +255,7 @@ static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
/**
* rxrpc_kernel_begin_call - Allow a kernel service to begin a call
* @sock: The socket on which to make the call
- * @srx: The address of the peer to contact (defaults to socket setting)
+ * @srx: The address of the peer to contact
* @key: The security context to use (defaults to socket setting)
* @user_call_ID: The ID to use
*
@@ -280,25 +281,14 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
lock_sock(&rx->sk);
- if (srx) {
- trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
- sizeof(*srx), 0, gfp);
- if (IS_ERR(trans)) {
- call = ERR_CAST(trans);
- trans = NULL;
- goto out_notrans;
- }
- } else {
- trans = rx->trans;
- if (!trans) {
- call = ERR_PTR(-ENOTCONN);
- goto out_notrans;
- }
- atomic_inc(&trans->usage);
+ trans = rxrpc_name_to_transport(rx, (struct sockaddr *)srx,
+ sizeof(*srx), 0, gfp);
+ if (IS_ERR(trans)) {
+ call = ERR_CAST(trans);
+ trans = NULL;
+ goto out_notrans;
}
- if (!srx)
- srx = &rx->srx;
if (!key)
key = rx->key;
if (key && !key->payload.data[0])
@@ -310,8 +300,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
goto out;
}
- call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
- gfp);
+ call = rxrpc_new_client_call(rx, trans, bundle, user_call_ID, gfp);
rxrpc_put_bundle(trans, bundle);
out:
rxrpc_put_transport(trans);
@@ -367,11 +356,8 @@ EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
int addr_len, int flags)
{
- struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
- struct sock *sk = sock->sk;
- struct rxrpc_transport *trans;
- struct rxrpc_local *local;
- struct rxrpc_sock *rx = rxrpc_sk(sk);
+ struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
+ struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
int ret;
_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
@@ -384,45 +370,28 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
lock_sock(&rx->sk);
+ ret = -EISCONN;
+ if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
+ goto error;
+
switch (rx->sk.sk_state) {
- case RXRPC_UNCONNECTED:
- /* find a local transport endpoint if we don't have one already */
- ASSERTCMP(rx->local, ==, NULL);
- rx->srx.srx_family = AF_RXRPC;
- rx->srx.srx_service = 0;
- rx->srx.transport_type = srx->transport_type;
- rx->srx.transport_len = sizeof(sa_family_t);
- rx->srx.transport.family = srx->transport.family;
- local = rxrpc_lookup_local(&rx->srx);
- if (IS_ERR(local)) {
- release_sock(&rx->sk);
- return PTR_ERR(local);
- }
- rx->local = local;
- rx->sk.sk_state = RXRPC_CLIENT_BOUND;
+ case RXRPC_UNBOUND:
+ rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
+ case RXRPC_CLIENT_UNBOUND:
case RXRPC_CLIENT_BOUND:
break;
- case RXRPC_CLIENT_CONNECTED:
- release_sock(&rx->sk);
- return -EISCONN;
default:
- release_sock(&rx->sk);
- return -EBUSY; /* server sockets can't connect as well */
- }
-
- trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
- GFP_KERNEL);
- if (IS_ERR(trans)) {
- release_sock(&rx->sk);
- _leave(" = %ld", PTR_ERR(trans));
- return PTR_ERR(trans);
+ ret = -EBUSY;
+ goto error;
}
- rx->trans = trans;
- rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;
+ rx->connect_srx = *srx;
+ set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
+ ret = 0;
+error:
release_sock(&rx->sk);
- return 0;
+ return ret;
}
/*
@@ -436,7 +405,7 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
*/
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
- struct rxrpc_transport *trans;
+ struct rxrpc_local *local;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
int ret;
@@ -453,48 +422,38 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
}
}
- trans = NULL;
lock_sock(&rx->sk);
- if (m->msg_name) {
- ret = -EISCONN;
- trans = rxrpc_name_to_transport(sock, m->msg_name,
- m->msg_namelen, 0, GFP_KERNEL);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- trans = NULL;
- goto out;
- }
- } else {
- trans = rx->trans;
- if (trans)
- atomic_inc(&trans->usage);
- }
-
switch (rx->sk.sk_state) {
- case RXRPC_SERVER_LISTENING:
- if (!m->msg_name) {
- ret = rxrpc_server_sendmsg(rx, m, len);
- break;
+ case RXRPC_UNBOUND:
+ local = rxrpc_lookup_local(&rx->srx);
+ if (IS_ERR(local)) {
+ ret = PTR_ERR(local);
+ goto error_unlock;
}
- case RXRPC_SERVER_BOUND:
+
+ rx->local = local;
+ rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
+ /* Fall through */
+
+ case RXRPC_CLIENT_UNBOUND:
case RXRPC_CLIENT_BOUND:
- if (!m->msg_name) {
- ret = -ENOTCONN;
- break;
+ if (!m->msg_name &&
+ test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
+ m->msg_name = &rx->connect_srx;
+ m->msg_namelen = sizeof(rx->connect_srx);
}
- case RXRPC_CLIENT_CONNECTED:
- ret = rxrpc_client_sendmsg(rx, trans, m, len);
+ case RXRPC_SERVER_BOUND:
+ case RXRPC_SERVER_LISTENING:
+ ret = rxrpc_do_sendmsg(rx, m, len);
break;
default:
- ret = -ENOTCONN;
+ ret = -EINVAL;
break;
}
-out:
+error_unlock:
release_sock(&rx->sk);
- if (trans)
- rxrpc_put_transport(trans);
_leave(" = %d", ret);
return ret;
}
@@ -521,7 +480,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
if (optlen != 0)
goto error;
ret = -EISCONN;
- if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+ if (rx->sk.sk_state != RXRPC_UNBOUND)
goto error;
set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
goto success;
@@ -531,7 +490,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
if (rx->key)
goto error;
ret = -EISCONN;
- if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+ if (rx->sk.sk_state != RXRPC_UNBOUND)
goto error;
ret = rxrpc_request_key(rx, optval, optlen);
goto error;
@@ -541,7 +500,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
if (rx->key)
goto error;
ret = -EISCONN;
- if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+ if (rx->sk.sk_state != RXRPC_UNBOUND)
goto error;
ret = rxrpc_server_keyring(rx, optval, optlen);
goto error;
@@ -551,7 +510,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
if (optlen != sizeof(unsigned int))
goto error;
ret = -EISCONN;
- if (rx->sk.sk_state != RXRPC_UNCONNECTED)
+ if (rx->sk.sk_state != RXRPC_UNBOUND)
goto error;
ret = get_user(min_sec_level,
(unsigned int __user *) optval);
@@ -630,7 +589,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
return -ENOMEM;
sock_init_data(sock, sk);
- sk->sk_state = RXRPC_UNCONNECTED;
+ sk->sk_state = RXRPC_UNBOUND;
sk->sk_write_space = rxrpc_write_space;
sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
sk->sk_destruct = rxrpc_sock_destructor;
@@ -703,14 +662,6 @@ static int rxrpc_release_sock(struct sock *sk)
rx->conn = NULL;
}
- if (rx->bundle) {
- rxrpc_put_bundle(rx->trans, rx->bundle);
- rx->bundle = NULL;
- }
- if (rx->trans) {
- rxrpc_put_transport(rx->trans);
- rx->trans = NULL;
- }
if (rx->local) {
rxrpc_put_local(rx->local);
rx->local = NULL;
@@ -796,49 +747,49 @@ static int __init af_rxrpc_init(void)
"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!rxrpc_call_jar) {
- printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
+ pr_notice("Failed to allocate call jar\n");
goto error_call_jar;
}
rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
if (!rxrpc_workqueue) {
- printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
+ pr_notice("Failed to allocate work queue\n");
goto error_work_queue;
}
ret = rxrpc_init_security();
if (ret < 0) {
- printk(KERN_CRIT "RxRPC: Cannot initialise security\n");
+ pr_crit("Cannot initialise security\n");
goto error_security;
}
ret = proto_register(&rxrpc_proto, 1);
if (ret < 0) {
- printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
+ pr_crit("Cannot register protocol\n");
goto error_proto;
}
ret = sock_register(&rxrpc_family_ops);
if (ret < 0) {
- printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
+ pr_crit("Cannot register socket family\n");
goto error_sock;
}
ret = register_key_type(&key_type_rxrpc);
if (ret < 0) {
- printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
+ pr_crit("Cannot register client key type\n");
goto error_key_type;
}
ret = register_key_type(&key_type_rxrpc_s);
if (ret < 0) {
- printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
+ pr_crit("Cannot register server key type\n");
goto error_key_type_s;
}
ret = rxrpc_sysctl_init();
if (ret < 0) {
- printk(KERN_CRIT "RxRPC: Cannot register sysctls\n");
+ pr_crit("Cannot register sysctls\n");
goto error_sysctls;
}
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index e7a7f05..eea5f4a 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index 374478e..1838178 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index 571a41f..68125dc 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
@@ -194,6 +196,43 @@ struct rxrpc_call *rxrpc_find_call_hash(
}
/*
+ * find an extant server call
+ * - called in process context with IRQs enabled
+ */
+struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
+ unsigned long user_call_ID)
+{
+ struct rxrpc_call *call;
+ struct rb_node *p;
+
+ _enter("%p,%lx", rx, user_call_ID);
+
+ read_lock(&rx->call_lock);
+
+ p = rx->calls.rb_node;
+ while (p) {
+ call = rb_entry(p, struct rxrpc_call, sock_node);
+
+ if (user_call_ID < call->user_call_ID)
+ p = p->rb_left;
+ else if (user_call_ID > call->user_call_ID)
+ p = p->rb_right;
+ else
+ goto found_extant_call;
+ }
+
+ read_unlock(&rx->call_lock);
+ _leave(" = NULL");
+ return NULL;
+
+found_extant_call:
+ rxrpc_get_call(call);
+ read_unlock(&rx->call_lock);
+ _leave(" = %p [%d]", call, atomic_read(&call->usage));
+ return call;
+}
+
+/*
* allocate a new call
*/
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
@@ -309,51 +348,27 @@ static struct rxrpc_call *rxrpc_alloc_client_call(
* set up a call for the given data
* - called in process context with IRQs enabled
*/
-struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
+struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
struct rxrpc_transport *trans,
struct rxrpc_conn_bundle *bundle,
unsigned long user_call_ID,
- int create,
gfp_t gfp)
{
- struct rxrpc_call *call, *candidate;
- struct rb_node *p, *parent, **pp;
-
- _enter("%p,%d,%d,%lx,%d",
- rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
- user_call_ID, create);
-
- /* search the extant calls first for one that matches the specified
- * user ID */
- read_lock(&rx->call_lock);
-
- p = rx->calls.rb_node;
- while (p) {
- call = rb_entry(p, struct rxrpc_call, sock_node);
-
- if (user_call_ID < call->user_call_ID)
- p = p->rb_left;
- else if (user_call_ID > call->user_call_ID)
- p = p->rb_right;
- else
- goto found_extant_call;
- }
-
- read_unlock(&rx->call_lock);
+ struct rxrpc_call *call, *xcall;
+ struct rb_node *parent, **pp;
- if (!create || !trans)
- return ERR_PTR(-EBADSLT);
+ _enter("%p,%d,%d,%lx",
+ rx, trans->debug_id, bundle ? bundle->debug_id : -1,
+ user_call_ID);
- /* not yet present - create a candidate for a new record and then
- * redo the search */
- candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
- if (IS_ERR(candidate)) {
- _leave(" = %ld", PTR_ERR(candidate));
- return candidate;
+ call = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
+ if (IS_ERR(call)) {
+ _leave(" = %ld", PTR_ERR(call));
+ return call;
}
- candidate->user_call_ID = user_call_ID;
- __set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);
+ call->user_call_ID = user_call_ID;
+ __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
write_lock(&rx->call_lock);
@@ -361,19 +376,16 @@ struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
parent = NULL;
while (*pp) {
parent = *pp;
- call = rb_entry(parent, struct rxrpc_call, sock_node);
+ xcall = rb_entry(parent, struct rxrpc_call, sock_node);
- if (user_call_ID < call->user_call_ID)
+ if (user_call_ID < xcall->user_call_ID)
pp = &(*pp)->rb_left;
- else if (user_call_ID > call->user_call_ID)
+ else if (user_call_ID > xcall->user_call_ID)
pp = &(*pp)->rb_right;
else
- goto found_extant_second;
+ goto found_user_ID_now_present;
}
- /* second search also failed; add the new call */
- call = candidate;
- candidate = NULL;
rxrpc_get_call(call);
rb_link_node(&call->sock_node, parent, pp);
@@ -389,20 +401,16 @@ struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
_leave(" = %p [new]", call);
return call;
- /* we found the call in the list immediately */
-found_extant_call:
- rxrpc_get_call(call);
- read_unlock(&rx->call_lock);
- _leave(" = %p [extant %d]", call, atomic_read(&call->usage));
- return call;
-
- /* we found the call on the second time through the list */
-found_extant_second:
- rxrpc_get_call(call);
+ /* We unexpectedly found the user ID in the list after taking
+ * the call_lock. This shouldn't happen unless the user races
+ * with itself and tries to add the same user ID twice at the
+ * same time in different threads.
+ */
+found_user_ID_now_present:
write_unlock(&rx->call_lock);
- rxrpc_put_call(candidate);
- _leave(" = %p [second %d]", call, atomic_read(&call->usage));
- return call;
+ rxrpc_put_call(call);
+ _leave(" = -EEXIST [%p]", call);
+ return ERR_PTR(-EEXIST);
}
/*
@@ -564,46 +572,6 @@ old_call:
}
/*
- * find an extant server call
- * - called in process context with IRQs enabled
- */
-struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
- unsigned long user_call_ID)
-{
- struct rxrpc_call *call;
- struct rb_node *p;
-
- _enter("%p,%lx", rx, user_call_ID);
-
- /* search the extant calls for one that matches the specified user
- * ID */
- read_lock(&rx->call_lock);
-
- p = rx->calls.rb_node;
- while (p) {
- call = rb_entry(p, struct rxrpc_call, sock_node);
-
- if (user_call_ID < call->user_call_ID)
- p = p->rb_left;
- else if (user_call_ID > call->user_call_ID)
- p = p->rb_right;
- else
- goto found_extant_call;
- }
-
- read_unlock(&rx->call_lock);
- _leave(" = NULL");
- return NULL;
-
- /* we found the call in the list immediately */
-found_extant_call:
- rxrpc_get_call(call);
- read_unlock(&rx->call_lock);
- _leave(" = %p [%d]", call, atomic_read(&call->usage));
- return call;
-}
-
-/*
* detach a call from a socket and set up for release
*/
void rxrpc_release_call(struct rxrpc_call *call)
@@ -669,8 +637,7 @@ void rxrpc_release_call(struct rxrpc_call *call)
conn->channels[3] == NULL);
break;
default:
- printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
- conn->avail_calls);
+ pr_err("conn->avail_calls=%d\n", conn->avail_calls);
BUG();
}
}
@@ -935,16 +902,15 @@ void __exit rxrpc_destroy_all_calls(void)
if (call->state != RXRPC_CALL_DEAD)
break;
default:
- printk(KERN_ERR "RXRPC:"
- " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
+ pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
call, atomic_read(&call->usage),
atomic_read(&call->ackr_not_idle),
rxrpc_call_states[call->state],
call->flags, call->events);
if (!skb_queue_empty(&call->rx_queue))
- printk(KERN_ERR"RXRPC: Rx queue occupied\n");
+ pr_err("Rx queue occupied\n");
if (!skb_queue_empty(&call->rx_oos_queue))
- printk(KERN_ERR"RXRPC: OOS queue occupied\n");
+ pr_err("OOS queue occupied\n");
break;
}
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 97f4fae..8ecde4b 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
@@ -78,11 +80,6 @@ struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
_enter("%p{%x},%x,%hx,",
rx, key_serial(key), trans->debug_id, service_id);
- if (rx->trans == trans && rx->bundle) {
- atomic_inc(&rx->bundle->usage);
- return rx->bundle;
- }
-
/* search the extant bundles first for one that matches the specified
* user ID */
spin_lock(&trans->client_lock);
@@ -136,10 +133,6 @@ struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
rb_insert_color(&bundle->node, &trans->bundles);
spin_unlock(&trans->client_lock);
_net("BUNDLE new on trans %d", trans->debug_id);
- if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
- atomic_inc(&bundle->usage);
- rx->bundle = bundle;
- }
_leave(" = %p [new]", bundle);
return bundle;
@@ -148,10 +141,6 @@ found_extant_bundle:
atomic_inc(&bundle->usage);
spin_unlock(&trans->client_lock);
_net("BUNDLE old on trans %d", trans->debug_id);
- if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
- atomic_inc(&bundle->usage);
- rx->bundle = bundle;
- }
_leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
return bundle;
@@ -161,10 +150,6 @@ found_extant_second:
spin_unlock(&trans->client_lock);
kfree(candidate);
_net("BUNDLE old2 on trans %d", trans->debug_id);
- if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
- atomic_inc(&bundle->usage);
- rx->bundle = bundle;
- }
_leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
return bundle;
}
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
index 5f95639..8bdd692 100644
--- a/net/rxrpc/ar-connevent.c
+++ b/net/rxrpc/ar-connevent.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 6ff9741..d7c2a0b 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index f0b807a..b89dcdc 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -39,9 +39,9 @@ struct rxrpc_crypt {
* sk_state for RxRPC sockets
*/
enum {
- RXRPC_UNCONNECTED = 0,
+ RXRPC_UNBOUND = 0,
+ RXRPC_CLIENT_UNBOUND, /* Unbound socket used as client */
RXRPC_CLIENT_BOUND, /* client local address bound */
- RXRPC_CLIENT_CONNECTED, /* client is connected */
RXRPC_SERVER_BOUND, /* server local address bound */
RXRPC_SERVER_LISTENING, /* server listening for connections */
RXRPC_CLOSE, /* socket is being closed */
@@ -55,8 +55,6 @@ struct rxrpc_sock {
struct sock sk;
rxrpc_interceptor_t interceptor; /* kernel service Rx interceptor function */
struct rxrpc_local *local; /* local endpoint */
- struct rxrpc_transport *trans; /* transport handler */
- struct rxrpc_conn_bundle *bundle; /* virtual connection bundle */
struct rxrpc_connection *conn; /* exclusive virtual connection */
struct list_head listen_link; /* link in the local endpoint's listen list */
struct list_head secureq; /* calls awaiting connection security clearance */
@@ -65,11 +63,13 @@ struct rxrpc_sock {
struct key *securities; /* list of server security descriptors */
struct rb_root calls; /* outstanding calls on this socket */
unsigned long flags;
+#define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */
#define RXRPC_SOCK_EXCLUSIVE_CONN 1 /* exclusive connection for a client socket */
rwlock_t call_lock; /* lock for calls */
u32 min_sec_level; /* minimum security level */
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
struct sockaddr_rxrpc srx; /* local address */
+ struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */
sa_family_t proto; /* protocol created with */
};
@@ -477,6 +477,10 @@ extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;
+extern struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_sock *,
+ struct sockaddr *,
+ int, int, gfp_t);
+
/*
* ar-accept.c
*/
@@ -502,14 +506,14 @@ extern rwlock_t rxrpc_call_lock;
struct rxrpc_call *rxrpc_find_call_hash(struct rxrpc_host_header *,
void *, sa_family_t, const void *);
-struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
+struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
+struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_transport *,
struct rxrpc_conn_bundle *,
- unsigned long, int, gfp_t);
+ unsigned long, gfp_t);
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
struct rxrpc_connection *,
struct rxrpc_host_header *);
-struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
void rxrpc_release_call(struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
void __rxrpc_put_call(struct rxrpc_call *);
@@ -581,9 +585,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
extern unsigned int rxrpc_resend_timeout;
int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
-int rxrpc_client_sendmsg(struct rxrpc_sock *, struct rxrpc_transport *,
- struct msghdr *, size_t);
-int rxrpc_server_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
+int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
/*
* ar-peer.c
@@ -744,21 +746,18 @@ do { \
#define ASSERT(X) \
do { \
if (unlikely(!(X))) { \
- printk(KERN_ERR "\n"); \
- printk(KERN_ERR "RxRPC: Assertion failed\n"); \
+ pr_err("Assertion failed\n"); \
BUG(); \
} \
} while (0)
#define ASSERTCMP(X, OP, Y) \
do { \
- if (unlikely(!((X) OP (Y)))) { \
- printk(KERN_ERR "\n"); \
- printk(KERN_ERR "RxRPC: Assertion failed\n"); \
- printk(KERN_ERR "%lu " #OP " %lu is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
- printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
+ unsigned long _x = (unsigned long)(X); \
+ unsigned long _y = (unsigned long)(Y); \
+ if (unlikely(!(_x OP _y))) { \
+ pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
+ _x, _x, #OP, _y, _y); \
BUG(); \
} \
} while (0)
@@ -766,21 +765,18 @@ do { \
#define ASSERTIF(C, X) \
do { \
if (unlikely((C) && !(X))) { \
- printk(KERN_ERR "\n"); \
- printk(KERN_ERR "RxRPC: Assertion failed\n"); \
+ pr_err("Assertion failed\n"); \
BUG(); \
} \
} while (0)
#define ASSERTIFCMP(C, X, OP, Y) \
do { \
- if (unlikely((C) && !((X) OP (Y)))) { \
- printk(KERN_ERR "\n"); \
- printk(KERN_ERR "RxRPC: Assertion failed\n"); \
- printk(KERN_ERR "%lu " #OP " %lu is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
- printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
+ unsigned long _x = (unsigned long)(X); \
+ unsigned long _y = (unsigned long)(Y); \
+ if (unlikely((C) && !(_x OP _y))) { \
+ pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
+ _x, _x, #OP, _y, _y); \
BUG(); \
} \
} while (0)
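
The rewritten ASSERTCMP()/ASSERTIFCMP() macros above sample each operand once into a local variable before testing and printing it, so the decimal and hex values in the failure message always refer to the same evaluation. A minimal, hypothetical call site (illustrative only, not part of the patch):

	/* Hypothetical use: atomic_read() runs exactly once; on failure the
	 * message prints that same sampled value in decimal and hex. */
	ASSERTCMP(atomic_read(&call->usage), >, 0);
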
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 1021b4c..4ad56fa 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -12,6 +12,8 @@
* "afs@CAMBRIDGE.REDHAT.COM>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/net.h>
@@ -800,7 +802,7 @@ static void rxrpc_free_token_list(struct rxrpc_key_token *token)
rxrpc_rxk5_free(token->k5);
break;
default:
- printk(KERN_ERR "Unknown token type %x on rxrpc key\n",
+ pr_err("Unknown token type %x on rxrpc key\n",
token->security_index);
BUG();
}
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index 4e1e6db..701c42b 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 51cb100..2e3c406 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
@@ -30,13 +32,13 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
/*
* extract control messages from the sendmsg() control buffer
*/
-static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
+static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
unsigned long *user_call_ID,
enum rxrpc_command *command,
- u32 *abort_code,
- bool server)
+ u32 *abort_code)
{
struct cmsghdr *cmsg;
+ bool got_user_ID = false;
int len;
*command = RXRPC_CMD_SEND_DATA;
@@ -68,6 +70,7 @@ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
CMSG_DATA(cmsg);
}
_debug("User Call ID %lx", *user_call_ID);
+ got_user_ID = true;
break;
case RXRPC_ABORT:
@@ -88,8 +91,6 @@ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
*command = RXRPC_CMD_ACCEPT;
if (len != 0)
return -EINVAL;
- if (!server)
- return -EISCONN;
break;
default:
@@ -97,6 +98,8 @@ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
}
}
+ if (!got_user_ID)
+ return -EINVAL;
_leave(" = 0");
return 0;
}
@@ -124,55 +127,96 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
}
/*
+ * Create a new client call for sendmsg().
+ */
+static struct rxrpc_call *
+rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
+ unsigned long user_call_ID)
+{
+ struct rxrpc_conn_bundle *bundle;
+ struct rxrpc_transport *trans;
+ struct rxrpc_call *call;
+ struct key *key;
+ long ret;
+
+ DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);
+
+ _enter("");
+
+ if (!msg->msg_name)
+ return ERR_PTR(-EDESTADDRREQ);
+
+ trans = rxrpc_name_to_transport(rx, msg->msg_name, msg->msg_namelen, 0,
+ GFP_KERNEL);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
+ key = rx->key;
+ if (key && !rx->key->payload.data[0])
+ key = NULL;
+ bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, GFP_KERNEL);
+ if (IS_ERR(bundle)) {
+ ret = PTR_ERR(bundle);
+ goto out_trans;
+ }
+
+ call = rxrpc_new_client_call(rx, trans, bundle, user_call_ID,
+ GFP_KERNEL);
+ rxrpc_put_bundle(trans, bundle);
+ rxrpc_put_transport(trans);
+ if (IS_ERR(call)) {
+ ret = PTR_ERR(call);
+ goto out_trans;
+ }
+
+ _leave(" = %p\n", call);
+ return call;
+
+out_trans:
+ rxrpc_put_transport(trans);
+out:
+ _leave(" = %ld", ret);
+ return ERR_PTR(ret);
+}
+
+/*
* send a message forming part of a client call through an RxRPC socket
* - caller holds the socket locked
* - the socket may be either a client socket or a server socket
*/
-int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans,
- struct msghdr *msg, size_t len)
+int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
{
- struct rxrpc_conn_bundle *bundle;
enum rxrpc_command cmd;
struct rxrpc_call *call;
unsigned long user_call_ID = 0;
- struct key *key;
- u16 service_id;
u32 abort_code = 0;
int ret;
_enter("");
- ASSERT(trans != NULL);
-
- ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
- false);
+ ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code);
if (ret < 0)
return ret;
- bundle = NULL;
- if (trans) {
- service_id = rx->srx.srx_service;
- if (msg->msg_name) {
- DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx,
- msg->msg_name);
- service_id = srx->srx_service;
- }
- key = rx->key;
- if (key && !rx->key->payload.data[0])
- key = NULL;
- bundle = rxrpc_get_bundle(rx, trans, key, service_id,
- GFP_KERNEL);
- if (IS_ERR(bundle))
- return PTR_ERR(bundle);
+ if (cmd == RXRPC_CMD_ACCEPT) {
+ if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
+ return -EINVAL;
+ call = rxrpc_accept_call(rx, user_call_ID);
+ if (IS_ERR(call))
+ return PTR_ERR(call);
+ rxrpc_put_call(call);
+ return 0;
}
- call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
- abort_code == 0, GFP_KERNEL);
- if (trans)
- rxrpc_put_bundle(trans, bundle);
- if (IS_ERR(call)) {
- _leave(" = %ld", PTR_ERR(call));
- return PTR_ERR(call);
+ call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
+ if (!call) {
+ if (cmd != RXRPC_CMD_SEND_DATA)
+ return -EBADSLT;
+ call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID);
+ if (IS_ERR(call))
+ return PTR_ERR(call);
}
_debug("CALL %d USR %lx ST %d on CONN %p",
@@ -180,14 +224,21 @@ int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans,
if (call->state >= RXRPC_CALL_COMPLETE) {
/* it's too late for this call */
- ret = -ESHUTDOWN;
+ ret = -ECONNRESET;
} else if (cmd == RXRPC_CMD_SEND_ABORT) {
rxrpc_send_abort(call, abort_code);
+ ret = 0;
} else if (cmd != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
- } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
+ } else if (!call->in_clientflag &&
+ call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
/* request phase complete for this client call */
ret = -EPROTO;
+ } else if (call->in_clientflag &&
+ call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+ call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+ /* Reply phase not begun or not complete for service call. */
+ ret = -EPROTO;
} else {
ret = rxrpc_send_data(rx, call, msg, len);
}
@@ -266,67 +317,6 @@ void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code)
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
/*
- * send a message through a server socket
- * - caller holds the socket locked
- */
-int rxrpc_server_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
-{
- enum rxrpc_command cmd;
- struct rxrpc_call *call;
- unsigned long user_call_ID = 0;
- u32 abort_code = 0;
- int ret;
-
- _enter("");
-
- ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
- true);
- if (ret < 0)
- return ret;
-
- if (cmd == RXRPC_CMD_ACCEPT) {
- call = rxrpc_accept_call(rx, user_call_ID);
- if (IS_ERR(call))
- return PTR_ERR(call);
- rxrpc_put_call(call);
- return 0;
- }
-
- call = rxrpc_find_server_call(rx, user_call_ID);
- if (!call)
- return -EBADSLT;
- if (call->state >= RXRPC_CALL_COMPLETE) {
- ret = -ESHUTDOWN;
- goto out;
- }
-
- switch (cmd) {
- case RXRPC_CMD_SEND_DATA:
- if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
- call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
- call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
- /* Tx phase not yet begun for this call */
- ret = -EPROTO;
- break;
- }
-
- ret = rxrpc_send_data(rx, call, msg, len);
- break;
-
- case RXRPC_CMD_SEND_ABORT:
- rxrpc_send_abort(call, abort_code);
- break;
- default:
- BUG();
- }
-
- out:
- rxrpc_put_call(call);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
* send a packet through the transport endpoint
*/
int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index dc089b1..0b54cda 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 160f092..59706b9 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
@@ -307,7 +309,7 @@ receive_non_data_message:
&abort_code);
break;
default:
- pr_err("RxRPC: Unknown packet mark %u\n", skb->mark);
+ pr_err("Unknown packet mark %u\n", skb->mark);
BUG();
break;
}
diff --git a/net/rxrpc/ar-skbuff.c b/net/rxrpc/ar-skbuff.c
index 62a2674..eee0cfd9 100644
--- a/net/rxrpc/ar-skbuff.c
+++ b/net/rxrpc/ar-skbuff.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
index 66a1a56..a1b6518 100644
--- a/net/rxrpc/ar-transport.c
+++ b/net/rxrpc/ar-transport.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index bab56ed..36a6340 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/net.h>
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 336774a..b6db56e 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -283,10 +283,11 @@ err2:
p->tcfc_index = index ? index : tcf_hash_new_index(tn);
p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies;
+ p->tcfc_tm.firstuse = 0;
if (est) {
err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
&p->tcfc_rate_est,
- &p->tcfc_lock, est);
+ &p->tcfc_lock, NULL, est);
if (err) {
free_percpu(p->cpu_qstats);
goto err2;
@@ -503,8 +504,8 @@ nla_put_failure:
}
EXPORT_SYMBOL(tcf_action_dump_1);
-int
-tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref)
+int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
+ int bind, int ref)
{
struct tc_action *a;
int err = -EINVAL;
@@ -670,7 +671,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (err < 0)
goto errout;
- if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
+ if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
&p->tcfc_rate_est) < 0 ||
gnet_stats_copy_queue(&d, p->cpu_qstats,
@@ -687,9 +688,9 @@ errout:
return -1;
}
-static int
-tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq,
- u16 flags, int event, int bind, int ref)
+static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
+ u32 portid, u32 seq, u16 flags, int event, int bind,
+ int ref)
{
struct tcamsg *t;
struct nlmsghdr *nlh;
@@ -730,7 +731,8 @@ act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
- if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
+ if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
+ 0, 0) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -838,7 +840,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
if (a.ops == NULL) /*some idjot trying to flush unknown action */
goto err_out;
- nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
+ nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
+ sizeof(*t), 0);
if (!nlh)
goto out_module_put;
t = nlmsg_data(nlh);
@@ -1001,7 +1004,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
u32 portid = skb ? NETLINK_CB(skb).portid : 0;
int ret = 0, ovr = 0;
- if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
+ if ((n->nlmsg_type != RTM_GETACTION) &&
+ !netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index c7123e0..f7b6cf4 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -154,10 +154,7 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
if (ret)
goto nla_put_failure;
- tm.install = jiffies_to_clock_t(jiffies - prog->tcf_tm.install);
- tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse);
- tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires);
-
+ tcf_tm_dump(&tm, &prog->tcf_tm);
if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
TCA_ACT_BPF_PAD))
goto nla_put_failure;
@@ -172,7 +169,8 @@ nla_put_failure:
static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
[TCA_ACT_BPF_PARMS] = { .len = sizeof(struct tc_act_bpf) },
[TCA_ACT_BPF_FD] = { .type = NLA_U32 },
- [TCA_ACT_BPF_NAME] = { .type = NLA_NUL_STRING, .len = ACT_BPF_NAME_LEN },
+ [TCA_ACT_BPF_NAME] = { .type = NLA_NUL_STRING,
+ .len = ACT_BPF_NAME_LEN },
[TCA_ACT_BPF_OPS_LEN] = { .type = NLA_U16 },
[TCA_ACT_BPF_OPS] = { .type = NLA_BINARY,
.len = sizeof(struct sock_filter) * BPF_MAXINSNS },
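
Throughout these act_* conversions (act_bpf above and the files that follow), the per-packet `tcf_tm.lastuse = jiffies` assignments become tcf_lastuse_update() and the three jiffies_to_clock_t() lines in each dump path become tcf_tm_dump(). Neither helper is shown in these hunks; judging from the call sites and the new firstuse field, they presumably look roughly like the following sketch (assumed definitions, not quoted from the tree):

	/* Sketch: refresh the use timestamps on the packet fast path. */
	static inline void tcf_lastuse_update(struct tcf_t *tm)
	{
		unsigned long now = jiffies;

		if (tm->lastuse != now)
			tm->lastuse = now;
		if (unlikely(!tm->firstuse))
			tm->firstuse = now;
	}

	/* Sketch: convert jiffies-based timestamps to clock_t for netlink dumps. */
	static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
	{
		dtm->install  = jiffies_to_clock_t(jiffies - stm->install);
		dtm->lastuse  = jiffies_to_clock_t(jiffies - stm->lastuse);
		dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
		dtm->expires  = jiffies_to_clock_t(stm->expires);
	}
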
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 2ba700c..35a5270 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -44,7 +44,7 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
int proto;
spin_lock(&ca->tcf_lock);
- ca->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&ca->tcf_tm);
bstats_update(&ca->tcf_bstats, skb);
if (skb->protocol == htons(ETH_P_IP)) {
@@ -160,9 +160,7 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - ci->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - ci->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(ci->tcf_tm.expires);
+ tcf_tm_dump(&t, &ci->tcf_tm);
if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
TCA_CONNMARK_PAD))
goto nla_put_failure;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 28e934e..dcd9aba 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -501,7 +501,7 @@ static int tcf_csum(struct sk_buff *skb,
u32 update_flags;
spin_lock(&p->tcf_lock);
- p->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&p->tcf_tm);
bstats_update(&p->tcf_bstats, skb);
action = p->tcf_action;
update_flags = p->update_flags;
@@ -546,9 +546,8 @@ static int tcf_csum_dump(struct sk_buff *skb,
if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
+
+ tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
goto nla_put_failure;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index ec5cc84..19058a7 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -162,7 +162,8 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
tm->lastuse = lastuse;
}
-static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_gact *gact = a->priv;
@@ -188,9 +189,7 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
goto nla_put_failure;
}
#endif
- t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
+ tcf_tm_dump(&t, &gact->tcf_tm);
if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD))
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 658046d..02f5a8b 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -553,9 +553,7 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(ife->tcf_tm.expires);
+ tcf_tm_dump(&t, &ife->tcf_tm);
if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
goto nla_put_failure;
@@ -623,7 +621,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
spin_lock(&ife->tcf_lock);
bstats_update(&ife->tcf_bstats, skb);
- ife->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&ife->tcf_tm);
spin_unlock(&ife->tcf_lock);
ifehdrln = ntohs(ifehdrln);
@@ -711,7 +709,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
spin_lock(&ife->tcf_lock);
bstats_update(&ife->tcf_bstats, skb);
- ife->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&ife->tcf_tm);
if (!metalen) { /* no metadata to send */
/* abuse overlimits to count when we allow packet
@@ -802,7 +800,7 @@ static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
pr_info_ratelimited("unknown failure(policy neither de/encode\n");
spin_lock(&ife->tcf_lock);
bstats_update(&ife->tcf_bstats, skb);
- ife->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&ife->tcf_tm);
ife->tcf_qstats.drops++;
spin_unlock(&ife->tcf_lock);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 9f002ad..e7c0f4d 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -34,7 +34,8 @@ static int ipt_net_id;
static int xt_net_id;
-static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook)
+static int ipt_init_target(struct xt_entry_target *t, char *table,
+ unsigned int hook)
{
struct xt_tgchk_param par;
struct xt_target *target;
@@ -212,7 +213,7 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
spin_lock(&ipt->tcf_lock);
- ipt->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&ipt->tcf_tm);
bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev
@@ -250,7 +251,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
}
-static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_ipt *ipt = a->priv;
@@ -277,11 +279,11 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
goto nla_put_failure;
- tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
- tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
- tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
+
+ tcf_tm_dump(&tm, &ipt->tcf_tm);
if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
goto nla_put_failure;
+
kfree(t);
return skb->len;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 128942b..787751a 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -157,7 +157,6 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
u32 at;
tcf_lastuse_update(&m->tcf_tm);
-
bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
rcu_read_lock();
@@ -219,9 +218,8 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
+
+ tcf_tm_dump(&t, &m->tcf_tm);
if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index c0a879f..06ccb03 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -103,7 +103,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
spin_lock(&p->tcf_lock);
- p->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&p->tcf_tm);
old_addr = p->old_addr;
new_addr = p->new_addr;
mask = p->mask;
@@ -264,9 +264,8 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
+
+ tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
goto nla_put_failure;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index c6e18f2..82d3c14 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -121,7 +121,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
spin_lock(&p->tcf_lock);
- p->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&p->tcf_tm);
if (p->tcfp_nkeys > 0) {
struct tc_pedit_key *tkey = p->tcfp_keys;
@@ -200,11 +200,11 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
+
+ tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD))
goto nla_put_failure;
+
kfree(opt);
return skb->len;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index c557789..ff34dd3 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -182,7 +182,8 @@ override:
if (est) {
err = gen_replace_estimator(&police->tcf_bstats, NULL,
&police->tcf_rate_est,
- &police->tcf_lock, est);
+ &police->tcf_lock,
+ NULL, est);
if (err)
goto failure_unlock;
} else if (tb[TCA_POLICE_AVRATE] &&
@@ -336,6 +337,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
+ t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
goto nla_put_failure;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e42f8da..be5fbb5 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -35,7 +35,7 @@ static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
struct tcf_defact *d = a->priv;
spin_lock(&d->tcf_lock);
- d->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&d->tcf_tm);
bstats_update(&d->tcf_bstats, skb);
/* print policy string followed by _ then packet count
@@ -158,9 +158,8 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
+
+ tcf_tm_dump(&t, &d->tcf_tm);
if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD))
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index e928802..7e2bc3c 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -37,7 +37,7 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
struct tcf_skbedit *d = a->priv;
spin_lock(&d->tcf_lock);
- d->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&d->tcf_tm);
bstats_update(&d->tcf_bstats, skb);
if (d->flags & SKBEDIT_F_PRIORITY)
@@ -168,9 +168,8 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
&d->mark))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
+
+ tcf_tm_dump(&t, &d->tcf_tm);
if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index ac4adc8..b075d50 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -31,7 +31,7 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
int err;
spin_lock(&v->tcf_lock);
- v->tcf_tm.lastuse = jiffies;
+ tcf_lastuse_update(&v->tcf_tm);
bstats_update(&v->tcf_bstats, skb);
action = v->tcf_action;
@@ -179,12 +179,11 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
if (v->tcfv_action == TCA_VLAN_ACT_PUSH &&
(nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) ||
- nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL, v->tcfv_push_proto)))
+ nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
+ v->tcfv_push_proto)))
goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse);
- t.expires = jiffies_to_clock_t(v->tcf_tm.expires);
+ tcf_tm_dump(&t, &v->tcf_tm);
if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
goto nla_put_failure;
return skb->len;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a75864d..aafa6bce 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -351,8 +351,9 @@ errout:
return err;
}
-static int tcf_fill_node(struct net *net, struct sk_buff *skb, struct tcf_proto *tp,
- unsigned long fh, u32 portid, u32 seq, u16 flags, int event)
+static int tcf_fill_node(struct net *net, struct sk_buff *skb,
+ struct tcf_proto *tp, unsigned long fh, u32 portid,
+ u32 seq, u16 flags, int event)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
@@ -474,9 +475,11 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
TC_H_MIN(tcm->tcm_info) != tp->protocol)
continue;
if (t > s_t)
- memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
+ memset(&cb->args[1], 0,
+ sizeof(cb->args)-sizeof(cb->args[0]));
if (cb->args[1] == 0) {
- if (tcf_fill_node(net, skb, tp, 0, NETLINK_CB(cb->skb).portid,
+ if (tcf_fill_node(net, skb, tp, 0,
+ NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWTFILTER) <= 0)
break;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index b3b7978..1ea6f76 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -66,6 +66,7 @@ struct cls_fl_filter {
struct fl_flow_key key;
struct list_head list;
u32 handle;
+ u32 flags;
struct rcu_head rcu;
};
@@ -123,6 +124,9 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct fl_flow_key skb_key;
struct fl_flow_key skb_mkey;
+ if (!atomic_read(&head->ht.nelems))
+ return -1;
+
fl_clear_masked_range(&skb_key, &head->mask);
skb_key.indev_ifindex = skb->skb_iif;
/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
@@ -136,7 +140,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
f = rhashtable_lookup_fast(&head->ht,
fl_key_get_start(&skb_mkey, &head->mask),
head->ht_params);
- if (f) {
+ if (f && !(f->flags & TCA_CLS_FLAGS_SKIP_SW)) {
*res = f->res;
return tcf_exts_exec(skb, &f->exts, res);
}
@@ -524,7 +528,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
struct cls_fl_filter *fnew;
struct nlattr *tb[TCA_FLOWER_MAX + 1];
struct fl_flow_mask mask = {};
- u32 flags = 0;
int err;
if (!tca[TCA_OPTIONS])
@@ -552,8 +555,14 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
fnew->handle = handle;
- if (tb[TCA_FLOWER_FLAGS])
- flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+ if (tb[TCA_FLOWER_FLAGS]) {
+ fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
+ if (!tc_flags_valid(fnew->flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+ }
err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
if (err)
@@ -563,10 +572,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (err)
goto errout;
- err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
- head->ht_params);
- if (err)
- goto errout;
+ if (!(fnew->flags & TCA_CLS_FLAGS_SKIP_SW)) {
+ err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
+ head->ht_params);
+ if (err)
+ goto errout;
+ }
fl_hw_replace_filter(tp,
&head->dissector,
@@ -574,7 +585,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
&fnew->key,
&fnew->exts,
(unsigned long)fnew,
- flags);
+ fnew->flags);
if (fold) {
rhashtable_remove_fast(&head->ht, &fold->ht_node,
@@ -734,6 +745,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
sizeof(key->tp.dst))))
goto nla_put_failure;
+ nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);
+
if (tcf_exts_dump(skb, &f->exts))
goto nla_put_failure;
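
In the cls_flower changes above, fl_change() now rejects invalid flag combinations before installing the filter, and a filter marked SKIP_SW is kept out of the software hash table so fl_classify() never matches it. tc_flags_valid() itself is not part of this hunk; a plausible sketch, assuming the only defined bits are SKIP_HW and SKIP_SW and that requesting both is rejected:

	/* Sketch: flags are valid if they use only known bits and do not ask
	 * to skip both the hardware and the software path. */
	static inline bool tc_flags_valid(u32 flags)
	{
		if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
			return false;
		if ((flags & (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)) ==
		    (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
			return false;
		return true;
	}
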
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ddf047d..d4a8bbf 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -982,7 +982,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
- spinlock_t *root_lock;
+ seqcount_t *running;
err = -EOPNOTSUPP;
if (sch->flags & TCQ_F_MQROOT)
@@ -991,14 +991,15 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
if ((sch->parent != TC_H_ROOT) &&
!(sch->flags & TCQ_F_INGRESS) &&
(!p || !(p->flags & TCQ_F_MQROOT)))
- root_lock = qdisc_root_sleeping_lock(sch);
+ running = qdisc_root_sleeping_running(sch);
else
- root_lock = qdisc_lock(sch);
+ running = &sch->running;
err = gen_new_estimator(&sch->bstats,
sch->cpu_bstats,
&sch->rate_est,
- root_lock,
+ NULL,
+ running,
tca[TCA_RATE]);
if (err)
goto err_out4;
@@ -1061,7 +1062,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
gen_replace_estimator(&sch->bstats,
sch->cpu_bstats,
&sch->rate_est,
- qdisc_root_sleeping_lock(sch),
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
}
out:
@@ -1369,8 +1371,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
goto nla_put_failure;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
- qdisc_root_sleeping_lock(q), &d,
- TCA_PAD) < 0)
+ NULL, &d, TCA_PAD) < 0)
goto nla_put_failure;
if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1381,7 +1382,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
cpu_qstats = q->cpu_qstats;
}
- if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
+ &d, cpu_bstats, &q->bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
goto nla_put_failure;
@@ -1684,8 +1686,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
goto nla_put_failure;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
- qdisc_root_sleeping_lock(q), &d,
- TCA_PAD) < 0)
+ NULL, &d, TCA_PAD) < 0)
goto nla_put_failure;
if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
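
The sch_api.c changes above drop qdisc_root_sleeping_lock() from the estimator and stats paths in favour of a per-qdisc seqcount. From the call-site changes, the updated helper prototypes presumably read roughly as follows (reconstructed from the callers, not quoted from include/net/gen_stats.h):

	/* Assumed prototypes after the root-lock -> seqcount conversion:
	 * callers pass a stats spinlock, a running seqcount, or NULL. */
	int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
			      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			      struct gnet_stats_rate_est64 *rate_est,
			      spinlock_t *stats_lock,
			      seqcount_t *running,
			      struct nlattr *opt);

	int gnet_stats_copy_basic(const seqcount_t *running,
				  struct gnet_dump *d,
				  struct gnet_stats_basic_cpu __percpu *cpu,
				  struct gnet_stats_basic_packed *b);
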
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 1911af3..7e6c12d 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -519,20 +519,6 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
return p->link.q->ops->peek(p->link.q);
}
-static unsigned int atm_tc_drop(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
- unsigned int len;
-
- pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list) {
- if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
- return len;
- }
- return 0;
-}
-
static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
@@ -637,7 +623,8 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
{
struct atm_flow_data *flow = (struct atm_flow_data *)arg;
- if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+ d, NULL, &flow->bstats) < 0 ||
gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
return -1;
@@ -671,7 +658,6 @@ static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
.enqueue = atm_tc_enqueue,
.dequeue = atm_tc_dequeue,
.peek = atm_tc_peek,
- .drop = atm_tc_drop,
.init = atm_tc_init,
.reset = atm_tc_reset,
.destroy = atm_tc_destroy,
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index baafddf..f2af31b 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -80,10 +80,6 @@ struct cbq_class {
unsigned char priority; /* class priority */
unsigned char priority2; /* priority to be used after overlimit */
unsigned char ewma_log; /* time constant for idle time calculation */
- unsigned char ovl_strategy;
-#ifdef CONFIG_NET_CLS_ACT
- unsigned char police;
-#endif
u32 defmap;
@@ -94,10 +90,6 @@ struct cbq_class {
u32 avpkt;
struct qdisc_rate_table *R_tab;
- /* Overlimit strategy parameters */
- void (*overlimit)(struct cbq_class *cl);
- psched_tdiff_t penalty;
-
/* General scheduler (WRR) parameters */
long allot;
long quantum; /* Allotment per WRR round */
@@ -382,9 +374,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
-#ifdef CONFIG_NET_CLS_ACT
- cl->q->__parent = sch;
-#endif
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
@@ -402,11 +391,8 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
-/* Overlimit actions */
-
-/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */
-
-static void cbq_ovl_classic(struct cbq_class *cl)
+/* Overlimit action: penalize leaf class by adding offtime */
+static void cbq_overlimit(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = cl->undertime - q->now;
@@ -456,99 +442,6 @@ static void cbq_ovl_classic(struct cbq_class *cl)
}
}
-/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
- * they go overlimit
- */
-
-static void cbq_ovl_rclassic(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- struct cbq_class *this = cl;
-
- do {
- if (cl->level > q->toplevel) {
- cl = NULL;
- break;
- }
- } while ((cl = cl->borrow) != NULL);
-
- if (cl == NULL)
- cl = this;
- cbq_ovl_classic(cl);
-}
-
-/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */
-
-static void cbq_ovl_delay(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- psched_tdiff_t delay = cl->undertime - q->now;
-
- if (test_bit(__QDISC_STATE_DEACTIVATED,
- &qdisc_root_sleeping(cl->qdisc)->state))
- return;
-
- if (!cl->delayed) {
- psched_time_t sched = q->now;
- ktime_t expires;
-
- delay += cl->offtime;
- if (cl->avgidle < 0)
- delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
- if (cl->avgidle < cl->minidle)
- cl->avgidle = cl->minidle;
- cl->undertime = q->now + delay;
-
- if (delay > 0) {
- sched += delay + cl->penalty;
- cl->penalized = sched;
- cl->cpriority = TC_CBQ_MAXPRIO;
- q->pmask |= (1<<TC_CBQ_MAXPRIO);
-
- expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
- if (hrtimer_try_to_cancel(&q->delay_timer) &&
- ktime_to_ns(ktime_sub(
- hrtimer_get_expires(&q->delay_timer),
- expires)) > 0)
- hrtimer_set_expires(&q->delay_timer, expires);
- hrtimer_restart(&q->delay_timer);
- cl->delayed = 1;
- cl->xstats.overactions++;
- return;
- }
- delay = 1;
- }
- if (q->wd_expires == 0 || q->wd_expires > delay)
- q->wd_expires = delay;
-}
-
-/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */
-
-static void cbq_ovl_lowprio(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
- cl->penalized = q->now + cl->penalty;
-
- if (cl->cpriority != cl->priority2) {
- cl->cpriority = cl->priority2;
- q->pmask |= (1<<cl->cpriority);
- cl->xstats.overactions++;
- }
- cbq_ovl_classic(cl);
-}
-
-/* TC_CBQ_OVL_DROP: penalize class by dropping */
-
-static void cbq_ovl_drop(struct cbq_class *cl)
-{
- if (cl->q->ops->drop)
- if (cl->q->ops->drop(cl->q))
- cl->qdisc->q.qlen--;
- cl->xstats.overactions++;
- cbq_ovl_classic(cl);
-}
-
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
psched_time_t now)
{
@@ -625,40 +518,6 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-#ifdef CONFIG_NET_CLS_ACT
-static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
-{
- struct Qdisc *sch = child->__parent;
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = q->rx_class;
-
- q->rx_class = NULL;
-
- if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
- int ret;
-
- cbq_mark_toplevel(q, cl);
-
- q->rx_class = cl;
- cl->q->__parent = sch;
-
- ret = qdisc_enqueue(skb, cl->q);
- if (ret == NET_XMIT_SUCCESS) {
- sch->q.qlen++;
- if (!cl->next_alive)
- cbq_activate_class(cl);
- return 0;
- }
- if (net_xmit_drop_count(ret))
- qdisc_qstats_drop(sch);
- return 0;
- }
-
- qdisc_qstats_drop(sch);
- return -1;
-}
-#endif
-
/*
* It is mission critical procedure.
*
@@ -807,7 +666,7 @@ cbq_under_limit(struct cbq_class *cl)
cl = cl->borrow;
if (!cl) {
this_cl->qstats.overlimits++;
- this_cl->overlimit(this_cl);
+ cbq_overlimit(this_cl);
return NULL;
}
if (cl->level > q->toplevel)
@@ -1166,31 +1025,6 @@ static void cbq_link_class(struct cbq_class *this)
}
}
-static unsigned int cbq_drop(struct Qdisc *sch)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl, *cl_head;
- int prio;
- unsigned int len;
-
- for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
- cl_head = q->active[prio];
- if (!cl_head)
- continue;
-
- cl = cl_head;
- do {
- if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
- sch->q.qlen--;
- if (!cl->q->q.qlen)
- cbq_deactivate_class(cl);
- return len;
- }
- } while ((cl = cl->next_alive) != cl_head);
- }
- return 0;
-}
-
static void
cbq_reset(struct Qdisc *sch)
{
@@ -1280,50 +1114,6 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
return 0;
}
-static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
-{
- switch (ovl->strategy) {
- case TC_CBQ_OVL_CLASSIC:
- cl->overlimit = cbq_ovl_classic;
- break;
- case TC_CBQ_OVL_DELAY:
- cl->overlimit = cbq_ovl_delay;
- break;
- case TC_CBQ_OVL_LOWPRIO:
- if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
- ovl->priority2 - 1 <= cl->priority)
- return -EINVAL;
- cl->priority2 = ovl->priority2 - 1;
- cl->overlimit = cbq_ovl_lowprio;
- break;
- case TC_CBQ_OVL_DROP:
- cl->overlimit = cbq_ovl_drop;
- break;
- case TC_CBQ_OVL_RCLASSIC:
- cl->overlimit = cbq_ovl_rclassic;
- break;
- default:
- return -EINVAL;
- }
- cl->penalty = ovl->penalty;
- return 0;
-}
-
-#ifdef CONFIG_NET_CLS_ACT
-static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
-{
- cl->police = p->police;
-
- if (cl->q->handle) {
- if (p->police == TC_POLICE_RECLASSIFY)
- cl->q->reshape_fail = cbq_reshape_fail;
- else
- cl->q->reshape_fail = NULL;
- }
- return 0;
-}
-#endif
-
static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
@@ -1375,8 +1165,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
q->link.priority = TC_CBQ_MAXPRIO - 1;
q->link.priority2 = TC_CBQ_MAXPRIO - 1;
q->link.cpriority = TC_CBQ_MAXPRIO - 1;
- q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
- q->link.overlimit = cbq_ovl_classic;
q->link.allot = psched_mtu(qdisc_dev(sch));
q->link.quantum = q->link.allot;
q->link.weight = q->link.R_tab->rate.rate;
@@ -1463,24 +1251,6 @@ nla_put_failure:
return -1;
}
-static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cbq_ovl opt;
-
- opt.strategy = cl->ovl_strategy;
- opt.priority2 = cl->priority2 + 1;
- opt.pad = 0;
- opt.penalty = cl->penalty;
- if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
@@ -1500,36 +1270,11 @@ nla_put_failure:
return -1;
}
-#ifdef CONFIG_NET_CLS_ACT
-static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cbq_police opt;
-
- if (cl->police) {
- opt.police = cl->police;
- opt.__res1 = 0;
- opt.__res2 = 0;
- if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
- goto nla_put_failure;
- }
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-#endif
-
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
if (cbq_dump_lss(skb, cl) < 0 ||
cbq_dump_rate(skb, cl) < 0 ||
cbq_dump_wrr(skb, cl) < 0 ||
- cbq_dump_ovl(skb, cl) < 0 ||
-#ifdef CONFIG_NET_CLS_ACT
- cbq_dump_police(skb, cl) < 0 ||
-#endif
cbq_dump_fopt(skb, cl) < 0)
return -1;
return 0;
@@ -1600,7 +1345,8 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (cl->undertime != PSCHED_PASTPERFECT)
cl->xstats.undertime = cl->undertime - q->now;
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+ d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
return -1;
@@ -1618,11 +1364,6 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
&pfifo_qdisc_ops, cl->common.classid);
if (new == NULL)
return -ENOBUFS;
- } else {
-#ifdef CONFIG_NET_CLS_ACT
- if (cl->police == TC_POLICE_RECLASSIFY)
- new->reshape_fail = cbq_reshape_fail;
-#endif
}
*old = qdisc_replace(sch, new, &cl->q);
@@ -1735,6 +1476,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (err < 0)
return err;
+ if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
+ return -EOPNOTSUPP;
+
if (cl) {
/* Check parent */
if (parentid) {
@@ -1755,7 +1499,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
- qdisc_root_sleeping_lock(sch),
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
qdisc_put_rtab(rtab);
@@ -1782,14 +1527,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
}
- if (tb[TCA_CBQ_OVL_STRATEGY])
- cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
-
-#ifdef CONFIG_NET_CLS_ACT
- if (tb[TCA_CBQ_POLICE])
- cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
-#endif
-
if (tb[TCA_CBQ_FOPT])
cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
@@ -1848,7 +1585,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
- qdisc_root_sleeping_lock(sch),
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
kfree(cl);
@@ -1884,13 +1622,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cl->maxidle = q->link.maxidle;
if (cl->avpkt == 0)
cl->avpkt = q->link.avpkt;
- cl->overlimit = cbq_ovl_classic;
- if (tb[TCA_CBQ_OVL_STRATEGY])
- cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
-#ifdef CONFIG_NET_CLS_ACT
- if (tb[TCA_CBQ_POLICE])
- cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
-#endif
if (tb[TCA_CBQ_FOPT])
cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
sch_tree_unlock(sch);
@@ -2035,7 +1766,6 @@ static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
.enqueue = cbq_enqueue,
.dequeue = cbq_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = cbq_drop,
.init = cbq_init,
.reset = cbq_reset,
.destroy = cbq_destroy,
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 0a08c86..04e0b05 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -365,22 +365,6 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
return skb;
}
-static unsigned int choke_drop(struct Qdisc *sch)
-{
- struct choke_sched_data *q = qdisc_priv(sch);
- unsigned int len;
-
- len = qdisc_queue_drop(sch);
- if (len > 0)
- q->stats.other++;
- else {
- if (!red_is_idling(&q->vars))
- red_start_of_idle_period(&q->vars);
- }
-
- return len;
-}
-
static void choke_reset(struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
@@ -569,7 +553,6 @@ static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
.enqueue = choke_enqueue,
.dequeue = choke_dequeue,
.peek = choke_peek_head,
- .drop = choke_drop,
.init = choke_init,
.destroy = choke_destroy,
.reset = choke_reset,
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index bf8af2c..22609e4 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -91,7 +91,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
- qdisc_root_sleeping_lock(sch),
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err)
return err;
@@ -119,7 +120,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
- qdisc_root_sleeping_lock(sch),
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
qdisc_destroy(cl->qdisc);
@@ -279,7 +281,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
if (qlen)
xstats.deficit = cl->deficit;
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+ d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
return -1;
@@ -420,27 +423,6 @@ out:
return NULL;
}
-static unsigned int drr_drop(struct Qdisc *sch)
-{
- struct drr_sched *q = qdisc_priv(sch);
- struct drr_class *cl;
- unsigned int len;
-
- list_for_each_entry(cl, &q->active, alist) {
- if (cl->qdisc->ops->drop) {
- len = cl->qdisc->ops->drop(cl->qdisc);
- if (len > 0) {
- sch->qstats.backlog -= len;
- sch->q.qlen--;
- if (cl->qdisc->q.qlen == 0)
- list_del(&cl->alist);
- return len;
- }
- }
- }
- return 0;
-}
-
static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
struct drr_sched *q = qdisc_priv(sch);
@@ -510,7 +492,6 @@ static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
.enqueue = drr_enqueue,
.dequeue = drr_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = drr_drop,
.init = drr_init_qdisc,
.reset = drr_reset_qdisc,
.destroy = drr_destroy_qdisc,
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 34b4dda..b9ba5f6 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -320,23 +320,6 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
return p->q->ops->peek(p->q);
}
-static unsigned int dsmark_drop(struct Qdisc *sch)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- unsigned int len;
-
- pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
- if (p->q->ops->drop == NULL)
- return 0;
-
- len = p->q->ops->drop(p->q);
- if (len)
- sch->q.qlen--;
-
- return len;
-}
-
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
@@ -489,7 +472,6 @@ static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
.enqueue = dsmark_enqueue,
.dequeue = dsmark_dequeue,
.peek = dsmark_peek,
- .drop = dsmark_drop,
.init = dsmark_init,
.reset = dsmark_reset,
.destroy = dsmark_destroy,
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 2177eac..dea70e3 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -24,7 +24,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
return qdisc_enqueue_tail(skb, sch);
- return qdisc_reshape_fail(skb, sch);
+ return qdisc_drop(skb, sch);
}
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
@@ -32,7 +32,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(skb_queue_len(&sch->q) < sch->limit))
return qdisc_enqueue_tail(skb, sch);
- return qdisc_reshape_fail(skb, sch);
+ return qdisc_drop(skb, sch);
}
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
@@ -99,7 +99,6 @@ struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
.enqueue = pfifo_enqueue,
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
- .drop = qdisc_queue_drop,
.init = fifo_init,
.reset = qdisc_reset_queue,
.change = fifo_init,
@@ -114,7 +113,6 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
.enqueue = bfifo_enqueue,
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
- .drop = qdisc_queue_drop,
.init = fifo_init,
.reset = qdisc_reset_queue,
.change = fifo_init,
@@ -129,7 +127,6 @@ struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
.enqueue = pfifo_tail_enqueue,
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
- .drop = qdisc_queue_drop_head,
.init = fifo_init,
.reset = qdisc_reset_queue,
.change = fifo_init,
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index da250b2..a302e8e 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -184,15 +184,6 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
return idx;
}
-static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
-{
- unsigned int prev_backlog;
-
- prev_backlog = sch->qstats.backlog;
- fq_codel_drop(sch, 1U);
- return prev_backlog - sch->qstats.backlog;
-}
-
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -578,11 +569,13 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
st.qdisc_stats.memory_usage = q->memory_usage;
st.qdisc_stats.drop_overmemory = q->drop_overmemory;
+ sch_tree_lock(sch);
list_for_each(pos, &q->new_flows)
st.qdisc_stats.new_flows_len++;
list_for_each(pos, &q->old_flows)
st.qdisc_stats.old_flows_len++;
+ sch_tree_unlock(sch);
return gnet_stats_copy_app(d, &st, sizeof(st));
}
@@ -636,7 +629,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
if (idx < q->flows_cnt) {
const struct fq_codel_flow *flow = &q->flows[idx];
- const struct sk_buff *skb = flow->head;
+ const struct sk_buff *skb;
memset(&xstats, 0, sizeof(xstats));
xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
@@ -654,9 +647,14 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
codel_time_to_us(delta) :
-codel_time_to_us(-delta);
}
- while (skb) {
- qs.qlen++;
- skb = skb->next;
+ if (flow->head) {
+ sch_tree_lock(sch);
+ skb = flow->head;
+ while (skb) {
+ qs.qlen++;
+ skb = skb->next;
+ }
+ sch_tree_unlock(sch);
}
qs.backlog = q->backlogs[idx];
qs.drops = flow->dropped;
@@ -709,7 +707,6 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
.enqueue = fq_codel_enqueue,
.dequeue = fq_codel_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = fq_codel_qdisc_drop,
.init = fq_codel_init,
.reset = fq_codel_reset,
.destroy = fq_codel_destroy,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f9e0e9c..0c9cb51 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -112,7 +112,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
/*
* Transmit possibly several skbs, and handle the return status as
- * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
+ * required. Owning running seqcount bit guarantees that
* only one CPU can execute this function.
*
* Returns to the caller:
@@ -165,7 +165,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
/*
* NOTE: Called under qdisc_lock(q) with locally disabled BH.
*
- * __QDISC___STATE_RUNNING guarantees only one CPU can process
+ * running seqcount guarantees only one CPU can process
* this qdisc at a time. qdisc_lock(q) serializes queue accesses for
* this queue.
*
@@ -381,6 +381,7 @@ struct Qdisc noop_qdisc = {
.list = LIST_HEAD_INIT(noop_qdisc.list),
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
+ .running = SEQCNT_ZERO(noop_qdisc.running),
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);
@@ -539,6 +540,7 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
+static struct lock_class_key qdisc_running_key;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops)
@@ -572,6 +574,10 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+ seqcount_init(&sch->running);
+ lockdep_set_class(&sch->running,
+ dev->qdisc_running_key ?: &qdisc_running_key);
+
sch->ops = ops;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
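
The sch_generic.c hunks above replace the old __QDISC___STATE_RUNNING bit with a per-qdisc seqcount (sch->running, given its own lockdep class), and the gnet_stats_copy_basic() calls elsewhere in this series pass that seqcount so stats readers can retry instead of blocking the transmit path. A rough userspace sketch of the idea, assuming a single writer as in the qdisc case; the names and simplified memory ordering are illustrative, not the kernel's seqcount_t API:

/* Sketch of the sch->running seqcount idea: the single writer (the
 * transmit path) makes the sequence odd while updating the counters;
 * lock-free readers retry until they see the same even value before
 * and after copying. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct qstats {
	atomic_uint seq;                  /* odd while an update is in flight */
	_Atomic uint64_t bytes;
	_Atomic uint64_t packets;
};

static void stats_add(struct qstats *s, uint64_t len)   /* single writer only */
{
	unsigned int seq = atomic_load_explicit(&s->seq, memory_order_relaxed);

	atomic_store_explicit(&s->seq, seq + 1, memory_order_relaxed);  /* -> odd */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&s->bytes, len, memory_order_relaxed);
	atomic_fetch_add_explicit(&s->packets, 1, memory_order_relaxed);
	atomic_store_explicit(&s->seq, seq + 2, memory_order_release);  /* -> even */
}

static void stats_read(struct qstats *s, uint64_t *bytes, uint64_t *packets)
{
	unsigned int start, end;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*bytes = atomic_load_explicit(&s->bytes, memory_order_relaxed);
		*packets = atomic_load_explicit(&s->packets, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		end = atomic_load_explicit(&s->seq, memory_order_relaxed);
	} while (start != end || (start & 1));   /* retry on concurrent update */
}

int main(void)
{
	struct qstats s = { 0 };
	uint64_t b, p;

	stats_add(&s, 1500);
	stats_add(&s, 60);
	stats_read(&s, &b, &p);
	printf("bytes=%llu packets=%llu\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}
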
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 8010510..b5fb63c7 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -276,40 +276,6 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
return NULL;
}
-static unsigned int gred_drop(struct Qdisc *sch)
-{
- struct sk_buff *skb;
- struct gred_sched *t = qdisc_priv(sch);
-
- skb = qdisc_dequeue_tail(sch);
- if (skb) {
- unsigned int len = qdisc_pkt_len(skb);
- struct gred_sched_data *q;
- u16 dp = tc_index_to_dp(skb);
-
- if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
- net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
- tc_index_to_dp(skb));
- } else {
- q->backlog -= len;
- q->stats.other++;
-
- if (gred_wred_mode(t)) {
- if (!sch->qstats.backlog)
- red_start_of_idle_period(&t->wred_set);
- } else {
- if (!q->backlog)
- red_start_of_idle_period(&q->vars);
- }
- }
-
- qdisc_drop(skb, sch);
- return len;
- }
-
- return 0;
-}
-
static void gred_reset(struct Qdisc *sch)
{
int i;
@@ -623,7 +589,6 @@ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
.enqueue = gred_enqueue,
.dequeue = gred_dequeue,
.peek = qdisc_peek_head,
- .drop = gred_drop,
.init = gred_init,
.reset = gred_reset,
.destroy = gred_destroy,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 1ac9f9f..eb3d3f5 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1015,11 +1015,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cur_time = psched_get_time();
if (tca[TCA_RATE]) {
- spinlock_t *lock = qdisc_root_sleeping_lock(sch);
-
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
- lock,
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err)
return err;
@@ -1068,7 +1067,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
- qdisc_root_sleeping_lock(sch),
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
kfree(cl);
@@ -1373,7 +1373,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.work = cl->cl_total;
xstats.rtwork = cl->cl_cumul;
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
return -1;
@@ -1672,32 +1672,6 @@ hfsc_dequeue(struct Qdisc *sch)
return skb;
}
-static unsigned int
-hfsc_drop(struct Qdisc *sch)
-{
- struct hfsc_sched *q = qdisc_priv(sch);
- struct hfsc_class *cl;
- unsigned int len;
-
- list_for_each_entry(cl, &q->droplist, dlist) {
- if (cl->qdisc->ops->drop != NULL &&
- (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
- if (cl->qdisc->q.qlen == 0) {
- update_vf(cl, 0, 0);
- set_passive(cl);
- } else {
- list_move_tail(&cl->dlist, &q->droplist);
- }
- cl->qstats.drops++;
- qdisc_qstats_drop(sch);
- sch->qstats.backlog -= len;
- sch->q.qlen--;
- return len;
- }
- }
- return 0;
-}
-
static const struct Qdisc_class_ops hfsc_class_ops = {
.change = hfsc_change_class,
.delete = hfsc_delete_class,
@@ -1724,7 +1698,6 @@ static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
.enqueue = hfsc_enqueue,
.dequeue = hfsc_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = hfsc_drop,
.cl_ops = &hfsc_class_ops,
.priv_size = sizeof(struct hfsc_sched),
.owner = THIS_MODULE
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 13d6f83..c517918 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -368,15 +368,6 @@ static unsigned int hhf_drop(struct Qdisc *sch)
return bucket - q->buckets;
}
-static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
-{
- unsigned int prev_backlog;
-
- prev_backlog = sch->qstats.backlog;
- hhf_drop(sch);
- return prev_backlog - sch->qstats.backlog;
-}
-
static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct hhf_sched_data *q = qdisc_priv(sch);
@@ -709,7 +700,6 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
.enqueue = hhf_enqueue,
.dequeue = hhf_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = hhf_qdisc_drop,
.init = hhf_init,
.reset = hhf_reset,
.destroy = hhf_destroy,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index d4b4218..b74d066 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -936,31 +936,6 @@ fin:
return skb;
}
-/* try to drop from each class (by prio) until one succeed */
-static unsigned int htb_drop(struct Qdisc *sch)
-{
- struct htb_sched *q = qdisc_priv(sch);
- int prio;
-
- for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
- struct list_head *p;
- list_for_each(p, q->drops + prio) {
- struct htb_class *cl = list_entry(p, struct htb_class,
- un.leaf.drop_list);
- unsigned int len;
- if (cl->un.leaf.q->ops->drop &&
- (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
- sch->qstats.backlog -= len;
- sch->q.qlen--;
- if (!cl->un.leaf.q->q.qlen)
- htb_deactivate(q, cl);
- return len;
- }
- }
- }
- return 0;
-}
-
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
@@ -1141,7 +1116,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+ d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
return -1;
@@ -1395,7 +1371,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (htb_rate_est || tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
- qdisc_root_sleeping_lock(sch),
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE] ? : &est.nla);
if (err) {
kfree(cl);
@@ -1457,11 +1434,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
parent->children++;
} else {
if (tca[TCA_RATE]) {
- spinlock_t *lock = qdisc_root_sleeping_lock(sch);
-
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
- lock,
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err)
return err;
@@ -1599,7 +1575,6 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.enqueue = htb_enqueue,
.dequeue = htb_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = htb_drop,
.init = htb_init,
.reset = htb_reset,
.destroy = htb_destroy,
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 56a77b8..b943982 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -199,7 +199,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
return -1;
return 0;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b8002ce..549c663 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -342,7 +342,8 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
* hold here is the lock on dev_queue->qdisc_sleeping
* also acquired below.
*/
- spin_unlock_bh(d->lock);
+ if (d->lock)
+ spin_unlock_bh(d->lock);
for (i = tc.offset; i < tc.offset + tc.count; i++) {
struct netdev_queue *q = netdev_get_tx_queue(dev, i);
@@ -359,15 +360,17 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
spin_unlock_bh(qdisc_lock(qdisc));
}
/* Reclaim root sleeping lock before completing stats */
- spin_lock_bh(d->lock);
- if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
+ if (d->lock)
+ spin_lock_bh(d->lock);
+ if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
return -1;
} else {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+ d, NULL, &sch->bstats) < 0 ||
gnet_stats_copy_queue(d, NULL,
&sch->qstats, sch->q.qlen) < 0)
return -1;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index bcdd54b..5ea9330 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -151,27 +151,6 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
}
-static unsigned int multiq_drop(struct Qdisc *sch)
-{
- struct multiq_sched_data *q = qdisc_priv(sch);
- int band;
- unsigned int len;
- struct Qdisc *qdisc;
-
- for (band = q->bands - 1; band >= 0; band--) {
- qdisc = q->queues[band];
- if (qdisc->ops->drop) {
- len = qdisc->ops->drop(qdisc);
- if (len != 0) {
- sch->q.qlen--;
- return len;
- }
- }
- }
- return 0;
-}
-
-
static void
multiq_reset(struct Qdisc *sch)
{
@@ -356,7 +335,8 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;
cl_q = q->queues[cl - 1];
- if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+ d, NULL, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1;
@@ -415,7 +395,6 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
.enqueue = multiq_enqueue,
.dequeue = multiq_dequeue,
.peek = multiq_peek,
- .drop = multiq_drop,
.init = multiq_init,
.reset = multiq_reset,
.destroy = multiq_destroy,
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 205bed0..9ca7947 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -407,7 +407,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs)) {
- qdisc_reshape_fail(skb, sch);
+ qdisc_drop(skb, sch);
return NULL;
}
consume_skb(skb);
@@ -499,7 +499,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
- return qdisc_reshape_fail(skb, sch);
+ return qdisc_drop(skb, sch);
qdisc_qstats_backlog_inc(sch, skb);
@@ -576,35 +576,6 @@ finish_segs:
return NET_XMIT_SUCCESS;
}
-static unsigned int netem_drop(struct Qdisc *sch)
-{
- struct netem_sched_data *q = qdisc_priv(sch);
- unsigned int len;
-
- len = qdisc_queue_drop(sch);
-
- if (!len) {
- struct rb_node *p = rb_first(&q->t_root);
-
- if (p) {
- struct sk_buff *skb = netem_rb_to_skb(p);
-
- rb_erase(p, &q->t_root);
- sch->q.qlen--;
- skb->next = NULL;
- skb->prev = NULL;
- qdisc_qstats_backlog_dec(sch, skb);
- kfree_skb(skb);
- }
- }
- if (!len && q->qdisc && q->qdisc->ops->drop)
- len = q->qdisc->ops->drop(q->qdisc);
- if (len)
- qdisc_qstats_drop(sch);
-
- return len;
-}
-
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -1143,7 +1114,6 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
.enqueue = netem_enqueue,
.dequeue = netem_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = netem_drop,
.init = netem_init,
.reset = netem_reset,
.destroy = netem_destroy,
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 5abfe44..ff0d968 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -96,7 +96,7 @@ static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return qdisc_enqueue_tail(skb, sch);
}
- return qdisc_reshape_fail(skb, sch);
+ return qdisc_drop(skb, sch);
}
static struct sk_buff *plug_dequeue(struct Qdisc *sch)
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 4b0a821..de49268 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -127,25 +127,6 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
}
-static unsigned int prio_drop(struct Qdisc *sch)
-{
- struct prio_sched_data *q = qdisc_priv(sch);
- int prio;
- unsigned int len;
- struct Qdisc *qdisc;
-
- for (prio = q->bands-1; prio >= 0; prio--) {
- qdisc = q->queues[prio];
- if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
- sch->qstats.backlog -= len;
- sch->q.qlen--;
- return len;
- }
- }
- return 0;
-}
-
-
static void
prio_reset(struct Qdisc *sch)
{
@@ -323,7 +304,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;
cl_q = q->queues[cl - 1];
- if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+ d, NULL, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1;
@@ -382,7 +364,6 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
.enqueue = prio_enqueue,
.dequeue = prio_dequeue,
.peek = prio_peek,
- .drop = prio_drop,
.init = prio_init,
.reset = prio_reset,
.destroy = prio_destroy,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index f18857f..0427fa8 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -460,7 +460,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
- qdisc_root_sleeping_lock(sch),
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err)
return err;
@@ -486,7 +487,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
- qdisc_root_sleeping_lock(sch),
+ NULL,
+ qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err)
goto destroy_class;
@@ -663,7 +665,8 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.weight = cl->agg->class_weight;
xstats.lmax = cl->agg->lmax;
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+ d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL,
&cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
@@ -1422,52 +1425,6 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
qfq_deactivate_class(q, cl);
}
-static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
- struct hlist_head *slot)
-{
- struct qfq_aggregate *agg;
- struct qfq_class *cl;
- unsigned int len;
-
- hlist_for_each_entry(agg, slot, next) {
- list_for_each_entry(cl, &agg->active, alist) {
-
- if (!cl->qdisc->ops->drop)
- continue;
-
- len = cl->qdisc->ops->drop(cl->qdisc);
- if (len > 0) {
- if (cl->qdisc->q.qlen == 0)
- qfq_deactivate_class(q, cl);
-
- return len;
- }
- }
- }
- return 0;
-}
-
-static unsigned int qfq_drop(struct Qdisc *sch)
-{
- struct qfq_sched *q = qdisc_priv(sch);
- struct qfq_group *grp;
- unsigned int i, j, len;
-
- for (i = 0; i <= QFQ_MAX_INDEX; i++) {
- grp = &q->groups[i];
- for (j = 0; j < QFQ_MAX_SLOTS; j++) {
- len = qfq_drop_from_slot(q, &grp->slots[j]);
- if (len > 0) {
- sch->q.qlen--;
- return len;
- }
- }
-
- }
-
- return 0;
-}
-
static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
struct qfq_sched *q = qdisc_priv(sch);
@@ -1562,7 +1519,6 @@ static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
.enqueue = qfq_enqueue,
.dequeue = qfq_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = qfq_drop,
.init = qfq_init_qdisc,
.reset = qfq_reset_qdisc,
.destroy = qfq_destroy_qdisc,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 91578bd..a0d5753 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -136,26 +136,6 @@ static struct sk_buff *red_peek(struct Qdisc *sch)
return child->ops->peek(child);
}
-static unsigned int red_drop(struct Qdisc *sch)
-{
- struct red_sched_data *q = qdisc_priv(sch);
- struct Qdisc *child = q->qdisc;
- unsigned int len;
-
- if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
- q->stats.other++;
- qdisc_qstats_drop(sch);
- sch->qstats.backlog -= len;
- sch->q.qlen--;
- return len;
- }
-
- if (!red_is_idling(&q->vars))
- red_start_of_idle_period(&q->vars);
-
- return 0;
-}
-
static void red_reset(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
@@ -365,7 +345,6 @@ static struct Qdisc_ops red_qdisc_ops __read_mostly = {
.enqueue = red_enqueue,
.dequeue = red_dequeue,
.peek = red_peek,
- .drop = red_drop,
.init = red_init,
.reset = red_reset,
.destroy = red_destroy,
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 498f0a2..a2e0b85 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -896,7 +896,6 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
.enqueue = sfq_enqueue,
.dequeue = sfq_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = sfq_drop,
.init = sfq_init,
.reset = sfq_reset,
.destroy = sfq_destroy,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 3161e49..7fa3d6e 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -166,7 +166,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs))
- return qdisc_reshape_fail(skb, sch);
+ return qdisc_drop(skb, sch);
nb = 0;
while (segs) {
@@ -198,7 +198,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (qdisc_pkt_len(skb) > q->max_size) {
if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
return tbf_segment(skb, sch);
- return qdisc_reshape_fail(skb, sch);
+ return qdisc_drop(skb, sch);
}
ret = qdisc_enqueue(skb, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
@@ -212,19 +212,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS;
}
-static unsigned int tbf_drop(struct Qdisc *sch)
-{
- struct tbf_sched_data *q = qdisc_priv(sch);
- unsigned int len = 0;
-
- if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
- sch->qstats.backlog -= len;
- sch->q.qlen--;
- qdisc_qstats_drop(sch);
- }
- return len;
-}
-
static bool tbf_peak_present(const struct tbf_sched_data *q)
{
return q->peak.rate_bytes_ps;
@@ -559,7 +546,6 @@ static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
.enqueue = tbf_enqueue,
.dequeue = tbf_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = tbf_drop,
.init = tbf_init,
.reset = tbf_reset,
.destroy = tbf_destroy,
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 0fca582..6c4f749 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -11,7 +11,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
transport.o chunk.o sm_make_chunk.o ulpevent.o \
inqueue.o outqueue.o ulpqueue.o \
tsnmap.o bind_addr.o socket.o primitive.o \
- output.o input.o debug.o ssnmap.o auth.o
+ output.o input.o debug.o ssnmap.o auth.o \
+ offload.o
sctp_probe-y := probe.o
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a701527..6f8e676 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -112,7 +112,6 @@ int sctp_rcv(struct sk_buff *skb)
struct sctp_ep_common *rcvr;
struct sctp_transport *transport = NULL;
struct sctp_chunk *chunk;
- struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
@@ -124,28 +123,29 @@ int sctp_rcv(struct sk_buff *skb)
__SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
- if (skb_linearize(skb))
+ /* If packet is too small to contain a single chunk, let's not
+ * waste time on it anymore.
+ */
+ if (skb->len < sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
+ skb_transport_offset(skb))
goto discard_it;
- sh = sctp_hdr(skb);
+ if (!pskb_may_pull(skb, sizeof(struct sctphdr)))
+ goto discard_it;
- /* Pull up the IP and SCTP headers. */
+ /* Pull up the IP header. */
__skb_pull(skb, skb_transport_offset(skb));
- if (skb->len < sizeof(struct sctphdr))
- goto discard_it;
skb->csum_valid = 0; /* Previous value not applicable */
if (skb_csum_unnecessary(skb))
__skb_decr_checksum_unnecessary(skb);
- else if (!sctp_checksum_disable && sctp_rcv_checksum(net, skb) < 0)
+ else if (!sctp_checksum_disable &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
+ sctp_rcv_checksum(net, skb) < 0)
goto discard_it;
skb->csum_valid = 1;
- skb_pull(skb, sizeof(struct sctphdr));
-
- /* Make sure we at least have chunk headers worth of data left. */
- if (skb->len < sizeof(struct sctp_chunkhdr))
- goto discard_it;
+ __skb_pull(skb, sizeof(struct sctphdr));
family = ipver2af(ip_hdr(skb)->version);
af = sctp_get_af_specific(family);
@@ -230,7 +230,7 @@ int sctp_rcv(struct sk_buff *skb)
chunk->rcvr = rcvr;
/* Remember the SCTP header. */
- chunk->sctp_hdr = sh;
+ chunk->sctp_hdr = sctp_hdr(skb);
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk, &src, &dest);
@@ -660,19 +660,23 @@ out_unlock:
*/
static int sctp_rcv_ootb(struct sk_buff *skb)
{
- sctp_chunkhdr_t *ch;
- __u8 *ch_end;
-
- ch = (sctp_chunkhdr_t *) skb->data;
+ sctp_chunkhdr_t *ch, _ch;
+ int ch_end, offset = 0;
/* Scan through all the chunks in the packet. */
do {
+ /* Make sure we have at least the header there */
+ if (offset + sizeof(sctp_chunkhdr_t) > skb->len)
+ break;
+
+ ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
+
/* Break out if chunk length is less than minimal. */
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
break;
- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
- if (ch_end > skb_tail_pointer(skb))
+ ch_end = offset + WORD_ROUND(ntohs(ch->length));
+ if (ch_end > skb->len)
break;
/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
@@ -697,8 +701,8 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
goto discard;
- ch = (sctp_chunkhdr_t *) ch_end;
- } while (ch_end < skb_tail_pointer(skb));
+ offset = ch_end;
+ } while (ch_end < skb->len);
return 0;
@@ -1173,6 +1177,17 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
{
sctp_chunkhdr_t *ch;
+ /* We do not allow GSO frames here as we need to linearize and
+ * then cannot guarantee frame boundaries. This shouldn't be an
+ * issue as packets hitting this are mostly INIT or INIT-ACK and
+ * those cannot be on GSO-style anyway.
+ */
+ if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
+ return NULL;
+
+ if (skb_linearize(skb))
+ return NULL;
+
ch = (sctp_chunkhdr_t *) skb->data;
/* The code below will attempt to walk the chunk and extract
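
The sctp_rcv_ootb() rewrite above walks chunks by offset with skb_header_pointer() and checks every step against skb->len, so it no longer depends on the skb being linear. A flat-buffer sketch of the same walk, with made-up struct and macro names (chunk_hdr, CHUNK_ROUND) playing the roles of sctp_chunkhdr_t and WORD_ROUND():

/* Advance by the 4-byte-rounded chunk length and bail out on truncated
 * or bogus lengths before touching the payload. Illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct chunk_hdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;       /* network byte order, includes this header */
};

#define CHUNK_ROUND(len) (((len) + 3U) & ~3U)

static int walk_chunks(const uint8_t *buf, size_t buf_len)
{
	size_t offset = 0;

	while (offset + sizeof(struct chunk_hdr) <= buf_len) {
		struct chunk_hdr ch;
		size_t len, end;

		memcpy(&ch, buf + offset, sizeof(ch));  /* like skb_header_pointer() */
		len = ntohs(ch.length);
		if (len < sizeof(struct chunk_hdr))
			return -1;                      /* shorter than its own header */

		end = offset + CHUNK_ROUND(len);
		if (end > buf_len)
			return -1;                      /* claims more data than we have */

		printf("chunk type %u, %zu bytes at offset %zu\n",
		       (unsigned int)ch.type, len, offset);
		offset = end;
	}
	return 0;
}

int main(void)
{
	/* one 8-byte chunk (type 1) followed by a 4-byte header-only chunk */
	uint8_t pkt[] = { 1, 0, 0x00, 0x08, 0xde, 0xad, 0xbe, 0xef,
			  6, 0, 0x00, 0x04 };

	return walk_chunks(pkt, sizeof(pkt)) ? 1 : 0;
}
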
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 9d87bba..edabbbd 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -130,13 +130,25 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
* at this time.
*/
- if ((chunk = queue->in_progress)) {
+ chunk = queue->in_progress;
+ if (chunk) {
/* There is a packet that we have been working on.
* Any post processing work to do before we move on?
*/
if (chunk->singleton ||
chunk->end_of_packet ||
chunk->pdiscard) {
+ if (chunk->head_skb == chunk->skb) {
+ chunk->skb = skb_shinfo(chunk->skb)->frag_list;
+ goto new_skb;
+ }
+ if (chunk->skb->next) {
+ chunk->skb = chunk->skb->next;
+ goto new_skb;
+ }
+
+ if (chunk->head_skb)
+ chunk->skb = chunk->head_skb;
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
} else {
@@ -152,34 +164,64 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
if (!chunk) {
struct list_head *entry;
+next_chunk:
/* Is the queue empty? */
- if (list_empty(&queue->in_chunk_list))
+ entry = sctp_list_dequeue(&queue->in_chunk_list);
+ if (!entry)
return NULL;
- entry = queue->in_chunk_list.next;
- chunk = queue->in_progress =
- list_entry(entry, struct sctp_chunk, list);
- list_del_init(entry);
+ chunk = list_entry(entry, struct sctp_chunk, list);
- /* This is the first chunk in the packet. */
- chunk->singleton = 1;
- ch = (sctp_chunkhdr_t *) chunk->skb->data;
- chunk->data_accepted = 0;
+ /* Linearize if it's not GSO */
+ if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
+ skb_is_nonlinear(chunk->skb)) {
+ if (skb_linearize(chunk->skb)) {
+ __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
+ sctp_chunk_free(chunk);
+ goto next_chunk;
+ }
+
+ /* Update sctp_hdr as it probably changed */
+ chunk->sctp_hdr = sctp_hdr(chunk->skb);
+ }
+
+ if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
+ /* GSO-marked skbs but without frags, handle
+ * them normally
+ */
+ if (skb_shinfo(chunk->skb)->frag_list)
+ chunk->head_skb = chunk->skb;
+
+ /* skbs with "cover letter" */
+ if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
+ chunk->skb = skb_shinfo(chunk->skb)->frag_list;
+
+ if (WARN_ON(!chunk->skb)) {
+ __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
+ sctp_chunk_free(chunk);
+ goto next_chunk;
+ }
+ }
if (chunk->asoc)
sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);
+
+ queue->in_progress = chunk;
+
+new_skb:
+ /* This is the first chunk in the packet. */
+ ch = (sctp_chunkhdr_t *) chunk->skb->data;
+ chunk->singleton = 1;
+ chunk->data_accepted = 0;
+ chunk->pdiscard = 0;
+ chunk->auth = 0;
+ chunk->has_asconf = 0;
+ chunk->end_of_packet = 0;
+ chunk->ecn_ce_done = 0;
}
chunk->chunk_hdr = ch;
chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
- /* In the unlikely case of an IP reassembly, the skb could be
- * non-linear. If so, update chunk_end so that it doesn't go past
- * the skb->tail.
- */
- if (unlikely(skb_is_nonlinear(chunk->skb))) {
- if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
- chunk->chunk_end = skb_tail_pointer(chunk->skb);
- }
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
new file mode 100644
index 0000000..a37887b
--- /dev/null
+++ b/net/sctp/offload.c
@@ -0,0 +1,98 @@
+/*
+ * sctp_offload - GRO/GSO Offloading for SCTP
+ *
+ * Copyright (C) 2015, Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/socket.h>
+#include <linux/sctp.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/kfifo.h>
+#include <linux/time.h>
+#include <net/net_namespace.h>
+
+#include <linux/skbuff.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/checksum.h>
+#include <net/protocol.h>
+
+static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+ return sctp_compute_cksum(skb, skb_transport_offset(skb));
+}
+
+static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct sctphdr *sh;
+
+ sh = sctp_hdr(skb);
+ if (!pskb_may_pull(skb, sizeof(*sh)))
+ goto out;
+
+ __skb_pull(skb, sizeof(*sh));
+
+ if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
+ /* Packet is from an untrusted source, reset gso_segs. */
+ struct skb_shared_info *pinfo = skb_shinfo(skb);
+ struct sk_buff *frag_iter;
+
+ pinfo->gso_segs = 0;
+ if (skb->len != skb->data_len) {
+ /* Means we have chunks in here too */
+ pinfo->gso_segs++;
+ }
+
+ skb_walk_frags(skb, frag_iter)
+ pinfo->gso_segs++;
+
+ segs = NULL;
+ goto out;
+ }
+
+ segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
+ if (IS_ERR(segs))
+ goto out;
+
+ /* All that is left is update SCTP CRC if necessary */
+ if (!(features & NETIF_F_SCTP_CRC)) {
+ for (skb = segs; skb; skb = skb->next) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ sh = sctp_hdr(skb);
+ sh->checksum = sctp_gso_make_checksum(skb);
+ }
+ }
+ }
+
+out:
+ return segs;
+}
+
+static const struct net_offload sctp_offload = {
+ .callbacks = {
+ .gso_segment = sctp_gso_segment,
+ },
+};
+
+int __init sctp_offload_init(void)
+{
+ return inet_add_offload(&sctp_offload, IPPROTO_SCTP);
+}
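
sctp_gso_make_checksum() above recomputes the SCTP checksum for each segment when the device lacks NETIF_F_SCTP_CRC. That checksum is CRC-32C (RFC 4960, Appendix B); the kernel computes it across skb fragments via sctp_compute_cksum(), which returns a __le32 for the common header. As an illustration only, a table-less bitwise reference version of CRC-32C over a flat buffer (not the kernel helper):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;

	for (size_t i = 0; i < len; i++) {
		crc ^= buf[i];
		for (int bit = 0; bit < 8; bit++)
			/* reflected Castagnoli polynomial 0x82F63B78 */
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return ~crc;
}

int main(void)
{
	/* well-known check value: CRC-32C("123456789") == 0xE3069283 */
	const uint8_t msg[] = "123456789";

	printf("crc32c = 0x%08X\n", crc32c(msg, sizeof(msg) - 1));
	return 0;
}
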
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 9844fe5..1541a91 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -84,18 +84,42 @@ static void sctp_packet_reset(struct sctp_packet *packet)
struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
__u32 vtag, int ecn_capable)
{
- struct sctp_chunk *chunk = NULL;
+ struct sctp_transport *tp = packet->transport;
+ struct sctp_association *asoc = tp->asoc;
pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
packet->vtag = vtag;
+ if (asoc && tp->dst) {
+ struct sock *sk = asoc->base.sk;
+
+ rcu_read_lock();
+ if (__sk_dst_get(sk) != tp->dst) {
+ dst_hold(tp->dst);
+ sk_setup_caps(sk, tp->dst);
+ }
+
+ if (sk_can_gso(sk)) {
+ struct net_device *dev = tp->dst->dev;
+
+ packet->max_size = dev->gso_max_size;
+ } else {
+ packet->max_size = asoc->pathmtu;
+ }
+ rcu_read_unlock();
+
+ } else {
+ packet->max_size = tp->pathmtu;
+ }
+
if (ecn_capable && sctp_packet_empty(packet)) {
- chunk = sctp_get_ecne_prepend(packet->transport->asoc);
+ struct sctp_chunk *chunk;
/* If there a is a prepend chunk stick it on the list before
* any other chunks get appended.
*/
+ chunk = sctp_get_ecne_prepend(asoc);
if (chunk)
sctp_packet_append_chunk(packet, chunk);
}
@@ -158,7 +182,8 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
sctp_xmit_t retval;
int error = 0;
- pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
+ pr_debug("%s: packet:%p size:%Zu chunk:%p size:%d\n", __func__,
+ packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
case SCTP_XMIT_PMTU_FULL:
@@ -381,12 +406,15 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
struct sctp_transport *tp = packet->transport;
struct sctp_association *asoc = tp->asoc;
struct sctphdr *sh;
- struct sk_buff *nskb;
+ struct sk_buff *nskb = NULL, *head = NULL;
struct sctp_chunk *chunk, *tmp;
struct sock *sk;
int err = 0;
int padding; /* How much padding do we need? */
+ int pkt_size;
__u8 has_data = 0;
+ int gso = 0;
+ int pktcount = 0;
struct dst_entry *dst;
unsigned char *auth = NULL; /* pointer to auth in skb data */
@@ -400,18 +428,37 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
sk = chunk->skb->sk;
- /* Allocate the new skb. */
- nskb = alloc_skb(packet->size + MAX_HEADER, gfp);
- if (!nskb)
+ /* Allocate the head skb, or main one if not in GSO */
+ if (packet->size > tp->pathmtu && !packet->ipfragok) {
+ if (sk_can_gso(sk)) {
+ gso = 1;
+ pkt_size = packet->overhead;
+ } else {
+ /* If this happens, we trash this packet and try
+ * to build a new one, hopefully correct this
+ * time. Application may notice this error.
+ */
+ pr_err_once("Trying to GSO but underlying device doesn't support it.");
+ goto nomem;
+ }
+ } else {
+ pkt_size = packet->size;
+ }
+ head = alloc_skb(pkt_size + MAX_HEADER, gfp);
+ if (!head)
goto nomem;
+ if (gso) {
+ NAPI_GRO_CB(head)->last = head;
+ skb_shinfo(head)->gso_type = sk->sk_gso_type;
+ }
/* Make sure the outbound skb has enough header room reserved. */
- skb_reserve(nskb, packet->overhead + MAX_HEADER);
+ skb_reserve(head, packet->overhead + MAX_HEADER);
/* Set the owning socket so that we know where to get the
* destination IP address.
*/
- sctp_packet_set_owner_w(nskb, sk);
+ sctp_packet_set_owner_w(head, sk);
if (!sctp_transport_dst_check(tp)) {
sctp_transport_route(tp, NULL, sctp_sk(sk));
@@ -422,11 +469,11 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
dst = dst_clone(tp->dst);
if (!dst)
goto no_route;
- skb_dst_set(nskb, dst);
+ skb_dst_set(head, dst);
/* Build the SCTP header. */
- sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
- skb_reset_transport_header(nskb);
+ sh = (struct sctphdr *)skb_push(head, sizeof(struct sctphdr));
+ skb_reset_transport_header(head);
sh->source = htons(packet->source_port);
sh->dest = htons(packet->destination_port);
@@ -441,90 +488,133 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
sh->vtag = htonl(packet->vtag);
sh->checksum = 0;
- /**
- * 6.10 Bundling
- *
- * An endpoint bundles chunks by simply including multiple
- * chunks in one outbound SCTP packet. ...
- */
-
- /**
- * 3.2 Chunk Field Descriptions
- *
- * The total length of a chunk (including Type, Length and
- * Value fields) MUST be a multiple of 4 bytes. If the length
- * of the chunk is not a multiple of 4 bytes, the sender MUST
- * pad the chunk with all zero bytes and this padding is not
- * included in the chunk length field. The sender should
- * never pad with more than 3 bytes.
- *
- * [This whole comment explains WORD_ROUND() below.]
- */
-
pr_debug("***sctp_transmit_packet***\n");
- list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
- list_del_init(&chunk->list);
- if (sctp_chunk_is_data(chunk)) {
- /* 6.3.1 C4) When data is in flight and when allowed
- * by rule C5, a new RTT measurement MUST be made each
- * round trip. Furthermore, new RTT measurements
- * SHOULD be made no more than once per round-trip
- * for a given destination transport address.
- */
+ do {
+ /* Set up convenience variables... */
+ chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
+ pktcount++;
- if (!chunk->resent && !tp->rto_pending) {
- chunk->rtt_in_progress = 1;
- tp->rto_pending = 1;
+ /* Calculate packet size, so it fits in PMTU. Leave
+ * other chunks for the next packets.
+ */
+ if (gso) {
+ pkt_size = packet->overhead;
+ list_for_each_entry(chunk, &packet->chunk_list, list) {
+ int padded = WORD_ROUND(chunk->skb->len);
+
+ if (pkt_size + padded > tp->pathmtu)
+ break;
+ pkt_size += padded;
}
- has_data = 1;
- }
+ /* Allocate a new skb. */
+ nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
+ if (!nskb)
+ goto nomem;
- padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
- if (padding)
- memset(skb_put(chunk->skb, padding), 0, padding);
+ /* Make sure the outbound skb has enough header
+ * room reserved.
+ */
+ skb_reserve(nskb, packet->overhead + MAX_HEADER);
+ } else {
+ nskb = head;
+ }
- /* if this is the auth chunk that we are adding,
- * store pointer where it will be added and put
- * the auth into the packet.
+ /**
+ * 3.2 Chunk Field Descriptions
+ *
+ * The total length of a chunk (including Type, Length and
+ * Value fields) MUST be a multiple of 4 bytes. If the length
+ * of the chunk is not a multiple of 4 bytes, the sender MUST
+ * pad the chunk with all zero bytes and this padding is not
+ * included in the chunk length field. The sender should
+ * never pad with more than 3 bytes.
+ *
+ * [This whole comment explains WORD_ROUND() below.]
*/
- if (chunk == packet->auth)
- auth = skb_tail_pointer(nskb);
- memcpy(skb_put(nskb, chunk->skb->len),
+ pkt_size -= packet->overhead;
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+ list_del_init(&chunk->list);
+ if (sctp_chunk_is_data(chunk)) {
+ /* 6.3.1 C4) When data is in flight and when allowed
+ * by rule C5, a new RTT measurement MUST be made each
+ * round trip. Furthermore, new RTT measurements
+ * SHOULD be made no more than once per round-trip
+ * for a given destination transport address.
+ */
+
+ if (!chunk->resent && !tp->rto_pending) {
+ chunk->rtt_in_progress = 1;
+ tp->rto_pending = 1;
+ }
+
+ has_data = 1;
+ }
+
+ padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
+ if (padding)
+ memset(skb_put(chunk->skb, padding), 0, padding);
+
+ /* if this is the auth chunk that we are adding,
+ * store pointer where it will be added and put
+ * the auth into the packet.
+ */
+ if (chunk == packet->auth)
+ auth = skb_tail_pointer(nskb);
+
+ memcpy(skb_put(nskb, chunk->skb->len),
chunk->skb->data, chunk->skb->len);
- pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, "
- "rtt_in_progress:%d\n", chunk,
- sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
- chunk->has_tsn ? "TSN" : "No TSN",
- chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
- ntohs(chunk->chunk_hdr->length), chunk->skb->len,
- chunk->rtt_in_progress);
-
- /*
- * If this is a control chunk, this is our last
- * reference. Free data chunks after they've been
- * acknowledged or have failed.
- */
- if (!sctp_chunk_is_data(chunk))
- sctp_chunk_free(chunk);
- }
+ pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
+ chunk,
+ sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
+ chunk->has_tsn ? "TSN" : "No TSN",
+ chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
+ ntohs(chunk->chunk_hdr->length), chunk->skb->len,
+ chunk->rtt_in_progress);
+
+ /* If this is a control chunk, this is our last
+ * reference. Free data chunks after they've been
+ * acknowledged or have failed.
+ * Re-queue auth chunks if needed.
+ */
+ pkt_size -= WORD_ROUND(chunk->skb->len);
- /* SCTP-AUTH, Section 6.2
- * The sender MUST calculate the MAC as described in RFC2104 [2]
- * using the hash function H as described by the MAC Identifier and
- * the shared association key K based on the endpoint pair shared key
- * described by the shared key identifier. The 'data' used for the
- * computation of the AUTH-chunk is given by the AUTH chunk with its
- * HMAC field set to zero (as shown in Figure 6) followed by all
- * chunks that are placed after the AUTH chunk in the SCTP packet.
- */
- if (auth)
- sctp_auth_calculate_hmac(asoc, nskb,
- (struct sctp_auth_chunk *)auth,
- gfp);
+ if (chunk == packet->auth && !list_empty(&packet->chunk_list))
+ list_add(&chunk->list, &packet->chunk_list);
+ else if (!sctp_chunk_is_data(chunk))
+ sctp_chunk_free(chunk);
+
+ if (!pkt_size)
+ break;
+ }
+
+ /* SCTP-AUTH, Section 6.2
+ * The sender MUST calculate the MAC as described in RFC2104 [2]
+ * using the hash function H as described by the MAC Identifier and
+ * the shared association key K based on the endpoint pair shared key
+ * described by the shared key identifier. The 'data' used for the
+ * computation of the AUTH-chunk is given by the AUTH chunk with its
+ * HMAC field set to zero (as shown in Figure 6) followed by all
+ * chunks that are placed after the AUTH chunk in the SCTP packet.
+ */
+ if (auth)
+ sctp_auth_calculate_hmac(asoc, nskb,
+ (struct sctp_auth_chunk *)auth,
+ gfp);
+
+ if (!gso)
+ break;
+
+ if (skb_gro_receive(&head, nskb))
+ goto nomem;
+ nskb = NULL;
+ if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
+ sk->sk_gso_max_segs))
+ goto nomem;
+ } while (!list_empty(&packet->chunk_list));
/* 2) Calculate the Adler-32 checksum of the whole packet,
* including the SCTP common header and all the
@@ -532,16 +622,18 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
*
* Note: Adler-32 is no longer applicable, as has been replaced
* by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
+ *
+	 * If it's a GSO packet, it's postponed to sctp_gso_segment.
*/
- if (!sctp_checksum_disable) {
- if (!(dst->dev->features & NETIF_F_SCTP_CRC) ||
- (dst_xfrm(dst) != NULL) || packet->ipfragok) {
- sh->checksum = sctp_compute_cksum(nskb, 0);
+ if (!sctp_checksum_disable || gso) {
+ if (!gso && (!(dst->dev->features & NETIF_F_SCTP_CRC) ||
+ dst_xfrm(dst) || packet->ipfragok)) {
+ sh->checksum = sctp_compute_cksum(head, 0);
} else {
/* no need to seed pseudo checksum for SCTP */
- nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum_start = skb_transport_header(nskb) - nskb->head;
- nskb->csum_offset = offsetof(struct sctphdr, checksum);
+ head->ip_summed = CHECKSUM_PARTIAL;
+ head->csum_start = skb_transport_header(head) - head->head;
+ head->csum_offset = offsetof(struct sctphdr, checksum);
}
}
@@ -557,7 +649,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
* Note: The works for IPv6 layer checks this bit too later
* in transmission. See IP6_ECN_flow_xmit().
*/
- tp->af_specific->ecn_capable(nskb->sk);
+ tp->af_specific->ecn_capable(sk);
/* Set up the IP options. */
/* BUG: not implemented
@@ -566,7 +658,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
/* Dump that on IP! */
if (asoc) {
- asoc->stats.opackets++;
+ asoc->stats.opackets += pktcount;
if (asoc->peer.last_sent_to != tp)
/* Considering the multiple CPU scenario, this is a
* "correcter" place for last_sent_to. --xguo
@@ -589,16 +681,36 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
}
}
- pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len);
+ pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
+
+ if (gso) {
+ /* Cleanup our debris for IP stacks */
+ memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
+ sizeof(struct inet6_skb_parm)));
- nskb->ignore_df = packet->ipfragok;
- tp->af_specific->sctp_xmit(nskb, tp);
+ skb_shinfo(head)->gso_segs = pktcount;
+ skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
+
+ /* We have to refresh this in case we are xmiting to
+ * more than one transport at a time
+ */
+ rcu_read_lock();
+ if (__sk_dst_get(sk) != tp->dst) {
+ dst_hold(tp->dst);
+ sk_setup_caps(sk, tp->dst);
+ }
+ rcu_read_unlock();
+ }
+ head->ignore_df = packet->ipfragok;
+ tp->af_specific->sctp_xmit(head, tp);
out:
sctp_packet_reset(packet);
return err;
no_route:
- kfree_skb(nskb);
+ kfree_skb(head);
+ if (nskb != head)
+ kfree_skb(nskb);
if (asoc)
IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
@@ -751,39 +863,63 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
struct sctp_chunk *chunk,
u16 chunk_len)
{
- size_t psize;
- size_t pmtu;
- int too_big;
+ size_t psize, pmtu;
sctp_xmit_t retval = SCTP_XMIT_OK;
psize = packet->size;
- pmtu = ((packet->transport->asoc) ?
- (packet->transport->asoc->pathmtu) :
- (packet->transport->pathmtu));
-
- too_big = (psize + chunk_len > pmtu);
+ if (packet->transport->asoc)
+ pmtu = packet->transport->asoc->pathmtu;
+ else
+ pmtu = packet->transport->pathmtu;
/* Decide if we need to fragment or resubmit later. */
- if (too_big) {
- /* It's OK to fragmet at IP level if any one of the following
+ if (psize + chunk_len > pmtu) {
+ /* It's OK to fragment at IP level if any one of the following
* is true:
- * 1. The packet is empty (meaning this chunk is greater
- * the MTU)
- * 2. The chunk we are adding is a control chunk
- * 3. The packet doesn't have any data in it yet and data
- * requires authentication.
+ * 1. The packet is empty (meaning this chunk is greater
+	 *    than the MTU)
+ * 2. The packet doesn't have any data in it yet and data
+ * requires authentication.
*/
- if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
+ if (sctp_packet_empty(packet) ||
(!packet->has_data && chunk->auth)) {
/* We no longer do re-fragmentation.
* Just fragment at the IP layer, if we
* actually hit this condition
*/
packet->ipfragok = 1;
- } else {
- retval = SCTP_XMIT_PMTU_FULL;
+ goto out;
}
+
+ /* It is also okay to fragment if the chunk we are
+ * adding is a control chunk, but only if current packet
+ * is not a GSO one otherwise it causes fragmentation of
+ * a large frame. So in this case we allow the
+ * fragmentation by forcing it to be in a new packet.
+ */
+ if (!sctp_chunk_is_data(chunk) && packet->has_data)
+ retval = SCTP_XMIT_PMTU_FULL;
+
+ if (psize + chunk_len > packet->max_size)
+ /* Hit GSO/PMTU limit, gotta flush */
+ retval = SCTP_XMIT_PMTU_FULL;
+
+ if (!packet->transport->burst_limited &&
+ psize + chunk_len > (packet->transport->cwnd >> 1))
+ /* Do not allow a single GSO packet to use more
+ * than half of cwnd.
+ */
+ retval = SCTP_XMIT_PMTU_FULL;
+
+ if (packet->transport->burst_limited &&
+ psize + chunk_len > (packet->transport->burst_limited >> 1))
+ /* Do not allow a single GSO packet to use more
+ * than half of original cwnd.
+ */
+ retval = SCTP_XMIT_PMTU_FULL;
+ /* Otherwise it will fit in the GSO packet */
}
+out:
return retval;
}
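
The new transmit loop in sctp_packet_transmit() above sizes each GSO sub-packet by accumulating WORD_ROUND()-padded chunk lengths against the path MTU and cutting a new fragment when the next chunk would not fit. The same packing arithmetic over a plain array of chunk lengths, with an invented helper name and an example overhead value (the real overhead depends on the IP version plus the SCTP common header), shown purely as a sketch:

#include <stddef.h>
#include <stdio.h>

#define WORD_ROUND(len) (((len) + 3U) & ~3U)

static unsigned int count_gso_segs(const unsigned int *chunk_len, size_t n,
				   unsigned int pmtu, unsigned int overhead)
{
	unsigned int segs = 0, pkt_size = overhead;

	for (size_t i = 0; i < n; i++) {
		unsigned int padded = WORD_ROUND(chunk_len[i]);

		if (pkt_size + padded > pmtu && pkt_size > overhead) {
			segs++;                 /* flush the current sub-packet */
			pkt_size = overhead;
		}
		pkt_size += padded;             /* oversized chunks go out alone here */
	}
	if (pkt_size > overhead)
		segs++;                         /* last, partially filled sub-packet */
	return segs;
}

int main(void)
{
	unsigned int chunks[] = { 1200, 1200, 300, 50, 1453 };

	/* 32 ~= 20-byte IPv4 header + 12-byte SCTP common header, as an example */
	printf("gso_segs = %u\n", count_gso_segs(chunks, 5, 1500, 32));
	return 0;
}
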
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d3d50da..40022ee 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1516,6 +1516,9 @@ static __init int sctp_init(void)
if (status)
goto err_v6_add_protocol;
+ if (sctp_offload_init() < 0)
+ pr_crit("%s: Cannot add SCTP protocol offload\n", __func__);
+
out:
return status;
err_v6_add_protocol:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 67154b8..712fb23 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4003,6 +4003,8 @@ static int sctp_init_sock(struct sock *sk)
return -ESOCKTNOSUPPORT;
}
+ sk->sk_gso_type = SKB_GSO_SCTP;
+
/* Initialize default send parameters. These parameters can be
* modified with the SCTP_DEFAULT_SEND_PARAM socket option.
*/
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 7059c94..a904ccd 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -87,7 +87,6 @@ struct tipc_stats {
* @peer_bearer_id: bearer id used by link's peer endpoint
* @bearer_id: local bearer id used by link
* @tolerance: minimum link continuity loss needed to reset link [in ms]
- * @keepalive_intv: link keepalive timer interval
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
* @peer_caps: bitmap describing capabilities of peer node
@@ -131,7 +130,6 @@ struct tipc_link {
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
- unsigned long keepalive_intv;
u32 abort_limit;
u32 state;
u16 peer_caps;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index e01e2c71..d6a490f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -378,14 +378,13 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
unsigned long tol = tipc_link_tolerance(l);
unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
- unsigned long keepalive_intv = msecs_to_jiffies(intv);
/* Link with lowest tolerance determines timer interval */
- if (keepalive_intv < n->keepalive_intv)
- n->keepalive_intv = keepalive_intv;
+ if (intv < n->keepalive_intv)
+ n->keepalive_intv = intv;
- /* Ensure link's abort limit corresponds to current interval */
- tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv));
+ /* Ensure link's abort limit corresponds to current tolerance */
+ tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
static void tipc_node_delete(struct tipc_node *node)
@@ -526,7 +525,7 @@ static void tipc_node_timeout(unsigned long data)
if (rc & TIPC_LINK_DOWN_EVT)
tipc_node_link_down(n, bearer_id, false);
}
- mod_timer(&n->timer, jiffies + n->keepalive_intv);
+ mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}
/**
@@ -735,6 +734,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
bool accept_addr = false;
bool reset = true;
char *if_name;
+ unsigned long intv;
*dupl_addr = false;
*respond = false;
@@ -840,9 +840,11 @@ void tipc_node_check_dest(struct net *net, u32 onode,
le->link = l;
n->link_cnt++;
tipc_node_calculate_timer(n, l);
- if (n->link_cnt == 1)
- if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+ if (n->link_cnt == 1) {
+ intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
+ if (!mod_timer(&n->timer, intv))
tipc_node_get(n);
+ }
}
memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
@@ -950,7 +952,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
state = SELF_UP_PEER_UP;
break;
case SELF_LOST_CONTACT_EVT:
- state = SELF_DOWN_PEER_LEAVING;
+ state = SELF_DOWN_PEER_DOWN;
break;
case SELF_ESTABL_CONTACT_EVT:
case PEER_LOST_CONTACT_EVT:
@@ -969,7 +971,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
state = SELF_UP_PEER_UP;
break;
case PEER_LOST_CONTACT_EVT:
- state = SELF_LEAVING_PEER_DOWN;
+ state = SELF_DOWN_PEER_DOWN;
break;
case SELF_LOST_CONTACT_EVT:
case PEER_ESTABL_CONTACT_EVT: