summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/l2-switch.txt99
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-l2switch-0.dtsi93
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi46
-rw-r--r--arch/powerpc/boot/dts/t1040rdb.dts18
-rw-r--r--arch/powerpc/boot/dts/t104xqds.dtsi17
-rw-r--r--arch/powerpc/boot/dts/t2080rdb-usdpaa-shared-interfaces.dts148
-rw-r--r--arch/powerpc/boot/dts/t2080rdb-usdpaa.dts119
-rw-r--r--arch/powerpc/boot/dts/t4240rdb-usdpaa-shared-interfaces.dts191
-rw-r--r--arch/powerpc/boot/dts/t4240rdb-usdpaa.dts208
-rw-r--r--arch/powerpc/boot/dts/t4240rdb.dts476
-rw-r--r--arch/powerpc/configs/85xx/e6500rev2_defconfig5
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig1
-rw-r--r--arch/powerpc/include/asm/switch_to.h6
-rw-r--r--arch/powerpc/kernel/process.c32
-rw-r--r--arch/powerpc/kvm/booke.c12
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c7
-rw-r--r--arch/powerpc/platforms/Kconfig2
-rw-r--r--drivers/crypto/caam/Kconfig3
-rw-r--r--drivers/dma/fsldma.c100
-rw-r--r--drivers/dma/fsldma.h16
-rw-r--r--drivers/iommu/fsl_pamu.c153
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c2
-rw-r--r--drivers/net/Kconfig25
-rw-r--r--drivers/net/bonding/Makefile11
-rw-r--r--drivers/net/bonding/bond_3ad.c9
-rw-r--r--drivers/net/bonding/bond_main.c39
-rw-r--r--drivers/net/bonding/bond_sysfs.c24
-rw-r--r--drivers/net/bonding/bonding.h7
-rw-r--r--drivers/net/bonding/hw_distribution.c2255
-rw-r--r--drivers/net/bonding/hw_distribution.h165
-rw-r--r--drivers/net/bonding/hw_oh_pcd.c744
-rw-r--r--drivers/net/ethernet/freescale/asf_gianfar.c3
-rwxr-xr-xdrivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c346
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h16
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c57
-rw-r--r--drivers/staging/fsl_dpa_offload/dpa_classifier.c32
-rw-r--r--drivers/staging/fsl_dpa_offload/dpa_ipsec_ioctl.h3
-rw-r--r--drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa-shared-interfaces.dts182
-rw-r--r--drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts164
-rw-r--r--drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c4
-rw-r--r--drivers/staging/fsl_pme2/Makefile2
-rw-r--r--drivers/staging/fsl_pme2/pme2_ctrl.c144
-rw-r--r--drivers/staging/fsl_pme2/pme2_private.h223
-rw-r--r--drivers/staging/fsl_pme2/pme2_suspend.c1184
-rw-r--r--drivers/uio/fsl_dma_uio.c61
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c43
-rw-r--r--include/linux/fsl_pme.h3
52 files changed, 7279 insertions, 230 deletions
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/l2-switch.txt b/Documentation/devicetree/bindings/powerpc/fsl/l2-switch.txt
new file mode 100644
index 0000000..c192f74
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/l2-switch.txt
@@ -0,0 +1,99 @@
+T1040 L2switch Device Tree Binding
+Copyright (C) 2013 Freescale Semiconductor Inc.
+
+CONTENTS
+ - Overview
+ - L2switch Node
+ - Port Node
+
+=====================================================================
+Overview
+
+DESCRIPTION
+
+ T1040 integrates a Gigabit Ethernet switch core with eight 10/100/1000 Mbps
+ Ethernet ports and two 10/100/1000/2500 Mbps ports.
+
+=====================================================================
+L2switch Node
+
+Description
+
+ This node specifies the address range of l2switch configuration registers
+ and interrupts. It also contains a set of child nodes defining the
+ Ethernet ports.
+
+PROPERTIES
+
+ - compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must include "vitesse-9953"
+
+ - reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Standard property which specifies the physical address and
+ length of the l2switch configuration registers.
+
+ - interrupts
+ Usage: required
+    Value type: <prop-encoded-array>
+ Definition: Standard property, specifies the interrupts generated by
+ this device.
+ The value of the interrupts property consists of one interrupt
+ specifier. The format of the specifier is defined by the binding
+ document describing the node's interrupt parent.
+
+=====================================================================
+Port Node
+
+Description
+
+ This node specifies the status and connection type of each of the Ethernet
+ ports.
+
+PROPERTIES
+
+ - compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must include "vitesse-9953-port"
+
+ - port-index
+ Usage: required
+ Value type: <u32>
+ Definition: Specifies the port index. Values 0, 1 are used by the two
+ internal 10/100/1000/2500 Mbps ports, values 2-9 are used by the
+ eight external 10/100/1000 Mbps ports.
+
+ - phy-connection-type
+ Usage: required
+ Value type: <string>
+ Definition: Specifies the controller/PHY interface type.
+
+ - status
+ Usage: optional
+ Value type: <string>
+ Definition: Standard property.
+ Indicates the operational status of the port. "disabled" is used
+ to indicate that the port is not usable (for example because the
+ QSGMII link to the PHYs is not available with current RCW).
+
+ - fixed-link
+ Usage: optional
+    Value type: <prop-encoded-array>
+ Definition: Specifies link parameters in the absence of a PHY.
+    <a b c d e> where a is emulated phy id - must be unique among
+ all specified fixed-links, b is duplex - 0 half, 1 full, c is
+ link speed - 10/100/1000/2500, d is pause - 0 no pause, 1 pause,
+ e is asym_pause - 0 no asym_pause, 1 asym_pause.
+
+ - phy-handle
+ Usage: optional
+ Value type: <phandle>
+ Definition: The phandle for the PHY connected to this l2switch port.
+
+
+ Note: All other standard properties (see the ePAPR) are allowed but are
+ optional.
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-l2switch-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-l2switch-0.dtsi
new file mode 100644
index 0000000..e15ac3c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-l2switch-0.dtsi
@@ -0,0 +1,93 @@
+/*
+ * T1040 Silicon/SoC L2switch device tree stub [ controller @ offset 0x800000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+l2switch: l2switch@800000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "vitesse-9953";
+ clock-frequency = <0>;
+
+ reg = <0x800000 0x290000>;
+
+ port@100000 {
+ compatible = "vitesse-9953-port";
+ port-index = <0>;
+ };
+
+ port@110000 {
+ compatible = "vitesse-9953-port";
+ port-index = <1>;
+ };
+
+ port@120000 {
+ compatible = "vitesse-9953-port";
+ port-index = <2>;
+ };
+
+ port@130000 {
+ compatible = "vitesse-9953-port";
+ port-index = <3>;
+ };
+
+ port@140000 {
+ compatible = "vitesse-9953-port";
+ port-index = <4>;
+ };
+
+ port@150000 {
+ compatible = "vitesse-9953-port";
+ port-index = <5>;
+ };
+
+ port@160000 {
+ compatible = "vitesse-9953-port";
+ port-index = <6>;
+ };
+
+ port@170000 {
+ compatible = "vitesse-9953-port";
+ port-index = <7>;
+ };
+
+ port@180000 {
+ compatible = "vitesse-9953-port";
+ port-index = <8>;
+ };
+
+ port@190000 {
+ compatible = "vitesse-9953-port";
+ port-index = <9>;
+ };
+ };
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 696e30b..f48d5c2 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -638,6 +638,7 @@ sata@221000 {
status = "disabled";
};
};
+/include/ "qoriq-l2switch-0.dtsi"
l2switch@800000 {
interrupts = <26 2 0 0>;
@@ -757,48 +758,3 @@ sata@221000 {
};
};
};
-
-&qe {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "qe";
- compatible = "fsl,qe";
- fsl,qe-num-riscs = <1>;
- fsl,qe-num-snums = <28>;
-
- qeic: interrupt-controller@80 {
- interrupt-controller;
- compatible = "fsl,qe-ic";
- #address-cells = <0>;
- #interrupt-cells = <1>;
- reg = <0x80 0x80>;
- interrupts = <95 2 0 0 94 2 0 0>; //high:79 low:78
- };
-
- ucc@2000 {
- cell-index = <1>;
- reg = <0x2000 0x200>;
- interrupts = <32>;
- interrupt-parent = <&qeic>;
- };
-
- ucc@2200 {
- cell-index = <3>;
- reg = <0x2200 0x200>;
- interrupts = <34>;
- interrupt-parent = <&qeic>;
- };
-
- muram@10000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,qe-muram", "fsl,cpm-muram";
- ranges = <0x0 0x10000 0x6000>;
-
- data-only@0 {
- compatible = "fsl,qe-muram-data",
- "fsl,cpm-muram-data";
- reg = <0x0 0x6000>;
- };
- };
-};
diff --git a/arch/powerpc/boot/dts/t1040rdb.dts b/arch/powerpc/boot/dts/t1040rdb.dts
index dcb5a93..d1caf30 100644
--- a/arch/powerpc/boot/dts/t1040rdb.dts
+++ b/arch/powerpc/boot/dts/t1040rdb.dts
@@ -304,6 +304,14 @@
};
};
+ /* bp dts definition is borrowed from other USDPAA dts */
+ bp6: buffer-pool@6 {
+ compatible = "fsl,t1040-bpool", "fsl,bpool";
+ fsl,bpid = <6>;
+ fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
fsl,dpaa {
compatible = "fsl,t1040-dpaa", "fsl,dpaa";
ethernet@0 {
@@ -331,6 +339,16 @@
fsl,fman-mac = <&enet4>;
sleep = <&rcpm 0x08000008>;
};
+ /* enable one offline port */
+ dpa-fman0-oh@5 {
+ compatible = "fsl,dpa-oh";
+ /* Define frame queues for the OH port*/
+ /* <OH Rx error, OH Rx default> */
+ fsl,qman-frame-queues-oh = <0x68 1 0x69 1>;
+ fsl,bman-buffer-pools = <&bp6>;
+ fsl,qman-frame-queues-tx = <0x90 8>;
+ fsl,fman-oh-port = <&fman0_oh5>;
+ };
};
qe: qe@ffe139999 {
diff --git a/arch/powerpc/boot/dts/t104xqds.dtsi b/arch/powerpc/boot/dts/t104xqds.dtsi
index 079a4cd..ba4de7f 100644
--- a/arch/powerpc/boot/dts/t104xqds.dtsi
+++ b/arch/powerpc/boot/dts/t104xqds.dtsi
@@ -329,6 +329,13 @@
};
};
};
+ /* bp dts definition is borrowed from other USDPAA dts */
+ bp6: buffer-pool@6 {
+ compatible = "fsl,t1040-bpool", "fsl,bpool";
+ fsl,bpid = <6>;
+ fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
fsl,dpaa {
compatible = "fsl,t1040-dpaa", "fsl,dpaa";
@@ -357,6 +364,16 @@
fsl,fman-mac = <&enet4>;
sleep = <&rcpm 0x08000008>;
};
+ /* Enable one offline port as default to support HW based LAG */
+ dpa-fman0-oh@5 {
+ compatible = "fsl,dpa-oh";
+ /* Define frame queues for the OH port*/
+ /* <OH Rx error, OH Rx default> */
+ fsl,qman-frame-queues-oh = <0x68 1 0x69 1>;
+ fsl,bman-buffer-pools = <&bp6>;
+ fsl,qman-frame-queues-tx = <0x90 8>;
+ fsl,fman-oh-port = <&fman0_oh5>;
+ };
};
pci0: pcie@ffe240000 {
diff --git a/arch/powerpc/boot/dts/t2080rdb-usdpaa-shared-interfaces.dts b/arch/powerpc/boot/dts/t2080rdb-usdpaa-shared-interfaces.dts
new file mode 100644
index 0000000..babce4d
--- /dev/null
+++ b/arch/powerpc/boot/dts/t2080rdb-usdpaa-shared-interfaces.dts
@@ -0,0 +1,148 @@
+/*
+ * T2080RDB USDPAA Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "t2080rdb.dts"
+
+/ {
+ /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding,
+ * because apps seed these pools with buffers allocated at
+ * run-time.
+ * HOWEVER, the kernel driver requires the buffer-size so
+ * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets
+ * things if the base-address is zero (hence the 0xdeadbeef
+ * values).
+ */
+ bp7: buffer-pool@7 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <7>;
+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
+ };
+ bp8: buffer-pool@8 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <8>;
+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp9: buffer-pool@9 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <9>;
+ fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ bp10: buffer-pool@10 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <10>;
+ fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>;
+ };
+ bp11: buffer-pool@11 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <11>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp12: buffer-pool@12 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <12>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp16: buffer-pool@16 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <16>;
+ fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp17: buffer-pool@17 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <17>;
+ fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ fsl,dpaa {
+ ethernet@0 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x90 1 0x91 1>;
+ fsl,qman-frame-queues-tx = <0x98 1 0x99 1>;
+ };
+ /* ethernet@1 declared as shared MAC. USDPAA will seed buffers to
+ * this buffer pool. The ethernet driver will initialize the RX default,
+ * RX error, TX error, TX confirm and 8 TX Frame queues. On receiving frame
+ * at this interface, the ethernet driver will do kmap_atomic/kunmap_atomic
+ * for that frame. */
+ ethernet@1 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-shared", "fsl,dpa-ethernet-shared";
+ fsl,bman-buffer-pools = <&bp17>;
+ fsl,qman-frame-queues-rx = <0x92 1 0x93 1 0x2000 3>;
+ fsl,qman-frame-queues-tx = <0 1 0 1 0x3000 8>;
+ };
+ ethernet@2 {/* 1G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
+ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
+ };
+ ethernet@3 {/* 1G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
+ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
+ };
+ ethernet@8 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
+ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
+ };
+ ethernet@9 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>;
+ fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>;
+ };
+ /* ethernet@10 declared as MAC-less interface with no "fsl,fman-mac" property.
+ * USDPAA will seed buffers to this buffer pool and initialize 8 TX Frame
+ * queues. The ethernet driver will initialize 8 RX default Frame queues.
+ * On receiving frame at this interface, the ethernet driver will do
+ * kmap_atomic/kunmap_atomic for that frame. */
+ ethernet@10 {
+ compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless";
+ fsl,bman-buffer-pools = <&bp16>;
+ fsl,qman-frame-queues-rx = <4000 8>;
+ fsl,qman-frame-queues-tx = <4008 8>;
+ local-mac-address = [00 11 22 33 44 55];
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/t2080rdb-usdpaa.dts b/arch/powerpc/boot/dts/t2080rdb-usdpaa.dts
new file mode 100644
index 0000000..c4dac2d
--- /dev/null
+++ b/arch/powerpc/boot/dts/t2080rdb-usdpaa.dts
@@ -0,0 +1,119 @@
+/*
+ * T2080RDB USDPAA Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "t2080rdb.dts"
+
+/ {
+ /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding,
+ * because apps seed these pools with buffers allocated at
+ * run-time.
+ * HOWEVER, the kernel driver requires the buffer-size so
+ * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets
+ * things if the base-address is zero (hence the 0xdeadbeef
+ * values).
+ */
+ bp7: buffer-pool@7 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <7>;
+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
+ };
+ bp8: buffer-pool@8 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <8>;
+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp9: buffer-pool@9 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <9>;
+ fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ bp10: buffer-pool@10 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <10>;
+ fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>;
+ };
+ bp11: buffer-pool@11 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <11>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp12: buffer-pool@12 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <12>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ fsl,dpaa {
+ ethernet@0 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x90 1 0x91 1>;
+ fsl,qman-frame-queues-tx = <0x98 1 0x99 1>;
+ };
+ ethernet@1 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x92 1 0x93 1>;
+ fsl,qman-frame-queues-tx = <0x9a 1 0x9b 1>;
+ };
+ ethernet@2 {/* 1G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
+ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
+ };
+ ethernet@3 {/* 1G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
+ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
+ };
+ ethernet@8 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
+ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
+ };
+ ethernet@9 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>;
+ fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/t4240rdb-usdpaa-shared-interfaces.dts b/arch/powerpc/boot/dts/t4240rdb-usdpaa-shared-interfaces.dts
new file mode 100644
index 0000000..689db9c
--- /dev/null
+++ b/arch/powerpc/boot/dts/t4240rdb-usdpaa-shared-interfaces.dts
@@ -0,0 +1,191 @@
+/*
+ * T4240RDB USDPAA Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "t4240rdb.dts"
+
+/ {
+ /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding,
+ * because apps seed these pools with buffers allocated at
+ * run-time.
+ * HOWEVER, the kernel driver requires the buffer-size so
+ * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets
+ * things if the base-address is zero (hence the 0xdeadbeef
+ * values).
+ */
+ bp7: buffer-pool@7 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <7>;
+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
+ };
+ bp8: buffer-pool@8 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <8>;
+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp9: buffer-pool@9 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <9>;
+ fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ bp10: buffer-pool@10 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <10>;
+ fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>;
+ };
+ bp11: buffer-pool@11 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <11>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp12: buffer-pool@12 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <12>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp16: buffer-pool@16 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <16>;
+ fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp17: buffer-pool@17 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <17>;
+ fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ fsl,dpaa {
+ ethernet@0 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x50 1 0x51 1>;
+ fsl,qman-frame-queues-tx = <0x70 1 0x71 1>;
+ };
+ ethernet@1 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x52 1 0x53 1>;
+ fsl,qman-frame-queues-tx = <0x72 1 0x73 1>;
+ };
+ ethernet@2 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
+ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
+ };
+ ethernet@3 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
+ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
+ };
+ ethernet@6 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
+ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
+ };
+ ethernet@7 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>;
+ fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>;
+ };
+ ethernet@8 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x60 1 0x61 1>;
+ fsl,qman-frame-queues-tx = <0x80 1 0x81 1>;
+ };
+ ethernet@9 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x62 1 0x63 1>;
+ fsl,qman-frame-queues-tx = <0x82 1 0x83 1>;
+ };
+ ethernet@10 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x64 1 0x65 1>;
+ fsl,qman-frame-queues-tx = <0x84 1 0x85 1>;
+ };
+ ethernet@11 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x66 1 0x67 1>;
+ fsl,qman-frame-queues-tx = <0x86 1 0x87 1>;
+ };
+ ethernet@14 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x6c 1 0x6d 1>;
+ fsl,qman-frame-queues-tx = <0x8c 1 0x8d 1>;
+ };
+ /* ethernet@15 declared as shared MAC. USDPAA will seed buffers to
+ * this buffer pool. The ethernet driver will initialize the RX default,
+ * RX error, TX error, TX confirm and 8 TX Frame queues. On receiving frame
+ * at this interface, the ethernet driver will do kmap_atomic/kunmap_atomic
+ * for that frame. */
+ ethernet@15 {
+ compatible = "fsl,t4240-dpa-ethernet-shared", "fsl,dpa-ethernet-shared";
+ fsl,bman-buffer-pools = <&bp17>;
+ fsl,qman-frame-queues-rx = <0x6e 1 0x6f 1 0x2000 3>;
+ fsl,qman-frame-queues-tx = <0 1 0 1 0x3000 8>;
+ };
+ /* ethernet@16 declared as MAC-less interface with no "fsl,fman-mac" property.
+ * USDPAA will seed buffers to this buffer pool and initialize 8 TX Frame
+ * queues. The ethernet driver will initialize 8 RX default Frame queues.
+ * On receiving frame at this interface, the ethernet driver will do
+ * kmap_atomic/kunmap_atomic for that frame. */
+ ethernet@16 {
+ compatible = "fsl,t4240-dpa-ethernet-macless", "fsl,dpa-ethernet-macless";
+ fsl,bman-buffer-pools = <&bp16>;
+ fsl,qman-frame-queues-rx = <4000 8>;
+ fsl,qman-frame-queues-tx = <4008 8>;
+ local-mac-address = [00 11 22 33 44 55];
+ };
+ dpa-fman0-oh@2 {
+ compatible = "fsl,dpa-oh";
+ /* Define frame queues for the OH port*/
+ /* <OH Rx error, OH Rx default> */
+ fsl,qman-frame-queues-oh = <0x90 1 0x91 1>;
+ fsl,fman-oh-port = <&fman0_oh2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/t4240rdb-usdpaa.dts b/arch/powerpc/boot/dts/t4240rdb-usdpaa.dts
new file mode 100644
index 0000000..a9e30eb
--- /dev/null
+++ b/arch/powerpc/boot/dts/t4240rdb-usdpaa.dts
@@ -0,0 +1,208 @@
+/*
+ * T4240RDB USDPAA Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "t4240rdb.dts"
+
+/ {
+ /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding,
+ * because apps seed these pools with buffers allocated at
+ * run-time.
+ * HOWEVER, the kernel driver requires the buffer-size so
+ * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets
+ * things if the base-address is zero (hence the 0xdeadbeef
+ * values).
+ */
+ bp7: buffer-pool@7 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <7>;
+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
+ };
+ bp8: buffer-pool@8 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <8>;
+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp9: buffer-pool@9 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <9>;
+ fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ bp10: buffer-pool@10 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <10>;
+ fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>;
+ };
+ bp11: buffer-pool@11 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <11>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp12: buffer-pool@12 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool";
+ fsl,bpid = <12>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp13: buffer-pool@13 {
+ compatible = "fsl,t4240-bpool", "fsl,bpool", "fsl,bpool-ethernet-seeds";
+ fsl,bpid = <13>;
+ fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ chosen {
+ dpaa-extended-args {
+ fman0-extd-args {
+ cell-index = <0x0>;
+ compatible = "fsl,fman-extended-args";
+ dma-aid-mode = "port";
+ fman0_oh3-extd-args {
+ cell-index = <0x2>;
+ compatible = "fsl,fman-port-op-extended-args";
+ vsp-window = <0x8 0x0>;
+ };
+ fman0_oh4-extd-args {
+ cell-index = <0x3>;
+ compatible = "fsl,fman-port-op-extended-args";
+ vsp-window = <0x8 0x0>;
+ };
+ };
+ };
+ };
+
+ fsl,dpaa {
+ ethernet@0 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x50 1 0x51 1>;
+ fsl,qman-frame-queues-tx = <0x70 1 0x71 1>;
+ };
+ ethernet@1 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x52 1 0x53 1>;
+ fsl,qman-frame-queues-tx = <0x72 1 0x73 1>;
+ };
+ ethernet@2 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
+ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
+ };
+ ethernet@3 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
+ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
+ };
+ ethernet@6 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
+ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
+ };
+ ethernet@7 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>;
+ fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>;
+ };
+ ethernet@8 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x60 1 0x61 1>;
+ fsl,qman-frame-queues-tx = <0x80 1 0x81 1>;
+ };
+ ethernet@9 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x62 1 0x63 1>;
+ fsl,qman-frame-queues-tx = <0x82 1 0x83 1>;
+ };
+ ethernet@10 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x64 1 0x65 1>;
+ fsl,qman-frame-queues-tx = <0x84 1 0x85 1>;
+ };
+ ethernet@11 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x66 1 0x67 1>;
+ fsl,qman-frame-queues-tx = <0x86 1 0x87 1>;
+ };
+ ethernet@14 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x6c 1 0x6d 1>;
+ fsl,qman-frame-queues-tx = <0x8c 1 0x8d 1>;
+ };
+ ethernet@15 {
+ compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x6e 1 0x6f 1>;
+ fsl,qman-frame-queues-tx = <0x8e 1 0x8f 1>;
+ };
+ ethernet@16 {
+ compatible = "fsl,t4240-dpa-ethernet-generic", "fsl,dpa-ethernet-generic";
+ fsl,qman-frame-queues-tx = <0x92 1>;
+ fsl,oh-ports = <&oh3 &oh4>;
+ local-mac-address = [00 11 22 33 44 66];
+ };
+
+ dpa-fman0-oh@2 {
+ compatible = "fsl,dpa-oh";
+ /* Define frame queues for the OH port */
+ /* <OH Rx error, OH Rx default> */
+ fsl,qman-frame-queues-oh = <0x90 1 0x91 1>;
+ fsl,fman-oh-port = <&fman0_oh2>;
+ };
+ oh3: dpa-fman0-oh@3 {
+ compatible = "fsl,dpa-oh";
+ fsl,bman-buffer-pools = <&bp13>;
+ fsl,qman-frame-queues-oh = <0x96 1 0x97 1>;
+ fsl,fman-oh-port = <&fman0_oh3>;
+ };
+ oh4: dpa-fman0-oh@4 {
+ compatible = "fsl,dpa-oh";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-oh = <0x94 1 0x95 1>;
+ fsl,fman-oh-port = <&fman0_oh4>;
+ };
+
+ };
+};
diff --git a/arch/powerpc/boot/dts/t4240rdb.dts b/arch/powerpc/boot/dts/t4240rdb.dts
new file mode 100644
index 0000000..934b96a
--- /dev/null
+++ b/arch/powerpc/boot/dts/t4240rdb.dts
@@ -0,0 +1,476 @@
+/*
+ * T4240RDB Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t4240si-pre.dtsi"
+
+/ {
+ model = "fsl,T4240RDB";
+ compatible = "fsl,T4240RDB";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ sgmii_phy21 = &sgmiiphy21;
+ sgmii_phy22 = &sgmiiphy22;
+ sgmii_phy23 = &sgmiiphy23;
+ sgmii_phy24 = &sgmiiphy24;
+ sgmii_phy41 = &sgmiiphy41;
+ sgmii_phy42 = &sgmiiphy42;
+ sgmii_phy43 = &sgmiiphy43;
+ sgmii_phy44 = &sgmiiphy44;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ ethernet3 = &enet3;
+ ethernet4 = &enet14;
+ ethernet5 = &enet15;
+ ethernet6 = &enet6;
+ ethernet7 = &enet7;
+ ethernet8 = &enet8;
+ ethernet9 = &enet9;
+ ethernet10 = &enet10;
+ ethernet11 = &enet11;
+ ethernet12 = &enet12;
+ ethernet13 = &enet13;
+ ethernet14 = &enet4;
+ ethernet15 = &enet5;
+ };
+
+ ifc: localbus@ffe124000 {
+ reg = <0xf 0xfe124000 0 0x2000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000
+ 2 0 0xf 0xff800000 0x00010000
+ 3 0 0xf 0xffdf0000 0x00008000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+
+ bank-width = <2>;
+ device-width = <1>;
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc-nand";
+ reg = <0x2 0x0 0x10000>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ dcsr: dcsr@f00000000 {
+ ranges = <0x00000000 0xf 0x00000000 0x01072000>;
+ };
+
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
+ qportals: qman-portals@ff6000000 {
+ ranges = <0x0 0xf 0xf6000000 0x2000000>;
+ };
+
+ lportals: lac-portals@ff8000000 {
+ ranges = <0x0 0xf 0xf8000000 0x20000>;
+ };
+
+ soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+ spi@110000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "sst,sst25wf040";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+ };
+ };
+
+ i2c@118000 {
+ eeprom@52 {
+ compatible = "at24,24c256";
+ reg = <0x52>;
+ };
+ eeprom@54 {
+ compatible = "at24,24c256";
+ reg = <0x54>;
+ };
+ eeprom@56 {
+ compatible = "at24,24c256";
+ reg = <0x56>;
+ };
+ rtc@68 {
+ compatible = "dallas,ds1374";
+ reg = <0x68>;
+ interrupts = <0x1 0x1 0 0>;
+ };
+ monitor@2f {
+ compatible = "w83793";
+ reg = <0x2f>;
+ };
+ };
+
+ fman0: fman@400000 {
+ enet0: ethernet@e0000 {
+ phy-handle = <&sgmiiphy21>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet1: ethernet@e2000 {
+ phy-handle = <&sgmiiphy22>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@e4000 {
+ phy-handle = <&sgmiiphy23>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet3: ethernet@e6000 {
+ phy-handle = <&sgmiiphy24>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet4: ethernet@e8000 {
+ status = "disabled";
+ };
+
+ enet5: ethernet@ea000 {
+ status = "disabled";
+ };
+
+ enet6: ethernet@f0000 { /* FM1@TSEC9/FM1@TGEC1 */
+ phy-handle = <&xfiphy1>;
+ phy-connection-type = "xgmii";
+ };
+
+ enet7: ethernet@f2000 { /* FM1@TSEC10/FM1@TGEC2 */
+ phy-handle = <&xfiphy2>;
+ phy-connection-type = "xgmii";
+ };
+
+ mdio@fc000 {
+ status = "disabled";
+ };
+
+ mdio@fd000 {
+ status = "disabled";
+ };
+
+ fman0_oh2 {
+ status = "disabled";
+ };
+ fman0_oh3 {
+ status = "disabled";
+ };
+ fman0_oh4 {
+ status = "disabled";
+ };
+ fman0_oh5 {
+ status = "disabled";
+ };
+ fman0_oh6 {
+ status = "disabled";
+ };
+ };
+
+ fman1: fman@500000 {
+ enet8: ethernet@e0000 {
+ phy-handle = <&sgmiiphy41>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet9: ethernet@e2000 {
+ phy-handle = <&sgmiiphy42>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet10: ethernet@e4000 {
+ phy-handle = <&sgmiiphy43>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet11: ethernet@e6000 {
+ phy-handle = <&sgmiiphy44>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet12: ethernet@e8000 {
+ status = "disabled";
+ };
+
+ enet13: ethernet@ea000 {
+ status = "disabled";
+ };
+
+ enet14: ethernet@f0000 { /* FM2@TSEC9/FM2@TGEC1 */
+ phy-handle = <&xfiphy3>;
+ phy-connection-type = "xgmii";
+ };
+
+ enet15: ethernet@f2000 { /* FM2@TSEC10/FM2@TGEC2 */
+ phy-handle = <&xfiphy4>;
+ phy-connection-type = "xgmii";
+ };
+
+ mdio0: mdio@fc000 {
+ sgmiiphy21: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+
+ sgmiiphy22: ethernet-phy@1 {
+ reg = <0x1>;
+ };
+
+ sgmiiphy23: ethernet-phy@2 {
+ reg = <0x2>;
+ };
+
+ sgmiiphy24: ethernet-phy@3 {
+ reg = <0x3>;
+ };
+
+ sgmiiphy41: ethernet-phy@4 {
+ reg = <0x4>;
+ };
+
+ sgmiiphy42: ethernet-phy@5 {
+ reg = <0x5>;
+ };
+
+ sgmiiphy43: ethernet-phy@6 {
+ reg = <0x6>;
+ };
+
+ sgmiiphy44: ethernet-phy@7 {
+ reg = <0x7>;
+ };
+ };
+
+ xmdio0: mdio@fd000 {
+ xfiphy1: ethernet-phy@10 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0x10>;
+ };
+
+ xfiphy2: ethernet-phy@11 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0x11>;
+ };
+
+ xfiphy3: ethernet-phy@13 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0x13>;
+ };
+
+ xfiphy4: ethernet-phy@12 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0x12>;
+ };
+ };
+
+ fman1_oh3 {
+ status = "disabled";
+ };
+ fman1_oh4 {
+ status = "disabled";
+ };
+ fman1_oh5 {
+ status = "disabled";
+ };
+ fman1_oh6 {
+ status = "disabled";
+ };
+ };
+
+ sdhc@114000 {
+ voltage-ranges = <1800 1800 3300 3300>;
+ };
+ };
+
+ pci0: pcie@ffe240000 {
+ reg = <0xf 0xfe240000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci1: pcie@ffe250000 {
+ reg = <0xf 0xfe250000 0 0x10000>;
+ ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci2: pcie@ffe260000 {
+ reg = <0xf 0xfe260000 0 0x1000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci3: pcie@ffe270000 {
+ reg = <0xf 0xfe270000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+ rio: rapidio@ffe0c0000 {
+ reg = <0xf 0xfe0c0000 0 0x11000>;
+
+ port1 {
+ ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+ };
+ port2 {
+ ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+ };
+ };
+
+ fsl,dpaa {
+ compatible = "fsl,t4240-dpaa", "fsl,dpaa";
+ ethernet@0 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet0>;
+ };
+ ethernet@1 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet1>;
+ };
+ ethernet@2 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet2>;
+ };
+ ethernet@3 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet3>;
+ };
+ ethernet@4 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet4>;
+ status = "disabled";
+ };
+ ethernet@5 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet5>;
+ status = "disabled";
+ };
+ ethernet@6 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet6>;
+ };
+ ethernet@7 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet7>;
+ };
+ ethernet@8 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet8>;
+ };
+ ethernet@9 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet9>;
+ };
+ ethernet@10 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet10>;
+ };
+ ethernet@11 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet11>;
+ };
+ ethernet@12 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet12>;
+ status = "disabled";
+ };
+ ethernet@13 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet13>;
+ status = "disabled";
+ };
+ ethernet@14 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet14>;
+ };
+ ethernet@15 {
+ compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet";
+ fsl,fman-mac = <&enet15>;
+ };
+ };
+};
+
+/include/ "fsl/t4240si-post.dtsi"
+/include/ "fsl/qoriq-dpaa-res3.dtsi"
+/include/ "fsl/qoriq-qman-ceetm0.dtsi"
+/include/ "fsl/qoriq-qman-ceetm1.dtsi"
diff --git a/arch/powerpc/configs/85xx/e6500rev2_defconfig b/arch/powerpc/configs/85xx/e6500rev2_defconfig
index 9bdce3e..f7a855b 100644
--- a/arch/powerpc/configs/85xx/e6500rev2_defconfig
+++ b/arch/powerpc/configs/85xx/e6500rev2_defconfig
@@ -106,6 +106,7 @@ CONFIG_FMAN_T4240=y
CONFIG_FSL_DPAA_ETH=y
CONFIG_E1000E=y
CONFIG_FSL_10GBASE_KR=y
+CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -125,7 +126,8 @@ CONFIG_SPI=y
CONFIG_SPI_GPIO=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
-# CONFIG_HWMON is not set
+CONFIG_HWMON=y
+CONFIG_SENSORS_W83793=y
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
@@ -139,6 +141,7 @@ CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_MPC85XX=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1374=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_DMADEVICES=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 7c8904d..7e4a6dd 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -264,6 +264,7 @@ CONFIG_LZO_DECOMPRESS=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 0396c23..7eb8788 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -276,6 +276,7 @@ CONFIG_LZO_DECOMPRESS=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index a18b4ab..aace905 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,11 +35,7 @@ extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
-
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
-#endif
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 5b2490a..815b540 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
#endif
}
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
{
/*
* We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
*/
mtmsr(mfmsr() & ~MSR_DE);
- mtspr(SPRN_IAC1, thread->debug.iac1);
- mtspr(SPRN_IAC2, thread->debug.iac2);
+ mtspr(SPRN_IAC1, debug->iac1);
+ mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- mtspr(SPRN_IAC3, thread->debug.iac3);
- mtspr(SPRN_IAC4, thread->debug.iac4);
+ mtspr(SPRN_IAC3, debug->iac3);
+ mtspr(SPRN_IAC4, debug->iac4);
#endif
- mtspr(SPRN_DAC1, thread->debug.dac1);
- mtspr(SPRN_DAC2, thread->debug.dac2);
+ mtspr(SPRN_DAC1, debug->dac1);
+ mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- mtspr(SPRN_DVC1, thread->debug.dvc1);
- mtspr(SPRN_DVC2, thread->debug.dvc2);
+ mtspr(SPRN_DVC1, debug->dvc1);
+ mtspr(SPRN_DVC2, debug->dvc2);
#endif
- mtspr(SPRN_DBCR0, thread->debug.dbcr0);
- mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+ mtspr(SPRN_DBCR0, debug->dbcr0);
+ mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
- mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+ mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
* debug registers, set the debug registers from the values
* stored in the new thread.
*/
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
{
if ((current->thread.debug.dbcr0 & DBCR0_IDM)
- || (new_thread->debug.dbcr0 & DBCR0_IDM))
- prime_debug_regs(new_thread);
+ || (new_debug->dbcr0 & DBCR0_IDM))
+ prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -682,7 +682,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- switch_booke_debug_regs(&new->thread);
+ switch_booke_debug_regs(&new->thread.debug);
#else
/*
* For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index d95438e..12c6706 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -679,7 +679,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret, s;
- struct thread_struct thread;
+ struct debug_reg debug;
#ifdef CONFIG_PPC_FPU
struct thread_fp_state fp;
int fpexc_mode;
@@ -745,9 +745,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#endif
/* Switch to guest debug context */
- thread.debug = vcpu->arch.shadow_dbg_reg;
- switch_booke_debug_regs(&thread);
- thread.debug = current->thread.debug;
+ debug = vcpu->arch.shadow_dbg_reg;
+ switch_booke_debug_regs(&debug);
+ debug = current->thread.debug;
current->thread.debug = vcpu->arch.shadow_dbg_reg;
kvmppc_fix_ee_before_entry();
@@ -758,8 +758,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
We also get here with interrupts enabled. */
/* Switch back to user space debug context */
- switch_booke_debug_regs(&thread);
- current->thread.debug = thread.debug;
+ switch_booke_debug_regs(&debug);
+ current->thread.debug = debug;
#ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index ae08633..ae9fdb51 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -284,7 +284,7 @@ config CORENET_GENERIC
For 32bit kernel, the following boards are supported:
P2041 RDB, P3041 DS and P4080 DS
For 64bit kernel, the following boards are supported:
- T208x QDS and RDB, T4240 QDS and B4 QDS
+ T208x QDS and RDB, T4240 QDS, T4240 RDB and B4 QDS
The following boards are supported for both 32bit and 64bit kernel:
P5020 DS, P5040 DS, T104xQDS, T104xRDB
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 7026526..45a61bb 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -42,7 +42,9 @@ void __init corenet_gen_pic_init(void)
unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU |
MPIC_NO_RESET;
+#ifdef CONFIG_QUICC_ENGINE
struct device_node *np;
+#endif
if (ppc_md.get_irq == mpic_get_coreint_irq)
flags |= MPIC_ENABLE_COREINT;
@@ -52,12 +54,14 @@ void __init corenet_gen_pic_init(void)
mpic_init(mpic);
+#ifdef CONFIG_QUICC_ENGINE
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (np) {
qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
qe_ic_cascade_high_mpic);
of_node_put(np);
}
+#endif
}
/*
@@ -108,9 +112,11 @@ static const struct of_device_id of_device_ids[] = {
{
.compatible = "fsl,qoriq-pcie-v3.0",
},
+#ifdef CONFIG_QUICC_ENGINE
{
.compatible = "fsl,qe",
},
+#endif
/* The following two are for the Freescale hypervisor */
{
.name = "hypervisor",
@@ -141,6 +147,7 @@ static const char * const boards[] __initconst = {
"fsl,T2081QDS",
"fsl,T2080RDB",
"fsl,T4240QDS",
+ "fsl,T4240RDB",
"fsl,B4860QDS",
"fsl,B4420QDS",
"fsl,B4220QDS",
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index bf9c6d4..be181b6 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -280,7 +280,7 @@ config TAU_AVERAGE
config QUICC_ENGINE
bool "Freescale QUICC Engine (QE) Support"
- depends on FSL_SOC && PPC32
+ depends on FSL_SOC && (PPC32 || PPC64)
select PPC_LIB_RHEAP
select CRC32
help
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index d7863d9..1d2db39 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -153,6 +153,5 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
config CRYPTO_DEV_FSL_CAAM_JR_UIO
tristate "Freescale Job Ring UIO support"
- depends on CRYPTO_DEV_FSL_CAAM
- depends on UIO
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR && UIO
default y
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b150668..7ef96f1 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -411,6 +411,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_bh(&chan->desc_lock);
+#ifdef CONFIG_PM
+ if (unlikely(chan->pm_state != RUNNING)) {
+ chan_dbg(chan, "cannot submit due to suspend\n");
+ spin_unlock_bh(&chan->desc_lock);
+ return -1;
+ }
+#endif
+
/*
* assign cookies to all of the software descriptors
* that make up this transaction
@@ -1334,6 +1342,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
INIT_LIST_HEAD(&chan->ld_running);
INIT_LIST_HEAD(&chan->ld_completed);
chan->idle = true;
+#ifdef CONFIG_PM
+ chan->pm_state = RUNNING;
+#endif
chan->common.device = &fdev->common;
dma_cookie_init(&chan->common);
@@ -1473,6 +1484,92 @@ static int fsldma_of_remove(struct platform_device *op)
return 0;
}
+#ifdef CONFIG_PM
+static int fsldma_prepare(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_chan *chan;
+ int i;
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ spin_lock_bh(&chan->desc_lock);
+ chan->pm_state = SUSPENDING;
+ if (!list_empty(&chan->ld_pending))
+ fsl_chan_xfer_ld_queue(chan);
+ spin_unlock_bh(&chan->desc_lock);
+ }
+
+ return 0;
+}
+
+static int fsldma_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_chan *chan;
+ int i;
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ spin_lock_bh(&chan->desc_lock);
+ if (!chan->idle)
+ goto out;
+ chan->regs_save.mr = DMA_IN(chan, &chan->regs->mr, 32);
+ chan->pm_state = SUSPENDED;
+ spin_unlock_bh(&chan->desc_lock);
+ }
+ return 0;
+
+out:
+ for (; i >= 0; i--) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+ chan->pm_state = RUNNING;
+ spin_unlock_bh(&chan->desc_lock);
+ }
+ return -EBUSY;
+}
+
+static int fsldma_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_chan *chan;
+ u32 mode;
+ int i;
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ spin_lock_bh(&chan->desc_lock);
+ mode = chan->regs_save.mr
+ & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
+ chan->pm_state = RUNNING;
+ spin_unlock_bh(&chan->desc_lock);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops fsldma_pm_ops = {
+ .prepare = fsldma_prepare,
+ .suspend = fsldma_suspend,
+ .resume = fsldma_resume,
+};
+#endif
+
static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,elo3-dma", },
{ .compatible = "fsl,eloplus-dma", },
@@ -1485,6 +1582,9 @@ static struct platform_driver fsldma_of_driver = {
.name = "fsl-elo-dma",
.owner = THIS_MODULE,
.of_match_table = fsldma_of_ids,
+#ifdef CONFIG_PM
+ .pm = &fsldma_pm_ops,
+#endif
},
.probe = fsldma_of_probe,
.remove = fsldma_of_remove,
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ec19517..eecaf9e 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -134,6 +134,18 @@ struct fsldma_device {
#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
#define FSL_DMA_CHAN_START_EXT 0x00002000
+#ifdef CONFIG_PM
+struct fsldma_chan_regs_save {
+ u32 mr;
+};
+
+enum fsldma_pm_state {
+ RUNNING = 0,
+ SUSPENDING,
+ SUSPENDED,
+};
+#endif
+
struct fsldma_chan {
char name[8]; /* Channel name */
struct fsldma_chan_regs __iomem *regs;
@@ -161,6 +173,10 @@ struct fsldma_chan {
struct tasklet_struct tasklet;
u32 feature;
bool idle; /* DMA controller is idle */
+#ifdef CONFIG_PM
+ struct fsldma_chan_regs_save regs_save;
+ enum fsldma_pm_state pm_state;
+#endif
void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 70130e1..b33561e 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -34,6 +34,7 @@
#include <asm/fsl_guts.h>
#include <asm/fsl_kibo.h>
#include <asm/mpc85xx.h>
+#include <linux/syscore_ops.h>
#include "fsl_pamu.h"
@@ -45,10 +46,13 @@
#define make64(high, low) (((u64)(high) << 32) | (low))
-struct pamu_isr_data {
+struct pamu_info {
void __iomem *pamu_reg_base; /* Base address of PAMU regs*/
unsigned int count; /* The number of PAMUs */
-};
+} pamu_info_data;
+
+/* Pointer to the device configuration space */
+static struct ccsr_guts __iomem *guts_regs;
static struct paace *ppaact;
static struct paace *spaact;
@@ -124,6 +128,34 @@ static struct paace *pamu_get_ppaace(int liodn)
}
/**
+ * set_dcfg_liodn() - set the device LIODN in DCFG
+ * @np: device tree node pointer
+ * @liodn: liodn value to program
+ *
+ * Returns 0 upon success else error code < 0 returned
+ */
+static int set_dcfg_liodn(struct device_node *np, int liodn)
+{
+ const __be32 *prop;
+ u32 liodn_reg_offset;
+ int len;
+ void __iomem *dcfg_region = (void *)guts_regs;
+
+ if (!dcfg_region)
+ return -ENODEV;
+
+ prop = of_get_property(np, "fsl,liodn-reg", &len);
+ if (!prop || len != 8)
+ return -EINVAL;
+
+ liodn_reg_offset = be32_to_cpup(&prop[1]);
+
+ out_be32((u32 *)(dcfg_region + liodn_reg_offset), liodn);
+
+ return 0;
+}
+
+/**
* pamu_enable_liodn() - Set valid bit of PACCE
* @liodn: liodn PAACT index for desired PAACE
*
@@ -848,7 +880,7 @@ static void __init setup_omt(struct ome *omt)
* Get the maximum number of PAACT table entries
* and subwindows supported by PAMU
*/
-static void get_pamu_cap_values(unsigned long pamu_reg_base)
+static void get_pamu_cap_values(void *pamu_reg_base)
{
u32 pc_val;
@@ -858,9 +890,8 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base)
}
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
- phys_addr_t omt_phys)
+int setup_one_pamu(void *pamu_reg_base, phys_addr_t ppaact_phys,
+ phys_addr_t spaact_phys, phys_addr_t omt_phys)
{
u32 *pc;
struct pamu_mmap_regs *pamu_regs;
@@ -984,7 +1015,7 @@ static void __init setup_liodns(void)
irqreturn_t pamu_av_isr(int irq, void *arg)
{
- struct pamu_isr_data *data = arg;
+ struct pamu_info *data = arg;
phys_addr_t phys;
unsigned int i, j, ret;
@@ -1229,11 +1260,9 @@ static const struct {
static int __init fsl_pamu_probe(struct platform_device *pdev)
{
void __iomem *pamu_regs = NULL;
- struct ccsr_guts __iomem *guts_regs = NULL;
u32 pamubypenr, pamu_counter;
+ void __iomem *pamu_reg_base;
unsigned long pamu_reg_off;
- unsigned long pamu_reg_base;
- struct pamu_isr_data *data = NULL;
struct device_node *guts_node;
u64 size;
struct page *p;
@@ -1259,23 +1288,17 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
}
of_get_address(pdev->dev.of_node, 0, &size, NULL);
+ pamu_info_data.pamu_reg_base = pamu_regs;
+ pamu_info_data.count = size / PAMU_OFFSET;
+
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq == NO_IRQ) {
dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
goto error;
}
- data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
- if (!data) {
- dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
- ret = -ENOMEM;
- goto error;
- }
- data->pamu_reg_base = pamu_regs;
- data->count = size / PAMU_OFFSET;
-
/* The ISR needs access to the regs, so we won't iounmap them */
- ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
+ ret = request_irq(irq, pamu_av_isr, 0, "pamu", &pamu_info_data);
if (ret < 0) {
dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
ret, irq);
@@ -1299,7 +1322,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
}
/* read in the PAMU capability registers */
- get_pamu_cap_values((unsigned long)pamu_regs);
+ get_pamu_cap_values(pamu_regs);
/*
* To simplify the allocation of a coherency domain, we allocate the
* PAACT and the OMT in the same memory buffer. Unfortunately, this
@@ -1378,9 +1401,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
- pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
- setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
- spaact_phys, omt_phys);
+ pamu_reg_base = pamu_regs + pamu_reg_off;
+ setup_one_pamu(pamu_reg_base, ppaact_phys, spaact_phys,
+ omt_phys);
/* Disable PAMU bypass for this PAMU */
pamubypenr &= ~pamu_counter;
}
@@ -1390,8 +1413,6 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* Enable all relevant PAMU(s) */
out_be32(&guts_regs->pamubypenr, pamubypenr);
- iounmap(guts_regs);
-
/* Enable DMA for the LIODNs in the device tree*/
setup_liodns();
@@ -1403,12 +1424,7 @@ error_genpool:
error:
if (irq != NO_IRQ)
- free_irq(irq, data);
-
- if (data) {
- memset(data, 0, sizeof(struct pamu_isr_data));
- kfree(data);
- }
+ free_irq(irq, &pamu_info_data);
if (pamu_regs)
iounmap(pamu_regs);
@@ -1442,6 +1458,77 @@ static struct platform_driver fsl_of_pamu_driver = {
.probe = fsl_pamu_probe,
};
+#ifdef CONFIG_SUSPEND
+static int iommu_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < pamu_info_data.count; i++) {
+ u32 val;
+ void __iomem *p;
+
+ p = pamu_info_data.pamu_reg_base + i * PAMU_OFFSET;
+ val = in_be32((u32 *)(p + PAMU_PICS));
+ /* Disable access violation interrupts */
+ out_be32((u32 *)(p + PAMU_PICS),
+ val & ~PAMU_ACCESS_VIOLATION_ENABLE);
+ }
+
+ return 0;
+}
+
+static void restore_dcfg_liodns(void)
+{
+ struct device_node *node;
+ const __be32 *prop;
+ int ret, liodn;
+
+ for_each_node_with_property(node, "fsl,liodn-reg") {
+ prop = of_get_property(node, "fsl,liodn", 0);
+ if (!prop)
+ continue;
+ liodn = be32_to_cpup(prop);
+ ret = set_dcfg_liodn(node, liodn);
+ if (ret)
+ pr_debug("LIODN restore failed for %s\n",
+ node->full_name);
+ }
+}
+
+static void iommu_resume(void)
+{
+ int i;
+ u32 pamubypenr, pamu_counter;
+
+ restore_dcfg_liodns();
+ pamubypenr = in_be32(&guts_regs->pamubypenr);
+ for (i = 0, pamu_counter = 0x80000000; i < pamu_info_data.count;
+ i++, pamu_counter >>= 1) {
+ void __iomem *p;
+
+ p = pamu_info_data.pamu_reg_base + i * PAMU_OFFSET;
+ setup_one_pamu(p, virt_to_phys(ppaact), virt_to_phys(spaact),
+ virt_to_phys(omt));
+ pamubypenr &= ~pamu_counter;
+ }
+ /* Enable all PAMUs */
+ out_be32(&guts_regs->pamubypenr, pamubypenr);
+}
+
+static struct syscore_ops iommu_syscore_ops = {
+ .resume = iommu_resume,
+ .suspend = iommu_suspend,
+};
+
+static void __init init_iommu_pm_ops(void)
+{
+ register_syscore_ops(&iommu_syscore_ops);
+}
+
+#else
+static inline void init_iommu_pm_ops(void) {}
+#endif /* CONFIG_SUSPEND */
+
static __init int fsl_pamu_init(void)
{
struct platform_device *pdev = NULL;
@@ -1499,6 +1586,8 @@ static __init int fsl_pamu_init(void)
goto error_device_add;
}
+ init_iommu_pm_ops();
+
return 0;
error_device_add:
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 2065720..116f2e4 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -876,7 +876,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
goto err;
}
- priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index eb150f1..8639a42 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -1103,7 +1103,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
IFC_NAND_EVTER_INTR_FTOERIR_EN |
IFC_NAND_EVTER_INTR_WPERIR_EN,
&ifc->ifc_nand.nand_evter_intr_en);
- priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1989c90..1fc20d1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -197,6 +197,31 @@ config BONDING
To compile this driver as a module, choose M here: the module
will be called bonding.
+config HW_DISTRIBUTION_WITH_OH
+ default n
+ bool "Bonding driver with h/w based Tx traffic distribution support"
+ depends on BONDING
+ depends on FSL_DPAA_OFFLINE_PORTS
+ ---help---
+ Say 'Y' if you wish to distribute Tx data traffics based on
+ the hardware of Freescale DPAA - Offline Port PCD in your
+ mode Link Aggregation (802.3ad mode)
+
+ In theory, this method can offload CPU when selecting the
+ transmit hash policy to use for slave device selection.
+ if this feature can integrate with CEETM Qos, they two can
+ provide a bundle Qos rather than individual links Qos.
+
+config HW_LAG_DEBUG
+ default n
+ bool "Show detailed debug information"
+ depends on HW_DISTRIBUTION_WITH_OH
+ ---help---
+ This provides a wrapper around pr_info which can show useful
+ debug information such as the function name and line number.
+ Say 'Y' if you wish to get more detailed debugging information
+ at run-time, otherwise please disable it.
+
config DUMMY
tristate "Dummy net driver support"
---help---
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 4c21bf6..4226f29 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -2,6 +2,13 @@
# Makefile for the Ethernet Bonding driver
#
+include $(srctree)/drivers/net/ethernet/freescale/fman/ncsw_config.mk
+ccflags-y += \
+ -I$(srctree)/drivers/net/ethernet/freescale/dpa \
+ -I$(srctree)/drivers/net/ethernet/freescale/fman/src/wrapper \
+ -I$(srctree)/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd \
+ -I$(srctree)/drivers/net/ethernet/freescale/fman/Peripherals/FM/inc
+
obj-$(CONFIG_BONDING) += bonding.o
bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
@@ -9,3 +16,7 @@ bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
proc-$(CONFIG_PROC_FS) += bond_procfs.o
bonding-objs += $(proc-y)
+hash_pcd_based_xmit_frames_distribution-$(CONFIG_HW_DISTRIBUTION_WITH_OH) += \
+ hw_distribution.o hw_oh_pcd.o
+
+bonding-objs += $(hash_pcd_based_xmit_frames_distribution-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b3c2252..51cb153 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2440,6 +2440,15 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ hw_lag_dbg("skb->protocol:0x%0x\n", skb->protocol);
+ /* exclude ARP/LACP */
+ if ((bond->slave_cnt == SLAVES_PER_BOND) &&
+ are_all_slaves_linkup(bond) &&
+ (bond->params.ohp) && (bond->params.ohp->oh_en == 1))
+ return enqueue_pkt_to_oh(bond, skb, NULL);
+#endif
+
slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
bond_for_each_slave(bond, slave) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index dd8057d..5cd2cf1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1643,6 +1643,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
/* enslave is successful */
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ fill_oh_pcd_fqs_with_slave_info(bond, new_slave);
+ apply_pcd(bond, NO_POLICY);
+#endif
return 0;
/* Undo stages on error */
@@ -1891,6 +1895,9 @@ static int __bond_release_one(struct net_device *bond_dev,
slave_dev->priv_flags &= ~IFF_BONDING;
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ del_oh_pcd_fqs_with_slave_info(bond, slave);
+#endif
kfree(slave);
return 0; /* deletion OK */
@@ -3256,6 +3263,10 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
stats->tx_window_errors += sstats->tx_window_errors;
}
+
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ add_statistics(bond, stats);
+#endif
read_unlock_bh(&bond->lock);
return stats;
@@ -3989,6 +4000,9 @@ static const struct device_type bond_type = {
static void bond_destructor(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ release_pcd_mem(bond);
+#endif
if (bond->wq)
destroy_workqueue(bond->wq);
free_netdev(bond_dev);
@@ -4548,7 +4562,10 @@ int bond_create(struct net *net, const char *name)
res = register_netdevice(bond_dev);
netif_carrier_off(bond_dev);
-
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ if (res == 0)
+ init_status(bond_dev);
+#endif
rtnl_unlock();
if (res < 0)
bond_destructor(bond_dev);
@@ -4620,6 +4637,13 @@ static int __init bonding_init(void)
}
register_netdevice_notifier(&bond_netdev_notifier);
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ if (get_oh_info())
+ pr_err("oh ports probe error, use software distribution\n");
+ else
+ pr_info("get offline ports information ok.\n");
+#endif
+
out:
return res;
err:
@@ -4638,6 +4662,10 @@ static void __exit bonding_exit(void)
rtnl_link_unregister(&bond_link_ops);
unregister_pernet_subsys(&bond_net_ops);
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ kfree(poh);
+ hw_lag_dbg("released offline port resources\n");
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
@@ -4646,8 +4674,13 @@ static void __exit bonding_exit(void)
WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}
-
-module_init(bonding_init);
+/**
+ * Use late init so that oh port initialization is ready;
+ * the oh port helps distribute outgoing traffic in hardware
+ * (FSL DPAA Offline port and PCD).
+ * Was: module_init(bonding_init);
+ */
+late_initcall(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b60f95b..9465ae9 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -391,6 +391,9 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
(int)strlen(buf) - 1, buf);
ret = -EINVAL;
} else {
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ apply_pcd(bond, new_value);
+#endif
bond->params.xmit_policy = new_value;
bond_set_mode_ops(bond, bond->params.mode);
pr_info("%s: setting xmit hash policy to %s (%d).\n",
@@ -950,6 +953,21 @@ static ssize_t bonding_store_min_links(struct device *d,
}
static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
bonding_show_min_links, bonding_store_min_links);
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+static DEVICE_ATTR(offline_port_xmit_statistics, S_IRUGO,
+ bonding_show_offline_port_xmit_statistics, NULL);
+
+static DEVICE_ATTR(offline_ports, S_IRUGO,
+ bonding_show_offline_ports, NULL);
+
+static DEVICE_ATTR(oh_needed_for_hw_distribution, S_IRUGO | S_IWUSR,
+ bonding_show_oh_needed_for_hw_distribution,
+ bonding_store_oh_needed_for_hw_distribution);
+
+static DEVICE_ATTR(oh_en, S_IRUGO | S_IWUSR,
+ bonding_show_oh_enable,
+ bonding_store_oh_enable);
+#endif
static ssize_t bonding_show_ad_select(struct device *d,
struct device_attribute *attr,
@@ -1775,6 +1793,12 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_resend_igmp.attr,
&dev_attr_min_links.attr,
&dev_attr_lp_interval.attr,
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ &dev_attr_offline_ports.attr,
+ &dev_attr_oh_needed_for_hw_distribution.attr,
+ &dev_attr_oh_en.attr,
+ &dev_attr_offline_port_xmit_statistics.attr,
+#endif
NULL,
};
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 03cf3fd..de21036 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -25,6 +25,9 @@
#include <linux/etherdevice.h>
#include "bond_3ad.h"
#include "bond_alb.h"
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+#include "hw_distribution.h"
+#endif
#define DRV_VERSION "3.7.1"
#define DRV_RELDATE "April 27, 2011"
@@ -177,6 +180,10 @@ struct bond_params {
int all_slaves_active;
int resend_igmp;
int lp_interval;
+#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH
+ struct oh_port_priv *ohp;
+ struct rtnl_link_stats64 oh_stats;
+#endif
};
struct bond_parm_tbl {
diff --git a/drivers/net/bonding/hw_distribution.c b/drivers/net/bonding/hw_distribution.c
new file mode 100644
index 0000000..1403659
--- /dev/null
+++ b/drivers/net/bonding/hw_distribution.c
@@ -0,0 +1,2255 @@
+/* Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/kthread.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include "hw_distribution.h"
+#include "mac.h"
+#define to_dev(obj) container_of(obj, struct device, kobj)
+#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
+#define master_to_bond(net_dev) ((struct bonding *)(netdev_priv(net_dev)))
+/**
+ * This includes L4 checksum errors, but also other errors that
+ * the Hard Parser can detect, such as invalid combinations of
+ * TCP control flags, or bad UDP lengths.
+ */
+#define FM_L4_PARSE_ERROR 0x10
+/* Check if the hardware parser has run */
+#define FM_L4_HXS_RUN 0xE0
+/**
+ * Check if the FMan Hardware Parser has run for L4 protocols.
+ * @parse_result_ptr must be of type (fm_prs_result_t *).
+ */
+#define fm_l4_hxs_has_run(parse_result_ptr) \
+ ((parse_result_ptr)->l4r & FM_L4_HXS_RUN)
+/**
+ * If the FMan Hardware Parser has run for L4 protocols, check
+ * error status.
+ * @parse_result_ptr must be of type (fm_prs_result_t *).
+ */
+#define fm_l4_hxs_error(parse_result_ptr) \
+ ((parse_result_ptr)->l4r & FM_L4_PARSE_ERROR)
+
+#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
+ { \
+ skbh = (struct sk_buff **)addr; \
+ *(skbh + (off)) = skb; \
+ }
+
+#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
+ { \
+ skbh = (struct sk_buff **)addr; \
+ skb = *(skbh + (off)); \
+ }
+
+static const struct of_device_id oh_port_match_table[] = {
+ {
+ .compatible = "fsl,dpa-oh"
+ },
+ {}
+};
+
+struct oh_port_priv *poh; /* Offline port information pointer */
+int available_num_of_oh_ports;
+/**
+ * Sysfs interfaces:
+ * Show the statistics information by offline port xmit.
+ */
+ssize_t bonding_show_offline_port_xmit_statistics(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ int res = 0;
+ struct bonding *bond = to_bond(d);
+
+ if (bond->params.mode != BOND_MODE_8023AD) {
+ pr_err("%s: This command only support 802.3ad mode.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (!bond->params.ohp) {
+ pr_err("error, have not bind an offline port\n");
+
+ return -EPERM;
+ }
+
+ if (!bond->params.ohp->oh_en) {
+ pr_err("error, binded offline port is not enabled.\n");
+
+ return -EPERM;
+ }
+
+ res += sprintf(buf + res, "offline port TX packets: %llu\n",
+ bond->params.oh_stats.tx_packets);
+ res += sprintf(buf + res, "offline port TX bytes: %llu\n",
+ bond->params.oh_stats.tx_bytes);
+ res += sprintf(buf + res, "offline port TX errors: %llu\n",
+ bond->params.oh_stats.tx_errors);
+ res += sprintf(buf + res, "offline port TX dropped: %llu\n",
+ bond->params.oh_stats.tx_dropped);
+
+ if (res)
+ buf[res-1] = '\n'; /* eat the leftover space */
+ return res;
+}
+/**
+ * Sysfs interfaces:
+ * Show all available offline ports that can be bound to a bond.
+ */
+ssize_t bonding_show_offline_ports(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ int i, res = 0;
+ struct bonding *bond = to_bond(d);
+
+ if (bond->params.mode != BOND_MODE_8023AD) {
+ pr_err("%s: This command only support 802.3ad mode.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ for (i = 0; i < available_num_of_oh_ports; i++) {
+ if (poh[i].oh_dev)
+ res += sprintf(buf + res, "%s\n", poh[i].friendname);
+ }
+ if (res)
+ buf[res-1] = '\n'; /* eat the leftover space */
+ return res;
+}
+/**
+ * Sysfs interfaces:
+ * Show the offline_port already attached to the current bond,
+ * which helps the bond do hardware-based slave selection.
+ */
+ssize_t bonding_show_oh_needed_for_hw_distribution(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ int res = 0;
+ struct bonding *bond = to_bond(d);
+
+ if (bond->params.mode != BOND_MODE_8023AD) {
+ pr_err("%s: This command only support 802.3ad mode.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (!bond->params.ohp) {
+ pr_err("error, have not bind an offline port\n");
+
+ return -EPERM;
+ }
+
+ res += sprintf(buf + res, "%s\n", bond->params.ohp->friendname);
+ if (res)
+ buf[res-1] = '\n'; /* eat the leftover space */
+
+ return res;
+}
+/**
+ * Sysfs interface:
+ * Add one Offline port into the current bond for utilizing PCD to
+ * do TX traffic distribution in hardware.
+ * This code first validates the input Offline port name, then
+ * stores the Offline port in the current bond->params.
+ */
+ssize_t bonding_store_oh_needed_for_hw_distribution(struct device *d,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ char command[OHFRIENDNAMSIZ + 1] = { 0, };
+ int ret = count, i, errno;
+ struct bonding *bond = to_bond(d);
+ struct oh_port_priv *tmp = poh;
+ bool find = false;
+
+ if (bond->params.mode != BOND_MODE_8023AD) {
+ pr_err("%s: This command only support 802.3ad mode.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+ if (bond->slave_cnt > 0) {
+ pr_err("%s: Detach slaves before change oh binding.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ /* OHFRIENDNAMSIZ = 10, there is 10 chars in a command. */
+ errno = sscanf(buffer, "%10s", command);
+ if ((strlen(command) <= 1) || (errno != 1))
+ goto err_no_cmd;
+
+ if ((bond->params.ohp) && (bond->params.ohp->friendname[0]) &&
+ strncasecmp(command, bond->params.ohp->friendname,
+ OHFRIENDNAMSIZ) == 0) {
+ pr_err("%s: has already used %s.\n",
+ bond->dev->name, command);
+ ret = -EPERM;
+ goto out;
+ } else
+ for (i = 0; i < available_num_of_oh_ports; i++) {
+ if (tmp->oh_dev) {
+ if (strncasecmp(command, tmp->friendname,
+ OHFRIENDNAMSIZ) == 0) {
+ find = true;
+ bond->params.ohp = tmp;
+ break;
+ } else
+ tmp++;
+ }
+ }
+
+ if (!find)
+ goto err_no_cmd;
+
+ pr_info("bind OH port oh_needed_for_hw_distribution: %s to %s\n",
+ bond->params.ohp->friendname, bond->dev->name);
+
+ goto out;
+
+err_no_cmd:
+ pr_err("%s:bad command or no such OH port,\n"
+ "please try other OH ports.\n"
+ "Eg: echo OH0 > oh_needed_for_hw_distribution.\n",
+ bond->dev->name);
+ ret = -EPERM;
+
+out:
+ rtnl_unlock();
+ return ret;
+}
+/**
+ * Sysfs interfaces:
+ * Show whether the offline port currently bound to the bond is active.
+ */
+ssize_t bonding_show_oh_enable(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+
+ int res = 0;
+ struct bonding *bond = to_bond(d);
+ uint16_t channel;
+ unsigned long fman_dcpid, oh_offset, cell_index;
+
+ if (bond->params.mode != BOND_MODE_8023AD) {
+ pr_err("%s: This command only support 802.3ad mode.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ if (!bond->params.ohp) {
+ pr_err("error, have not bind a offline port\n");
+
+ return -EPERM;
+ }
+
+ res += sprintf(buf + res, "%d\n", bond->params.ohp->oh_en);
+ if (res)
+ buf[res-1] = '\n'; /* eat the leftover space */
+
+ if ((bond->params.ohp->oh_en) &&
+ (!export_oh_port_info_to_ceetm(bond, &channel,
+ &fman_dcpid, &oh_offset, &cell_index)))
+ hw_lag_dbg("offline port channel:%d\n", channel);
+
+ return res;
+}
+/**
+ * Sysfs interfaces:
+ * Set whether the offline port bound to the bond is active;
+ * this interface can enable or disable the offline port bound
+ * to a bond at run-time.
+ */
+ssize_t bonding_store_oh_enable(struct device *d,
+ struct device_attribute *attr, const char *buffer,
+ size_t count)
+{
+ int new_value, ret;
+ struct bonding *bond = to_bond(d);
+
+ if (bond->params.mode != BOND_MODE_8023AD) {
+ pr_err("%s: This command only support 802.3ad mode.\n",
+ bond->dev->name);
+ return -EPERM;
+ }
+
+ ret = sscanf(buffer, "%d", &new_value);
+ pr_info("new_value:%d, ret: %d\n", new_value, ret);
+ if (ret != 1) {
+ pr_err("%s: Bad command, use echo [1|0] > oh_en.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+
+ if (!bond->params.ohp) {
+ pr_err("error, have not bind a offline port\n");
+ return -EPERM;
+ }
+
+ if ((new_value == 0) || (new_value == 1)) {
+ bond->params.ohp->oh_en = new_value;
+ return count;
+ } else {
+ pr_err("%s: Bad value, only is 1 or 0.\n",
+ bond->dev->name);
+ return -EINVAL;
+ }
+}
+
+/**
+ * Determine whether a slave net device is a dpa-eth NIC:
+ * return true if it is a dpa-eth NIC,
+ * otherwise return false.
+ */
+static bool is_dpa_eth_port(struct net_device *netdev)
+{
+ struct device *dev = (struct device *) &(netdev->dev);
+
+ if ((strlen(dev_driver_string(dev->parent)) >= 7) &&
+ strncmp(dev_driver_string(dev->parent), "fsl_dpa", 7) == 0)
+ return true;
+ else
+ return false;
+}
+
+bool are_all_slaves_linkup(struct bonding *bond)
+{
+ struct slave *slave;
+ struct list_head *iter;
+
+ read_lock(&bond->lock);
+ bond_for_each_slave(bond, slave, iter)
+ if (!(SLAVE_IS_OK(slave))) {
+ read_unlock(&bond->lock);
+ return false;
+ }
+
+ read_unlock(&bond->lock);
+ return true;
+}
+
+unsigned int to_which_oh_i_attached(struct oh_port_priv *current_poh)
+{
+ struct oh_port_priv *org = poh;
+ int i = 0;
+ while (current_poh - org) {
+ i++;
+ org++;
+ }
+
+ return i;
+}
+/* Borrowed from dpa_fd_release, removed netdev params. */
+static void __attribute__((nonnull))
+dpa_oh_fd_release(const struct qm_fd *fd)
+{
+ struct qm_sg_entry *sgt;
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb;
+
+ bmb.hi = fd->addr_hi;
+ bmb.lo = fd->addr_lo;
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ DPA_BUG_ON(!dpa_bp);
+
+ if (fd->format == qm_fd_sg) {
+ sgt = (phys_to_virt(bm_buf_addr(&bmb)) + dpa_fd_offset(fd));
+ dpa_release_sgt(sgt);
+ }
+
+ while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+ cpu_relax();
+}
+
+static void dpa_oh_drain_bp(struct dpa_bp *bp)
+{
+ int i, num;
+ struct bm_buffer bmb[8];
+ dma_addr_t addr;
+ int *countptr = __this_cpu_ptr(bp->percpu_count);
+ int count = *countptr;
+ struct sk_buff **skbh;
+
+ while (count >= 8) {
+ num = bman_acquire(bp->pool, bmb, 8, 0);
+ /* There may still be up to 7 buffers in the pool;
+ * just leave them there until more arrive
+ */
+ if (num < 0)
+ break;
+ for (i = 0; i < num; i++) {
+ addr = bm_buf_addr(&bmb[i]);
+ /* bp->free_buf_cb(phys_to_virt(addr)); */
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ dma_unmap_single(bp->dev, addr, bp->size,
+ DMA_TO_DEVICE);
+ dev_kfree_skb(*skbh);
+ }
+ count -= num;
+ }
+ *countptr = count;
+}
+static int dpa_oh_tx_bp_probe(struct device *dev,
+ struct fm_port *tx_port,
+ struct dpa_bp **draining_tx_bp,
+ struct dpa_buffer_layout_s **tx_buf_layout)
+{
+ struct fm_port_params params;
+ struct dpa_bp *bp = NULL;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+
+ buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
+ if (!buf_layout) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
+ buf_layout->parse_results = true;
+ buf_layout->hash_results = true;
+ buf_layout->time_stamp = false;
+
+ fm_port_get_buff_layout_ext_params(tx_port, &params);
+ buf_layout->manip_extra_space = params.manip_extra_space;
+ buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
+
+ bp = devm_kzalloc(dev, sizeof(*bp), GFP_KERNEL);
+ if (unlikely(bp == NULL)) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ bp->size = dpa_bp_size(buf_layout);
+ bp->percpu_count = alloc_percpu(*bp->percpu_count);
+ bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
+
+ *draining_tx_bp = bp;
+ *tx_buf_layout = buf_layout;
+
+ return 0;
+}
+static int dpa_oh_bp_create(struct oh_port_priv *ohp)
+{
+ int err = 0;
+ struct dpa_bp *draining_tx_bp;
+ struct dpa_buffer_layout_s *tx_buf_layout;
+
+ err = dpa_oh_tx_bp_probe(ohp->dpa_oh_dev, ohp->oh_config->oh_port,
+ &draining_tx_bp, &tx_buf_layout);
+ if (err) {
+ pr_err("errors on dpa_oh_tx_bp_probe()\n");
+ return err;
+ }
+
+ ohp->tx_bp = draining_tx_bp;
+ ohp->tx_buf_layout = tx_buf_layout;
+
+ err = dpa_bp_alloc(ohp->tx_bp);
+ if (err < 0) {
+ /* _dpa_bp_free(ohp->tx_bp); */
+ pr_err("error on dpa_bp_alloc()\n");
+ ohp->tx_bp = NULL;
+ return err;
+ }
+ hw_lag_dbg("created bp, bpid(ohp->tx_bp):%d\n", ohp->tx_bp->bpid);
+
+ return 0;
+}
+/**
+ * Copied from DPA-Eth driver (since they have different params type):
+ * Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * either contiguous frames or scatter/gather ones.
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+ * against cases when not all fd relevant fields were filled in.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ */
+struct sk_buff *oh_cleanup_tx_fd(const struct qm_fd *fd)
+{
+ int i, nr_frags;
+ const struct qm_sg_entry *sgt;
+ struct sk_buff **skbh;
+ struct sk_buff *skb = NULL;
+ dma_addr_t addr = qm_fd_addr(fd);
+ struct dpa_bp *dpa_bp = dpa_bpid2pool(fd->bpid);
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+
+ DPA_BUG_ON(fd->cmd & FM_FD_CMD_FCO);
+ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, dma_dir);
+
+ /* retrieve skb back pointer */
+ DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (fd->format == qm_fd_sg) {
+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
+ * it's from lowmem.
+ */
+ sgt = phys_to_virt(addr + dpa_fd_offset(fd));
+
+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
+ dma_unmap_single(dpa_bp->dev, sgt[0].addr,
+ sgt[0].length, dma_dir);
+
+ /* remaining pages were mapped with dma_map_page() */
+ for (i = 1; i < nr_frags; i++) {
+ DPA_BUG_ON(sgt[i].extension);
+
+ dma_unmap_page(dpa_bp->dev, sgt[i].addr,
+ sgt[i].length, dma_dir);
+ }
+
+ /* Free the page frag that we allocated on Tx */
+ put_page(virt_to_head_page(sgt));
+ }
+
+ return skb;
+}
+
+/* Dump the FMan parser-result area of a frame descriptor.
+ *
+ * The parse results live in the frame buffer at DPA_TX_PRIV_DATA_SIZE
+ * past the buffer start.  Compiles to an empty function unless
+ * CONFIG_HW_LAG_DEBUG is set.
+ */
+static void dump_parser_result(const struct qm_fd *fd)
+{
+#ifdef CONFIG_HW_LAG_DEBUG
+	dma_addr_t addr = qm_fd_addr(fd);
+	void *vaddr;
+	const fm_prs_result_t *parse_results;
+
+	/* Buffer is kernel lowmem; recover its virtual address. */
+	vaddr = phys_to_virt(addr);
+	DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+	parse_results = (const fm_prs_result_t *)(vaddr +
+				DPA_TX_PRIV_DATA_SIZE);
+
+	/* L2 parse result word */
+	hw_lag_dbg("parse_results->l2r:0x%08x\n", parse_results->l2r);
+
+	/* L3 parse result word, masked for IPv4/IPv6 detection */
+	hw_lag_dbg("FM_L3_PARSE_RESULT_IPV4:0x%0x\n"
+		"FM_L3_PARSE_RESULT_IPV6:0x%0x\n"
+		"parse_results->l3r:0x%08x\n",
+		parse_results->l3r & FM_L3_PARSE_RESULT_IPV4,
+		parse_results->l3r & FM_L3_PARSE_RESULT_IPV6,
+		parse_results->l3r);
+
+	/* L4 header-examination status */
+	hw_lag_dbg("fm_l4_hxs_has_run(parse_results):0x%0x\n"
+		"fm_l4_hxs_error(parse_results):0x%0x\n",
+		fm_l4_hxs_has_run(parse_results),
+		fm_l4_hxs_error(parse_results));
+
+	/* L4 checksum-valid bit from the FD status plus the L4 result */
+	hw_lag_dbg("fd->status & FM_FD_STAT_L4CV:0x%x\n"
+		"parse_results->l4r:0x%08x\n"
+		"fm_l4_frame_is_tcp(parse_results):0x%0x\n",
+		fd->status & FM_FD_STAT_L4CV,
+		parse_results->l4r,
+		fm_l4_frame_is_tcp(parse_results));
+#endif
+}
+
+/* Dump L2/L3/L4 details of a frame for debugging.
+ *
+ * Pulls and resets header offsets on @skb while decoding, so callers
+ * must be finished with the skb's data layout; every current caller
+ * frees or abandons the skb afterwards.  Compiles to an empty function
+ * unless CONFIG_HW_LAG_DEBUG is set.
+ */
+static void show_dbg_info(const struct qm_fd *fd, const char *func_name,
+		struct sk_buff *skb)
+{
+#ifdef CONFIG_HW_LAG_DEBUG
+	u32 pad, fd_status;
+	dma_addr_t addr;
+	struct ethhdr *eth;
+	struct iphdr *iph;
+	struct tcphdr *tcph;
+	struct udphdr *udph;
+	unsigned int data_start;
+	unsigned long skb_addr;
+
+	fd_status = fd->status;
+	addr = qm_fd_addr(fd);
+
+	/* find out the pad */
+	skb_addr = virt_to_phys(skb->head);
+	pad = addr - skb_addr;
+
+	/* The skb is currently pointed at head + headroom. The packet
+	 * starts at skb->head + pad + fd offset.
+	 */
+	data_start = pad + dpa_fd_offset(fd) - skb_headroom(skb);
+
+	skb_pull(skb, data_start);
+
+	pr_info("[%s]:fd->status:0x%08x\n", func_name, fd_status);
+	pr_info("[%s]:fd tx status:0x%08x. fd rx status:0x%08x\n",
+		func_name,
+		fd_status & FM_FD_STAT_TX_ERRORS,
+		fd_status & FM_FD_STAT_RX_ERRORS);
+
+	/* error bits are the exception, not the rule: use unlikely() */
+	if (unlikely(fd_status & FM_FD_STAT_ERR_PHYSICAL))
+		pr_err("FM_FD_STAT_ERR_PHYSICAL\n");
+	if (fd_status & FM_PORT_FRM_ERR_DMA)
+		pr_err("FM_PORT_FRM_ERR_DMA\n");
+	if (fd_status & FM_PORT_FRM_ERR_PHYSICAL)
+		pr_err("FM_PORT_FRM_ERR_PHYSICAL\n");
+	if (fd_status & FM_PORT_FRM_ERR_SIZE)
+		pr_err("FM_PORT_FRM_ERR_SIZE\n");
+	if (fd_status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
+		pr_err("oh_pcd_confq FM_PORT_FRM_ERR_PRS_HDR_ERR\n");
+
+	pr_info("[%s]:fd->format - qm_fd_contig:%d\n", func_name,
+		fd->format - qm_fd_contig);
+	pr_info("[%s]:fd->bpid:%d\n", func_name, fd->bpid);
+
+	/* get L2 info */
+	skb->protocol = htons(ETH_P_802_3);
+	skb_reset_mac_header(skb);
+	skb_pull_inline(skb, ETH_HLEN);
+
+	eth = eth_hdr(skb);
+
+	pr_info("\n[%s]:dmac:%02x:%02x:%02x:%02x:%02x:%02x\n"
+		"smac:%02x:%02x:%02x:%02x:%02x:%02x\n"
+		"h_proto:0x%04x\n", func_name,
+		eth->h_dest[0], eth->h_dest[1], eth->h_dest[2],
+		eth->h_dest[3], eth->h_dest[4], eth->h_dest[5],
+		eth->h_source[0], eth->h_source[1], eth->h_source[2],
+		eth->h_source[3], eth->h_source[4], eth->h_source[5],
+		eth->h_proto);
+
+	if (fd_status & FM_FD_STAT_L4CV) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		pr_info("[%s]:skb->ip_summed = CHECKSUM_UNNECESSARY\n",
+			func_name);
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+		pr_info("[%s]:skb->ip_summed = CHECKSUM_NONE\n", func_name);
+	}
+
+	/* get L3 and part of L4 info */
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+	skb_reset_mac_len(skb);
+
+	/* h_proto is big-endian on the wire; compare in network order */
+	if (eth->h_proto == htons(ETH_P_IP)) {
+		iph = ip_hdr(skb);
+		pr_info("[%s]:L3_pro:0x%0x, dip:0x%0x, sip:0x%0x\n", func_name,
+			iph->protocol, iph->daddr, iph->saddr);
+
+		skb_pull_inline(skb, ip_hdrlen(skb));
+		skb_reset_transport_header(skb);
+
+		if (iph->protocol == IPPROTO_TCP) {
+			tcph = tcp_hdr(skb);
+			pr_info("[%s]:tcp csum:0x%0x\n",
+				func_name, tcph->check);
+		} else if (iph->protocol == IPPROTO_UDP) {
+			udph = udp_hdr(skb);
+			pr_info("[%s]:udp csum:0x%0x\n",
+				func_name, udph->check);
+		}
+
+	} else if (eth->h_proto == htons(ETH_P_ARP)) {
+		pr_info("[%s]:ARP.\n", func_name);
+	} else if (eth->h_proto == htons(ETH_P_IPV6)) {
+		pr_info("[%s]:IPv6.\n", func_name);
+	} else if (eth->h_proto == htons(ETH_P_SLOW)) {
+		pr_info("[%s]:802.3ad pkt.\n", func_name);
+	} else {
+		pr_info("[%s]:other pkt.\n", func_name);
+	}
+
+	return;
+#endif
+}
+/**
+ * When a frame enqueued from a kernel module to the offline port
+ * runs into an error, this callback is entered.
+ */
+static enum qman_cb_dqrr_result
+oh_ingress_tx_error_dqrr(struct qman_portal *portal,
+		struct qman_fq *fq,
+		const struct qm_dqrr_entry *dq)
+{
+	const struct qm_fd *fd = &dq->fd;
+	struct sk_buff *skb = oh_cleanup_tx_fd(fd);
+
+	/* Dump diagnostics (no-ops unless CONFIG_HW_LAG_DEBUG) and
+	 * drop the errored frame.
+	 */
+	dump_parser_result(fd);
+	show_dbg_info(fd, __func__, skb);
+	dev_kfree_skb_any(skb);
+
+	return qman_cb_dqrr_consume;
+}
+/**
+ * Default Rx callback for the offline port's Tx default FQ (copied from
+ * oNIC).  Not part of the normal datapath: it should only run while
+ * debugging traffic sent to the oh Tx port with no PCD applied to the
+ * oh port (e.g. debugging oh port Tx L4 checksum).
+ */
+static enum qman_cb_dqrr_result __hot
+oh_ingress_tx_default_dqrr(struct qman_portal *portal,
+		struct qman_fq *fq,
+		const struct qm_dqrr_entry *dq)
+{
+	struct net_device *netdev;
+	struct dpa_priv_s *priv;
+	struct dpa_bp *bp;
+	struct dpa_percpu_priv_s *percpu_priv;
+	struct sk_buff **skbh;
+	struct sk_buff *skb;
+	struct iphdr *iph;
+	const struct qm_fd *fd = &dq->fd;
+	u32 fd_status = fd->status;
+	u32 pad;
+	dma_addr_t addr = qm_fd_addr(fd);
+	unsigned int data_start;
+	unsigned long skb_addr;
+	int *countptr;
+	struct ethhdr *eth;
+
+	hw_lag_dbg("fd->status:0x%08x\n", fd_status);
+
+	hw_lag_dbg("fd tx status:0x%08x. fd rx status:0x%08x\n",
+		fd_status & FM_FD_STAT_TX_ERRORS,
+		fd_status & FM_FD_STAT_RX_ERRORS);
+
+	/* NOTE(review): likely() on an error bit looks inverted —
+	 * probably meant unlikely(); same pattern as show_dbg_info().
+	 */
+	if (likely(fd_status & FM_FD_STAT_ERR_PHYSICAL))
+		pr_err("FM_FD_STAT_ERR_PHYSICAL\n");
+
+	if (fd_status & FM_PORT_FRM_ERR_DMA)
+		pr_err("FM_PORT_FRM_ERR_DMA\n");
+	if (fd_status & FM_PORT_FRM_ERR_PHYSICAL)
+		pr_err("FM_PORT_FRM_ERR_PHYSICAL\n");
+	if (fd_status & FM_PORT_FRM_ERR_SIZE)
+		pr_err("FM_PORT_FRM_ERR_SIZE\n");
+	if (fd_status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
+		pr_err("oh_tx_defq FM_PORT_FRM_ERR_PRS_HDR_ERR\n");
+
+	/* Without a net_device there is nothing to account against;
+	 * recover the skb back-pointer and free the frame.
+	 */
+	netdev = ((struct dpa_fq *)fq)->net_dev;
+	if (!netdev) {
+		pr_err("error netdev == NULL.\n");
+		skbh = (struct sk_buff **)phys_to_virt(addr);
+		dev_kfree_skb(*skbh);
+		return qman_cb_dqrr_consume;
+	}
+	priv = netdev_priv(netdev);
+	dump_parser_result(fd);
+
+	percpu_priv = __this_cpu_ptr(priv->percpu_priv);
+	countptr = __this_cpu_ptr(priv->dpa_bp->percpu_count);
+
+	skbh = (struct sk_buff **)phys_to_virt(addr);
+	/* according to the last common code (bp refill) the skb pointer is set
+	 * to another address shifted by sizeof(struct sk_buff) to the left
+	 */
+	/* NOTE(review): this *(skbh - 1) convention differs from the
+	 * DPA_READ_SKB_PTR(..., 0) used in oh_cleanup_tx_fd(); verify
+	 * both match the buffer-layout convention actually in use.
+	 */
+	skb = *(skbh - 1);
+
+	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
+		hw_lag_dbg("FD status = 0x%08x\n",
+			fd_status & FM_FD_STAT_RX_ERRORS);
+
+		percpu_priv->stats.rx_errors++;
+		oh_cleanup_tx_fd(fd);
+		goto qman_consume;
+	}
+	if (unlikely(fd->format != qm_fd_contig)) {
+		percpu_priv->stats.rx_dropped++;
+		hw_lag_dbg("Dropping a SG frame\n");
+		oh_cleanup_tx_fd(fd);
+		goto qman_consume;
+	}
+
+	hw_lag_dbg("fd->bpid:%d\n", fd->bpid);
+	bp = dpa_bpid2pool(fd->bpid);
+	dma_unmap_single(bp->dev, addr, bp->size, DMA_TO_DEVICE);
+
+	/* find out the pad */
+	skb_addr = virt_to_phys(skb->head);
+	pad = addr - skb_addr;
+
+	/* buffer leaves the pool: decrement the per-cpu refill count */
+	countptr = __this_cpu_ptr(bp->percpu_count);
+	(*countptr)--;
+
+	/* The skb is currently pointed at head + headroom. The packet
+	 * starts at skb->head + pad + fd offset.
+	 */
+	data_start = pad + dpa_fd_offset(fd) - skb_headroom(skb);
+	skb_pull(skb, data_start);
+
+	/* get L2 info */
+	skb->protocol = eth_type_trans(skb, netdev);
+	eth = eth_hdr(skb);
+
+	hw_lag_dbg("dmac:%02x:%02x:%02x:%02x:%02x:%02x\n"
+		"smac:%02x:%02x:%02x:%02x:%02x:%02x\n"
+		"h_proto:0x%04x\n",
+		eth->h_dest[0], eth->h_dest[1], eth->h_dest[2],
+		eth->h_dest[3], eth->h_dest[4], eth->h_dest[5],
+		eth->h_source[0], eth->h_source[1], eth->h_source[2],
+		eth->h_source[3], eth->h_source[4], eth->h_source[5],
+		eth->h_proto);
+
+	if (unlikely(dpa_check_rx_mtu(skb, netdev->mtu))) {
+		percpu_priv->stats.rx_dropped++;
+		goto qman_consume;
+	}
+
+	/* hardware validated the L4 checksum if L4CV is set */
+	if (fd_status & FM_FD_STAT_L4CV) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		hw_lag_dbg("skb->ip_summed = CHECKSUM_UNNECESSARY\n");
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+		hw_lag_dbg("skb->ip_summed = CHECKSUM_NONE\n");
+	}
+
+	/* get L3 and part of L4 info */
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+	skb_reset_mac_len(skb);
+
+	/* NOTE(review): h_proto is big-endian on the wire; comparing
+	 * against host-order constants only works on big-endian CPUs
+	 * (this driver targets PowerPC) — htons() would be portable.
+	 */
+	if (eth->h_proto == ETH_P_IP) {
+		iph = ip_hdr(skb);
+		hw_lag_dbg("L3_pro:0x%0x, dip:0x%0x, sip:0x%0x\n",
+			iph->protocol, iph->daddr, iph->saddr);
+	} else if (eth->h_proto == ETH_P_ARP) {
+		hw_lag_dbg("ARP.\n");
+	} else if (eth->h_proto == ETH_P_IPV6) {
+		hw_lag_dbg("IPv6.\n");
+	} else if (eth->h_proto == ETH_P_SLOW) {
+		hw_lag_dbg("802.3ad pkt.\n");
+	} else {
+		hw_lag_dbg("other pkt.\n");
+	}
+
+qman_consume:
+	dev_kfree_skb_any(skb);
+
+	return qman_cb_dqrr_consume;
+}
+/**
+ * When a frame leaves the PCD FQs towards the final terminating
+ * physical ports (MAC ports) and an error occurs, this callback is
+ * entered. Dumps debugging information when HW_LAG_DEBUG is enabled.
+ */
+static enum qman_cb_dqrr_result
+oh_pcd_err_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+		const struct qm_dqrr_entry *dq)
+{
+	const struct qm_fd *fd = &dq->fd;
+	struct sk_buff *skb = oh_cleanup_tx_fd(fd);
+
+	/* Emit diagnostics (compiled out without CONFIG_HW_LAG_DEBUG),
+	 * then drop the errored frame.
+	 */
+	dump_parser_result(fd);
+	show_dbg_info(fd, __func__, skb);
+	dev_kfree_skb_any(skb);
+
+	return qman_cb_dqrr_consume;
+}
+/**
+ * Confirmation callback entered when a frame leaves the offline port's
+ * Tx FQs into the offline Tx port (MAC ports) and lands on the confirm
+ * FQ.  Dumps debugging information when HW_LAG_DEBUG is enabled.
+ * Does not free the skb, since the offline port is not the final
+ * consumer of the frame.
+ */
+static enum qman_cb_dqrr_result __hot
+oh_tx_conf_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+		const struct qm_dqrr_entry *dq)
+{
+	struct sk_buff *skb;
+	const struct qm_fd *fd = &dq->fd;
+
+	/* NOTE(review): the skb recovered by oh_cleanup_tx_fd() is
+	 * deliberately not freed here (see comment above); verify the
+	 * final consumer releases it so this path cannot leak.
+	 */
+	skb = oh_cleanup_tx_fd(fd);
+	dump_parser_result(fd);
+	show_dbg_info(fd, __func__, skb);
+
+	return qman_cb_dqrr_consume;
+}
+
+/* Common enqueue-rejection (ERN) handler for the LAG egress FQs.
+ * ERN statistics will be added in a future version.
+ */
+static void lag_public_egress_ern(struct qman_portal *portal,
+		struct qman_fq *fq, const struct qm_mr_entry *msg)
+{
+	const struct qm_fd *fd = &msg->ern.fd;
+	struct sk_buff *skb;
+
+	/* FCO-flagged frames are owned by the offline port: hand the
+	 * buffer straight back instead of recovering an skb.
+	 */
+	if (fd->cmd & FM_FD_CMD_FCO) {
+		dpa_oh_fd_release(fd);
+		return;
+	}
+
+	skb = oh_cleanup_tx_fd(fd);
+	dump_parser_result(fd);
+	show_dbg_info(fd, __func__, skb);
+	dev_kfree_skb_any(skb);
+}
+
+/**
+ * Confirmation callback for frames that left the oh PCD FQs and were
+ * consumed by the MAC ports.  Shows debugging information when
+ * HW_LAG_DEBUG is on, then releases the skb.
+ */
+static enum qman_cb_dqrr_result __hot
+oh_pcd_conf_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+		const struct qm_dqrr_entry *dq)
+{
+	const struct qm_fd *fd = &dq->fd;
+	struct sk_buff *skb = oh_cleanup_tx_fd(fd);
+
+	show_dbg_info(fd, __func__, skb);
+	dev_kfree_skb_any(skb);
+
+	return qman_cb_dqrr_consume;
+}
+
+/* FQ templates: copied into each dpa_fq before qman_create_fq() so the
+ * proper dequeue/ERN callback is attached to the new frame queue.
+ */
+static const struct qman_fq oh_tx_defq = {
+	.cb = { .dqrr = oh_ingress_tx_default_dqrr}
+};
+/* for OH ports Rx Error queues = Tx Error queues */
+static const struct qman_fq oh_tx_errq = {
+	.cb = { .dqrr = oh_ingress_tx_error_dqrr}
+};
+
+/* confirmation queue for frames leaving the oh PCD FQs */
+static const struct qman_fq oh_pcd_confq = {
+	.cb = { .dqrr = oh_pcd_conf_dqrr}
+};
+/* error queue for frames leaving the oh PCD FQs */
+static const struct qman_fq oh_pcd_errq = {
+	.cb = { .dqrr = oh_pcd_err_dqrr}
+};
+/* confirmation queue for the offline port's own Tx FQs */
+static const struct qman_fq oh_tx_confq = {
+	.cb = { .dqrr = oh_tx_conf_dqrr}
+};
+/* enqueue-rejection handlers for the LAG egress FQs */
+static const struct qman_fq oh_pcd_egress_ernq = {
+	.cb = { .ern = lag_public_egress_ern}
+};
+static const struct qman_fq oh_egress_ernq = {
+	.cb = { .ern = lag_public_egress_ern}
+};
+
+/* Add the given pool channel to the static dequeue mask of every
+ * CPU-affine QMan portal, so frames on that channel can be dequeued
+ * from any core.  Runs as a short-lived kthread; always returns 0.
+ */
+static int oh_add_channel(void *__arg)
+{
+	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u32)(unsigned long)__arg);
+	const cpumask_t *affine_cpus = qman_affine_cpus();
+	int cpu;
+
+	for_each_cpu(cpu, affine_cpus) {
+		struct qman_portal *portal =
+			(struct qman_portal *)qman_get_affine_portal(cpu);
+
+		qman_p_static_dequeue_add(portal, pool);
+	}
+
+	return 0;
+}
+
+static int init_oh_errq_defq(struct device *dev,
+ uint32_t fqid_err, uint32_t fqid_def,
+ struct dpa_fq **oh_errq, struct dpa_fq **oh_defq,
+ uint16_t *priv_channel)
+{
+ int errno;
+ struct dpa_fq *errq, *defq;
+ /* These two vaules come from DPA-Eth driver */
+ uint8_t wq_errq = 2, wq_defq = 1;
+ u32 channel;
+ struct qm_mcc_initfq initfq;
+ struct qm_fqd fqd;
+ struct task_struct *kth;
+
+ /* Get a channel */
+ errno = qman_alloc_pool(&channel);
+ if (errno) {
+ pr_err("error on getting pool channel.\n");
+ return errno;
+ }
+
+ if (channel < 0) {
+ errno = channel;
+ pr_err("error on dpa_get_channel().\n");
+ return errno;
+ }
+
+ /* Start a thread that will walk the cpus with affine portals
+ * and add this pool channel to each's dequeue mask.
+ */
+
+ kth = kthread_run(oh_add_channel, (void *)(unsigned long)channel,
+ "oh_add_channel");
+ if (!kth) {
+ pr_warn("run kthread faild...\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate memories for Tx ErrQ and Tx DefQ of oh port */
+ errq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL);
+ if (!errq) {
+ pr_err("devm_kzalloc() for OH errq failed\n");
+ return -ENOMEM;
+ }
+ defq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL);
+ if (!defq) {
+ pr_err("devm_kzalloc() for OH defq failed.\n");
+ return -ENOMEM;
+ }
+
+ /* Set Tx ErrQ callbacks of oh port */
+ errq->fq_base = oh_tx_errq;
+
+ /* Set the flags of the oh port Tx ErrQ/Tx DefQ and create the FQs */
+ errq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ errno = qman_create_fq(fqid_err, errq->fq_base.flags, &errq->fq_base);
+ if (errno) {
+ pr_err("error on create OH errq.\n");
+ return errno;
+ }
+
+ defq->fq_base = oh_tx_defq;
+ defq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ errno = qman_create_fq(fqid_def, defq->fq_base.flags, &defq->fq_base);
+ if (errno) {
+ pr_err("error on create OH defq.\n");
+ return errno;
+ }
+
+ *priv_channel = (uint16_t)channel;
+ /* Set the FQs init options then init the FQs */
+ initfq.we_mask = QM_INITFQ_WE_DESTWQ;
+ initfq.fqd.dest.channel = (uint16_t)channel;
+ initfq.fqd.dest.wq = wq_errq;
+ initfq.we_mask |= QM_INITFQ_WE_FQCTRL;
+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+ initfq.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA |
+ QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION;
+ initfq.fqd.context_a.stashing.data_cl = 2;
+ initfq.fqd.context_a.stashing.annotation_cl = 1;
+ initfq.fqd.context_a.stashing.context_cl =
+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+
+ /* init oh ports errors fq */
+ errno = qman_init_fq(&errq->fq_base, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (errno < 0) {
+ pr_err("error on qman_init_fq %u = %d\n", fqid_err, errno);
+ qman_destroy_fq(&errq->fq_base, 0);
+ devm_kfree(dev, errq);
+ return errno;
+ }
+
+ errno = qman_query_fq(&errq->fq_base, &fqd);
+ hw_lag_dbg("errno of qman_query_fq:0x%08x\n", errno);
+ if (fqd.fq_ctrl != initfq.fqd.fq_ctrl) {
+ pr_err("queried fq_ctrl=%x, should be=%x\n", fqd.fq_ctrl,
+ initfq.fqd.fq_ctrl);
+ panic("fail");
+ }
+ if (memcmp(&fqd.td, &initfq.fqd.td, sizeof(fqd.td))) {
+ pr_err("queried td_thresh=%x:%x, should be=%x:%x\n",
+ fqd.td.exp, fqd.td.mant,
+ initfq.fqd.td.exp, initfq.fqd.td.mant);
+ panic("fail");
+ }
+
+ /* init oh ports default fq */
+ initfq.fqd.dest.wq = wq_defq;
+ errno = qman_init_fq(&defq->fq_base, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (errno < 0) {
+ pr_err("error on qman_init_fq %u = %d\n", fqid_def, errno);
+ qman_destroy_fq(&defq->fq_base, 0);
+ devm_kfree(dev, defq);
+ return errno;
+ }
+
+ *oh_errq = errq;
+ *oh_defq = defq;
+
+ hw_lag_dbg("oh port defq and oh port errq initialize OK\n");
+
+ return BOND_OH_SUCCESS;
+}
+/**
+ * Initialize pcd err fqs and pcd confirmation fqs.
+ * HW LAG uses this method rather than reuse DPA-Eth private rx err/
+ * rx def/tx err/tx confirm FQs and callbacks, since HW LAG uses
+ * different data structure from DPA-Eth private driver.
+ */
+static int init_oh_pcderrq_pcdconfq(struct device *dev,
+ uint32_t *fqid_pcderr, uint32_t *fqid_pcdconf,
+ struct dpa_fq **oh_pcderrq, struct dpa_fq **oh_pcdconfq,
+ uint16_t priv_channel)
+{
+ int errno;
+ struct dpa_fq *pcderrq, *pcdconfq;
+ /* These two vaules come from DPA-Eth driver */
+ uint8_t wq_errq = 2, wq_confq = 1;
+ struct qm_mcc_initfq initfq;
+
+ /* Allocate memories for PCD ErrQ and PCD confirm Q of oh port */
+ pcderrq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL);
+ if (!pcderrq) {
+ pr_err("devm_kzalloc() for OH pcderrq failed\n");
+ return -ENOMEM;
+ }
+
+ pcdconfq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL);
+ if (!pcdconfq) {
+ pr_err("devm_kzalloc() for OH pcdconfq failed.\n");
+ return -ENOMEM;
+ }
+
+ /* Set PCD ErrQ callbacks of oh port */
+ pcderrq->fq_base = oh_pcd_errq;
+
+ /* Set the flags of the oh port PCD ErrQ, create the FQs */
+ pcderrq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE |
+ QMAN_FQ_FLAG_DYNAMIC_FQID;
+ errno = qman_create_fq(0, pcderrq->fq_base.flags, &pcderrq->fq_base);
+ if (errno) {
+ pr_err("error on create OH pcderrq.\n");
+ return errno;
+ }
+ *fqid_pcderr = pcderrq->fq_base.fqid;
+ hw_lag_dbg("*fqid_pcderr:%d\n", *fqid_pcderr);
+
+ /* Set PCD confirm Q callbacks of oh port */
+ pcdconfq->fq_base = oh_pcd_confq;
+ /* Set the flags of the oh port PCD confQ, create the FQs */
+ pcdconfq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE|
+ QMAN_FQ_FLAG_DYNAMIC_FQID;
+ errno = qman_create_fq(0, pcdconfq->fq_base.flags, &pcdconfq->fq_base);
+ if (errno) {
+ pr_err("error on create OH pcdconfq.\n");
+ return errno;
+ }
+ *fqid_pcdconf = pcdconfq->fq_base.fqid;
+ hw_lag_dbg("*fqid_pcdconf:%d\n", *fqid_pcdconf);
+
+ /* Set the FQs init options then init the FQs */
+ initfq.we_mask = QM_INITFQ_WE_DESTWQ;
+ initfq.fqd.dest.channel = priv_channel;
+ initfq.fqd.dest.wq = wq_errq;
+ initfq.we_mask |= QM_INITFQ_WE_FQCTRL;
+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+ initfq.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA |
+ QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION;
+ initfq.fqd.context_a.stashing.data_cl = 2;
+ initfq.fqd.context_a.stashing.annotation_cl = 1;
+ initfq.fqd.context_a.stashing.context_cl =
+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+
+ /* init pcd errors fq */
+ errno = qman_init_fq(&pcderrq->fq_base,
+ QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (errno < 0) {
+ pr_err("error on qman_init_fq pcderrq:%u = %d\n",
+ *fqid_pcderr, errno);
+ qman_destroy_fq(&pcderrq->fq_base, 0);
+ devm_kfree(dev, pcderrq);
+
+ return errno;
+ }
+
+ /* init pcd confirm fq */
+ initfq.fqd.dest.wq = wq_confq;
+ errno = qman_init_fq(&pcdconfq->fq_base,
+ QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (errno < 0) {
+ pr_err("error on qman_init_fq pcdconfq:%u = %d\n",
+ *fqid_pcdconf, errno);
+ qman_destroy_fq(&pcdconfq->fq_base, 0);
+ devm_kfree(dev, pcdconfq);
+
+ return errno;
+ }
+
+ *oh_pcderrq = pcderrq;
+ *oh_pcdconfq = pcdconfq;
+
+ hw_lag_dbg("oh pcd confq and pcd errq initialize OK\n");
+
+ return BOND_OH_SUCCESS;
+}
+/**
+ * Initialize confirmation fq for offline port tx fqs.
+ * This confirmation call back is enabled in case of buffer is released
+ * by BM after frame entered into tx port of offline port.
+ */
+/* Create and schedule the Tx confirmation FQ for the offline port's Tx
+ * FQs, with a dynamically allocated FQID returned through
+ * @fqid_ohtxconf and the FQ object through @oh_txconfq.
+ * Returns BOND_OH_SUCCESS, or a negative errno on failure.
+ */
+static int init_oh_txconfq(struct device *dev, uint32_t *fqid_ohtxconf,
+		struct dpa_fq **oh_txconfq, uint16_t priv_channel)
+{
+	int errno;
+	struct dpa_fq *txconfq;
+	/* This value comes from DPA-Eth driver */
+	uint8_t wq_confq = 1;
+	struct qm_mcc_initfq initfq;
+
+	/* Allocate memory for the Tx confirm Q of oh port */
+	txconfq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL);
+	if (!txconfq) {
+		pr_err("devm_kzalloc() for OH tx confq failed.\n");
+		return -ENOMEM;
+	}
+
+	/* Set tx confirm Q callbacks of oh port */
+	txconfq->fq_base = oh_tx_confq;
+	/* Set the flags of the oh port Tx confQ, create the FQ */
+	txconfq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE |
+		QMAN_FQ_FLAG_DYNAMIC_FQID;
+	errno = qman_create_fq(0, txconfq->fq_base.flags, &txconfq->fq_base);
+	if (errno) {
+		pr_err("error on create OH tx confq.\n");
+		return errno;
+	}
+	*fqid_ohtxconf = txconfq->fq_base.fqid;
+	hw_lag_dbg("dynamic *fqid_ohtxconf:%d\n", *fqid_ohtxconf);
+
+	/* Set the FQ init options then init the FQ.  Zero the request
+	 * first so no uninitialized field reaches qman_init_fq().
+	 */
+	memset(&initfq, 0, sizeof(initfq));
+	initfq.we_mask = QM_INITFQ_WE_DESTWQ;
+	initfq.fqd.dest.channel = priv_channel;
+	initfq.fqd.dest.wq = wq_confq;
+	initfq.we_mask |= QM_INITFQ_WE_FQCTRL;
+	initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+	initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+	initfq.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+	initfq.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA |
+		QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION;
+	initfq.fqd.context_a.stashing.data_cl = 2;
+	initfq.fqd.context_a.stashing.annotation_cl = 1;
+	initfq.fqd.context_a.stashing.context_cl =
+		DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+
+	/* init oh tx confirm fq (dest.wq already set above) */
+	errno = qman_init_fq(&txconfq->fq_base,
+			QMAN_INITFQ_FLAG_SCHED, &initfq);
+	if (errno < 0) {
+		pr_err("error on qman_init_fq oh tx confq:%u = %d\n",
+			*fqid_ohtxconf, errno);
+		qman_destroy_fq(&txconfq->fq_base, 0);
+		devm_kfree(dev, txconfq);
+
+		return errno;
+	}
+
+	*oh_txconfq = txconfq;
+
+	hw_lag_dbg("oh tx confq initialize OK\n");
+
+	return BOND_OH_SUCCESS;
+}
+/**
+ * Initialize dynamic particular tx fqs of offline port for LAG xmit,
+ * does not reuse tx fqs initialized by offline port driver. This method
+ * can avoid to modify offline port driver even if the confirmation fq
+ * need to be enabled.
+ */
+/* Create one dynamic offline-port Tx FQ per possible CPU for LAG xmit,
+ * instead of reusing the Tx FQs initialized by the offline port driver.
+ * This avoids touching the offline port driver even when the
+ * confirmation FQ (@fqid_ohtxconf, debug builds only) must be enabled.
+ * Returns BOND_OH_SUCCESS, or a negative errno on failure.
+ */
+static int init_oh_tx_lag_fqs(struct device *dev,
+		struct dpa_fq **oh_tx_lag_fqs, uint32_t fqid_ohtxconf,
+		uint16_t oh_tx_channel)
+{
+	int errno = BOND_OH_SUCCESS, i, tx_fqs_count;
+	uint16_t wq_id;
+	struct dpa_fq *lag_fqs;
+	struct qm_mcc_initfq fq_opts;
+	uint32_t create_flags, init_flags;
+
+	tx_fqs_count = num_possible_cpus();
+	/* Allocate particular tx queues of offline port for LAG xmit. */
+	lag_fqs = devm_kzalloc(dev, sizeof(struct dpa_fq) * tx_fqs_count,
+			GFP_KERNEL);
+	if (!lag_fqs) {
+		pr_err("Can't allocate tx fqs for LAG xmit.\n");
+		errno = -ENOMEM;
+		goto return_kfree;
+	}
+
+	/* Set flags for particular tx fqs, especially for dynamic fqid. */
+	create_flags = QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+	/* Create particular tx fqs of offline port for LAG xmit */
+	for (i = 0; i < tx_fqs_count; i++) {
+		/* set egress_ern callback for offline port tx fq */
+		lag_fqs[i].fq_base = oh_egress_ernq;
+		errno = qman_create_fq(0, create_flags, &lag_fqs[i].fq_base);
+		if (errno) {
+			pr_err("Error on creating tx fqs for LAG xmit.\n");
+			goto return_kfree;
+		}
+	}
+
+	/* Set init flags for tx fqs of oh port */
+	init_flags = QMAN_INITFQ_FLAG_SCHED;
+
+	/* Set fq init options. Specify destination wq id and channel */
+	memset(&fq_opts, 0, sizeof(fq_opts));
+	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
+	/* wq info from DPA-Eth driver */
+	wq_id = 3;
+	fq_opts.fqd.dest.wq = wq_id;
+	fq_opts.fqd.dest.channel = oh_tx_channel;
+
+	fq_opts.we_mask |= QM_INITFQ_WE_FQCTRL;
+	fq_opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+	fq_opts.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+	fq_opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA |
+		QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION;
+	fq_opts.fqd.context_a.stashing.data_cl = 2;
+	fq_opts.fqd.context_a.stashing.annotation_cl = 1;
+	fq_opts.fqd.context_a.stashing.context_cl =
+		DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+
+#ifdef CONFIG_HW_LAG_DEBUG
+	fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+	/**
+	 * CTXA[OVFQ] = 1
+	 * we set particular tx own confirmation fq and their own callback
+	 * in case of interrupt DPA-Eth private conf callback/err callback
+	 * /def callback.
+	 */
+
+	fq_opts.fqd.context_a.hi = 0x80000000;
+	fq_opts.fqd.context_a.lo = 0x0;
+	fq_opts.fqd.context_b = fqid_ohtxconf;
+#endif
+	/* Initialize particular tx frame queue of offline port for LAG xmit */
+	for (i = 0; i < tx_fqs_count; i++) {
+		errno = qman_init_fq(&lag_fqs[i].fq_base, init_flags, &fq_opts);
+		if (errno)
+			goto init_error;
+	}
+
+	for (i = 0; i < tx_fqs_count; i++) {
+		hw_lag_dbg("ok, created lag_fqs: fqid:%d\n",
+			lag_fqs[i].fq_base.fqid);
+	}
+
+	*oh_tx_lag_fqs = lag_fqs;
+
+	return BOND_OH_SUCCESS;
+init_error:
+	/* tear down the FQs already initialized: indices i-1 down to 0
+	 * (the original "i-- < 0" test could never execute)
+	 */
+	while (i-- > 0) {
+		hw_lag_dbg("errors on initializing tx fqs, No.:%d tx fq.\n", i);
+		qman_destroy_fq(&lag_fqs[i].fq_base, 0);
+	}
+
+return_kfree:
+	if (lag_fqs)
+		devm_kfree(dev, lag_fqs);
+
+	return errno;
+}
+/**
+ * This subroutine has been copied from the offline_port driver.
+ * It gathers the information of all offline ports by parsing the DTS.
+ * Returns BOND_OH_SUCCESS when all information is fetched successfully.
+ */
+int get_oh_info(void)
+{
+	struct platform_device *oh_of_dev, *of_dev;
+	struct device *dpa_oh_dev, *oh_dev;
+	struct device_node *dpa_oh_node = NULL, *oh_node;
+	int fqcount, lenp, errno = BOND_OH_SUCCESS, i = 0;
+	const phandle *p_oh_port_handle;
+	const unsigned int *p_port_id;
+	const unsigned int *p_channel_id;
+	struct fm_port *oh_port;
+	unsigned long port_handle_cnt;
+	struct fm_port_params params;
+
+	available_num_of_oh_ports = 0;
+
+	/* probe offline ports and alloc memory, these codes need refining
+	 * to save memory and need to get rid of the global variable.
+	 */
+	/* NOTE(review): poh is never freed on the early-error returns
+	 * below, nor are already-allocated per-port resources; acceptable
+	 * only because the module treats these as fatal probe failures.
+	 */
+	poh = kzalloc(sizeof(struct oh_port_priv) * FM_MAX_NUM_OF_OH_PORTS,
+			GFP_KERNEL);
+	if (!poh)
+		return -ENOMEM;
+
+	for_each_matching_node(dpa_oh_node, oh_port_match_table) {
+		/* NOTE(review): dpa_oh_node is always non-NULL inside
+		 * for_each_matching_node(); this check is redundant.
+		 */
+		if (dpa_oh_node) {
+			/* the dpa-oh node must reference exactly one
+			 * fman oh port via phandle
+			 */
+			p_oh_port_handle = of_get_property(dpa_oh_node,
+					"fsl,fman-oh-port", &lenp);
+			if (!p_oh_port_handle) {
+				pr_err("No OH port handle in node %s\n",
+					dpa_oh_node->full_name);
+				return -EINVAL;
+			}
+			hw_lag_dbg("dpa_oh_node->name:%s\n",
+				dpa_oh_node->full_name);
+			BUG_ON(lenp % sizeof(*p_oh_port_handle));
+			if (lenp != sizeof(*p_oh_port_handle)) {
+				port_handle_cnt =
+					lenp / sizeof(*p_oh_port_handle);
+
+				pr_err("Found %lu oh port in node %s\n"
+					"only 1 phandle is allowed.\n",
+					port_handle_cnt,
+					dpa_oh_node->full_name);
+				return -EINVAL;
+			}
+
+			oh_node = of_find_node_by_phandle(*p_oh_port_handle);
+			if (!oh_node) {
+				pr_err("no oh node referenced from %s\n",
+					dpa_oh_node->full_name);
+				return -EINVAL;
+			}
+			hw_lag_dbg("Found oh_node->full_name %s.\n",
+				oh_node->full_name);
+			p_port_id = of_get_property(oh_node,
+					"cell-index", &lenp);
+
+			if (!p_port_id) {
+				pr_err("No port id found in node %s\n",
+					dpa_oh_node->full_name);
+				return -EINVAL;
+			}
+
+			hw_lag_dbg("Found port id %ud, in node %s\n",
+				*p_port_id, dpa_oh_node->full_name);
+			BUG_ON(lenp % sizeof(*p_port_id));
+
+			/* Read channel id for the queues */
+			p_channel_id =
+				of_get_property(oh_node,
+					"fsl,qman-channel-id", &lenp);
+			if (!p_channel_id) {
+				pr_err("No channel id found in node %s\n",
+					dpa_oh_node->full_name);
+				return -EINVAL;
+			}
+
+			BUG_ON(lenp % sizeof(*p_channel_id));
+
+			oh_of_dev = of_find_device_by_node(oh_node);
+			BUG_ON(!oh_of_dev);
+			oh_dev = &oh_of_dev->dev;
+			of_dev = of_find_device_by_node(dpa_oh_node);
+			/* NOTE(review): this repeats the oh_of_dev check;
+			 * it looks like it should be BUG_ON(!of_dev).
+			 */
+			BUG_ON(!oh_of_dev);
+			dpa_oh_dev = &of_dev->dev;
+			/* record every handle/property of this oh port */
+			poh[i].of_dev = of_dev;
+			poh[i].oh_of_dev = oh_of_dev;
+			poh[i].dpa_oh_dev = dpa_oh_dev;
+			poh[i].oh_dev = oh_dev;
+			poh[i].dpa_oh_node = dpa_oh_node;
+			poh[i].oh_node = oh_node;
+			poh[i].cell_index = *p_port_id;
+			poh[i].oh_config = dev_get_drvdata(dpa_oh_dev);
+			poh[i].p_oh_port_handle = p_oh_port_handle;
+			poh[i].oh_channel_id = *p_channel_id;
+			oh_port = poh[i].oh_config->oh_port;
+			/* take the buffer pool id/size from the port's
+			 * first pool parameters
+			 */
+			fm_port_get_buff_layout_ext_params(oh_port, &params);
+			poh[i].bpid = params.pool_param[0].id;
+			poh[i].bp_size = params.pool_param[0].size;
+			/* give a friendly name like "fman0-oh@1"
+			 * rather than "/fsl,dpaa/dpa-fman0-oh@1".
+			 * fill friendname array with dpa_oh_node->full_name,
+			 * please don't use oh0 since documentation says oh0
+			 * has bad performance.
+			 */
+			memcpy(poh[i].friendname,
+				dpa_oh_node->full_name + 14, 10);
+
+			/* reserve a contiguous, power-of-two sized FQID
+			 * range for the PCD FQs of this port
+			 */
+			fqcount = roundup_pow_of_two(FM_MAX_NUM_OF_MACS);
+			if (qman_alloc_fqid_range(&poh[i].pcd_fqids_base,
+					fqcount, fqcount, 0) != fqcount) {
+				pr_err("error on alloc continuous pcd fqid\n");
+				return -EINVAL;
+			}
+
+			/* set up the full FQ complement of this port:
+			 * err/def, pcd err/conf, tx conf and LAG tx FQs
+			 */
+			errno = init_oh_errq_defq(poh[i].dpa_oh_dev,
+					poh[i].oh_config->error_fqid,
+					poh[i].oh_config->default_fqid,
+					&poh[i].oh_errq,
+					&poh[i].oh_defq,
+					&poh[i].p_oh_rcv_channel);
+			if (errno != BOND_OH_SUCCESS) {
+				pr_err("error when probe errq or defq.\n");
+				return errno;
+			}
+
+			errno = init_oh_pcderrq_pcdconfq(poh[i].dpa_oh_dev,
+					&poh[i].fqid_pcderr,
+					&poh[i].fqid_pcdconf,
+					&poh[i].oh_pcderrq,
+					&poh[i].oh_pcdconfq,
+					poh[i].p_oh_rcv_channel);
+			if (errno != BOND_OH_SUCCESS) {
+				pr_err("error on probe pcderrq or pcdconfq\n");
+				return errno;
+			}
+
+			errno = init_oh_txconfq(poh[i].dpa_oh_dev,
+					&poh[i].fqid_ohtxconf,
+					&poh[i].oh_txconfq,
+					poh[i].oh_channel_id);
+			if (errno != BOND_OH_SUCCESS) {
+				pr_err("error on init offline port tx confq\n");
+				return errno;
+			}
+
+			errno = init_oh_tx_lag_fqs(poh[i].dpa_oh_dev,
+					&poh[i].oh_tx_lag_fqs,
+					poh[i].fqid_ohtxconf,
+					poh[i].oh_channel_id);
+			if (errno != BOND_OH_SUCCESS) {
+				pr_err("error on init offline port tx confq\n");
+				return errno;
+			}
+
+			errno = dpa_oh_bp_create(&poh[i]);
+			if (errno != BOND_OH_SUCCESS) {
+				pr_err("error on init offline tx bp.\n");
+				return errno;
+			}
+
+			available_num_of_oh_ports = ++i;
+		}
+	}
+
+	return errno;
+}
+/**
+ * Get the FM_MAC_RES_ID from a dpa-eth NIC, return 0 if it is not a dpa-eth,
+ * otherwise return FM_MAC_RES_ID
+ * this function does not process macless, LAG does not need a macless IF.
+ */
+/* Return the FM MAC resource start address of a DPAA-Eth netdev, or 0
+ * when the device is not a DPAA-Eth port (macless IFs are not handled).
+ */
+static unsigned long long get_fm_mac_res_id(struct net_device *netdev)
+{
+	struct dpa_priv_s *priv;
+
+	if (!is_dpa_eth_port(netdev))
+		return 0;
+
+	priv = netdev_priv(netdev);
+	return (unsigned long long)priv->mac_dev->res->start;
+}
+/**
+ * Get the DCP_ID from a dpa-eth NIC, return 0 if it is not a dpa-eth,
+ * return 1 if it's fm0, return 2 if it's fm1, since there are only 2
+ * FMAN in current DPA SOC.
+ * this function does not process macless, LAG does not need a macless IF.
+ */
+int get_dcp_id_from_dpa_eth_port(struct net_device *netdev)
+{
+	/* Map the MAC resource address onto a FMan index: 1 for fm0's
+	 * MAC range, 2 for fm1's, 0 when it is not a DPAA-Eth port.
+	 */
+	unsigned long long res_start = get_fm_mac_res_id(netdev);
+
+	if (res_start >= FM1_GB0 && res_start <= FM1_10G)
+		return 1;
+	if (res_start >= FM2_GB0 && res_start <= FM2_10G)
+		return 2;
+
+	return 0;
+}
+/**
+ * Get all information of the offline port which is being used
+ * by a bundle, such as fman_dcpid, offline port offset, cell index,
+ * offline port channel. This API is required by CEETM Qos.
+ * Regarding fman dpcid, till sdk1.6, there is one fman in p1023, the
+ * offset is 0x1000000, for other dpaa socs, the offset of fman0 is
+ * 0x400000, the offset of fman1 is 0x500000, hence for current socs,
+ * the offset of fman0 <=0x4000000, 0x400000 < fman1 <=0x500000.
+ * return BOND_OH_SUCCESS when got all information, otherwise return
+ * Non-Zero.
+ */
+#define FMAN0_MAX_OFFSET 0x400000
+#define FMAN1_MAX_OFFSET 0x500000
+/* Export the bundle's offline-port parameters (@channel, @fman_dcpid,
+ * @oh_offset, @cell_index) to the CEETM QoS code by parsing the oh
+ * node's device-tree path.  Returns BOND_OH_SUCCESS, or a small
+ * positive code identifying the failed step.
+ */
+int export_oh_port_info_to_ceetm(struct bonding *bond, uint16_t *channel,
+		unsigned long *fman_dcpid, unsigned long *oh_offset,
+		unsigned long *cell_index)
+{
+	/**
+	 * split str: "/soc@ffe000000/fman@400000/port@84000", then get
+	 * the fman@ part and port@ part from them. regex is good enough
+	 * as below:
+	 * ret = sscanf((char *) p, "%*[^@]@%*[^@]@%[^/]/port@%s", s1, s2);
+	 * but the kernel version does not support the method.
+	 */
+	int errno;
+	char s1[16] = {0}, s2[16] = {0};
+	char *p, *p1;
+
+	if (!bond->params.ohp) {
+		pr_err("The bundle has not binded an offline port.\n");
+		return 1;
+	}
+
+	if (!bond->params.ohp->oh_en) {
+		pr_err("The offline is disabled, to enable it, use sysfs.\n");
+		return 2;
+	}
+
+	if (!bond->params.ohp->oh_node) {
+		pr_err("The offline node error.\n");
+		return 3;
+	}
+
+	/* strstr() may fail on unexpected node names: check before
+	 * dereferencing, and bound the copy into s1[]
+	 */
+	p = strstr(bond->params.ohp->oh_node->full_name, "fman@");
+	if (!p) {
+		pr_err("parser error while process offline port node\n");
+		return 4;
+	}
+	p += strlen("fman@");
+	p1 = strstr(p, "/port@");
+	if (!p1 || (p1 - p) >= sizeof(s1)) {
+		pr_err("parser error while process offline port node\n");
+		return 4;
+	}
+
+	memcpy(s1, p, p1 - p);
+
+	p = p1 + strlen("/port@");
+
+	errno = sscanf((const char *) p, "%15s", s2);
+	if (errno != 1) {
+		pr_err("parser error while process offline port node\n");
+		return 4;
+	}
+
+	errno = kstrtoul(s1, 16, fman_dcpid) | kstrtoul(s2, 16, oh_offset);
+	if (errno) {
+		pr_err("error on kstrtoul fman_dcpid, of_offset\n");
+		return 5;
+	}
+	/* translate the fman MMIO offset into a DCP id (0 or 1) */
+	if (*fman_dcpid <= FMAN0_MAX_OFFSET) {
+		*fman_dcpid = 0;
+	} else if ((*fman_dcpid > FMAN0_MAX_OFFSET) &&
+			(*fman_dcpid <= FMAN1_MAX_OFFSET)) {
+		*fman_dcpid = 1;
+	} else {
+		pr_err("error on calculating fman dcpid, new soc appears.\n");
+		return 6;
+	}
+
+	*channel = bond->params.ohp->oh_channel_id;
+	*cell_index = bond->params.ohp->cell_index;
+
+	hw_lag_dbg("This oh port mapped to bond has channel:0x%0x\n", *channel);
+	hw_lag_dbg("fman_dcpid:0x%0lx, oh_offset:0x%0lx, cell-index:%0lx\n",
+		*fman_dcpid, *oh_offset, *cell_index);
+
+	return BOND_OH_SUCCESS;
+}
+EXPORT_SYMBOL(export_oh_port_info_to_ceetm);
+/**
+ * Public APIs which can use for Link Aggregation and CEETM Qos
+ * show bond info and slave device info when they are available
+ */
+int show_dpa_slave_info(struct bonding *bond, struct slave *slave)
+{
+	struct dpa_priv_s *priv;
+
+	if (bond)
+		pr_info("bond->dev->name:%s, slave_cnt:%d\n",
+			bond->dev->name, bond->slave_cnt);
+	/* fix: the original called netdev_priv(slave->dev) before the
+	 * NULL check on slave, crashing on a NULL slave
+	 */
+	if (!slave)
+		return 0;
+
+	pr_info("new_slave:%s\n", slave->dev->name);
+
+	priv = netdev_priv(slave->dev);
+	if (is_dpa_eth_port(slave->dev)) {
+		pr_info("priv->mac_dev->res->start:%llx\n",
+			(unsigned long long)priv->mac_dev->res->start);
+		pr_info("get_dcp_id_from_dpa_eth_port(netdev):0x%0x\n",
+			get_dcp_id_from_dpa_eth_port(slave->dev));
+	} else
+		pr_info("the slave device %s is not a DPAA-Eth NIC\n",
+			slave->dev->name);
+
+	return 0;
+}
+
+int init_status(struct net_device *netdev)
+{
+	/* Reset this bond's offline-port xmit statistics to zero. */
+	struct bonding *bond = master_to_bond(netdev);
+
+	memset(&bond->params.oh_stats, 0, sizeof(bond->params.oh_stats));
+
+	return BOND_OH_SUCCESS;
+}
+
+void add_statistics(struct bonding *bond, struct rtnl_link_stats64 *stats)
+{
+	/* Fold the offline-port tx counters into the caller's totals. */
+	const struct rtnl_link_stats64 *oh = &bond->params.oh_stats;
+
+	stats->tx_packets += oh->tx_packets;
+	stats->tx_bytes += oh->tx_bytes;
+	stats->tx_errors += oh->tx_errors;
+	stats->tx_dropped += oh->tx_dropped;
+}
+/**
+ * Copied from oNIC (removed priv)
+ * Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+int oh_tx_csum_enable(struct sk_buff *skb,
+		struct qm_fd *fd,
+		char *parse_results)
+{
+	fm_prs_result_t *parse_result;
+	struct iphdr *iph;
+	struct ipv6hdr *ipv6h = NULL;
+	struct tcphdr *tcph;
+	struct udphdr *udph;
+	int l4_proto;
+	int ethertype = ntohs(skb->protocol);
+	int retval = 0, i;
+	unsigned char *p;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	/* Fill in some fields of the Parse Results array, so the FMan
+	 * can find them as if they came from the FMan Parser.
+	 */
+	parse_result = (fm_prs_result_t *)parse_results;
+	/* If we're dealing with VLAN, get the real Ethernet type */
+	if (ethertype == ETH_P_8021Q) {
+		/* We can't always assume the MAC header is set correctly
+		 * by the stack, so reset to beginning of skb->data
+		 */
+		skb_reset_mac_header(skb);
+		ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+		/* below l2r need look up FMAN RM to verify */
+		parse_result->l2r = FM_PR_L2_VLAN | FM_PR_L2_VLAN_STACK;
+	} else {
+		parse_result->l2r = FM_PR_L2_ETHERNET;
+	}
+
+	/* Fill in the relevant L3 parse result fields
+	 * and read the L4 protocol type
+	 */
+	switch (ethertype) {
+	case ETH_P_IP:
+		parse_result->l3r = FM_L3_PARSE_RESULT_IPV4;
+		iph = ip_hdr(skb);
+		BUG_ON(iph == NULL);
+		/* fix: protocol is a single byte, byte-swapping it with
+		 * ntohs() yields a wrong value
+		 */
+		l4_proto = iph->protocol;
+		break;
+	case ETH_P_IPV6:
+		parse_result->l3r = FM_L3_PARSE_RESULT_IPV6;
+		ipv6h = ipv6_hdr(skb);
+		BUG_ON(ipv6h == NULL);
+		/* fix: nexthdr is a single byte as well, no ntohs() */
+		l4_proto = ipv6h->nexthdr;
+		break;
+	default:
+		/* We shouldn't even be here */
+		hw_lag_dbg("Can't compute HW csum for L3 proto 0x%x\n",
+			ntohs(skb->protocol));
+		retval = -EIO;
+		goto return_error;
+	}
+
+	hw_lag_dbg("skb->protocol(L3):0x%04x, ethertype:%x\n",
+		ntohs(skb->protocol), ethertype);
+
+	/* Fill in the relevant L4 parse result fields */
+	switch (l4_proto) {
+	case IPPROTO_UDP:
+		parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+		udph = (struct udphdr *)(skb->data + skb_transport_offset(skb));
+		hw_lag_dbg("udp org csum:0x%0x\n", udph->check);
+		skb_set_transport_header(skb, skb_checksum_start_offset(skb));
+		/* NOTE(review): skb_checksum_help() return value is
+		 * ignored here and below — TODO confirm this is safe
+		 */
+		skb_checksum_help(skb);
+		hw_lag_dbg("udp software csum:0x%0x\n", udph->check);
+		break;
+	case IPPROTO_TCP:
+		parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+		tcph = (struct tcphdr *)(skb->data + skb_transport_offset(skb));
+		p = skb->data;
+		hw_lag_dbg("\ndmac:%02x:%02x:%02x:%02x:%02x:%02x\n"
+			"smac:%02x:%02x:%02x:%02x:%02x:%02x\n"
+			"h_proto:0x%04x\n", p[0], p[1], p[2], p[3], p[4], p[5],
+			p[6], p[7], p[8], p[9], p[10], p[11],
+			*((short *)(p + 12)));
+
+		/* dump skb data info for manual calculate L4CSUM,
+		 * jump over net header first
+		 */
+		p += skb_network_offset(skb);
+		for (i = 0; i < skb->len - skb_network_offset(skb) - 4; i += 4)
+			hw_lag_dbg("%08x\n", *((unsigned int *) (p + i)));
+
+		for (; i < skb->len - skb_network_offset(skb); i++)
+			hw_lag_dbg("%02x\n", *(p + i));
+
+		hw_lag_dbg("tcp org csum:0x%0x.\n", tcph->check);
+		skb_set_transport_header(skb, skb_checksum_start_offset(skb));
+		skb_checksum_help(skb);
+		hw_lag_dbg("tcp software csum:0x%0x,\n", tcph->check);
+
+		break;
+	default:
+		/* This can as well be a BUG() */
+		pr_err("Can't compute HW csum for L4 proto 0x%x\n",
+			l4_proto);
+		retval = -EIO;
+		goto return_error;
+	}
+
+	hw_lag_dbg("l4_proto:0x%04x, result->l2r:0x%04x\n",
+		l4_proto, parse_result->l2r);
+	hw_lag_dbg("result->l3r:0x%04x, result->l4r:0x%02x.\n",
+		parse_result->l3r, parse_result->l4r);
+
+	/* At index 0 is IPOffset_1 as defined in the Parse Results */
+	parse_result->ip_off[0] = skb_network_offset(skb);
+	parse_result->l4_off = skb_transport_offset(skb);
+
+	/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+	fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+
+	/* On P1023 and similar platforms fd->cmd interpretation could
+	 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
+	 * is not set so we do not need to check; in the future, if/when
+	 * using context_a we need to check this bit
+	 */
+
+return_error:
+	return retval;
+}
+
+static int __hot dpa_oh_xmit(struct qm_fd *fd, struct qman_fq *tx_fq)
+{
+	/* Enqueue the frame descriptor, retrying while the enqueue ring
+	 * reports busy; give up after a bounded number of attempts.
+	 */
+	int err = -EBUSY;
+	int attempts = 100000;
+
+	while (attempts--) {
+		err = qman_enqueue(tx_fq, fd, 0);
+		if (err != -EBUSY)
+			break;
+	}
+
+	if (unlikely(err < 0)) {
+		/* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
+		pr_err("qman_enqueue() error.\n");
+		return err;
+	}
+
+	return 0;
+}
+
+
+int __hot dpa_oh_tx(struct sk_buff *skb, struct bonding *bond,
+		struct net_device *net_dev, struct dpa_fq *tx_fq)
+{
+	struct dpa_priv_s *priv;
+	struct dpa_bp *bp = bond->params.ohp->tx_bp;
+
+	struct sk_buff **skbh = NULL;
+	dma_addr_t addr;
+	struct qm_fd fd;
+	int err = 0;
+	int *countptr;
+	struct rtnl_link_stats64 *percpu_stats;
+
+	tx_fq->net_dev = bond->params.ohp->slave[0]->dev;
+	priv = netdev_priv(bond->params.ohp->slave[0]->dev);
+	percpu_stats = &bond->params.oh_stats;
+	countptr = __this_cpu_ptr(bp->percpu_count);
+
+	if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
+		struct sk_buff *skb_new;
+
+		skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
+		if (unlikely(!skb_new)) {
+			percpu_stats->tx_errors++;
+			kfree_skb(skb);
+			goto done;
+		}
+		kfree_skb(skb);
+		skb = skb_new;
+	}
+
+	clear_fd(&fd);
+
+	/* store skb backpointer to release the skb later */
+	skbh = (struct sk_buff **)(skb->data - priv->tx_headroom);
+	*skbh = skb;
+
+	/* TODO check if skb->len + priv->tx_headroom < bp->size */
+
+	/* Enable L3/L4 hardware checksum computation.
+	 *
+	 * We must do this before dma_map_single(), because we may
+	 * need to write into the skb.
+	 */
+	err = oh_tx_csum_enable(skb, &fd,
+			((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
+
+	if (unlikely(err < 0)) {
+		pr_err("HW csum error: %d\n", err);
+		/* fix: the original returned here without releasing the
+		 * skb, leaking it on every checksum failure
+		 */
+		percpu_stats->tx_errors++;
+		dev_kfree_skb(skb);
+		return err;
+	}
+
+	addr = dma_map_single(bp->dev, skbh,
+			skb->len + priv->tx_headroom, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(bp->dev, addr))) {
+		pr_err("dma_map_single() failed\n");
+		goto dma_mapping_failed;
+	}
+
+	fd.format = qm_fd_contig;
+	fd.length20 = skb->len;
+	fd.offset = priv->tx_headroom;
+	fd.addr_hi = upper_32_bits(addr);
+	fd.addr_lo = lower_32_bits(addr);
+	/* fd.cmd |= FM_FD_CMD_FCO; */
+	fd.bpid = bp->bpid;
+
+	/* (Partially) drain the Draining Buffer Pool pool; each core
+	 * acquires at most the number of buffers it put there; since
+	 * BMan allows for up to 8 buffers to be acquired at one time,
+	 * work in batches of 8 for efficiency reasons
+	 */
+	dpa_oh_drain_bp(bp);
+
+	if (unlikely(dpa_oh_xmit(&fd, &tx_fq->fq_base) < 0)) {
+		/* oh tx error, add statistics */
+		bond->params.oh_stats.tx_packets++;
+		bond->params.oh_stats.tx_errors++;
+		hw_lag_dbg("3ad enqueue_pkt error...txerr_pkt:%llu\n",
+			bond->params.oh_stats.tx_packets);
+		goto xmit_failed;
+	}
+
+	/* oh tx OK, add statistics */
+	bond->params.oh_stats.tx_packets++;
+	bond->params.oh_stats.tx_bytes += skb->len;
+	hw_lag_dbg("3ad enqueue_pkt OK...tx_pkt:%llu\n",
+		bond->params.oh_stats.tx_packets);
+	/* fix: this per-cpu buffer count update was unreachable in the
+	 * original — it sat after an if/else in which both branches
+	 * returned or jumped away
+	 */
+	(*countptr)++;
+	return NETDEV_TX_OK;
+
+xmit_failed:
+	dma_unmap_single(bp->dev, addr, fd.offset + fd.length20, DMA_TO_DEVICE);
+dma_mapping_failed:
+	percpu_stats->tx_errors++;
+	dev_kfree_skb(skb);
+done:
+	return NETDEV_TX_OK;
+}
+/**
+ * Enqueue one skb pkt to offline port which attached to a bond.
+ * bond: current bond's pointer
+ * skb: pkt which will be enqueued to the offline port
+ * ceetm_fq: pkt will use this fq for xmit. if this ceetm_fq is
+ * pointing to NULL, will use default tx_fq for xmit.
+ * return BOND_OH_SUCCESS if enqueued, otherwise return errors.
+ */
+int enqueue_pkt_to_oh(struct bonding *bond, struct sk_buff *skb,
+		struct dpa_fq *ceetm_fq)
+{
+	struct oh_port_priv *p_oh = bond->params.ohp;
+	struct net_device *slave_netdev = NULL;
+	struct dpa_fq *tx_fq;
+
+	/* fix: the original dereferenced p_oh->slave[0]->dev and wrote
+	 * p_oh->oh_errq/oh_defq before any validation
+	 */
+	if (!p_oh || !p_oh->slave[0]) {
+		pr_err("no offline port bound or no slave attached.\n");
+		return -EINVAL;
+	}
+
+	slave_netdev = p_oh->slave[0]->dev;
+
+	if (!is_dpa_eth_port(slave_netdev)) {
+		pr_err("is not dpaa NIC or NULL pointer.\n");
+		return -EINVAL;
+	}
+
+	tx_fq = p_oh->oh_tx_lag_fqs;
+	p_oh->oh_errq->net_dev = slave_netdev;
+	p_oh->oh_defq->net_dev = slave_netdev;
+
+	if (ceetm_fq)
+		return dpa_oh_tx(skb, bond, slave_netdev, ceetm_fq);
+	else
+		return dpa_oh_tx(skb, bond, slave_netdev, tx_fq);
+}
+EXPORT_SYMBOL(enqueue_pkt_to_oh);
+
+static int get_dpa_slave_info(struct slave *slave, uint16_t *tx_channel)
+{
+	/* Report the FMan tx channel of a DPAA slave device. */
+	struct dpa_priv_s *priv = netdev_priv(slave->dev);
+
+	if (!is_dpa_eth_port(slave->dev))
+		return BOND_OH_ERROR;
+	if (!priv->mac_dev)
+		return BOND_OH_ERROR;
+
+	*tx_channel = fm_get_tx_port_channel(priv->mac_dev->port_dev[TX]);
+	return BOND_OH_SUCCESS;
+}
+
+int get_dpa_slave_info_ex(struct slave *slave, uint16_t *tx_channel,
+		struct qman_fq **egress_fq, u32 *first_fqid)
+{
+	/* Like get_dpa_slave_info(), but also hand back the slave's
+	 * first egress FQ and its FQID.
+	 */
+	struct dpa_priv_s *priv = netdev_priv(slave->dev);
+	struct qman_fq *fq0;
+
+	if (!is_dpa_eth_port(slave->dev))
+		return BOND_OH_ERROR;
+	if (!priv->mac_dev)
+		return BOND_OH_ERROR;
+
+	fq0 = priv->egress_fqs[0];
+	*tx_channel = fm_get_tx_port_channel(priv->mac_dev->port_dev[TX]);
+	*egress_fq = fq0;
+	*first_fqid = fq0->fqid;
+
+	return BOND_OH_SUCCESS;
+}
+
+/* Creates Frame Queues, these 2 good subroutines are completely copied from
+ * Bogdan Purcareata's good patch "Offline port queues initialization", HW_LAG
+ * need to initialize FQs for an offline port PCD usage with tx_channel/wq of
+ * slave devices which have already attached to a bond, HW_LAG OH port dequeue,
+ * then enqueue PCD FQs to DPA-Eth via these PCD FQs.
+ */
+/* Create and schedule one PCD frame queue for the offline port.
+ * The FQ is created with QMAN_FQ_FLAG_TO_DCPORTAL (it feeds a direct
+ * connect portal) and initialized onto the given tx_channel/wq_id;
+ * confirmations are steered to fqid_pcdconf via CTXB with
+ * CTXA[OVFQ] = 1, bypassing the DPA-Eth private conf queue.
+ * Returns BOND_OH_SUCCESS, or BOND_OH_ERROR on create/init failure
+ * (on init failure the just-created FQ is destroyed again).
+ */
+static int create_oh_pcd_fq(struct qman_fq *fq, u32 fqid_pcdconf,
+		uint32_t fq_id, uint16_t tx_channel, uint16_t wq_id)
+{
+	struct qm_mcc_initfq fq_opts;
+	uint32_t create_flags, init_flags;
+	uint32_t ret = 0;
+
+	if (!fq)
+		return BOND_OH_ERROR;
+
+	/* Set flags for FQ create */
+	create_flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+
+	/* set egress_ern callback for pcd fqs */
+	*fq = oh_pcd_egress_ernq;
+
+	/* Create frame queue */
+	ret = qman_create_fq(fq_id, create_flags, fq);
+	if (ret != 0)
+		return BOND_OH_ERROR;
+
+	/* Set flags for FQ init */
+	init_flags = QMAN_INITFQ_FLAG_SCHED;
+
+	/* Set FQ init options. Specify destination WQ ID and channel */
+	memset(&fq_opts, 0, sizeof(fq_opts));
+	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
+	fq_opts.fqd.dest.wq = wq_id;
+	fq_opts.fqd.dest.channel = tx_channel;
+
+	/* FQ control: prefer-in-cache, context-A stashing of
+	 * data/annotation/context, and AVOIDBLOCK
+	 */
+	fq_opts.we_mask |= QM_INITFQ_WE_FQCTRL;
+	fq_opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+	fq_opts.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+	fq_opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA |
+		QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION;
+	fq_opts.fqd.context_a.stashing.data_cl = 2;
+	fq_opts.fqd.context_a.stashing.annotation_cl = 1;
+	fq_opts.fqd.context_a.stashing.context_cl =
+		DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+
+	fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+	/**
+	 * CTXA[OVFQ] = 1
+	 * we set PCD own confirmation Q and their own callback in case of
+	 * interrupt DPA-Eth private conf callback/err callback/def callback.
+	 */
+	fq_opts.fqd.context_a.hi = 0x80000000;
+	fq_opts.fqd.context_a.lo = 0x0;
+	fq_opts.fqd.context_b = fqid_pcdconf;
+
+	/* Initialize frame queue */
+	ret = qman_init_fq(fq, init_flags, &fq_opts);
+	if (ret != 0) {
+		qman_destroy_fq(fq, 0);
+		return BOND_OH_ERROR;
+	}
+	hw_lag_dbg("FQ create_flags:0X%0x, init_flags:0X%0x\n",
+		create_flags, init_flags);
+
+	return BOND_OH_SUCCESS;
+}
+
+static int hw_lag_allocate_pcd_queues(struct device *dev,
+		struct dpa_fq **p_pcd_fq, u32 fqid_pcdconf, u32 fqid,
+		uint16_t tx_channel, uint16_t wq)
+{
+	/* Allocate one PCD frame queue, create/schedule it, and hand it
+	 * back to the caller through *p_pcd_fq.
+	 */
+	struct dpa_fq *fq;
+	int err;
+
+	hw_lag_dbg("Allocating PCD queues...p_pcd_fq:%p, fqid:%d\n",
+		*p_pcd_fq, fqid);
+
+	fq = devm_kzalloc(dev, sizeof(*fq), GFP_KERNEL);
+	if (!fq) {
+		pr_err("can't allocate slave PCD FQ!\n");
+		return -ENOMEM;
+	}
+
+	hw_lag_dbg("Allocated pcd_fq:%p, fqid:%d\n", fq, fqid);
+
+	err = create_oh_pcd_fq(&fq->fq_base, fqid_pcdconf, fqid,
+			tx_channel, wq);
+	if (err != BOND_OH_SUCCESS) {
+		pr_err("can't create lag PCD FQ:%u\n", fqid);
+		devm_kfree(dev, fq);
+		return -EINVAL;
+	}
+
+	*p_pcd_fq = fq;
+	hw_lag_dbg("created pcd_fq:%p, fqid:%d, *p_pcd_fq:%p\n",
+		fq, fqid, *p_pcd_fq);
+	return BOND_OH_SUCCESS;
+}
+
+/* Destroys Frame Queues: retire the FQ, take it out of service, then
+ * destroy it; failures of the first two steps are logged but do not
+ * stop the teardown.
+ */
+static void hw_lag_fq_destroy(struct qman_fq *fq)
+{
+	int err;
+
+	err = qman_retire_fq(fq, NULL);
+	if (unlikely(err < 0))
+		pr_err("qman_retire_fq(%u)=%d\n", qman_fq_fqid(fq), err);
+
+	err = qman_oos_fq(fq);
+	if (unlikely(err < 0))
+		pr_err("qman_oos_fq(%u)=%d\n", qman_fq_fqid(fq), err);
+
+	qman_destroy_fq(fq, 0);
+}
+/* release fq memory: tear down the hardware queue, then free the
+ * backing allocation.
+ */
+static int hw_lag_release_fq(struct device *dev, struct dpa_fq *fq)
+{
+	if (!fq)
+		return BOND_OH_ERROR;
+
+	hw_lag_fq_destroy(&fq->fq_base);
+
+	/* NOTE(review): when dev is NULL the fq memory is never freed —
+	 * the devm allocation cannot be released without its device
+	 */
+	if (!dev)
+		return BOND_OH_ERROR;
+
+	devm_kfree(dev, fq);
+
+	return BOND_OH_SUCCESS;
+}
+/**
+ * Get DPA slave device information: wq/channel_id, allocate FQID/FQ memory,
+ * then set FQ flags, record the slave pointer in case of remove these
+ * information when detaching slave in the future.
+ */
+int fill_oh_pcd_fqs_with_slave_info(struct bonding *bond, struct slave *slave)
+{
+	uint16_t tx_channel;
+	struct dpa_fq *pcd_fq = NULL;
+	struct oh_port_priv *cur;
+	u32 fqid;
+	uint16_t wq_id = 3; /* the default value in DPA-Eth private driver */
+
+	if (bond->params.mode != BOND_MODE_8023AD) {
+		pr_err("error, only support 802.3ad when fill OH FQs.\n");
+		return BOND_OH_ERROR;
+	}
+
+	if (is_dpa_eth_port(slave->dev) == false) {
+		pr_err("error, only support dpa eth nic.\n");
+		return BOND_OH_ERROR;
+	}
+
+	if (bond->slave_cnt > SLAVES_PER_BOND) {
+		pr_err("error, only support 2 dpa nic per bond.\n");
+		return BOND_OH_ERROR;
+	}
+
+	if (get_dpa_slave_info(slave, &tx_channel) == BOND_OH_ERROR) {
+		pr_err("error on getting dpa info when fill OH FQs.\n");
+		return BOND_OH_ERROR;
+	}
+
+	cur = bond->params.ohp;
+	if (!cur) {
+		pr_err("have not bind an OH port,\n");
+		pr_err("will use software tx traffic distribution.\n");
+		return BOND_OH_ERROR;
+	}
+
+	hw_lag_dbg("cur->pcd_fqs[0]:%p, cur->pcd_fqs[1]:%p\n",
+		cur->pcd_fqs[0], cur->pcd_fqs[1]);
+	/* fix: refuse up-front when both slots are taken; the original
+	 * fell through its final if/else-if and leaked the newly created
+	 * (and still scheduled) FQ without recording it anywhere
+	 */
+	if (cur->pcd_fqs[0] && cur->pcd_fqs[1]) {
+		pr_err("no free pcd fq slot on the offline port.\n");
+		return BOND_OH_ERROR;
+	}
+
+	if (!cur->pcd_fqs[0])
+		fqid = cur->pcd_fqids_base;
+	else
+		fqid = cur->pcd_fqids_base + 1;
+
+	hw_lag_dbg("pcd_fq:%p, fqid:%d Before alloc.\n", pcd_fq, fqid);
+
+	if (hw_lag_allocate_pcd_queues(cur->dpa_oh_dev, &pcd_fq,
+			cur->fqid_pcdconf, fqid, tx_channel,
+			wq_id) == BOND_OH_ERROR) {
+		pr_err("error on create pcd fqs\n");
+		return BOND_OH_ERROR;
+	}
+
+	hw_lag_dbg("pcd_fq:%p, fqid:%d, tx_channel:%d, wq_id:%d After alloc.\n",
+		pcd_fq, fqid, tx_channel, wq_id);
+	hw_lag_dbg("fqid:0x%0x, tx_channel:0x%0x, wq_id:0x%0x After alloc.\n",
+		fqid, tx_channel, wq_id);
+
+	if (!cur->pcd_fqs[0]) {
+		cur->pcd_fqs[0] = pcd_fq;
+		cur->slave[0] = slave;
+	} else {
+		cur->pcd_fqs[1] = pcd_fq;
+		cur->slave[1] = slave;
+	}
+
+	return BOND_OH_SUCCESS;
+}
+
+/* forget offline port pcd information according to slave pointer,
+ * then destroy fq and release the fq memory.
+ */
+int del_oh_pcd_fqs_with_slave_info(struct bonding *bond, struct slave *slave)
+{
+	struct oh_port_priv *cur;
+	struct dpa_fq *pcd_fq = NULL;
+	int i;
+
+	if (is_dpa_eth_port(slave->dev) == false) {
+		pr_err("error, only support dpa eth nic.\n");
+		return BOND_OH_ERROR;
+	}
+
+	cur = bond->params.ohp;
+	if (!cur) {
+		pr_err("have not bind an OH port,\n");
+		pr_err("will use software tx traffic distribution.\n");
+		return BOND_OH_ERROR;
+	}
+
+	/* find the slot this slave owns and clear it */
+	for (i = 0; i < SLAVES_PER_BOND; i++) {
+		if (cur->slave[i] == slave) {
+			pcd_fq = cur->pcd_fqs[i];
+			cur->pcd_fqs[i] = NULL;
+			cur->slave[i] = NULL;
+			break;
+		}
+	}
+
+	return hw_lag_release_fq(cur->dpa_oh_dev, pcd_fq);
+}
diff --git a/drivers/net/bonding/hw_distribution.h b/drivers/net/bonding/hw_distribution.h
new file mode 100644
index 0000000..32f6e2f
--- /dev/null
+++ b/drivers/net/bonding/hw_distribution.h
@@ -0,0 +1,165 @@
+/**
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HARDWARE_DISTRIBUTION_H
+#define __HARDWARE_DISTRIBUTION_H
+
+#include <linux/of_platform.h>
+
+#include "bonding.h"
+#include "bond_3ad.h"
+#include "bond_alb.h"
+#include "lnxwrp_fm.h"
+#include "offline_port.h"
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+/* FMD includes */
+#include "error_ext.h"
+#include "fm_pcd_ext.h"
+#include "fm_cc.h"
+#include "crc64.h"
+
+#define OHFRIENDNAMSIZ 10 /* fman0-oh@1, ... fman1-oh@6 */
+#define OHNODENAMSIZ 24 /* /fsl,dpaa/dpa-fman0-oh@1 */
+#define BOND_OH_SUCCESS 0
+#define BOND_OH_ERROR -1
+#define NO_POLICY 0xFF /* this is a magic number */
+
+#define FM1_GB0 0xffe4e0000
+#define FM1_10G 0xffe4f0000
+#define FM2_GB0 0xffe5e0000
+#define FM2_10G 0xffe5f0000
+
+#define DPA_FQ_TD 0x200000
+
+/* There are 4 FMAN Ethernet Ports per T1040, 2 of them are for the
+ * Link Aggregation for the L2Swith trunk link, thus there are at
+ * most 2 ports left for the other Link Aggregation, this implies
+ * 2 MAX_BOND_CNT * SLAVES_PER_BOND = 4 FMAN Ethernet Ports.
+ * In fact,we only need numbers of offline port in a DTS:
+ * offline port count = min(FM_MAX_NUM_OF_OH_PORTS, MAX_BOND_CNT)
+ */
+#define MAX_BOND_CNT 2
+#define SLAVES_PER_BOND 2
+
+#ifdef CONFIG_HW_LAG_DEBUG
+#define hw_lag_dbg(fmt, arg...) \
+ pr_info("LAG:[CPU %d ln %d fn %s] - " fmt, smp_processor_id(), \
+ __LINE__, __func__, ##arg)
+#else
+#define hw_lag_dbg(fmt, arg...)
+#endif
+
+/* State for one offline (O/H) port used for HW tx distribution. */
+struct oh_port_priv {
+	unsigned int oh_channel_id;	/* channel exported to CEETM */
+	struct dpa_oh_config_s *oh_config;
+	struct dpa_fq *pcd_fqs[SLAVES_PER_BOND]; /* one PCD FQ per slave */
+	struct dpa_fq *oh_defq, *oh_errq;	/* default and error queues */
+	uint16_t p_oh_rcv_channel;
+	struct slave *slave[SLAVES_PER_BOND];	/* owners of pcd_fqs[] */
+	u32 pcd_fqids_base;	/* first FQID used for the PCD FQs */
+	uint32_t fqid_pcderr, fqid_pcdconf, fqid_ohtxconf;
+	struct dpa_fq *oh_pcderrq, *oh_pcdconfq, *oh_txconfq;
+	/* init dynamic particular tx fqs of offline port for LAG xmit,
+	 * does not reuse tx fqs initialized by offline port driver.
+	 */
+	struct dpa_fq *oh_tx_lag_fqs;
+	const phandle *p_oh_port_handle;
+	struct platform_device *oh_of_dev, *of_dev;
+	struct device *dpa_oh_dev, *oh_dev;
+	struct device_node *dpa_oh_node, *oh_node;
+	struct dpa_bp *tx_bp;	/* buffer pool used on the LAG tx path */
+	struct dpa_buffer_layout_s *tx_buf_layout;
+	uint8_t bpid; /**< External buffer pool id */
+	uint16_t bp_size; /**< External buffer pool buffer size */
+	int oh_en; /* enable or disable offline port's help at run-time */
+	unsigned char friendname[OHFRIENDNAMSIZ];	/* e.g. "fman0-oh@1" */
+	unsigned long cell_index;
+	t_Handle h_FmPcd;
+	t_Handle h_FmPort;
+	t_Handle h_NetEnv;
+
+	t_FmPcdNetEnvParams *netEnvParams;
+	t_FmPcdKgSchemeParams *scheme;
+	t_FmPortPcdParams *pcdParam;
+	t_FmPortPcdPrsParams *prsParam;
+	t_FmPortPcdKgParams *kgParam;
+	int numberof_pre_schemes;
+};
+
+/* Distribution-scheme selectors; each name encodes the header fields
+ * the scheme keys on (presumably MAC-only, MAC+L3, MAC+L3+L4 — TODO
+ * confirm against the scheme setup code).  MAX_SCHEMES is the count
+ * sentinel.
+ */
+enum e_dist_hdr {
+	L2_MAC = 0,
+	MAC_L3_IPV6,
+	MAC_L3_IPV4,
+	MAC_IPV6_TCP,
+	MAC_IPV6_UDP,
+	MAC_IPV4_TCP,
+	MAC_IPV4_UDP,
+	MAX_SCHEMES
+};
+
+extern struct oh_port_priv *poh;
+extern int available_num_of_oh_ports;
+
+int get_oh_info(void);
+unsigned int to_which_oh_i_attached(struct oh_port_priv *current_poh);
+bool are_all_slaves_linkup(struct bonding *bond);
+int get_dcp_id_from_dpa_eth_port(struct net_device *netdev);
+int export_oh_port_info_to_ceetm(struct bonding *bond, uint16_t *channel,
+ unsigned long *fman_dcpid, unsigned long *oh_offset,
+ unsigned long *cell_index);
+int show_dpa_slave_info(struct bonding *bond, struct slave *slave);
+int get_dpa_slave_info_ex(struct slave *slave, uint16_t *tx_channel,
+ struct qman_fq **egress_fq, u32 *first_fqid);
+int enqueue_pkt_to_oh(struct bonding *bond, struct sk_buff *skb,
+ struct dpa_fq *ceetm_fq);
+ssize_t bonding_show_offline_port_xmit_statistics(struct device *d,
+ struct device_attribute *attr, char *buf);
+ssize_t bonding_show_offline_ports(struct device *d,
+ struct device_attribute *attr, char *buf);
+ssize_t bonding_show_oh_needed_for_hw_distribution(struct device *d,
+ struct device_attribute *attr, char *buf);
+ssize_t bonding_store_oh_needed_for_hw_distribution(struct device *d,
+ struct device_attribute *attr, const char *buffer,
+ size_t count);
+ssize_t bonding_show_oh_enable(struct device *d,
+ struct device_attribute *attr, char *buf);
+ssize_t bonding_store_oh_enable(struct device *d,
+ struct device_attribute *attr, const char *buffer,
+ size_t count);
+int fill_oh_pcd_fqs_with_slave_info(struct bonding *bond, struct slave *slave);
+int del_oh_pcd_fqs_with_slave_info(struct bonding *bond, struct slave *slave);
+bool apply_pcd(struct bonding *bond, int new_xmit_policy);
+int release_pcd_mem(struct bonding *bond);
+int init_status(struct net_device *netdev);
+void add_statistics(struct bonding *bond, struct rtnl_link_stats64 *stats);
+#endif /* __HARDWARE_DISTRIBUTION_H */
diff --git a/drivers/net/bonding/hw_oh_pcd.c b/drivers/net/bonding/hw_oh_pcd.c
new file mode 100644
index 0000000..424281b
--- /dev/null
+++ b/drivers/net/bonding/hw_oh_pcd.c
@@ -0,0 +1,744 @@
+/**
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "hw_distribution.h"
+
+/* Resolve the wrapper FM device that owns the given wrapper FM port device. */
+static t_LnxWrpFmDev *get_FmDev_from_FmPortDev(t_LnxWrpFmPortDev *p_FmPortDev)
+{
+	return (t_LnxWrpFmDev *)p_FmPortDev->h_LnxWrpFmDev;
+}
+
+/* The opaque struct fm_port cookie is really a t_LnxWrpFmPortDev pointer. */
+static t_LnxWrpFmPortDev *get_FmPortDev_from_fm_port(struct fm_port *fm_port)
+{
+	return (t_LnxWrpFmPortDev *)fm_port;
+}
+
+/* for: FM_PORT_Disable/FM_PORT_Enable/FM_PORT_SetPCD
+ * (t_Handle h_FmPort,...)
+ */
+static t_Handle get_h_FmPort_from_fm_port(struct fm_port *fm_port)
+{
+	/* h_Dev is the FMD handle accepted by FM_PORT_Disable/Enable/SetPCD */
+	return get_FmPortDev_from_fm_port(fm_port)->h_Dev;
+}
+
+/* for: FM_PCD_Enable/FM_PCD_NetEnvCharacteristicsSet/
+ * FM_PCD_KgSchemeSet(t_Handle h_FmPcd)
+ */
+static t_Handle get_h_FmPcd_from_fm_port(struct fm_port *fm_port)
+{
+	t_LnxWrpFmPortDev *port_dev = get_FmPortDev_from_fm_port(fm_port);
+	t_LnxWrpFmDev *fm_dev = get_FmDev_from_FmPortDev(port_dev);
+
+	/* walk port -> owning FM device -> its PCD handle */
+	return fm_dev->h_PcdDev;
+}
+
+/* Fill the fields common to every full-field, by-header extract entry.
+ * The caller still selects the union member in
+ * extractByHdr.extractByHdrType.fullField.<hdr>.
+ */
+static void init_full_field_extract(t_FmPcdExtractEntry *ea,
+				    e_NetHeaderType hdr)
+{
+	memset(ea, 0, sizeof(*ea));
+	ea->type = e_FM_PCD_EXTRACT_BY_HDR;
+	ea->extractByHdr.hdr = hdr;
+	ea->extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE;
+	ea->extractByHdr.ignoreProtocolValidation = 0;
+	ea->extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD;
+}
+
+/* Fill the fields shared by all hash-distribution keygen schemes.
+ * The caller still sets the distinction units and the extract array.
+ */
+static void init_hash_scheme(t_FmPcdKgSchemeParams *sc, int scheme_id,
+			     t_Handle h_NetEnv, u32 pcd_fqids_base,
+			     uint8_t distNumOfQueues, uint8_t numOfExtracts)
+{
+	sc->alwaysDirect = 0;
+	sc->useHash = 1;
+	sc->baseFqid = pcd_fqids_base;
+	sc->nextEngine = e_FM_PCD_DONE;
+	sc->schemeCounter.update = 1;
+	sc->schemeCounter.value = 0;
+	sc->keyExtractAndHashParams.numOfUsedMasks = 0;
+	sc->keyExtractAndHashParams.hashShift = 0;
+	sc->keyExtractAndHashParams.symmetricHash = 0;
+	sc->keyExtractAndHashParams.hashDistributionNumOfFqids =
+		distNumOfQueues;
+	sc->keyExtractAndHashParams.numOfUsedExtracts = numOfExtracts;
+	sc->numOfUsedExtractedOrs = 0;
+	sc->netEnvParams.h_NetEnv = h_NetEnv;
+	sc->id.relativeSchemeId = scheme_id;
+}
+
+/* Allocate and populate the PCD parameter set (net environment, parser,
+ * keygen schemes) for the bond's offline port and cache the buffers in
+ * bond->params.ohp; they are released by release_pcd_mem(). Idempotent:
+ * returns immediately if ohp->h_FmPcd is already set.
+ *
+ * Returns BOND_OH_SUCCESS, E_INVALID_VALUE on bad FMD handles, or
+ * -ENOMEM on allocation failure (callers only test == BOND_OH_SUCCESS).
+ *
+ * Fixes vs. the original: ea_eth_da is now properly zeroed before use
+ * (the original memset ea_eth_sa twice), and all error paths free the
+ * buffers allocated so far instead of leaking them.
+ */
+static int alloc_pcd_mem(struct fm_port *fm_port, uint8_t numOfSchemes,
+			 u32 pcd_fqids_base, uint8_t distNumOfQueues,
+			 struct bonding *bond)
+{
+	t_Handle h_FmPcd;
+	t_Handle h_FmPort;
+	t_Handle h_NetEnv;
+
+	t_FmPcdNetEnvParams *netEnvParams = NULL;
+	t_FmPcdKgSchemeParams *scheme = NULL;
+	t_FmPortPcdParams *pcdParam = NULL;
+	t_FmPortPcdPrsParams *prsParam = NULL;
+	t_FmPortPcdKgParams *kgParam = NULL;
+	t_FmPcdExtractEntry ea_eth_sa, ea_eth_da, ea_ipv4_sa, ea_ipv4_da,
+			    ea_ipv6_sa, ea_ipv6_da, ea_tcp_sp, ea_tcp_dp,
+			    ea_udp_sp, ea_udp_dp, ea_nexthdr, ea_nextp;
+	int err;
+
+	/* already allocated by a previous call */
+	if (bond->params.ohp->h_FmPcd)
+		return BOND_OH_SUCCESS;
+
+	/* get handle of fm_port/fm_pcd from kernel struct */
+	h_FmPort = get_h_FmPort_from_fm_port(fm_port);
+	if (!h_FmPort) {
+		pr_err("error on get_h_FmPort_from_fm_port.\n");
+		return E_INVALID_VALUE;
+	}
+	h_FmPcd = get_h_FmPcd_from_fm_port(fm_port);
+	if (!h_FmPcd) {
+		pr_err("error on get_h_FmPcd_from_fm_port.\n");
+		return E_INVALID_VALUE;
+	}
+
+	netEnvParams = kzalloc(sizeof(*netEnvParams), GFP_KERNEL);
+	pcdParam = kzalloc(sizeof(*pcdParam), GFP_KERNEL);
+	prsParam = kzalloc(sizeof(*prsParam), GFP_KERNEL);
+	kgParam = kzalloc(sizeof(*kgParam), GFP_KERNEL);
+	scheme = kcalloc(MAX_SCHEMES, sizeof(*scheme), GFP_KERNEL);
+	if (!netEnvParams || !pcdParam || !prsParam || !kgParam || !scheme) {
+		pr_err("Failed to allocate PCD parameter memory.\n");
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	/* distinction units: 0:ETH 1:IPv4 2:IPv6 3:TCP 4:UDP -
+	 * the unitIds below index into this table
+	 */
+	netEnvParams->numOfDistinctionUnits = 5;
+	netEnvParams->units[0].hdrs[0].hdr = HEADER_TYPE_ETH;
+	netEnvParams->units[1].hdrs[0].hdr = HEADER_TYPE_IPv4;
+	netEnvParams->units[2].hdrs[0].hdr = HEADER_TYPE_IPv6;
+	netEnvParams->units[3].hdrs[0].hdr = HEADER_TYPE_TCP;
+	netEnvParams->units[4].hdrs[0].hdr = HEADER_TYPE_UDP;
+
+	FM_PCD_Enable(h_FmPcd);
+	h_NetEnv = FM_PCD_NetEnvCharacteristicsSet(h_FmPcd, netEnvParams);
+	if (!h_NetEnv) {
+		pr_err("error on FM_PCD_NetEnvCharacteristicsSet.\n");
+		err = E_INVALID_VALUE;
+		goto err_free;
+	}
+	hw_lag_dbg("FM_PCD_NetEnvCharacteristicsSet() ok.\n");
+
+	/* bind port to PCD properties: parser + keygen */
+	prsParam->parsingOffset = 0;
+	prsParam->firstPrsHdr = HEADER_TYPE_ETH;
+	pcdParam->h_NetEnv = h_NetEnv;
+	pcdParam->pcdSupport = e_FM_PORT_PCD_SUPPORT_PRS_AND_KG;
+	pcdParam->p_PrsParams = prsParam;
+	kgParam->numOfSchemes = numOfSchemes;
+	kgParam->directScheme = FALSE;
+	pcdParam->p_KgParams = kgParam;
+
+	/* reusable full-field extract descriptors, shared by the schemes */
+	init_full_field_extract(&ea_eth_sa, HEADER_TYPE_ETH);
+	ea_eth_sa.extractByHdr.extractByHdrType.fullField.eth =
+		NET_HEADER_FIELD_ETH_SA;
+	init_full_field_extract(&ea_eth_da, HEADER_TYPE_ETH);
+	ea_eth_da.extractByHdr.extractByHdrType.fullField.eth =
+		NET_HEADER_FIELD_ETH_DA;
+	init_full_field_extract(&ea_ipv4_sa, HEADER_TYPE_IPv4);
+	ea_ipv4_sa.extractByHdr.extractByHdrType.fullField.ipv4 =
+		NET_HEADER_FIELD_IPv4_SRC_IP;
+	init_full_field_extract(&ea_ipv4_da, HEADER_TYPE_IPv4);
+	ea_ipv4_da.extractByHdr.extractByHdrType.fullField.ipv4 =
+		NET_HEADER_FIELD_IPv4_DST_IP;
+	init_full_field_extract(&ea_ipv6_sa, HEADER_TYPE_IPv6);
+	ea_ipv6_sa.extractByHdr.extractByHdrType.fullField.ipv6 =
+		NET_HEADER_FIELD_IPv6_SRC_IP;
+	init_full_field_extract(&ea_ipv6_da, HEADER_TYPE_IPv6);
+	ea_ipv6_da.extractByHdr.extractByHdrType.fullField.ipv6 =
+		NET_HEADER_FIELD_IPv6_DST_IP;
+	init_full_field_extract(&ea_nexthdr, HEADER_TYPE_IPv6);
+	ea_nexthdr.extractByHdr.extractByHdrType.fullField.ipv6 =
+		NET_HEADER_FIELD_IPv6_NEXT_HDR;
+	init_full_field_extract(&ea_nextp, HEADER_TYPE_IPv4);
+	ea_nextp.extractByHdr.extractByHdrType.fullField.ipv4 =
+		NET_HEADER_FIELD_IPv4_PROTO;
+	init_full_field_extract(&ea_tcp_sp, HEADER_TYPE_TCP);
+	ea_tcp_sp.extractByHdr.extractByHdrType.fullField.tcp =
+		NET_HEADER_FIELD_TCP_PORT_SRC;
+	init_full_field_extract(&ea_tcp_dp, HEADER_TYPE_TCP);
+	ea_tcp_dp.extractByHdr.extractByHdrType.fullField.tcp =
+		NET_HEADER_FIELD_TCP_PORT_DST;
+	init_full_field_extract(&ea_udp_sp, HEADER_TYPE_UDP);
+	ea_udp_sp.extractByHdr.extractByHdrType.fullField.udp =
+		NET_HEADER_FIELD_UDP_PORT_SRC;
+	init_full_field_extract(&ea_udp_dp, HEADER_TYPE_UDP);
+	ea_udp_dp.extractByHdr.extractByHdrType.fullField.udp =
+		NET_HEADER_FIELD_UDP_PORT_DST;
+
+	/* Distribution: Layer2 info, MAC SA/DA */
+	init_hash_scheme(&scheme[L2_MAC], L2_MAC, h_NetEnv,
+			 pcd_fqids_base, distNumOfQueues, 2);
+	scheme[L2_MAC].netEnvParams.numOfDistinctionUnits = 1;
+	scheme[L2_MAC].netEnvParams.unitIds[0] = 0;
+	scheme[L2_MAC].keyExtractAndHashParams.extractArray[0] = ea_eth_sa;
+	scheme[L2_MAC].keyExtractAndHashParams.extractArray[1] = ea_eth_da;
+
+	/* Distribution: Layer2 and Layer3 info, MAC and ipv6 */
+	init_hash_scheme(&scheme[MAC_L3_IPV6], MAC_L3_IPV6, h_NetEnv,
+			 pcd_fqids_base, distNumOfQueues, 4);
+	scheme[MAC_L3_IPV6].netEnvParams.numOfDistinctionUnits = 2;
+	scheme[MAC_L3_IPV6].netEnvParams.unitIds[0] = 0;
+	scheme[MAC_L3_IPV6].netEnvParams.unitIds[1] = 2;
+	scheme[MAC_L3_IPV6].keyExtractAndHashParams.extractArray[0] = ea_eth_sa;
+	scheme[MAC_L3_IPV6].keyExtractAndHashParams.extractArray[1] = ea_eth_da;
+	scheme[MAC_L3_IPV6].keyExtractAndHashParams.extractArray[2] =
+		ea_ipv6_sa;
+	scheme[MAC_L3_IPV6].keyExtractAndHashParams.extractArray[3] =
+		ea_ipv6_da;
+
+	/* Distribution: Layer2 and Layer3 info, MAC and ipv4 */
+	init_hash_scheme(&scheme[MAC_L3_IPV4], MAC_L3_IPV4, h_NetEnv,
+			 pcd_fqids_base, distNumOfQueues, 4);
+	scheme[MAC_L3_IPV4].netEnvParams.numOfDistinctionUnits = 2;
+	scheme[MAC_L3_IPV4].netEnvParams.unitIds[0] = 0;
+	scheme[MAC_L3_IPV4].netEnvParams.unitIds[1] = 1;
+	scheme[MAC_L3_IPV4].keyExtractAndHashParams.extractArray[0] = ea_eth_sa;
+	scheme[MAC_L3_IPV4].keyExtractAndHashParams.extractArray[1] = ea_eth_da;
+	scheme[MAC_L3_IPV4].keyExtractAndHashParams.extractArray[2] =
+		ea_ipv4_sa;
+	scheme[MAC_L3_IPV4].keyExtractAndHashParams.extractArray[3] =
+		ea_ipv4_da;
+
+	/* Distribution: Layer234 info, MAC ipv6 tcp */
+	init_hash_scheme(&scheme[MAC_IPV6_TCP], MAC_IPV6_TCP, h_NetEnv,
+			 pcd_fqids_base, distNumOfQueues, 7);
+	scheme[MAC_IPV6_TCP].netEnvParams.numOfDistinctionUnits = 3;
+	scheme[MAC_IPV6_TCP].netEnvParams.unitIds[0] = 0;
+	scheme[MAC_IPV6_TCP].netEnvParams.unitIds[1] = 2;
+	scheme[MAC_IPV6_TCP].netEnvParams.unitIds[2] = 3;
+	scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[0] =
+		ea_eth_sa;
+	scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[1] =
+		ea_eth_da;
+	scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[2] =
+		ea_ipv6_sa;
+	scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[3] =
+		ea_ipv6_da;
+	scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[4] =
+		ea_nexthdr;
+	scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[5] =
+		ea_tcp_sp;
+	scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[6] =
+		ea_tcp_dp;
+
+	/* Distribution: Layer234 info, MAC ipv6 udp */
+	init_hash_scheme(&scheme[MAC_IPV6_UDP], MAC_IPV6_UDP, h_NetEnv,
+			 pcd_fqids_base, distNumOfQueues, 7);
+	scheme[MAC_IPV6_UDP].netEnvParams.numOfDistinctionUnits = 3;
+	scheme[MAC_IPV6_UDP].netEnvParams.unitIds[0] = 0;
+	scheme[MAC_IPV6_UDP].netEnvParams.unitIds[1] = 2;
+	scheme[MAC_IPV6_UDP].netEnvParams.unitIds[2] = 4;
+	scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[0] =
+		ea_eth_sa;
+	scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[1] =
+		ea_eth_da;
+	scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[2] =
+		ea_ipv6_sa;
+	scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[3] =
+		ea_ipv6_da;
+	scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[4] =
+		ea_nexthdr;
+	scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[5] =
+		ea_udp_sp;
+	scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[6] =
+		ea_udp_dp;
+
+	/* Distribution: Layer234 info, MAC ipv4 tcp */
+	init_hash_scheme(&scheme[MAC_IPV4_TCP], MAC_IPV4_TCP, h_NetEnv,
+			 pcd_fqids_base, distNumOfQueues, 7);
+	scheme[MAC_IPV4_TCP].netEnvParams.numOfDistinctionUnits = 3;
+	scheme[MAC_IPV4_TCP].netEnvParams.unitIds[0] = 0;
+	scheme[MAC_IPV4_TCP].netEnvParams.unitIds[1] = 1;
+	scheme[MAC_IPV4_TCP].netEnvParams.unitIds[2] = 3;
+	scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[0] =
+		ea_eth_sa;
+	scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[1] =
+		ea_eth_da;
+	scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[2] =
+		ea_ipv4_sa;
+	scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[3] =
+		ea_ipv4_da;
+	scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[4] =
+		ea_nextp;
+	scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[5] =
+		ea_tcp_sp;
+	scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[6] =
+		ea_tcp_dp;
+
+	/* Distribution: Layer234 info, MAC ipv4 udp */
+	init_hash_scheme(&scheme[MAC_IPV4_UDP], MAC_IPV4_UDP, h_NetEnv,
+			 pcd_fqids_base, distNumOfQueues, 7);
+	scheme[MAC_IPV4_UDP].netEnvParams.numOfDistinctionUnits = 3;
+	scheme[MAC_IPV4_UDP].netEnvParams.unitIds[0] = 0;
+	scheme[MAC_IPV4_UDP].netEnvParams.unitIds[1] = 1;
+	scheme[MAC_IPV4_UDP].netEnvParams.unitIds[2] = 4;
+	scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[0] =
+		ea_eth_sa;
+	scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[1] =
+		ea_eth_da;
+	scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[2] =
+		ea_ipv4_sa;
+	scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[3] =
+		ea_ipv4_da;
+	scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[4] =
+		ea_nextp;
+	scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[5] =
+		ea_udp_sp;
+	scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[6] =
+		ea_udp_dp;
+
+	/* cache everything in the OH port private area; release_pcd_mem()
+	 * frees the buffers and clears these fields
+	 */
+	bond->params.ohp->h_FmPcd = h_FmPcd;
+	bond->params.ohp->h_FmPort = h_FmPort;
+	bond->params.ohp->h_NetEnv = h_NetEnv;
+	bond->params.ohp->prsParam = prsParam;
+	bond->params.ohp->kgParam = kgParam;
+	bond->params.ohp->pcdParam = pcdParam;
+	bond->params.ohp->scheme = scheme;
+	bond->params.ohp->netEnvParams = netEnvParams;
+	hw_lag_dbg("alloc_pcd_mem() ok.\n");
+
+	return BOND_OH_SUCCESS;
+
+err_free:
+	kfree(scheme);
+	kfree(kgParam);
+	kfree(prsParam);
+	kfree(pcdParam);
+	kfree(netEnvParams);
+	return err;
+}
+
+/* Free the PCD parameter buffers cached by alloc_pcd_mem() and reset the
+ * OH port private fields so a later alloc_pcd_mem() starts from scratch.
+ * kfree(NULL) is a no-op, so this is safe even if allocation never ran.
+ */
+int release_pcd_mem(struct bonding *bond)
+{
+	struct oh_port_priv *ohp = bond->params.ohp;
+
+	kfree(ohp->prsParam);
+	kfree(ohp->kgParam);
+	kfree(ohp->pcdParam);
+	kfree(ohp->scheme);
+	kfree(ohp->netEnvParams);
+
+	ohp->h_FmPcd = NULL;
+	ohp->h_FmPort = NULL;
+	ohp->h_NetEnv = NULL;
+	ohp->prsParam = NULL;
+	ohp->kgParam = NULL;
+	ohp->pcdParam = NULL;
+	ohp->scheme = NULL;
+	ohp->netEnvParams = NULL;
+	ohp->numberof_pre_schemes = 0;
+
+	return BOND_OH_SUCCESS;
+}
+
+/* Re-install the keygen schemes on the OH port after the wanted scheme
+ * count changed: delete the previously set schemes, refresh baseFqid /
+ * distNumOfQueues in the cached scheme templates, set the new schemes,
+ * and rebind the port PCD around a disable/enable cycle.
+ *
+ * Fixes vs. the original: 'err' was read uninitialized when a scheme
+ * handle was NULL in the delete loop, and deleted handles were never
+ * cleared, so the "set only if NULL" guard skipped re-creating schemes
+ * and left stale handles in kgParam->h_Schemes[].
+ */
+static int replace_pcd(struct fm_port *fm_port, uint8_t numOfSchemes,
+		       u32 pcd_fqids_base, uint8_t distNumOfQueues,
+		       struct bonding *bond)
+{
+	t_Handle h_FmPcd, h_FmPort, h_NetEnv;
+
+	t_FmPcdNetEnvParams *netEnvParams;
+	t_FmPcdKgSchemeParams *scheme;
+	t_FmPortPcdParams *pcdParam;
+	t_FmPortPcdPrsParams *prsParam;
+	t_FmPortPcdKgParams *kgParam;
+	int i, numberof_pre_schemes;
+	int err = E_OK;
+
+	numberof_pre_schemes = bond->params.ohp->numberof_pre_schemes;
+
+	/* nothing to do when the scheme count is unchanged */
+	if (numberof_pre_schemes == numOfSchemes) {
+		hw_lag_dbg("numberof_pre_schemes == numOfSchemes.\n");
+		return BOND_OH_SUCCESS;
+	}
+
+	h_FmPcd = bond->params.ohp->h_FmPcd;
+	h_FmPort = bond->params.ohp->h_FmPort;
+	h_NetEnv = bond->params.ohp->h_NetEnv;
+
+	netEnvParams = bond->params.ohp->netEnvParams;
+	scheme = bond->params.ohp->scheme;
+	pcdParam = bond->params.ohp->pcdParam;
+	prsParam = bond->params.ohp->prsParam;
+	kgParam = bond->params.ohp->kgParam;
+	kgParam->numOfSchemes = numOfSchemes;
+	hw_lag_dbg("h_FmPcd:%p, h_FmPort:%p, h_NetEnv:%p\n",
+		   h_FmPcd, h_FmPort, h_NetEnv);
+	hw_lag_dbg("netEnvParams:%p, scheme:%p, pcdParam:%p\n",
+		   netEnvParams, scheme, pcdParam);
+	hw_lag_dbg("prsParam:%p, kgParam:%p, numberof_pre_schemes:%d\n",
+		   prsParam, kgParam, numberof_pre_schemes);
+
+	/* delete the old schemes and forget their handles so the loop
+	 * below re-creates them
+	 */
+	for (i = 0; i < numberof_pre_schemes; i++) {
+		if (!kgParam->h_Schemes[i])
+			continue;
+		err = FM_PCD_KgSchemeDelete(kgParam->h_Schemes[i]);
+		if (err != E_OK) {
+			pr_err("KgSchemeDelete(h_Schemes[%d]) = %p Err.\n",
+			       i, kgParam->h_Schemes[i]);
+			return BOND_OH_ERROR;
+		}
+		hw_lag_dbg("KgSchemeDelete(h_Schemes[%d]) = %p OK.\n",
+			   i, kgParam->h_Schemes[i]);
+		kgParam->h_Schemes[i] = NULL;
+	}
+
+	for (i = 0; i < numOfSchemes; i++) {
+		scheme[i].baseFqid = pcd_fqids_base;
+		scheme[i].keyExtractAndHashParams.hashDistributionNumOfFqids =
+			distNumOfQueues;
+		hw_lag_dbg("scheme[%d]->pcd_fqids_base:%d\n", i,
+			   pcd_fqids_base);
+		hw_lag_dbg("scheme[%d]->distNumOfQueues:%d\n", i,
+			   distNumOfQueues);
+		if (!kgParam->h_Schemes[i]) {
+			kgParam->h_Schemes[i] =
+				FM_PCD_KgSchemeSet(h_FmPcd, &scheme[i]);
+			if (!kgParam->h_Schemes[i]) {
+				pr_err("FM_PCD_KgSchemeSet(scheme[%d]) err.\n",
+				       i);
+				return BOND_OH_ERROR;
+			}
+		}
+		hw_lag_dbg("kgParam->h_Schemes[%d]:%p.\n",
+			   i, kgParam->h_Schemes[i]);
+	}
+	hw_lag_dbg("FM_PCD_KgSchemeSet() OK.\n");
+
+	if (bond->params.ohp->oh_en == 1) {
+		/* mark the OH port unusable while its PCD binding is
+		 * replaced, then restore the flag
+		 */
+		bond->params.ohp->oh_en = 0;
+		err = FM_PORT_Disable(h_FmPort);
+		if (err == E_OK) {
+			hw_lag_dbg("FM_PORT_Disable() OK with oh_en\n");
+			err = FM_PORT_SetPCD(h_FmPort, pcdParam);
+			if (err == E_OK) {
+				hw_lag_dbg("FM_PORT_SetPCD() OK with oh_en\n");
+				err = FM_PORT_Enable(h_FmPort);
+				if (err == E_OK)
+					hw_lag_dbg("FM_PORT_Enable() OK.\n");
+				else
+					pr_err("FM_PORT_Enable() err.\n");
+			} else {
+				pr_err("FM_PORT_SetPCD() err in oh_en\n");
+				FM_PORT_Enable(h_FmPort);
+			}
+		} else {
+			pr_err("FM_PORT_Disable() errors with oh_en\n");
+		}
+		bond->params.ohp->oh_en = 1;
+	} else {
+		FM_PORT_Disable(h_FmPort);
+		err = FM_PORT_SetPCD(h_FmPort, pcdParam);
+		FM_PORT_Enable(h_FmPort);
+	}
+	if (GET_ERROR_TYPE(ERROR_CODE(err)) != E_OK)
+		return BOND_OH_ERROR;
+
+	bond->params.ohp->numberof_pre_schemes = numOfSchemes;
+	return BOND_OH_SUCCESS;
+}
+
+/* get all offline port information from bond, including
+ * dev,oh handler, PCD FQid base and PCD FQ count, then
+ * get the new xmit policy, copy schemes needed from the
+ * cached_scheme pointer, config PCD params, init PCD dev,
+ * set PCD Net Env Characteristics, then set Keygen Scheme
+ * params to the PCD dev, disable offline port, set PCD
+ * params to the offline port dev, at last enable the offline
+ * port.
+ * this subroutine return true when it can apply PCD to
+ * the offline port, otherwise return false.
+ */
+/* get all offline port information from bond, including
+ * dev, oh handler, PCD FQid base and PCD FQ count, then
+ * get the new xmit policy, pick the number of keygen
+ * schemes for it, allocate the PCD parameter set and
+ * install it on the offline port.
+ * Returns true when the PCD was applied, false otherwise.
+ *
+ * Fixes vs. the original: numOfSchemes was left uninitialized for the
+ * LAYER23/LAYER34 cases (their breaks skipped the default), the slave
+ * count debug message printed the constant instead of bond->slave_cnt,
+ * and the alloc_pcd_mem() failure path reported "replace_pcd()".
+ */
+bool apply_pcd(struct bonding *bond, int new_xmit_policy)
+{
+	int true_policy;
+	struct fm_port *fm_port;
+	uint8_t numOfSchemes;
+	u32 pcd_fqids_base;
+	uint8_t distNumOfQueues;
+	int err;
+
+	if (bond->params.mode != BOND_MODE_8023AD) {
+		hw_lag_dbg("not 802.3ad mode, can't apply PCD\n");
+		return false;
+	}
+	if (!bond->params.ohp) {
+		pr_err("have not bind an OH port,\n");
+		pr_err("will use software tx traffic distribution.\n");
+		return false;
+	}
+	if (bond->slave_cnt != SLAVES_PER_BOND) {
+		hw_lag_dbg("can't apply PCD, slave_cnt:%d\n",
+			   bond->slave_cnt);
+		return false;
+	}
+	if (new_xmit_policy == NO_POLICY)
+		true_policy = bond->params.xmit_policy;
+	else
+		true_policy = new_xmit_policy;
+	fm_port = bond->params.ohp->oh_config->oh_port;
+
+	/* chang the XML PCD from user space to kernel PCD,
+	 * please refer to the output of fmc host command mode
+	 */
+	switch (true_policy) {
+	case BOND_XMIT_POLICY_LAYER23:
+		/* will be supported in a future version: numOfSchemes = 3;
+		 * fall back to the single layer-2 scheme for now
+		 */
+		numOfSchemes = 1;
+		break;
+	case BOND_XMIT_POLICY_LAYER34:
+		/* will be supported in a future version: numOfSchemes = 7; */
+		numOfSchemes = 1;
+		break;
+	case BOND_XMIT_POLICY_LAYER2:
+	default:
+		numOfSchemes = 1;
+		break;
+	}
+	pcd_fqids_base = bond->params.ohp->pcd_fqids_base;
+	distNumOfQueues = SLAVES_PER_BOND;
+	hw_lag_dbg("fm_port:%p, numOfSchemes:%d, pcd_fqids_base:%d",
+		   fm_port, numOfSchemes, pcd_fqids_base);
+	hw_lag_dbg("distNumOfQueues:%d, bond:%p\n", distNumOfQueues, bond);
+
+	err = alloc_pcd_mem(fm_port, numOfSchemes, pcd_fqids_base,
+			    distNumOfQueues, bond);
+	if (err != BOND_OH_SUCCESS) {
+		pr_err("error on alloc_pcd_mem()\n");
+		return false;
+	}
+	err = replace_pcd(fm_port, numOfSchemes, pcd_fqids_base,
+			  distNumOfQueues, bond);
+	if (err != BOND_OH_SUCCESS) {
+		pr_err("error on replace_pcd()\n");
+		return false;
+	}
+	hw_lag_dbg("applied PCD.\n");
+	return true;
+}
diff --git a/drivers/net/ethernet/freescale/asf_gianfar.c b/drivers/net/ethernet/freescale/asf_gianfar.c
index af7f205..5280ed9 100644
--- a/drivers/net/ethernet/freescale/asf_gianfar.c
+++ b/drivers/net/ethernet/freescale/asf_gianfar.c
@@ -55,7 +55,8 @@ static inline void gfar_asf_reclaim_skb(struct sk_buff *skb)
static inline void gfar_recycle_skb(struct sk_buff *skb)
{
struct sk_buff_head *h = &__get_cpu_var(skb_recycle_list);
- int skb_size = SKB_DATA_ALIGN(GFAR_RXB_REC_SZ + NET_SKB_PAD);
+ int skb_size = SKB_DATA_ALIGN(GFAR_RXB_REC_SZ + NET_SKB_PAD
+ + EXTRA_HEADROOM);
if (skb_queue_len(h) < DEFAULT_RX_RING_SIZE &&
!skb_cloned(skb) && !skb_is_nonlinear(skb) &&
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c
index 7c09d8b..0da272b 100755
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c
@@ -790,7 +790,7 @@ static t_Error ConfigureFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev)
if (unlikely(_errno < 0))
RETURN_ERROR(MAJOR, E_INVALID_STATE, ("can_request_irq() = %d", _errno));
#endif
- _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, fm_irq, 0, "fman", p_LnxWrpFmDev);
+ _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, fm_irq, IRQF_NO_SUSPEND, "fman", p_LnxWrpFmDev);
if (unlikely(_errno < 0))
RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_irq(%d) = %d", p_LnxWrpFmDev->irq, _errno));
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 574b87e..933ba9a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -85,6 +85,9 @@
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>
+#ifdef CONFIG_PM
+#include <linux/inetdevice.h>
+#endif
#include <asm/io.h>
#include <asm/reg.h>
@@ -146,6 +149,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi);
+static int __gfar_is_rx_idle(struct gfar_private *priv);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -356,7 +360,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
u32 rctrl = 0;
if (priv->rx_filer_enable) {
- rctrl |= RCTRL_FILREN;
+ rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
/* Program the RIR0 reg with the required distribution */
if (priv->poll_mode == GFAR_SQ_POLLING)
gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
@@ -381,10 +385,10 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
/* Enable HW time stamping if requested from user space */
if (priv->hwts_rx_en)
- rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+ rctrl |= RCTRL_TS_ENABLE;
if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+ rctrl |= RCTRL_VLEX;
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
@@ -481,6 +485,15 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
return &dev->stats;
}
+int gfar_set_mac_addr(struct net_device *dev, void *p)
+{
+ eth_mac_addr(dev, p);
+
+ gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+ return 0;
+}
+
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
@@ -491,7 +504,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
@@ -886,6 +899,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (of_get_property(np, "fsl,magic-packet", NULL))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
+ if (of_get_property(np, "fsl,wake-on-filer", NULL))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
+
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Find the TBI PHY. If it's not there, we don't support SGMII */
@@ -1287,7 +1303,6 @@ static int gfar_probe(struct platform_device *ofdev)
priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
- spin_lock_init(&priv->bflock);
INIT_WORK(&priv->reset_task, gfar_reset_task);
platform_set_drvdata(ofdev, priv);
@@ -1331,6 +1346,8 @@ static int gfar_probe(struct platform_device *ofdev)
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
gfar_init_addr_hash_table(priv);
/* Insert receive time stamps into padding alignment bytes */
@@ -1379,9 +1396,15 @@ static int gfar_probe(struct platform_device *ofdev)
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(dev);
- device_init_wakeup(&dev->dev,
- priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
+ priv->wol_supported |= GFAR_WOL_MAGIC;
+
+ if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
+ priv->rx_filer_enable)
+ priv->wol_supported |= GFAR_WOL_FILER_UCAST |
+ GFAR_WOL_FILER_ARP;
+
+ device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1451,56 +1474,247 @@ static int gfar_remove(struct platform_device *ofdev)
#ifdef CONFIG_PM
-static int gfar_suspend(struct device *dev)
+static void __gfar_filer_disable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 temp;
+
+ temp = gfar_read(&regs->rctrl);
+ temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
+ gfar_write(&regs->rctrl, temp);
+}
+
+static void __gfar_filer_enable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 temp;
+
+ temp = gfar_read(&regs->rctrl);
+ temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
+ gfar_write(&regs->rctrl, temp);
+}
+
+/* Get the first IP address on this chain for this interface
+ * so that we can configure wakeup with WOL for ARP.
+ */
+static int gfar_get_ip(struct gfar_private *priv, __be32 *ip_addr)
+{
+ struct in_device *in_dev;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(priv->ndev);
+ if (in_dev != NULL) {
+ for_primary_ifa(in_dev) {
+ *ip_addr = ifa->ifa_address;
+ err = 0;
+ break;
+ } endfor_ifa(in_dev);
+ }
+ rcu_read_unlock();
+ return err;
+}
+
+static int gfar_filer_config_wol(struct gfar_private *priv)
{
- struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
+ u32 rqfcr, rqfpr;
+ unsigned int i;
+ u8 rqfcr_queue;
+ int err = 0;
+
+ __gfar_filer_disable(priv);
+
+ /* init filer table */
+ rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
+ rqfpr = 0x0;
+ for (i = 0; i <= MAX_FILER_IDX; i++)
+ gfar_write_filer(priv, i, rqfcr, rqfpr);
+
+ i = 0;
+ /* select a rx queue in group 0 */
+ rqfcr_queue = (u8)find_first_bit(&priv->gfargrp[0].rx_bit_map,
+ priv->num_rx_queues);
+
+ if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
+ /* Unicast packet, accept it */
+ u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
+ (ndev->dev_addr[1] << 8) |
+ ndev->dev_addr[2];
+
+ rqfcr = (rqfcr_queue << 10) | RQFCR_AND |
+ RQFCR_CMP_EXACT | RQFCR_PID_DAH;
+ rqfpr = dest_mac_addr;
+ gfar_write_filer(priv, i++, rqfcr, rqfpr);
+
+ dest_mac_addr = (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5];
+ rqfcr = (rqfcr_queue << 10) | RQFCR_GPI |
+ RQFCR_CMP_EXACT | RQFCR_PID_DAL;
+ rqfpr = dest_mac_addr;
+ gfar_write_filer(priv, i++, rqfcr, rqfpr);
+ }
+
+ if (priv->wol_opts & GFAR_WOL_FILER_ARP) {
+ /* ARP request packet, accept it */
+ __be32 ip_addr;
+
+ err = gfar_get_ip(priv, &ip_addr);
+ if (err) {
+ netif_err(priv, wol, ndev, "Failed to get ip addr\n");
+ goto out;
+ }
+
+ rqfcr = (rqfcr_queue << 10) | RQFCR_AND |
+ RQFCR_CMP_EXACT | RQFCR_PID_MASK;
+ rqfpr = RQFPR_ARQ;
+ gfar_write_filer(priv, i++, rqfcr, rqfpr);
+
+ rqfcr = (rqfcr_queue << 10) | RQFCR_AND |
+ RQFCR_CMP_EXACT | RQFCR_PID_PARSE;
+ rqfpr = RQFPR_ARQ;
+ gfar_write_filer(priv, i++, rqfcr, rqfpr);
+
+ /* match DEST_IP address in ARP req packet */
+ rqfcr = (rqfcr_queue << 10) | RQFCR_AND |
+ RQFCR_CMP_EXACT | RQFCR_PID_MASK;
+ rqfpr = FPR_FILER_MASK;
+ gfar_write_filer(priv, i++, rqfcr, rqfpr);
+
+ rqfcr = (rqfcr_queue << 10) | RQFCR_GPI |
+ RQFCR_CMP_EXACT | RQFCR_PID_DIA;
+ rqfpr = ip_addr;
+ gfar_write_filer(priv, i++, rqfcr, rqfpr);
+ }
+out:
+ __gfar_filer_enable(priv);
+ return err;
+}
+
+static void gfar_filer_restore_table(struct gfar_private *priv)
+{
+ u32 rqfcr, rqfpr;
+ unsigned int i;
+
+ __gfar_filer_disable(priv);
+
+ for (i = 0; i <= MAX_FILER_IDX; i++) {
+ rqfcr = priv->ftp_rqfcr[i];
+ rqfpr = priv->ftp_rqfpr[i];
+ gfar_write_filer(priv, i, rqfcr, rqfpr);
+ }
+
+ __gfar_filer_enable(priv);
+}
+
+void gfar_start_wol_filer(struct gfar_private *priv)
+{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
u32 tempval;
+ int i = 0;
- int magic_packet = priv->wol_en &&
- (priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ /* Enable Rx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
- netif_device_detach(ndev);
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval |= DMACTRL_INIT_SETTINGS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ /* Make sure we aren't stopped */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval &= ~DMACTRL_GRS;
+ gfar_write(&regs->dmactrl, tempval);
- if (netif_running(ndev)) {
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ /* Clear RHLT, so that the DMA starts polling now */
+ gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+ /* enable the filer general purpose interrupts */
+ gfar_write(&regs->imask, IMASK_FGPI);
+ }
- local_irq_save_nort(flags);
- lock_tx_qs(priv);
+ /* Enable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= MACCFG1_RX_EN;
+ gfar_write(&regs->maccfg1, tempval);
+}
- gfar_halt_nodisable(priv);
+void gfar_halt_wol_filer(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
- /* Disable Tx, and Rx if wake-on-LAN is disabled. */
- tempval = gfar_read(&regs->maccfg1);
+ /* Disable the Rx hw queues */
+ gfar_write(&regs->rqueue, 0);
- tempval &= ~MACCFG1_TX_EN;
+ gfar_ints_disable(priv);
- if (!magic_packet)
- tempval &= ~MACCFG1_RX_EN;
+ /* Stop the DMA, and wait for it to stop */
+ tempval = gfar_read(&regs->dmactrl);
+ if (!(tempval & DMACTRL_GRS)) {
+ int ret;
- gfar_write(&regs->maccfg1, tempval);
+ tempval |= DMACTRL_GRS;
+ gfar_write(&regs->dmactrl, tempval);
- unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ do {
+ ret = spin_event_timeout((gfar_read(&regs->ievent) &
+ IEVENT_GRSC), 1000000, 0);
+ if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
+ ret = __gfar_is_rx_idle(priv);
+ } while (!ret);
+ }
- disable_napi(priv);
+ /* Disable Rx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~MACCFG1_RX_EN;
+ gfar_write(&regs->maccfg1, tempval);
+}
- if (magic_packet) {
- /* Enable interrupt on Magic Packet */
- gfar_write(&regs->imask, IMASK_MAG);
+static int gfar_suspend(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ u16 wol = priv->wol_opts;
+ int err = 0;
- /* Enable Magic Packet mode */
- tempval = gfar_read(&regs->maccfg2);
- tempval |= MACCFG2_MPEN;
- gfar_write(&regs->maccfg2, tempval);
- } else {
- phy_stop(priv->phydev);
- }
+ if (!netif_running(ndev))
+ return 0;
+
+ disable_napi(priv);
+ netif_tx_lock(ndev);
+ netif_device_detach(ndev);
+ netif_tx_unlock(ndev);
+
+ gfar_halt(priv);
+
+ if (wol & GFAR_WOL_MAGIC) {
+ /* Enable interrupt on Magic Packet */
+ gfar_write(&regs->imask, IMASK_MAG);
+
+ /* Enable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval |= MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+
+ /* re-enable the Rx block */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= MACCFG1_RX_EN;
+ gfar_write(&regs->maccfg1, tempval);
+
+ } else if (wol & (GFAR_WOL_FILER_UCAST | GFAR_WOL_FILER_ARP)) {
+ err = gfar_filer_config_wol(priv);
+ gfar_start_wol_filer(priv);
+
+ } else {
+ phy_stop(priv->phydev);
}
- return 0;
+ return err;
}
static int gfar_resume(struct device *dev)
@@ -1508,37 +1722,29 @@ static int gfar_resume(struct device *dev)
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
u32 tempval;
- int magic_packet = priv->wol_en &&
- (priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ u16 wol = priv->wol_opts;
- if (!netif_running(ndev)) {
- netif_device_attach(ndev);
+ if (!netif_running(ndev))
return 0;
- }
- if (!magic_packet && priv->phydev)
- phy_start(priv->phydev);
+ if (wol & GFAR_WOL_MAGIC) {
+ /* Disable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
- /* Disable Magic Packet mode, in case something
- * else woke us up.
- */
- local_irq_save_nort(flags);
- lock_tx_qs(priv);
+ } else if (wol & (GFAR_WOL_FILER_UCAST | GFAR_WOL_FILER_ARP)) {
+ gfar_halt_wol_filer(priv);
+ gfar_filer_restore_table(priv);
- tempval = gfar_read(&regs->maccfg2);
- tempval &= ~MACCFG2_MPEN;
- gfar_write(&regs->maccfg2, tempval);
+ } else {
+ phy_start(priv->phydev);
+ }
gfar_start(priv);
- unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
-
netif_device_attach(ndev);
-
enable_napi(priv);
return 0;
@@ -1954,7 +2160,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
- err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
+ IRQF_NO_SUSPEND,
gfar_irq(grp, ER)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -1969,7 +2176,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
gfar_irq(grp, TX)->irq);
goto tx_irq_fail;
}
- err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
+ err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive,
+ IRQF_NO_SUSPEND,
gfar_irq(grp, RX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -1977,7 +2185,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
goto rx_irq_fail;
}
} else {
- err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
+ IRQF_NO_SUSPEND,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2078,8 +2287,6 @@ static int gfar_enet_open(struct net_device *dev)
if (err)
return err;
- device_set_wakeup_enable(&dev->dev, priv->wol_en);
-
return err;
}
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
@@ -2621,7 +2828,14 @@ irqreturn_t gfar_receive(int irq, void *grp_id)
{
struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
unsigned long flags;
- u32 imask;
+ u32 imask, ievent;
+
+ ievent = gfar_read(&grp->regs->ievent);
+
+ if (unlikely(ievent & IEVENT_FGPI)) {
+ gfar_write(&grp->regs->ievent, IEVENT_FGPI);
+ return IRQ_HANDLED;
+ }
if (likely(napi_schedule_prep(&grp->napi_rx))) {
spin_lock_irqsave(&grp->grplock, flags);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 44a32ac..1d1b9b1 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -406,6 +406,7 @@ extern const char gfar_driver_version[];
#define IEVENT_MAG 0x00000800
#define IEVENT_GRSC 0x00000100
#define IEVENT_RXF0 0x00000080
+#define IEVENT_FGPI 0x00000010
#define IEVENT_FIR 0x00000008
#define IEVENT_FIQ 0x00000004
#define IEVENT_DPE 0x00000002
@@ -438,6 +439,7 @@ extern const char gfar_driver_version[];
#define IMASK_MAG 0x00000800
#define IMASK_GRSC 0x00000100
#define IMASK_RXFEN0 0x00000080
+#define IMASK_FGPI 0x00000010
#define IMASK_FIR 0x00000008
#define IMASK_FIQ 0x00000004
#define IMASK_DPE 0x00000002
@@ -606,6 +608,10 @@ extern const char gfar_driver_version[];
#define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */
+#define GFAR_WOL_MAGIC 0x00000001
+#define GFAR_WOL_FILER_UCAST 0x00000002
+#define GFAR_WOL_FILER_ARP 0x00000004
+
struct txbd8
{
union {
@@ -1056,6 +1062,7 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
+#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
#if defined CONFIG_FSL_GIANFAR_1588
#define FSL_GIANFAR_DEV_HAS_TS_TO_BUFFER 0x00001000
@@ -1302,9 +1309,6 @@ struct gfar_private {
int oldduplex;
int oldlink;
- /* Bitfield update lock */
- spinlock_t bflock;
-
uint32_t msg_enable;
struct work_struct reset_task;
@@ -1314,8 +1318,6 @@ struct gfar_private {
extended_hash:1,
bd_stash_en:1,
rx_filer_enable:1,
- /* Wake-on-LAN enabled */
- wol_en:1,
/* Enable priorty based Tx scheduling in Hw */
prio_sched_en:1,
/* Flow control flags */
@@ -1344,6 +1346,10 @@ struct gfar_private {
u32 __iomem *hash_regs[16];
int hash_width;
+ /* wake-on-lan settings */
+ u16 wol_opts;
+ u16 wol_supported;
+
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index f2dfa10..e2bf5d1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -42,6 +42,9 @@
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>
+#ifdef CONFIG_PM
+#include <sysdev/fsl_soc.h>
+#endif
#include "gianfar.h"
@@ -635,31 +638,57 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct gfar_private *priv = netdev_priv(dev);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
- wol->supported = WAKE_MAGIC;
- wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
- } else {
- wol->supported = wol->wolopts = 0;
- }
+ wol->supported = wol->wolopts = 0;
+
+ if (priv->wol_supported & GFAR_WOL_MAGIC)
+ wol->supported |= WAKE_MAGIC;
+
+ if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
+ wol->supported |= WAKE_UCAST;
+
+ if (priv->wol_supported & GFAR_WOL_FILER_ARP)
+ wol->supported |= WAKE_ARP;
+
+ if (priv->wol_opts & GFAR_WOL_MAGIC)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
+ wol->wolopts |= WAKE_UCAST;
+
+ if (priv->wol_opts & GFAR_WOL_FILER_ARP)
+ wol->wolopts |= WAKE_ARP;
}
static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
+ int err;
+ u16 wol_opts = 0;
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
- wol->wolopts != 0)
+ if (!priv->wol_supported && wol->wolopts)
return -EINVAL;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST | WAKE_ARP))
return -EINVAL;
- device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+ if (wol->wolopts & WAKE_MAGIC) {
+ wol_opts |= GFAR_WOL_MAGIC;
+ } else {
+ if (wol->wolopts & WAKE_UCAST)
+ wol_opts |= GFAR_WOL_FILER_UCAST;
+ if (wol->wolopts & WAKE_ARP)
+ wol_opts |= GFAR_WOL_FILER_ARP;
+ }
- spin_lock_irqsave(&priv->bflock, flags);
- priv->wol_en = !!device_may_wakeup(&dev->dev);
- spin_unlock_irqrestore(&priv->bflock, flags);
+ priv->wol_opts = wol_opts & priv->wol_supported;
+
+ device_set_wakeup_enable(priv->dev, priv->wol_opts);
+
+ err = mpc85xx_pmc_set_wake(priv->dev, priv->wol_opts);
+ if (err) {
+ device_set_wakeup_enable(priv->dev, false);
+ return err;
+ }
return 0;
}
diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.c b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
index e4610d4..d1281ae 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
@@ -406,6 +406,7 @@ int dpa_classif_table_modify_miss_action(int td,
const struct dpa_cls_tbl_action *miss_action)
{
int errno;
+ int old_hmd, hmd;
t_Error err;
t_FmPcdCcNextEngineParams miss_engine_params;
struct dpa_cls_table *ptable;
@@ -428,11 +429,35 @@ int dpa_classif_table_modify_miss_action(int td,
return -ENOSYS;
}
+ /*
+ * Check existing header manipulation descriptors and release if
+ * found.
+ */
+ switch (ptable->miss_action.type) {
+ case DPA_CLS_TBL_ACTION_ENQ:
+ old_hmd = ptable->miss_action.enq_params.hmd;
+ break;
+ case DPA_CLS_TBL_ACTION_NEXT_TABLE:
+ old_hmd = ptable->miss_action.next_table_params.hmd;
+ break;
+#if (DPAA_VERSION >= 11)
+ case DPA_CLS_TBL_ACTION_MCAST:
+ old_hmd = ptable->miss_action.mcast_params.hmd;
+ break;
+#endif /* (DPAA_VERSION >= 11) */
+ default:
+ old_hmd = DPA_OFFLD_DESC_NONE;
+ break;
+ }
+ dpa_classif_hm_release_chain(old_hmd);
+
/* Fill the [miss_engine_params] structure w/ data */
errno = action_to_next_engine_params(miss_action, &miss_engine_params,
- NULL, ptable->params.distribution,
+ &hmd, ptable->params.distribution,
ptable->params.classification);
if (errno < 0) {
+ /* Lock back the old HM chain. */
+ dpa_classif_hm_lock_chain(old_hmd);
RELEASE_OBJECT(ptable);
log_err("Failed verification of miss action params for table "
"td=%d.\n", td);
@@ -443,6 +468,8 @@ int dpa_classif_table_modify_miss_action(int td,
err = FM_PCD_HashTableModifyMissNextEngine(ptable->params.
cc_node, &miss_engine_params);
if (err != E_OK) {
+ /* Lock back the old HM chain. */
+ dpa_classif_hm_lock_chain(old_hmd);
RELEASE_OBJECT(ptable);
log_err("FMan driver call failed - "
"FM_PCD_HashTableModifyMissNextEngine "
@@ -454,6 +481,8 @@ int dpa_classif_table_modify_miss_action(int td,
err = FM_PCD_MatchTableModifyMissNextEngine((t_Handle)ptable->
int_cc_node[0].cc_node, &miss_engine_params);
if (err != E_OK) {
+ /* Lock back the old HM chain. */
+ dpa_classif_hm_lock_chain(old_hmd);
RELEASE_OBJECT(ptable);
log_err("FMan driver call failed - "
"FM_PCD_MatchTableModifyMissNextEngine (td=%d, "
@@ -463,6 +492,7 @@ int dpa_classif_table_modify_miss_action(int td,
}
}
+ /* Store Miss Action (including its header manip chain). */
memcpy(&ptable->miss_action, miss_action, sizeof(*miss_action));
RELEASE_OBJECT(ptable);
diff --git a/drivers/staging/fsl_dpa_offload/dpa_ipsec_ioctl.h b/drivers/staging/fsl_dpa_offload/dpa_ipsec_ioctl.h
index 9d1da46..d76b0ff 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_ipsec_ioctl.h
+++ b/drivers/staging/fsl_dpa_offload/dpa_ipsec_ioctl.h
@@ -95,6 +95,8 @@ struct ioc_compat_sa_out_params {
compat_uptr_t outer_ip_header;
compat_uptr_t outer_udp_header;
uint16_t post_sec_flow_id;
+ uint8_t dscp_start;
+ uint8_t dscp_end;
};
struct ioc_compat_sa_in_params {
@@ -158,6 +160,7 @@ struct ioc_compat_policy_params {
uint8_t dest_prefix_len;
uint8_t protocol;
bool masked_proto;
+ bool use_dscp;
union {
struct dpa_ipsec_l4_params l4;
struct dpa_ipsec_icmp_params icmp;
diff --git a/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa-shared-interfaces.dts b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa-shared-interfaces.dts
new file mode 100644
index 0000000..b07fbd9
--- /dev/null
+++ b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa-shared-interfaces.dts
@@ -0,0 +1,182 @@
+/*
+ * T2080PCIe-RDB USDPAA Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "t2080rdb.dts"
+
+/ {
+ /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding,
+ * because apps seed these pools with buffers allocated at
+ * run-time.
+ * HOWEVER, the kernel driver requires the buffer-size so
+ * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets
+ * things if the base-address is zero (hence the 0xdeadbeef
+ * values).
+ */
+ bp7: buffer-pool@7 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <7>;
+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
+ };
+ bp8: buffer-pool@8 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <8>;
+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp9: buffer-pool@9 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <9>;
+ fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ bp10: buffer-pool@10 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <10>;
+ fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>;
+ };
+ bp11: buffer-pool@11 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <11>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp12: buffer-pool@12 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <12>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp16: buffer-pool@16 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <16>;
+ fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp17: buffer-pool@17 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <17>;
+ fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ fsl,dpaa {
+ ethernet@0 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x90 1 0x91 1>;
+ fsl,qman-frame-queues-tx = <0x98 1 0x99 1>;
+ };
+ /* ethernet@1 declared as shared MAC. USDPAA will seed buffers to
+ * this buffer pool. The ethernet driver will initialize the RX default,
+ * RX error, TX error, TX confirm and 8 TX Frame queues. On receiving frame
+ * at this interface, the ethernet driver will do kmap_atomic/kunmap_atomic
+ * for that frame. */
+ ethernet@1 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-shared", "fsl,dpa-ethernet-shared";
+ fsl,bman-buffer-pools = <&bp17>;
+ fsl,qman-frame-queues-rx = <0x92 1 0x93 1 0x2000 3>;
+ fsl,qman-frame-queues-tx = <0 1 0 1 0x3000 8>;
+ };
+ ethernet@2 {/* 1G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp16>;
+ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
+ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
+ };
+ ethernet@3 {/* 1G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp16>;
+ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
+ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
+ };
+ ethernet@8 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
+ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
+ };
+ ethernet@9 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>;
+ fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>;
+ };
+ /* ethernet@10 declared as MAC-less interface with no "fsl,fman-mac" property.
+ * USDPAA will seed buffers to this buffer pool and initialize 8 TX Frame
+ * queues. The ethernet driver will initialize 8 RX default Frame queues.
+ * On receiving frame at this interface, the ethernet driver will do
+ * kmap_atomic/kunmap_atomic for that frame. */
+ ethernet@10 {
+ compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless";
+ fsl,bman-buffer-pools = <&bp16>;
+ fsl,qman-frame-queues-rx = <4000 8>;
+ fsl,qman-frame-queues-tx = <4008 8>;
+ local-mac-address = [00 11 22 33 44 55];
+ };
+ ethernet@11 {
+ compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless";
+ fsl,bman-buffer-pools = <&bp16>;
+ fsl,qman-frame-queues-rx = <5000 8>;
+ fsl,qman-frame-queues-tx = <5008 8>;
+ local-mac-address = [00 11 22 33 44 66];
+ };
+ ethernet@12 {
+ compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless";
+ fsl,bman-buffer-pools = <&bp16>;
+ fsl,qman-frame-queues-rx = <6000 8>;
+ fsl,qman-frame-queues-tx = <6008 8>;
+ local-mac-address = [00 11 22 33 44 77];
+ };
+ dpa-fman0-oh@2 {
+ compatible = "fsl,dpa-oh";
+ /* Define frame queues for the OH port */
+ /* <OH Rx error, OH Rx default> */
+ fsl,qman-frame-queues-oh = <0x6e 1 0x6f 1>;
+ fsl,bman-buffer-pools = <&bp9>;
+ fsl,fman-oh-port = <&fman0_oh2>;
+ };
+ dpa_fman0_oh3: dpa-fman0-oh@3 {
+ compatible = "fsl,dpa-oh";
+ fsl,qman-frame-queues-oh = <0x68 1 0x69 1>;
+ fsl,bman-buffer-pools = <&bp9>;
+ fsl,fman-oh-port = <&fman0_oh3>;
+ };
+ dpa_fman0_oh4: dpa-fman0-oh@4 {
+ compatible = "fsl,dpa-oh";
+ fsl,qman-frame-queues-oh = <0x70 1 0x71 1>;
+ fsl,bman-buffer-pools = <&bp9>;
+ fsl,fman-oh-port = <&fman0_oh4>;
+ };
+ };
+};
diff --git a/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts
new file mode 100644
index 0000000..269aaea
--- /dev/null
+++ b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts
@@ -0,0 +1,164 @@
+/*
+ * T2080PCIe-RDB USDPAA Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "t2080rdb.dts"
+
+/ {
+ /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding,
+ * because apps seed these pools with buffers allocated at
+ * run-time.
+ * HOWEVER, the kernel driver requires the buffer-size so
+ * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets
+ * things if the base-address is zero (hence the 0xdeadbeef
+ * values).
+ */
+ bp7: buffer-pool@7 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <7>;
+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
+ };
+ bp8: buffer-pool@8 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <8>;
+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp9: buffer-pool@9 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <9>;
+ fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ bp10: buffer-pool@10 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <10>;
+ fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>;
+ };
+ bp11: buffer-pool@11 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <11>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp12: buffer-pool@12 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <12>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+ bp16: buffer-pool@16 {
+ compatible = "fsl,t2080-bpool", "fsl,bpool";
+ fsl,bpid = <16>;
+ fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
+ };
+
+ fsl,dpaa {
+ ethernet@0 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x90 1 0x91 1>;
+ fsl,qman-frame-queues-tx = <0x98 1 0x99 1>;
+ };
+ ethernet@1 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x92 1 0x93 1>;
+ fsl,qman-frame-queues-tx = <0x9a 1 0x9b 1>;
+ };
+ ethernet@2 {/* 1G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
+ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
+ };
+ ethernet@3 {/* 1G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
+ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
+ };
+ ethernet@8 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>;
+ fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>;
+ };
+ ethernet@9 {/* 10G */
+ compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init";
+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
+ fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>;
+ fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>;
+ };
+ /* ethernet@10 declared as MAC-less interface with no "fsl,fman-mac" property.
+ * USDPAA will seed buffers to this buffer pool and initialize 8 TX Frame
+ * queues. The ethernet driver will initialize 8 RX default Frame queues.
+ * On receiving frame at this interface, the ethernet driver will do
+ * kmap_atomic/kunmap_atomic for that frame. */
+ ethernet@10 {
+ compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless";
+ fsl,bman-buffer-pools = <&bp16>;
+ fsl,qman-frame-queues-rx = <4000 8>;
+ fsl,qman-frame-queues-tx = <4008 8>;
+ local-mac-address = [00 11 22 33 44 55];
+ };
+ ethernet@11 {
+ compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless";
+ fsl,bman-buffer-pools = <&bp16>;
+ fsl,qman-frame-queues-rx = <5000 8>;
+ fsl,qman-frame-queues-tx = <5008 8>;
+ local-mac-address = [00 11 22 33 44 66];
+ };
+ dpa-fman0-oh@2 {
+ compatible = "fsl,dpa-oh";
+ /* Define frame queues for the OH port */
+ /* <OH Rx error, OH Rx default> */
+ fsl,qman-frame-queues-oh = <0x6e 1 0x6f 1>;
+ fsl,bman-buffer-pools = <&bp9>;
+ fsl,fman-oh-port = <&fman0_oh2>;
+ };
+ dpa_fman0_oh3: dpa-fman0-oh@3 {
+ compatible = "fsl,dpa-oh";
+ fsl,qman-frame-queues-oh = <0x68 1 0x69 1>;
+ fsl,bman-buffer-pools = <&bp9>;
+ fsl,fman-oh-port = <&fman0_oh3>;
+ };
+ dpa_fman0_oh4: dpa-fman0-oh@4 {
+ compatible = "fsl,dpa-oh";
+ fsl,qman-frame-queues-oh = <0x70 1 0x71 1>;
+ fsl,bman-buffer-pools = <&bp9>;
+ fsl,fman-oh-port = <&fman0_oh4>;
+ };
+ };
+};
diff --git a/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c b/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c
index f4b081f..5d04f84 100644
--- a/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c
+++ b/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c
@@ -118,6 +118,8 @@ static void compat_copy_sa_out_params(struct dpa_ipsec_sa_out_params *prm,
prm->outer_udp_header = (void *)
compat_ptr(compat_prm->outer_udp_header);
prm->post_sec_flow_id = compat_prm->post_sec_flow_id;
+ prm->dscp_start = compat_prm->dscp_start;
+ prm->dscp_end = compat_prm->dscp_end;
}
static void compat_copy_sa_crypto_params(struct dpa_ipsec_sa_crypto_params *prm,
@@ -205,6 +207,7 @@ static void compat_copy_dpa_ipsec_add_rem_policy(
prm->pol_params.protocol = compat_prm->pol_params.protocol;
prm->pol_params.masked_proto =
compat_prm->pol_params.masked_proto;
+ prm->pol_params.use_dscp = compat_prm->pol_params.use_dscp;
prm->pol_params.l4 = compat_prm->pol_params.l4;
if (compat_prm->pol_params.dir_params.type ==
DPA_IPSEC_POL_DIR_PARAMS_MANIP)
@@ -231,6 +234,7 @@ static void compat_copy_dpa_ipsec_add_rem_policy(
compat_prm->pol_params.protocol = prm->pol_params.protocol;
compat_prm->pol_params.masked_proto =
prm->pol_params.masked_proto;
+ compat_prm->pol_params.use_dscp = prm->pol_params.use_dscp;
compat_prm->pol_params.l4 = prm->pol_params.l4;
if (prm->pol_params.dir_params.type ==
DPA_IPSEC_POL_DIR_PARAMS_MANIP)
diff --git a/drivers/staging/fsl_pme2/Makefile b/drivers/staging/fsl_pme2/Makefile
index 694513b..e91c514 100644
--- a/drivers/staging/fsl_pme2/Makefile
+++ b/drivers/staging/fsl_pme2/Makefile
@@ -1,5 +1,5 @@
# PME
-obj-$(CONFIG_FSL_PME2_CTRL) += pme2_ctrl.o pme2_sysfs.o
+obj-$(CONFIG_FSL_PME2_CTRL) += pme2_ctrl.o pme2_sysfs.o pme2_suspend.o
obj-$(CONFIG_FSL_PME2_PORTAL) += pme2.o
pme2-y := pme2_low.o pme2_high.o
obj-$(CONFIG_FSL_PME2_TEST_HIGH) += pme2_test_high.o
diff --git a/drivers/staging/fsl_pme2/pme2_ctrl.c b/drivers/staging/fsl_pme2/pme2_ctrl.c
index 30956c2..016648c 100644
--- a/drivers/staging/fsl_pme2/pme2_ctrl.c
+++ b/drivers/staging/fsl_pme2/pme2_ctrl.c
@@ -295,6 +295,7 @@ static int of_fsl_pme_probe(struct platform_device *ofdev)
int srec_aim = 0, srec_esr = 0;
u32 srecontextsize_code;
u32 dec1;
+ struct pme2_private_data *priv_data;
/*
* TODO: This standby handling won't work properly after failover, it's
@@ -339,8 +340,8 @@ static int of_fsl_pme_probe(struct platform_device *ofdev)
if (likely(pme_err_irq != NO_IRQ)) {
/* Register the pme ISR handler */
- err = request_irq(pme_err_irq, pme_isr, IRQF_SHARED, "pme-err",
- dev);
+ err = request_irq(pme_err_irq, pme_isr,
+ IRQF_SHARED | IRQF_PERCPU, "pme-err", dev);
if (err) {
dev_err(dev, "request_irq() failed\n");
goto out_unmap_ctrl_region;
@@ -415,6 +416,11 @@ static int of_fsl_pme_probe(struct platform_device *ofdev)
(CONFIG_FSL_PME2_SRE_MAX_INSTRUCTION_LIMIT << 16) |
CONFIG_FSL_PME2_SRE_MAX_BLOCK_NUMBER);
+#ifdef CONFIG_PM
+ /* can't flush pme device easily. Disable caching for FC and RES */
+ pme_out(global_pme, CDCR, 0x00000009);
+#endif
+
/* Setup Accumulator */
if (pme_stat_interval)
schedule_delayed_work(&accumulator_work,
@@ -424,6 +430,19 @@ static int of_fsl_pme_probe(struct platform_device *ofdev)
if (err)
goto out_stop_accumulator;
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ goto out_stop_accumulator;
+
+ priv_data->regs = (uint32_t __iomem *)regs;
+ priv_data->pme_rev1 = pme_in(global_pme, PM_IP_REV1);
+ dev_set_drvdata(dev, priv_data);
+
+#ifdef CONFIG_PM
+ /* setup resources required for power management */
+ init_pme_suspend(priv_data);
+#endif
+
/* Enable interrupts */
pme_out(global_pme, IER, PME_ALL_ERR);
dev_info(dev, "ver: 0x%08x\n", pme_in(global_pme, PM_IP_REV1));
@@ -448,11 +467,96 @@ out:
return err;
}
+#ifdef CONFIG_PM
+void restore_all_ccsr(struct ccsr_backup_info *save_ccsr,
+ uint32_t __iomem *regs)
+{
+ int i;
+ int num_regs = sizeof(save_ccsr->regdb)/sizeof(uint32_t);
+ uint32_t *pme_reg = &save_ccsr->regdb.pmfa.isr;
+#ifdef CONFIG_PM_DEBUG
+ int diff_count = 0;
+#endif
+
+ for (i = 0; i < num_regs; i++) {
+#ifdef CONFIG_PM_DEBUG
+ /* skip enable register */
+ if ((pme_reg + i) != (&save_ccsr->regdb.pmfa.faconf)) {
+ uint32_t pme_reg_val;
+ pme_reg_val = in_be32(regs + i);
+ if (pme_reg_val != *(pme_reg + i))
+ diff_count++;
+ out_be32(regs + i, *(pme_reg + i));
+ }
+#else
+ /* skip enable register */
+ if ((pme_reg + i) != (&save_ccsr->regdb.pmfa.faconf))
+ out_be32(regs + i, *(pme_reg + i));
+#endif
+ }
+
+#ifdef CONFIG_PM_DEBUG
+ pr_info("pme ccsr restore: %d registers were different\n", diff_count);
+#endif
+}
+
+void save_all_ccsr(struct ccsr_backup_info *save_ccsr, uint32_t __iomem *regs)
+{
+ int i;
+ int num_regs = sizeof(save_ccsr->regdb)/sizeof(uint32_t);
+ uint32_t *pme_reg;
+
+ /* setup ddr space to save ccsr */
+ pme_reg = &save_ccsr->regdb.pmfa.isr;
+
+ for (i = 0; i < num_regs; i++)
+ *(pme_reg+i) = in_be32(regs + i);
+}
+
+static int pme2_pm_suspend(struct device *dev)
+{
+ struct pme2_private_data *priv_data;
+
+ priv_data = dev_get_drvdata(dev);
+ if (!priv_data) {
+ pr_err("No device data\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "fsl-pme PM suspend\n");
+
+ return pme_suspend(priv_data);
+}
+
+static int pme2_pm_resume(struct device *dev)
+{
+ struct pme2_private_data *priv_data;
+
+ priv_data = dev_get_drvdata(dev);
+ if (!priv_data) {
+ pr_err("No device data\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "fsl-pme PM resume\n");
+
+ return pme_resume(priv_data);
+
+}
+#else
+#define pme2_pm_suspend NULL
+#define pme2_pm_resume NULL
+#endif
+
+static const struct dev_pm_ops pme2_pm_ops = {
+ .suspend = pme2_pm_suspend,
+ .resume = pme2_pm_resume,
+};
+
static struct platform_driver of_fsl_pme_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = of_fsl_pme_ids,
+ .pm = &pme2_pm_ops,
},
.probe = of_fsl_pme_probe,
.remove = of_fsl_pme_remove,
@@ -768,7 +872,25 @@ int pme_attr_set(enum pme_attr attr, u32 val)
case pme_attr_pmtr:
pme_out(global_pme, PMTR, val);
break;
-
+ case pme_attr_faconf_rst:
+ attr_val = pme_in(global_pme, FACONF);
+ if (val)
+ attr_val |= PME_FACONF_RESET;
+ else
+ attr_val &= ~(PME_FACONF_RESET);
+ pme_out(global_pme, FACONF, attr_val);
+ break;
+ case pme_attr_faconf_en:
+ attr_val = pme_in(global_pme, FACONF);
+ if (val)
+ attr_val |= PME_FACONF_ENABLE;
+ else
+ attr_val &= ~(PME_FACONF_ENABLE);
+ pme_out(global_pme, FACONF, attr_val);
+ break;
+ case pme_attr_efqc:
+ pme_out(global_pme, EFQC, val);
+ break;
default:
pr_err("pme: Unknown attr %u\n", attr);
return -EINVAL;
@@ -1246,6 +1368,22 @@ int pme_attr_get(enum pme_attr attr, u32 *val)
attr_val = pme_in(global_pme, SRRWC);
break;
+ case pme_attr_faconf_rst:
+ attr_val = pme_in(global_pme, FACONF);
+ attr_val &= PME_FACONF_RESET;
+ break;
+
+ case pme_attr_faconf_en:
+ attr_val = pme_in(global_pme, FACONF);
+ attr_val &= PME_FACONF_ENABLE;
+ attr_val >>= 1;
+ break;
+
+ case pme_attr_efqc:
+ attr_val = pme_in(global_pme, EFQC);
+ break;
+
+
default:
pr_err("pme: Unknown attr %u\n", attr);
return -EINVAL;
diff --git a/drivers/staging/fsl_pme2/pme2_private.h b/drivers/staging/fsl_pme2/pme2_private.h
index 3968f02..4bd92c2 100644
--- a/drivers/staging/fsl_pme2/pme2_private.h
+++ b/drivers/staging/fsl_pme2/pme2_private.h
@@ -29,6 +29,9 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifndef PME2_PRIVATE_H
+#define PME2_PRIVATE_H
+
#include "pme2_sys.h"
#include <linux/fsl_pme.h>
@@ -50,6 +53,219 @@ void pme2_remove_sysfs_dev_files(struct platform_device *ofdev);
void accumulator_update_interval(u32 interval);
#endif
+#ifdef CONFIG_PM
+
+struct pme_save_regs_pmfa {
+ uint32_t isr;
+ uint32_t ier;
+ uint32_t isdr;
+ uint32_t iir;
+ uint32_t ifr;
+ uint32_t rll;
+ uint32_t cdcr;
+ uint32_t reserved1[2];
+ uint32_t trunci;
+ uint32_t rbc;
+ uint32_t esr;
+ uint32_t ecr0;
+ uint32_t ecr1;
+ uint32_t reserved2[6];
+ uint32_t efqc;
+ uint32_t sram_addr;
+ uint32_t sram_rdat;
+ uint32_t sram_wdat;
+ uint32_t faconf;
+ uint32_t pmstat;
+ uint32_t famcr;
+ uint32_t pmtr;
+ uint32_t reserved3;
+ uint32_t pehd;
+ uint32_t reserved4[2];
+ uint32_t bsc0;
+ uint32_t bsc1;
+ uint32_t bsc2;
+ uint32_t bsc3;
+ uint32_t bsc4;
+ uint32_t bsc5;
+ uint32_t bsc6;
+ uint32_t bsc7;
+ uint32_t reserved5[16];
+ uint32_t qmbfd0;
+ uint32_t qmbfd1;
+ uint32_t qmbfd2;
+ uint32_t qmbfd3;
+ uint32_t qmbctxtah;
+ uint32_t qmbctxtal;
+ uint32_t qmbctxtb;
+ uint32_t qmbctl;
+ uint32_t ecc1bes;
+ uint32_t ecc2bes;
+ uint32_t reserved6[2];
+ uint32_t eccaddr;
+ uint32_t reserved7[27];
+ uint32_t tbt0ecc1th;
+ uint32_t tbt0ecc1ec;
+ uint32_t tbt1ecc1th;
+ uint32_t tbt1ecc1ec;
+ uint32_t vlt0ecc1th;
+ uint32_t vlt0ecc1ec;
+ uint32_t vlt1ecc1th;
+ uint32_t vlt1ecc1ec;
+ uint32_t cmecc1th;
+ uint32_t cmecc1ec;
+ uint32_t reserved8[2];
+ uint32_t dxcmecc1th;
+ uint32_t dxcmecc1ec;
+ uint32_t reserved9[2];
+ uint32_t dxemecc1th;
+ uint32_t dxemecc1ec;
+ uint32_t reserved10[14];
+};
+
+struct pme_save_regs_kes {
+ uint32_t stnib;
+ uint32_t stnis;
+ uint32_t stnth1;
+ uint32_t stnth2;
+ uint32_t stnthv;
+ uint32_t stnths;
+ uint32_t stnch;
+ uint32_t swdb;
+ uint32_t kvlts;
+ uint32_t kec;
+ uint32_t reserved1[22];
+};
+
+struct pme_save_regs_dxe {
+ uint32_t stnpm;
+ uint32_t stns1m;
+ uint32_t drcic;
+ uint32_t drcmc;
+ uint32_t stnpmr;
+ uint32_t reserved1[3];
+ uint32_t pdsrbah;
+ uint32_t pdsrbal;
+ uint32_t dmcr;
+ uint32_t dec0;
+ uint32_t dec1;
+ uint32_t reserved2[3];
+ uint32_t dlc;
+ uint32_t reserved3[15];
+};
+
+struct pme_save_regs_sre {
+ uint32_t stndrs;
+ uint32_t stnesr;
+ uint32_t stns1r;
+ uint32_t stnob;
+ uint32_t scbarh;
+ uint32_t scbarl;
+ uint32_t smcr;
+ uint32_t reserved1;
+ uint32_t srec;
+ uint32_t reserved2;
+ uint32_t esrp;
+ uint32_t reserved3[3];
+ uint32_t srrv0;
+ uint32_t srrv1;
+ uint32_t srrv2;
+ uint32_t srrv3;
+ uint32_t srrv4;
+ uint32_t srrv5;
+ uint32_t srrv6;
+ uint32_t srrv7;
+ uint32_t srrfi;
+ uint32_t reserved4;
+ uint32_t srri;
+ uint32_t srrr;
+ uint32_t srrwc;
+ uint32_t sfrcc;
+ uint32_t sec1;
+ uint32_t sec2;
+ uint32_t sec3;
+ uint32_t reserved5;
+};
+
+struct pme_save_regs_mia {
+ uint32_t mia_byc;
+ uint32_t mia_blc;
+ uint32_t mia_ce;
+ uint32_t reserved1;
+ uint32_t mia_cr;
+ uint32_t reserved2[284];
+};
+
+struct pme_save_regs_gen {
+ uint32_t liodnbr;
+ uint32_t reserved1[126];
+ uint32_t srcidr;
+ uint32_t reserved2[2];
+ uint32_t liodnr;
+ uint32_t reserved3[122];
+ uint32_t pm_ip_rev_1;
+ uint32_t pm_ip_rev_2;
+};
+
+struct pme_save_reg_all {
+ struct pme_save_regs_pmfa pmfa;
+ struct pme_save_regs_kes kes;
+ struct pme_save_regs_dxe dxe;
+ struct pme_save_regs_sre sre;
+ struct pme_save_regs_mia mia;
+ struct pme_save_regs_gen gen;
+};
+
+struct pme_pwrmgmt_ctx {
+ struct qman_fq tx_fq;
+ struct qman_fq rx_fq;
+ struct qm_fd result_fd;
+ struct completion done;
+};
+
+struct pmtcc_raw_db {
+ /* vmalloc's memory. Save PME's sram data */
+ uint8_t *alldb;
+};
+
+struct ccsr_backup_info {
+ uint32_t save_faconf_en;
+ uint32_t save_cdcr;
+ struct pme_save_reg_all regdb;
+};
+
+struct portal_backup_info {
+ /* vmalloc's memory. Save PME's sram data */
+ struct pmtcc_raw_db db;
+ struct pme_pwrmgmt_ctx *ctx;
+ struct platform_device *pdev;
+ int backup_failed;
+};
+
+#endif /* CONFIG_PM */
+
+struct pme2_private_data {
+ uint32_t pme_rev1;
+ uint32_t __iomem *regs;
+#ifdef CONFIG_PM
+ struct ccsr_backup_info save_ccsr;
+ struct portal_backup_info save_db;
+#endif
+};
+
+#ifdef CONFIG_PM
+/* Hooks from pme_ctrl to pme_suspend */
+int init_pme_suspend(struct pme2_private_data *priv_data);
+void exit_pme_suspend(struct pme2_private_data *priv_data);
+int pme_suspend(struct pme2_private_data *priv_data);
+int pme_resume(struct pme2_private_data *priv_data);
+
+/* Hooks from pme_suspend into pme_ctrl */
+void restore_all_ccsr(struct ccsr_backup_info *save_ccsr,
+ uint32_t __iomem *regs);
+void save_all_ccsr(struct ccsr_backup_info *save_ccsr,
+ uint32_t __iomem *regs);
+#endif
+
static inline void set_fd_addr(struct qm_fd *fd, dma_addr_t addr)
{
qm_fd_addr_set64(fd, addr);
@@ -215,3 +431,10 @@ static inline int is_version_2_1_4(u32 pme_rev1, u32 pme_rev2)
(get_errata_rev(pme_rev2) == 4);
}
+static inline int is_version(u32 pme_rev1, int major, int minor)
+{
+ return (get_major_rev(pme_rev1) == major) &&
+ (get_minor_rev(pme_rev1) == minor);
+}
+
+#endif
diff --git a/drivers/staging/fsl_pme2/pme2_suspend.c b/drivers/staging/fsl_pme2/pme2_suspend.c
new file mode 100644
index 0000000..e2ef2af
--- /dev/null
+++ b/drivers/staging/fsl_pme2/pme2_suspend.c
@@ -0,0 +1,1184 @@
+/* Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * This software is provided by Freescale Semiconductor "as is" and any
+ * express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are
+ * disclaimed. In no event shall Freescale Semiconductor be liable for any
+ * direct, indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused and
+ * on any theory of liability, whether in contract, strict liability, or tort
+ * (including negligence or otherwise) arising in any way out of the use of
+ * this software, even if advised of the possibility of such damage.
+ */
+
+#ifdef CONFIG_PM
+
+#include "pme2_private.h"
+#include "pme2_regs.h"
+#include <linux/vmalloc.h>
+
+static dma_addr_t pme_suspend_map(struct platform_device *pdev, void *ptr)
+{
+ return dma_map_single(&pdev->dev, ptr, 1, DMA_BIDIRECTIONAL);
+}
+
+/*
+ * The following SRAM tables need to be saved
+ * 1-byte trigger table
+ * 2-byte trigger table
+ * variable length trigger table
+ * confidence table
+ * User-Defined Group Mapping table
+ * Equivalent Byte Mapping table
+ * Special Trigger table
+ */
+enum pme_pmtcc_table_id {
+ PME_ONE_BYTE_TRIGGER_TBL = 0x00,
+ PME_TWO_BYTE_TRIGGER_TBL = 0x01,
+ PME_VARIABLE_TRIGGER_TBL = 0x02,
+ PME_CONFIDENCE_TBL = 0x03,
+ PME_UDG_TBL = 0x05,
+ PME_EQUIVALENT_BYTE_TBL = 0x06,
+ PME_SPECIAL_TRIGGER_TBL = 0x08,
+ PME_LAST_TABLE = PME_SPECIAL_TRIGGER_TBL
+};
+
+static enum pme_pmtcc_table_id table_list[] = {PME_ONE_BYTE_TRIGGER_TBL,
+ PME_TWO_BYTE_TRIGGER_TBL, PME_VARIABLE_TRIGGER_TBL, PME_CONFIDENCE_TBL,
+ PME_UDG_TBL, PME_EQUIVALENT_BYTE_TBL, PME_SPECIAL_TRIGGER_TBL};
+
+struct pme_pmtcc_header_t {
+ uint8_t protocol_version;
+ uint8_t msg_type;
+ uint16_t reserved;
+ /* total message length, including the header */
+ uint32_t msg_length;
+ uint64_t msg_id;
+ uint8_t data[0];
+} __packed;
+
+/*
+ * The next few macros define the sizes (in bytes) of the entries in
+ * the different PM H/W tables.
+ */
+#define PME_ONE_BYTE_TRIGGER_ENTRY_SIZE 32
+#define PME_TWO_BYTE_TRIGGER_ENTRY_SIZE 8
+#define PME_VARIABLE_TRIGGER_ENTRY_SIZE 8
+#define PME_CONFIDENCE_ENTRY_SIZE 4
+#define PME_CONFIRMATION_ENTRY_SIZE 128
+#define PME_USER_DEFINED_GROUP_READ_ENTRY_SIZE 4
+#define PME_USER_DEFINED_GROUP_WRITE_ENTRY_SIZE 256
+#define PME_EQUIVALENCE_READ_ENTRY_SIZE 4
+#define PME_EQUIVALENCE_WRITE_ENTRY_SIZE 256
+#define PME_SESSION_CONTEXT_ENTRY_SIZE 32
+#define PME_SPECIAL_TRIGGER_ENTRY_SIZE 32
+
+union pme_table_entry_t {
+ /* The next few types define the entries for the different PM tables. */
+ uint8_t one_byte_trigger_entry[PME_ONE_BYTE_TRIGGER_ENTRY_SIZE];
+ uint8_t two_byte_trigger_entry[PME_TWO_BYTE_TRIGGER_ENTRY_SIZE];
+ uint8_t variable_trigger_entry[PME_VARIABLE_TRIGGER_ENTRY_SIZE];
+ uint8_t confidence_entry[PME_CONFIDENCE_ENTRY_SIZE];
+ uint8_t udg_read_entry[PME_USER_DEFINED_GROUP_READ_ENTRY_SIZE];
+ uint8_t udg_write_entry[PME_USER_DEFINED_GROUP_WRITE_ENTRY_SIZE];
+ uint8_t equivalence_read_entry[PME_EQUIVALENCE_READ_ENTRY_SIZE];
+ uint8_t equivalence_write_entry[PME_EQUIVALENCE_WRITE_ENTRY_SIZE];
+ uint8_t special_trigger_entry[PME_SPECIAL_TRIGGER_ENTRY_SIZE];
+} __packed;
+
+/* This type defines an indexed table entry. */
+struct pme_indexed_table_entry_t {
+ uint32_t index;
+ union pme_table_entry_t entry;
+} __packed;
+
+/* table read request */
+struct pme_pmtcc_read_request_msg_t {
+ struct pme_pmtcc_header_t header;
+ uint32_t table_id;
+ uint32_t index;
+} __packed;
+
+/* table read reply message. */
+struct pme_pmtcc_read_reply_msg_t {
+ struct pme_pmtcc_header_t header;
+ uint32_t table_id;
+ struct pme_indexed_table_entry_t indexed_entry;
+} __packed;
+
+/* table write request message */
+struct pme_pmtcc_write_request_msg_t {
+ struct pme_pmtcc_header_t header;
+ uint32_t table_id;
+ struct pme_indexed_table_entry_t indexed_entry;
+} __packed;
+
+/*
+ * The next few macros define the number of entries in the different PM
+ * H/W tables.
+ */
+#define PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY 4
+
+#define PME_ONE_BYTE_TRIGGER_ENTRY_NUM 1
+
+#define PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V1 512
+#define PME_VARIABLE_TRIGGER_ENTRY_NUM_V1 4096
+
+#define PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_0 2048
+#define PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_0 16384
+
+#define PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_1 1024
+#define PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_1 8192
+
+#define PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_2 512
+#define PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_2 4096
+
+#define PME_SPECIAL_CONFIDENCE_ENTRY_NUM 64
+#define PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM 64
+
+#define PME_SPECIAL_CONFIDENCE_ENTRY_NUM_V2_2 32
+
+#define PME_CONFIDENCE_ENTRY_NUM_V1 \
+ ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V1 + \
+ PME_VARIABLE_TRIGGER_ENTRY_NUM_V1 + \
+ PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM + \
+ PME_SPECIAL_CONFIDENCE_ENTRY_NUM) * \
+ PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY)
+
+#define PME_CONFIDENCE_ENTRY_NUM_V2_0 \
+ ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_0 + \
+ PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_0 + \
+ PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM + \
+ PME_SPECIAL_CONFIDENCE_ENTRY_NUM) * \
+ PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY)
+
+#define PME_CONFIDENCE_ENTRY_NUM_V2_1 \
+ ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_1 + \
+ PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_1 + \
+ PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM + \
+ PME_SPECIAL_CONFIDENCE_ENTRY_NUM) * \
+ PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY)
+
+#define PME_CONFIDENCE_ENTRY_NUM_V2_2 \
+ ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_2 + \
+ PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_2 + \
+ PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM + \
+ PME_SPECIAL_CONFIDENCE_ENTRY_NUM_V2_2) * \
+ PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY)
+
+#define PME_EQUIVALENCE_ENTRY_NUM 1
+#define PME_USER_DEFINED_GROUP_ENTRY_NUM 1
+#define PME_SPECIAL_TRIGGER_ENTRY_NUM 1
+
+/*
+ * The next few macros below define the sizes of the different
+ * messages. Note that the macros related to the table read and write
+ * messages assume that there is only one entry in the read/write
+ * message.
+ */
+#define PME_TABLE_READ_REQUEST_MSG_SIZE \
+ sizeof(struct pme_pmtcc_read_request_msg_t)
+
+#define PME_ONE_BYTE_TABLE_READ_REPLY_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_read_reply_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_ONE_BYTE_TRIGGER_ENTRY_SIZE)
+
+#define PME_TWO_BYTE_TABLE_READ_REPLY_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_read_reply_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_TWO_BYTE_TRIGGER_ENTRY_SIZE)
+
+#define PME_VARIABLE_TABLE_READ_REPLY_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_read_reply_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_VARIABLE_TRIGGER_ENTRY_SIZE)
+
+#define PME_CONFIDENCE_TABLE_READ_REPLY_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_read_reply_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_CONFIDENCE_ENTRY_SIZE)
+
+#define PME_UDG_TABLE_READ_REPLY_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_read_reply_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_USER_DEFINED_GROUP_READ_ENTRY_SIZE)
+
+#define PME_EQUIVALENCE_TABLE_READ_REPLY_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_read_reply_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_EQUIVALENCE_READ_ENTRY_SIZE)
+
+#define PME_SPECIAL_TABLE_READ_REPLY_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_read_reply_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_SPECIAL_TRIGGER_ENTRY_SIZE)
+
+#define PME_ONE_BYTE_TABLE_WRITE_REQUEST_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_write_request_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_ONE_BYTE_TRIGGER_ENTRY_SIZE)
+
+#define PME_TWO_BYTE_TABLE_WRITE_REQUEST_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_write_request_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_TWO_BYTE_TRIGGER_ENTRY_SIZE)
+
+#define PME_VARIABLE_TABLE_WRITE_REQUEST_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_write_request_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_VARIABLE_TRIGGER_ENTRY_SIZE)
+
+#define PME_CONFIDENCE_TABLE_WRITE_REQUEST_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_write_request_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_CONFIDENCE_ENTRY_SIZE)
+
+#define PME_UDG_TABLE_WRITE_REQUEST_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_write_request_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_USER_DEFINED_GROUP_WRITE_ENTRY_SIZE)
+
+#define PME_EQUIVALENCE_TABLE_WRITE_REQUEST_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_write_request_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_EQUIVALENCE_WRITE_ENTRY_SIZE)
+
+#define PME_SPECIAL_TABLE_WRITE_REQUEST_MSG_SIZE \
+ (sizeof(struct pme_pmtcc_write_request_msg_t) - \
+ sizeof(union pme_table_entry_t) + \
+ PME_SPECIAL_TRIGGER_ENTRY_SIZE)
+
+/*
+ * Index 0..255, bools indicate which errors are serious
+ * 0x40, 0x41, 0x48, 0x49, 0x4c, 0x4e, 0x4f, 0x50, 0x51, 0x59, 0x5a, 0x5b,
+ * 0x5c, 0x5d, 0x5f, 0x60, 0x80, 0xc0, 0xc1, 0xc2, 0xc4, 0xd2,
+ * 0xd4, 0xd5, 0xd7, 0xd9, 0xda, 0xe0, 0xe7
+ */
+static u8 serious_error_vec[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x01, 0x01, 0x00, 0x01, 0x00, 0x01, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq, const struct qm_dqrr_entry *dq)
+{
+ u8 status = (u8)pme_fd_res_status(&dq->fd);
+ u8 flags = pme_fd_res_flags(&dq->fd);
+ struct pme_pwrmgmt_ctx *ctx = (struct pme_pwrmgmt_ctx *)fq;
+
+ if (unlikely(flags & PME_STATUS_UNRELIABLE))
+ pr_err("pme status error 0x%x\n", (u32)flags);
+ else if (unlikely((serious_error_vec[status])))
+ pr_err("pme error status 0x%x\n", (u32)status);
+ memcpy(&ctx->result_fd, &dq->fd, sizeof(*&dq->fd));
+ complete(&ctx->done);
+ return qman_cb_dqrr_consume;
+}
+
+static void cb_fqs(__always_unused struct qman_portal *portal,
+ __always_unused struct qman_fq *fq,
+ const struct qm_mr_entry *mr)
+{
+ u8 verb = mr->verb & QM_MR_VERB_TYPE_MASK;
+ if (verb == QM_MR_VERB_FQRNI)
+ return;
+ /* nothing else is supposed to occur */
+ BUG();
+}
+
+static const struct qman_fq_cb pme_fq_base_out = {
+ .dqrr = cb_dqrr,
+ .fqs = cb_fqs
+};
+
+static const struct qman_fq_cb pme_fq_base_in = {
+ .fqs = cb_fqs,
+ .ern = NULL
+};
+
+static void pme_pwrmgmt_initfq(struct qm_mcc_initfq *initfq, u32 rfqid)
+{
+ struct pme_context_a *pme_a =
+ (struct pme_context_a *)&initfq->fqd.context_a;
+ struct pme_context_b *pme_b =
+ (struct pme_context_b *)&initfq->fqd.context_b;
+
+ initfq->we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CONTEXTB;
+ initfq->fqd.dest.channel = qm_channel_pme;
+ initfq->fqd.dest.wq = 4;
+ pme_a->mode = pme_mode_direct;
+ pme_context_a_set64(pme_a, 0);
+ pme_b->rfqid = rfqid;
+}
+
+static int pme_pwrmgmt_ctx_reconfigure_tx(struct pme_pwrmgmt_ctx *ctx)
+{
+ struct qm_mcc_initfq initfq;
+ u32 flags = QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL;
+ int ret;
+
+ memset(&initfq, 0, sizeof(initfq));
+ initfq.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
+ initfq.fqd.dest.wq = 4;
+ initfq.fqd.fq_ctrl = 0; /* disable stashing */
+ ret = qman_init_fq(&ctx->tx_fq, flags, &initfq);
+ return ret;
+}
+
+static int pme_pwrmgmt_ctx_reconfigure_rx(struct pme_pwrmgmt_ctx *ctx)
+{
+ struct qm_mcc_initfq initfq;
+ int ret;
+
+ memset(&initfq, 0, sizeof(initfq));
+ pme_pwrmgmt_initfq(&initfq, qman_fq_fqid(&ctx->tx_fq));
+ ret = qman_init_fq(&ctx->rx_fq, 0, &initfq);
+ return ret;
+}
+
+int pme_pwrmgmt_ctx_init(struct pme_pwrmgmt_ctx *ctx)
+{
+ int ret;
+
+ ctx->tx_fq.cb = pme_fq_base_out;
+ ctx->rx_fq.cb = pme_fq_base_in;
+
+ /* Create tx (from pme point of view) frame queue */
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_TO_DCPORTAL |
+ QMAN_FQ_FLAG_DYNAMIC_FQID | QMAN_FQ_FLAG_LOCKED,
+ &ctx->rx_fq);
+ if (ret)
+ return ret;
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
+ QMAN_FQ_FLAG_DYNAMIC_FQID | QMAN_FQ_FLAG_LOCKED,
+ &ctx->tx_fq);
+ if (ret)
+ goto create_rx_failed;
+
+ ret = pme_pwrmgmt_ctx_reconfigure_rx(ctx);
+ if (ret)
+ goto config_rx_failed;
+
+ ret = pme_pwrmgmt_ctx_reconfigure_tx(ctx);
+ if (ret)
+ goto config_tx_failed;
+
+ return 0;
+config_tx_failed:
+config_rx_failed:
+ qman_destroy_fq(&ctx->rx_fq, 0);
+create_rx_failed:
+ qman_destroy_fq(&ctx->tx_fq, 0);
+ return ret;
+}
+
+static void pme_pwrmgmt_ctx_finish(struct pme_pwrmgmt_ctx *ctx)
+{
+ u32 flags;
+ int ret;
+
+ ret = qman_retire_fq(&ctx->tx_fq, &flags);
+ BUG_ON(ret);
+ BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
+ ret = qman_retire_fq(&ctx->rx_fq, &flags);
+ BUG_ON(ret);
+ BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
+ ret = qman_oos_fq(&ctx->tx_fq);
+ BUG_ON(ret);
+ ret = qman_oos_fq(&ctx->rx_fq);
+ BUG_ON(ret);
+ qman_destroy_fq(&ctx->tx_fq, 0);
+ qman_destroy_fq(&ctx->rx_fq, 0);
+}
+
+static int create_pwrmgmt_ctx(struct portal_backup_info *save_db)
+{
+ int ret;
+
+ /* check to see if context already exists */
+ if (save_db->ctx)
+ return 0;
+
+ save_db->ctx = kzalloc(sizeof(*save_db->ctx), GFP_KERNEL);
+ if (!save_db->ctx)
+ return -ENOMEM;
+
+ init_completion(&save_db->ctx->done);
+ ret = pme_pwrmgmt_ctx_init(save_db->ctx);
+ if (ret) {
+ pr_err("Error pme_pwrmgmt_ctx_init\n");
+ goto error_free_mem;
+ }
+ return 0;
+
+error_free_mem:
+ kfree(save_db->ctx);
+ save_db->ctx = NULL;
+ return ret;
+}
+
+static int delete_pwrmgmt_ctx(struct portal_backup_info *save_db)
+{
+ if (!save_db->ctx)
+ return 0;
+
+ pme_pwrmgmt_ctx_finish(save_db->ctx);
+ kfree(save_db->ctx);
+ save_db->ctx = NULL;
+
+ return 0;
+}
+
+/* Send a pmtcc pme frame */
+static int pme_pwrmgmt_ctx_pmtcc(struct pme_pwrmgmt_ctx *ctx, u32 flags,
+ struct qm_fd *fd)
+{
+ int ret;
+
+ struct pme_cmd_pmtcc *pmtcc = (struct pme_cmd_pmtcc *)&fd->cmd;
+ pmtcc->cmd = pme_cmd_pmtcc;
+
+ ret = qman_enqueue(&ctx->rx_fq, fd, flags &
+ (QMAN_ENQUEUE_FLAG_WAIT | QMAN_ENQUEUE_FLAG_WAIT_INT));
+
+ return ret;
+}
+
+static int get_table_attributes(enum pme_pmtcc_table_id tbl_id,
+ uint32_t pme_rev1, int *num_read_entries, int *num_write_entries,
+ int *read_size, int *read_reply_size, int *write_size,
+ int *read_entry_size, int *write_entry_size)
+{
+ *read_size = PME_TABLE_READ_REQUEST_MSG_SIZE;
+
+ switch (tbl_id) {
+ case PME_ONE_BYTE_TRIGGER_TBL:
+ *num_read_entries = PME_ONE_BYTE_TRIGGER_ENTRY_NUM;
+ *num_write_entries = PME_ONE_BYTE_TRIGGER_ENTRY_NUM;
+ *read_reply_size = PME_ONE_BYTE_TABLE_READ_REPLY_MSG_SIZE;
+ *write_size = PME_ONE_BYTE_TABLE_WRITE_REQUEST_MSG_SIZE;
+ *read_entry_size = PME_ONE_BYTE_TRIGGER_ENTRY_SIZE;
+ *write_entry_size = PME_ONE_BYTE_TRIGGER_ENTRY_SIZE;
+ break;
+
+ case PME_TWO_BYTE_TRIGGER_TBL:
+ if (is_version(pme_rev1, 2, 0))
+ *num_read_entries = PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_0;
+ else if (is_version(pme_rev1, 2, 1))
+ *num_read_entries = PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_1;
+ else if (is_version(pme_rev1, 2, 2))
+ *num_read_entries = PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_2;
+ else {
+ pr_err("pme suspend: unsupported pme version %u\n",
+ pme_rev1);
+ return -EINVAL;
+ }
+ *num_write_entries = *num_read_entries;
+ *read_reply_size = PME_TWO_BYTE_TABLE_READ_REPLY_MSG_SIZE;
+ *write_size = PME_TWO_BYTE_TABLE_WRITE_REQUEST_MSG_SIZE;
+ *read_entry_size = PME_TWO_BYTE_TRIGGER_ENTRY_SIZE;
+ *write_entry_size = PME_TWO_BYTE_TRIGGER_ENTRY_SIZE;
+ break;
+
+ case PME_VARIABLE_TRIGGER_TBL:
+ if (is_version(pme_rev1, 2, 0))
+ *num_read_entries = PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_0;
+ else if (is_version(pme_rev1, 2, 1))
+ *num_read_entries = PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_1;
+ else if (is_version(pme_rev1, 2, 2))
+ *num_read_entries = PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_2;
+ else {
+ pr_err("pme suspend: unsupported pme version %u\n",
+ pme_rev1);
+ return -EINVAL;
+ }
+ *num_write_entries = *num_read_entries;
+ *read_reply_size = PME_VARIABLE_TABLE_READ_REPLY_MSG_SIZE;
+ *write_size = PME_VARIABLE_TABLE_WRITE_REQUEST_MSG_SIZE;
+ *read_entry_size = PME_VARIABLE_TRIGGER_ENTRY_SIZE;
+ *write_entry_size = PME_VARIABLE_TRIGGER_ENTRY_SIZE;
+ break;
+
+ case PME_CONFIDENCE_TBL:
+ if (is_version(pme_rev1, 2, 0))
+ *num_read_entries = PME_CONFIDENCE_ENTRY_NUM_V2_0;
+ else if (is_version(pme_rev1, 2, 1))
+ *num_read_entries = PME_CONFIDENCE_ENTRY_NUM_V2_1;
+ else if (is_version(pme_rev1, 2, 2))
+ *num_read_entries = PME_CONFIDENCE_ENTRY_NUM_V2_2;
+ else {
+ pr_err("pme suspend: unsupported pme version %u\n",
+ pme_rev1);
+ return -EINVAL;
+ }
+ *num_write_entries = *num_read_entries;
+ *read_reply_size = PME_CONFIDENCE_TABLE_READ_REPLY_MSG_SIZE;
+ *write_size = PME_CONFIDENCE_TABLE_WRITE_REQUEST_MSG_SIZE;
+ *read_entry_size = PME_CONFIDENCE_ENTRY_SIZE;
+ *write_entry_size = PME_CONFIDENCE_ENTRY_SIZE;
+ break;
+ case PME_UDG_TBL:
+ *num_read_entries = 256;
+ *num_write_entries = PME_USER_DEFINED_GROUP_ENTRY_NUM;
+ *read_reply_size = PME_UDG_TABLE_READ_REPLY_MSG_SIZE;
+ *write_size = PME_UDG_TABLE_WRITE_REQUEST_MSG_SIZE;
+ *read_entry_size = (PME_UDG_TABLE_READ_REPLY_MSG_SIZE -
+ PME_TABLE_READ_REQUEST_MSG_SIZE);
+ *write_entry_size = PME_USER_DEFINED_GROUP_WRITE_ENTRY_SIZE;
+ break;
+
+ case PME_EQUIVALENT_BYTE_TBL:
+ *num_read_entries = 256;
+ *num_write_entries = PME_EQUIVALENCE_ENTRY_NUM;
+ *read_reply_size = PME_EQUIVALENCE_TABLE_READ_REPLY_MSG_SIZE;
+ *write_size = PME_EQUIVALENCE_TABLE_WRITE_REQUEST_MSG_SIZE;
+ *read_entry_size = (PME_EQUIVALENCE_TABLE_READ_REPLY_MSG_SIZE -
+ PME_TABLE_READ_REQUEST_MSG_SIZE);
+ *write_entry_size = PME_EQUIVALENCE_WRITE_ENTRY_SIZE;
+ break;
+
+ case PME_SPECIAL_TRIGGER_TBL:
+ *num_read_entries = PME_SPECIAL_TRIGGER_ENTRY_NUM;
+ *num_write_entries = PME_SPECIAL_TRIGGER_ENTRY_NUM;
+ *read_reply_size = PME_SPECIAL_TABLE_READ_REPLY_MSG_SIZE;
+ *write_size = PME_SPECIAL_TABLE_WRITE_REQUEST_MSG_SIZE;
+ *read_entry_size = PME_SPECIAL_TRIGGER_ENTRY_SIZE;
+ *write_entry_size = PME_SPECIAL_TRIGGER_ENTRY_SIZE;
+ break;
+ }
+ return 0;
+}
+
+#ifdef PME_SUSPEND_DEBUG
+/* Debug helper: total bytes needed to issue a read request for every
+ * entry of @tbl_id (num_read_entries * per-request size). Returns a
+ * negative errno if the table attributes cannot be resolved. */
+static int total_size_read_request(enum pme_pmtcc_table_id tbl_id,
+	uint32_t pme_rev1)
+{
+	int n_rd, n_wr, rd_sz, rd_reply_sz, wr_sz, rd_ent_sz, wr_ent_sz;
+	int err;
+
+	err = get_table_attributes(tbl_id, pme_rev1, &n_rd, &n_wr, &rd_sz,
+		&rd_reply_sz, &wr_sz, &rd_ent_sz, &wr_ent_sz);
+	if (err)
+		return err;
+	return n_rd * rd_sz;
+}
+
+/* Debug helper: total bytes of read-reply traffic expected when dumping
+ * every entry of @tbl_id (num_read_entries * per-reply size). Returns a
+ * negative errno if the table attributes cannot be resolved. */
+static int total_size_read_response_request(enum pme_pmtcc_table_id tbl_id,
+	uint32_t pme_rev1)
+{
+	int n_rd, n_wr, rd_sz, rd_reply_sz, wr_sz, rd_ent_sz, wr_ent_sz;
+	int err;
+
+	err = get_table_attributes(tbl_id, pme_rev1, &n_rd, &n_wr, &rd_sz,
+		&rd_reply_sz, &wr_sz, &rd_ent_sz, &wr_ent_sz);
+	if (err)
+		return err;
+	return n_rd * rd_reply_sz;
+}
+
+/* Debug helper: total bytes of payload written when restoring every
+ * entry of @tbl_id (num_write_entries * per-entry write size). Returns a
+ * negative errno if the table attributes cannot be resolved. */
+static int total_size_write_request(enum pme_pmtcc_table_id tbl_id,
+	uint32_t pme_rev1)
+{
+	int n_rd, n_wr, rd_sz, rd_reply_sz, wr_sz, rd_ent_sz, wr_ent_sz;
+	int err;
+
+	err = get_table_attributes(tbl_id, pme_rev1, &n_rd, &n_wr, &rd_sz,
+		&rd_reply_sz, &wr_sz, &rd_ent_sz, &wr_ent_sz);
+	if (err)
+		return err;
+	return n_wr * wr_ent_sz;
+}
+#endif
+
+/*
+ * Size in bytes of the DDR buffer needed to hold a write-format snapshot
+ * of every PME table in table_list; this is the allocation size used by
+ * alloc_databases() and the layout walked by save/restore_all_tables().
+ * Returns a negative errno if any table's attributes cannot be resolved.
+ */
+static int sizeof_all_db_tables(uint32_t pme_rev1)
+{
+	enum pme_pmtcc_table_id tbl_id;
+	int i, ret, size = 0;
+
+	for (i = 0; i < ARRAY_SIZE(table_list); i++) {
+		int num_read_entries, read_size, read_reply_size, write_size,
+			read_entry_size, write_entry_size, num_write_entries;
+		tbl_id = table_list[i];
+
+		ret = get_table_attributes(tbl_id, pme_rev1, &num_read_entries,
+			&num_write_entries, &read_size, &read_reply_size,
+			&write_size, &read_entry_size, &write_entry_size);
+
+		if (ret)
+			return ret;
+		/* snapshot is stored in write-entry layout, one slot per
+		 * writable entry */
+		size += (write_entry_size * num_write_entries);
+	}
+	return size;
+}
+
+#ifdef PME_SUSPEND_DEBUG
+/*
+ * Dump the per-table geometry (entry counts, request/reply sizes) and the
+ * aggregate buffer sizes for the current PME revision. Debug-only
+ * (PME_SUSPEND_DEBUG); has no effect on device state.
+ *
+ * NOTE(review): the loop indexes table_list[i] before checking for the
+ * PME_LAST_TABLE sentinel; if table_list does not contain PME_LAST_TABLE
+ * this reads past the array - confirm the sentinel is always present.
+ */
+static void print_debug(uint32_t pme_rev1)
+{
+	int i = 0;
+
+	pr_info("size of db is %d\n", sizeof_all_db_tables(pme_rev1));
+
+	do {
+		int num_read_entries, read_size, read_reply_size, write_size,
+			num_write_entries, read_entry_size, write_entry_size;
+
+		get_table_attributes(table_list[i], pme_rev1, &num_read_entries,
+			&num_write_entries, &read_size, &read_reply_size,
+			&write_size, &read_entry_size, &write_entry_size);
+
+		pr_info("Table Id %d\n", table_list[i]);
+		pr_info(" num_read_entries %d, r_sz %d, rr_sz %d, w_sz %d\n",
+			num_read_entries, read_size, read_reply_size,
+			write_size);
+		pr_info(" num_wr_entries %d, r_entry_size %d w_entry_size %d\n",
+			num_write_entries, read_entry_size, write_entry_size);
+		pr_info(" total read request size %d\n",
+			total_size_read_request(table_list[i], pme_rev1));
+		pr_info(" total read reply request size %d\n",
+			total_size_read_response_request(table_list[i],
+				pme_rev1));
+		pr_info(" total write request size %d\n",
+			total_size_write_request(table_list[i], pme_rev1));
+
+		if (table_list[i] == PME_LAST_TABLE)
+			break;
+		i++;
+	} while (1);
+}
+#endif
+
+/* Release the DDR buffer holding the PME SRAM snapshot. vfree(NULL) is a
+ * no-op, so this is safe even if alloc_databases() never ran or failed. */
+static void free_databases(struct portal_backup_info *save_db)
+{
+	vfree(save_db->db.alldb);
+	save_db->db.alldb = NULL;
+}
+
+/*
+ * Allocate the zeroed DDR buffer that will hold the write-format snapshot
+ * of all PME tables (sized by sizeof_all_db_tables() for this PME
+ * revision). Returns 0 on success, -EINVAL if the size cannot be
+ * determined, -ENOMEM on allocation failure.
+ */
+static int alloc_databases(struct pme2_private_data *priv_data)
+{
+	int sizedb;
+
+	sizedb = sizeof_all_db_tables(priv_data->pme_rev1);
+	if (sizedb < 0) {
+		pr_err("Error getting db size\n");
+		return -EINVAL;
+	}
+
+	/* zeroed so restore can skip never-written (all-zero) entries */
+	priv_data->save_db.db.alldb = vzalloc(sizedb);
+	if (!priv_data->save_db.db.alldb)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * save_all_tables - snapshot every PME SRAM table into the DDR backup buffer
+ * @save_db:  portal backup state (power-management ctx, pdev, raw db buffer)
+ * @pme_rev1: PME IP revision; selects the per-version table geometries
+ *
+ * For each table in table_list a compound-frame PMTCC read is issued per
+ * entry and the reply payload is appended to save_db->db.alldb in the
+ * write-entry layout consumed by restore_all_tables(). On a PMTCC failure
+ * save_db->backup_failed is set so the caller can reset the device.
+ * Returns 0 on success or a negative errno.
+ */
+static int save_all_tables(struct portal_backup_info *save_db,
+	uint32_t pme_rev1)
+{
+	struct pmtcc_raw_db *db = &save_db->db;
+	enum pme_pmtcc_table_id tbl_id;
+	int i, ret = 0;
+	uint8_t *current_tbl = db->alldb;
+
+	for (i = 0; i < ARRAY_SIZE(table_list); i++) {
+		int num_read_entries, read_size, read_reply_size, write_size,
+			read_entry_size, write_entry_size, num_write_entries;
+		int idx;
+		struct pme_pmtcc_read_request_msg_t *entry;
+		struct qm_fd fd;
+		struct qm_sg_entry *sg_table = NULL;
+		uint8_t *input_data, *output_data;
+		enum pme_status status;
+
+		tbl_id = table_list[i];
+		ret = get_table_attributes(tbl_id, pme_rev1, &num_read_entries,
+			&num_write_entries, &read_size, &read_reply_size,
+			&write_size, &read_entry_size, &write_entry_size);
+		/* don't use the (garbage) sizes if the lookup failed */
+		if (ret)
+			break;
+
+		/* Allocate input and output frame data */
+		output_data = kzalloc(read_reply_size, GFP_KERNEL);
+		input_data = kzalloc(read_size, GFP_KERNEL);
+		sg_table = kzalloc(2 * sizeof(*sg_table), GFP_KERNEL);
+		if (!output_data || !input_data || !sg_table) {
+			kfree(output_data);
+			kfree(input_data);
+			kfree(sg_table);
+			ret = -ENOMEM;
+			break;
+		}
+
+		/* msg_type is left zero-initialized, i.e. a read request */
+		entry = (struct pme_pmtcc_read_request_msg_t *)
+			input_data;
+		entry->header.protocol_version = pme_rev1;
+		entry->header.msg_length = read_size;
+		entry->table_id = tbl_id;
+
+		/* build compound fd: sg[0] = reply buffer, sg[1] = request */
+		memset(&fd, 0, sizeof(fd));
+		qm_sg_entry_set64(&sg_table[0], pme_suspend_map(save_db->pdev,
+			output_data));
+		sg_table[0].length = read_reply_size;
+		qm_sg_entry_set64(&sg_table[1], pme_suspend_map(save_db->pdev,
+			input_data));
+		sg_table[1].length = read_size;
+		sg_table[1].final = 1;
+		fd.format = qm_fd_compound;
+		qm_fd_addr_set64(&fd, pme_suspend_map(save_db->pdev, sg_table));
+#ifdef PME_SUSPEND_DEBUG
+		pr_info("Doing table %d\n", tbl_id);
+#endif
+		for (idx = 0; idx < num_read_entries; idx++) {
+			entry->index = idx;
+			memset(output_data, 0, read_reply_size);
+			ret = pme_pwrmgmt_ctx_pmtcc(save_db->ctx,
+				QMAN_ENQUEUE_FLAG_WAIT, &fd);
+
+			if (ret)
+				pr_err("error with pme_pwrmgmt_ctx_pmtcc\n");
+
+			wait_for_completion(&save_db->ctx->done);
+
+			status = pme_fd_res_status(&save_db->ctx->result_fd);
+			if (status) {
+				ret = -EINVAL;
+				pr_err("PMTCC read status failed %d\n", status);
+				save_db->backup_failed = 1;
+				break;
+			}
+			if (pme_fd_res_flags(&save_db->ctx->result_fd) &
+					PME_STATUS_UNRELIABLE) {
+				pr_err("pme %x\n", pme_fd_res_flags(
+					&save_db->ctx->result_fd));
+				ret = -EINVAL;
+				save_db->backup_failed = 1;
+				break;
+			}
+			/* copy the response */
+			if (tbl_id == PME_UDG_TBL ||
+					tbl_id == PME_EQUIVALENT_BYTE_TBL) {
+				/* Only copy over 8 lower bits to first byte */
+				uint32_t tmp32;
+				uint8_t tmp8;
+				memcpy(&tmp32, output_data + read_size,
+					read_entry_size);
+				tmp8 = (uint8_t)tmp32;
+				memcpy(current_tbl + (idx * 1), &tmp8, 1);
+			} else {
+				memcpy(current_tbl + (idx * write_entry_size),
+					output_data + read_size,
+					write_entry_size);
+			}
+		}
+		current_tbl += num_write_entries * write_entry_size;
+
+		/* Free input and output frame data */
+		kfree(output_data);
+		kfree(input_data);
+		kfree(sg_table);
+		/* if failed, stop saving database */
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/* Returns 1 if every byte of buf[0..size) is zero, else 0. All-zero
+ * entries need not be rewritten on restore: PME SRAM is zero after POR. */
+static int is_all_zero(uint8_t *buf, int size)
+{
+	const uint8_t *p = buf;
+	const uint8_t *end = buf + size;
+
+	while (p < end) {
+		if (*p++)
+			return 0;
+	}
+	return 1;
+}
+
+/*
+ * restore_all_tables - write the saved DDR snapshot back into PME SRAM
+ * @save_db:  portal backup state (holds the snapshot filled by
+ *            save_all_tables())
+ * @pme_rev1: PME IP revision; selects the per-version table geometries
+ *
+ * Walks the snapshot in the same table order used when saving and issues
+ * one PMTCC write per non-zero entry (all-zero entries are skipped since
+ * SRAM is zero after reset). Returns 0 on success or a negative errno;
+ * the caller resets the device on failure.
+ */
+static int restore_all_tables(struct portal_backup_info *save_db,
+	uint32_t pme_rev1)
+{
+	struct pmtcc_raw_db *db = &save_db->db;
+	enum pme_pmtcc_table_id tbl_id;
+	int i, ret = 0;
+	uint8_t *current_tbl = db->alldb;
+
+	for (i = 0; i < ARRAY_SIZE(table_list); i++) {
+		int num_read_entries, read_size, read_reply_size, write_size,
+			read_entry_size, write_entry_size, num_write_entries;
+		int idx;
+		struct pme_pmtcc_write_request_msg_t *entry;
+		struct qm_fd fd;
+		uint8_t *input_data;
+		enum pme_status status;
+
+		tbl_id = table_list[i];
+		ret = get_table_attributes(tbl_id, pme_rev1, &num_read_entries,
+			&num_write_entries, &read_size, &read_reply_size,
+			&write_size, &read_entry_size, &write_entry_size);
+		/* don't use the (garbage) sizes if the lookup failed */
+		if (ret)
+			break;
+
+		/* Allocate input frame data */
+		input_data = kzalloc(write_size, GFP_KERNEL);
+		if (!input_data) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		entry = (struct pme_pmtcc_write_request_msg_t *)
+			input_data;
+		entry->header.protocol_version = pme_rev1;
+		entry->header.msg_type = 0x01; /* write */
+		entry->header.msg_length = write_size;
+		entry->table_id = tbl_id;
+
+		/* build contiguous fd carrying the whole write request */
+		memset(&fd, 0, sizeof(fd));
+		qm_fd_addr_set64(&fd, pme_suspend_map(save_db->pdev,
+			input_data));
+		fd.format = qm_fd_contig_big;
+		fd.length29 = write_size;
+#ifdef PME_SUSPEND_DEBUG
+		pr_info("Doing table %d\n", tbl_id);
+#endif
+		for (idx = 0; idx < num_write_entries; idx++) {
+			/* all-zero entries match the SRAM reset state */
+			if (is_all_zero(current_tbl + (idx * write_entry_size),
+					write_entry_size)) {
+				continue;
+			}
+			entry->indexed_entry.index = idx;
+
+			/* entry payload sits at the tail of the request */
+			memcpy(input_data + (write_size - write_entry_size),
+				current_tbl + (idx * write_entry_size),
+				write_entry_size);
+
+			ret = pme_pwrmgmt_ctx_pmtcc(save_db->ctx,
+				QMAN_ENQUEUE_FLAG_WAIT, &fd);
+
+			if (ret)
+				pr_err("error with pmtcc\n");
+
+			wait_for_completion(&save_db->ctx->done);
+
+			status = pme_fd_res_status(&save_db->ctx->result_fd);
+			if (status) {
+				ret = -EINVAL;
+				pr_err("PMTCC write status fail %d\n", status);
+				break;
+			}
+			if (pme_fd_res_flags(&save_db->ctx->result_fd) &
+					PME_STATUS_UNRELIABLE) {
+				pr_err("pme %x\n", pme_fd_res_flags(
+					&save_db->ctx->result_fd));
+				ret = -EINVAL;
+				break;
+			}
+		}
+		current_tbl += num_write_entries * write_entry_size;
+
+		/* Free input and output frame data */
+		kfree(input_data);
+		/* if failed, stop restoring database */
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Save the PME SRAM database to the pre-allocated DDR buffer. Thin
+ * wrapper around save_all_tables(); the caller must already have the
+ * device enabled with the power-management context set as the exclusive
+ * frame queue. Returns 0 on success or a negative errno.
+ */
+int fsl_pme_save_db(struct pme2_private_data *priv_data)
+{
+	int ret;
+	struct portal_backup_info *save_db = &priv_data->save_db;
+
+#ifdef PME_SUSPEND_DEBUG
+	print_debug(priv_data->pme_rev1);
+#endif
+	ret = save_all_tables(save_db, priv_data->pme_rev1);
+
+	return ret;
+}
+
+/*
+ * Read the PME activity bit (pmstat). Returns non-zero while the device
+ * is still processing, zero once quiescent.
+ *
+ * NOTE(review): on a register read failure this returns a negative errno,
+ * which is truthy - the while (is_pme_active()) busy-wait callers would
+ * then spin forever. Confirm whether that is the intended degraded
+ * behaviour or whether errors should be treated as "not active".
+ */
+static int is_pme_active(void)
+{
+	uint32_t val;
+	int ret;
+
+	ret = pme_attr_get(pme_attr_pmstat, &val);
+	if (ret) {
+		pr_err("Error reading activity bit\n");
+		return ret;
+	}
+	return val;
+}
+
+/* Clear the per-suspend backup failure flag before a new save attempt. */
+static void reset_db_saved_state(struct portal_backup_info *db_info)
+{
+	db_info->backup_failed = 0;
+}
+
+/**
+ * pme_suspend - power management suspend function
+ *
+ * @priv_data: pme2 device private data
+ *
+ * Saves the pme device volatile state prior to suspension.
+ * CCSR space and SRAM state is saved to DDR.
+ *
+ * Sequence: disable the device and flush caches, wait for it to go
+ * quiescent, save CCSR, then briefly re-enable it with the
+ * power-management frame queue set exclusive so the SRAM database can be
+ * read out via PMTCC, and finally disable it again.
+ *
+ * Always returns 0; a failed database save is recorded in
+ * db_info->backup_failed (and the device reset) so that pme_resume()
+ * knows to skip the restore.
+ */
+int pme_suspend(struct pme2_private_data *priv_data)
+{
+	int ret;
+	struct ccsr_backup_info *ccsr_info;
+	struct portal_backup_info *db_info;
+
+	ccsr_info = &priv_data->save_ccsr;
+	db_info = &priv_data->save_db;
+
+	reset_db_saved_state(db_info);
+
+	/* remember enable/caching state so pme_resume() can restore it */
+	pme_attr_get(pme_attr_faconf_en, &ccsr_info->save_faconf_en);
+	pme_attr_get(pme_attr_cdcr, &ccsr_info->save_cdcr);
+
+	/* disable pme */
+	pme_attr_set(pme_attr_faconf_en, 0);
+	/* disable caching, only SRE will be flushed. FC caching already off */
+	pme_attr_set(pme_attr_cdcr, 0xffffffff);
+
+	/* wait until device is not active */
+	while (is_pme_active()) {
+		cpu_relax();
+		/* TODO: sanity check */
+	}
+#ifdef PME_SUSPEND_DEBUG
+	pr_info("PME is quiescent\n");
+#endif
+
+	/* save CCSR space */
+	save_all_ccsr(ccsr_info, priv_data->regs);
+
+#ifdef PME_SUSPEND_DEBUG
+	pr_info("First reg read is %u\n",
+		ccsr_info->regdb.pmfa.faconf);
+	pr_info("Last reg read is %u\n",
+		ccsr_info->regdb.gen.pm_ip_rev_2);
+#endif
+
+	/* save sram, must first configure the new exclusive fq before
+	 * enabling pme */
+	ret = pme2_exclusive_set(&db_info->ctx->rx_fq);
+	if (ret)
+		pr_err("Error getting exclusive mode\n");
+
+	/* save sram database, hook into pme_suspend. enable pme first */
+	pme_attr_set(pme_attr_faconf_en, 1);
+	/* NOTE(review): ret is intentionally not returned here; a save
+	 * failure is reported via db_info->backup_failed instead */
+	ret = fsl_pme_save_db(priv_data);
+	/* disable pme */
+	pme_attr_set(pme_attr_faconf_en, 0);
+
+	/* wait until device is not active */
+	while (is_pme_active()) {
+		cpu_relax();
+		/* TODO: sanity check */
+	}
+#ifdef PME_SUSPEND_DEBUG
+	pr_info("PME is quiescent\n");
+#endif
+
+	/* if saving db failed, reset internal state explicitly */
+	if (db_info->backup_failed) {
+		/* set the PME reset bit */
+		pme_attr_set(pme_attr_faconf_rst, 1);
+		/* clear the PME reset bit */
+		pme_attr_set(pme_attr_faconf_rst, 0);
+		/* wait until device is not active */
+		while (is_pme_active()) {
+			cpu_relax();
+			/* TODO: sanity check */
+		}
+	}
+	return 0;
+}
+
+/**
+ * pme_resume - power management resume function
+ *
+ * @priv_data: pme2 device private data
+ *
+ * Restores the pme device to its original state prior to suspension.
+ * CCSR space and SRAM state is restored.
+ *
+ * Mirrors pme_suspend(): CCSR is restored first, then (if the suspend-time
+ * save succeeded) the device is briefly enabled with the power-management
+ * frame queue exclusive so the SRAM database can be written back via
+ * PMTCC. If the restore fails the device is reset and CCSR restored
+ * again, leaving PME disabled but consistent. Always returns 0.
+ */
+int pme_resume(struct pme2_private_data *priv_data)
+{
+	int ret;
+	struct ccsr_backup_info *ccsr_info;
+	struct portal_backup_info *db_info;
+	int db_restore_failed = 0;
+
+	ccsr_info = &priv_data->save_ccsr;
+	db_info = &priv_data->save_db;
+
+#ifdef PME_SUSPEND_DEBUG
+	pr_info("fsl_pme_restore_db\n");
+	print_debug(priv_data->pme_rev1);
+#endif
+
+	/* when PME was saved, it was disabled. Therefore it will remain */
+	restore_all_ccsr(ccsr_info, priv_data->regs);
+	/* restore caching state */
+	pme_attr_set(pme_attr_cdcr, ccsr_info->save_cdcr);
+
+	/* Don't restore database if it wasn't saved properly */
+	if (db_info->backup_failed)
+		goto skip_db_restore;
+	/* set private exclusive mode before enabling pme */
+	/* save sram, must first configure the new exclusive fq before
+	 * enabling pme */
+	ret = pme2_exclusive_set(&db_info->ctx->rx_fq);
+	if (ret)
+		pr_err("Error getting exclusive mode\n");
+
+	/* save sram database, hook into pme_suspend. enable pme first */
+	pme_attr_set(pme_attr_faconf_en, 1);
+
+	ret = restore_all_tables(db_info, priv_data->pme_rev1);
+	if (ret)
+		db_restore_failed = 1;
+
+	/* disable pme */
+	pme_attr_set(pme_attr_faconf_en, 0);
+	/* wait until device is not active */
+	while (is_pme_active()) {
+		cpu_relax();
+		/* TODO: sanity check */
+	}
+	if (db_restore_failed) {
+		/* set the PME reset bit */
+		pme_attr_set(pme_attr_faconf_rst, 1);
+		/* clear the PME reset bit */
+		pme_attr_set(pme_attr_faconf_rst, 0);
+		/* when PME was saved, it was disabled. Therefore it will
+		 * remain disabled */
+		restore_all_ccsr(ccsr_info, priv_data->regs);
+		/* restore caching state */
+		pme_attr_set(pme_attr_cdcr, ccsr_info->save_cdcr);
+	}
+
+	/* restore EFQC register (clobbered by the exclusive-mode switch) */
+	pme_attr_set(pme_attr_efqc, ccsr_info->regdb.pmfa.efqc);
+
+skip_db_restore:
+	/* restore pme enable state */
+	pme_attr_set(pme_attr_faconf_en, ccsr_info->save_faconf_en);
+
+	return 0;
+}
+
+
+/**
+ * init_pme_suspend - initialize pme resources for power management
+ *
+ * @priv_data: pme2 device private data
+ *
+ * All resources required to suspend the PME device are allocated:
+ * a platform device used for DMA mapping, the power-management frame
+ * queues and the DDR snapshot buffer.
+ *
+ * Returns 0 on success or a negative errno on failure (error codes from
+ * create_pwrmgmt_ctx()/alloc_databases() are propagated).
+ */
+int init_pme_suspend(struct pme2_private_data *priv_data)
+{
+	int ret = -ENOMEM;
+	struct portal_backup_info *db_info;
+
+	db_info = &priv_data->save_db;
+
+	db_info->pdev = platform_device_alloc("fsl_pme_suspend", -1);
+	if (!db_info->pdev)
+		goto failed_alloc_device;
+	/* PME DMA addresses are 40-bit */
+	if (dma_set_mask(&db_info->pdev->dev, DMA_BIT_MASK(40)))
+		goto failed_dma_mask;
+	if (platform_device_add(db_info->pdev))
+		goto failed_device_add;
+
+	/* allocate frame queues */
+	ret = create_pwrmgmt_ctx(db_info);
+	if (ret)
+		goto failed_create_pwrmgmt_ctx;
+
+	ret = alloc_databases(priv_data);
+	if (ret)
+		goto failed_alloc_databases;
+
+	return 0;
+
+failed_alloc_databases:
+	delete_pwrmgmt_ctx(db_info);
+failed_create_pwrmgmt_ctx:
+	platform_device_del(db_info->pdev);
+failed_device_add:
+failed_dma_mask:
+	platform_device_put(db_info->pdev);
+	db_info->pdev = NULL;
+failed_alloc_device:
+	return ret;
+}
+
+/**
+ * exit_pme_suspend - release pme resources for power management
+ *
+ * @priv_data: pme2 device private data
+ *
+ * All resources required to suspend the PME device are released,
+ * in reverse order of init_pme_suspend(): the DDR snapshot buffer,
+ * the frame queues, and the platform device.
+ */
+void exit_pme_suspend(struct pme2_private_data *priv_data)
+{
+	struct portal_backup_info *db_info;
+
+	db_info = &priv_data->save_db;
+
+	free_databases(db_info);
+	delete_pwrmgmt_ctx(db_info);
+	platform_device_del(db_info->pdev);
+	platform_device_put(db_info->pdev);
+	db_info->pdev = NULL;
+}
+
+#endif /* CONFIG_PM */
+
diff --git a/drivers/uio/fsl_dma_uio.c b/drivers/uio/fsl_dma_uio.c
index fca99bf..899523f 100644
--- a/drivers/uio/fsl_dma_uio.c
+++ b/drivers/uio/fsl_dma_uio.c
@@ -25,6 +25,7 @@
#include <linux/uio_driver.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/mm.h>
static const char dma_uio_version[] = "DMA UIO driver v1.0";
@@ -82,7 +83,33 @@ static irqreturn_t dma_uio_irq_handler(int irq, struct uio_info *dev_info)
return IRQ_HANDLED;
}
-static int __init dma_chan_uio_setup(struct dma_chan *dma_ch)
+/*
+ * mmap handler for the DMA UIO device: map the UIO memory region selected
+ * by vma->vm_pgoff into userspace as non-cached (device register) memory.
+ * The requested mapping must not exceed the region's size. Returns 0 on
+ * success, -EINVAL for an out-of-range/empty region or oversized request,
+ * or the error from remap_pfn_range().
+ */
+static int dma_uio_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+	int mi;
+	struct uio_mem *mem;
+
+	/* vm_pgoff selects the map index, as per UIO convention */
+	if (vma->vm_pgoff < MAX_UIO_MAPS) {
+		if (info->mem[vma->vm_pgoff].size == 0)
+			return -EINVAL;
+		mi = (int)vma->vm_pgoff;
+	} else
+		return -EINVAL;
+
+	mem = info->mem + mi;
+
+	if (vma->vm_end - vma->vm_start > mem->size)
+		return -EINVAL;
+
+	/* device registers must not be cached */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return remap_pfn_range(vma,
+			       vma->vm_start,
+			       mem->addr >> PAGE_SHIFT,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+
+static int dma_chan_uio_setup(struct dma_chan *dma_ch)
{
int ret;
struct dma_uio_info *info;
@@ -112,8 +139,8 @@ static int __init dma_chan_uio_setup(struct dma_chan *dma_ch)
info->uio.handler = dma_uio_irq_handler;
info->uio.open = dma_uio_open;
info->uio.release = dma_uio_release;
+ info->uio.mmap = dma_uio_mmap;
info->uio.priv = dma_ch;
-
ret = uio_register_device(dma_ch->dev, &info->uio);
if (ret) {
dev_err(dma_ch->dev, "dma_uio: UIO registration failed\n");
@@ -124,13 +151,13 @@ static int __init dma_chan_uio_setup(struct dma_chan *dma_ch)
}
static int fsl_dma_chan_probe(struct fsldma_device *fdev,
- struct device_node *node)
+ struct device_node *node,
+ u32 chanid)
{
struct resource regs;
struct dma_chan *dma_ch;
struct device_node *dma_node;
int err;
- u32 *cell;
struct platform_device *dev = fdev->dev;
dma_node = node;
@@ -142,14 +169,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
return -ENOMEM;
}
- cell = (u32 *)of_get_property(dma_node, "cell-index", NULL);
- if (!cell) {
- dev_err(&dev->dev, "Can't get property 'cell-index'\n");
- return -EFAULT;
- }
dma_ch->dma_id = fdev->dma_id;
- dma_ch->ch_id = *cell;
+ dma_ch->ch_id = chanid;
dma_ch->dev = &dev->dev;
err = of_address_to_resource(dma_node, 0, &regs);
@@ -201,13 +223,8 @@ static int fsl_dma_uio_probe(struct platform_device *dev)
{
struct device_node *child;
struct fsldma_device *fdev;
- u32 *cell;
-
- cell = (u32 *)of_get_property(dev->dev.of_node, "cell-index", NULL);
- if (!cell) {
- dev_err(&dev->dev, "Can't get property 'cell-index'\n");
- return -ENODEV;
- }
+ static u32 dmaid;
+ u32 chanid = 0;
fdev = devm_kzalloc(&dev->dev, sizeof(struct fsldma_device),
GFP_KERNEL);
@@ -216,15 +233,14 @@ static int fsl_dma_uio_probe(struct platform_device *dev)
return -ENOMEM;
}
- fdev->dma_id = *cell;
+ fdev->dma_id = dmaid++;
fdev->dev = dev;
INIT_LIST_HEAD(&fdev->ch_list);
dev_set_drvdata(&dev->dev, fdev);
for_each_child_of_node(dev->dev.of_node, child)
if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
- fsl_dma_chan_probe(fdev, child);
-
+ fsl_dma_chan_probe(fdev, child, chanid++);
return 0;
}
@@ -244,9 +260,8 @@ static int fsl_dma_uio_remove(struct platform_device *dev)
static const struct of_device_id fsl_of_dma_match[] = {
- {
- .compatible = "fsl,eloplus-dma",
- },
+ { .compatible = "fsl,elo3-dma", },
+ { .compatible = "fsl,eloplus-dma", },
{}
};
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index fa27a4c..c540dfa 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -2536,8 +2536,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
if (!udc_controller)
return -ENODEV;
- usb_del_gadget_udc(&udc_controller->gadget);
udc_controller->done = &done;
+ usb_del_gadget_udc(&udc_controller->gadget);
fsl_udc_clk_release();
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 3ef6e26..c307ab0 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -38,6 +38,7 @@ struct ehci_fsl {
struct ehci_hcd ehci;
#ifdef CONFIG_PM
+ struct ehci_regs *saved_regs;
/* Saved USB PHY settings, need to restore after deep sleep. */
u32 usb_ctrl;
#endif
@@ -482,7 +483,43 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
return retval;
}
+
+
+
#ifdef CONFIG_PM
+/*
+ * Save the EHCI operational register block and the FSL USB control
+ * register to a heap bounce buffer before suspend; the counterpart is
+ * ehci_fsl_restore_context(). Returns 0 on success, -ENOMEM if the
+ * buffer cannot be allocated (the suspend then proceeds without a
+ * saved register image).
+ */
+static int ehci_fsl_save_context(struct usb_hcd *hcd)
+{
+	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	void __iomem *non_ehci = hcd->regs;
+
+	ehci_fsl->saved_regs = kzalloc(sizeof(struct ehci_regs), GFP_KERNEL);
+	if (!ehci_fsl->saved_regs)
+		return -ENOMEM;
+	/* byte-wise copy from MMIO space into the DDR buffer */
+	_memcpy_fromio(ehci_fsl->saved_regs, ehci->regs,
+				sizeof(struct ehci_regs));
+	ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
+	return 0;
+
+}
+
+/*
+ * Restore the EHCI operational registers and FSL USB control register
+ * saved by ehci_fsl_save_context(), then free the bounce buffer. A no-op
+ * (still returning 0) if no context was saved, e.g. when the save-time
+ * allocation failed.
+ */
+static int ehci_fsl_restore_context(struct usb_hcd *hcd)
+{
+	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	void __iomem *non_ehci = hcd->regs;
+
+	if (ehci_fsl->saved_regs) {
+		_memcpy_toio(ehci->regs, ehci_fsl->saved_regs,
+				sizeof(struct ehci_regs));
+		out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
+		kfree(ehci_fsl->saved_regs);
+		ehci_fsl->saved_regs = NULL;
+	}
+	return 0;
+}
#ifdef CONFIG_PPC_MPC512x
static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
@@ -658,6 +695,9 @@ static int ehci_fsl_drv_suspend(struct device *dev)
ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
device_may_wakeup(dev));
+
+ ehci_fsl_save_context(hcd);
+
if (!fsl_deep_sleep())
return 0;
@@ -671,6 +711,9 @@ static int ehci_fsl_drv_resume(struct device *dev)
struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
+
+ ehci_fsl_restore_context(hcd);
+
#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE)
struct usb_bus host = hcd->self;
#endif
diff --git a/include/linux/fsl_pme.h b/include/linux/fsl_pme.h
index 330b10b..fbeec4f 100644
--- a/include/linux/fsl_pme.h
+++ b/include/linux/fsl_pme.h
@@ -770,6 +770,9 @@ enum pme_attr {
pme_attr_scbarl,
pme_attr_bsc_first, /* create 64-wide space for bsc */
pme_attr_bsc_last = pme_attr_bsc_first + 63,
+ pme_attr_faconf_rst,
+ pme_attr_faconf_en,
+ pme_attr_efqc
};
#define pme_attr_bsc(n) (pme_attr_bsc_first + (n))