Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/caam/Kconfig | 12
-rw-r--r--  drivers/crypto/caam/caamhash.c | 4
-rw-r--r--  drivers/crypto/caam/ctrl.c | 74
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/jr.c | 4
-rw-r--r--  drivers/crypto/caam/key_gen.c | 2
-rw-r--r--  drivers/crypto/caam/key_gen.h | 2
-rw-r--r--  drivers/crypto/caam/regs.h | 12
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 38
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpa/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpa/Makefile | 10
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpa-ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_1588.c | 31
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h | 89
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth.c | 3875
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth.h | 200
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c | 1770
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_common.h | 133
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_macless.c | 407
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c | 719
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_proxy.c | 188
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c | 275
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c | 786
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c | 36
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c | 407
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.h | 41
-rw-r--r--  drivers/net/ethernet/freescale/dpa/mac-api.c | 496
-rw-r--r--  drivers/net/ethernet/freescale/dpa/mac.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpa/mac.h | 46
-rw-r--r--  drivers/net/ethernet/freescale/dpa/offline_port.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_fsl_fman.h | 93
-rw-r--r--  drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c | 458
-rw-r--r--  drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c | 49
-rw-r--r--  drivers/pci/quirks.c | 10
-rw-r--r--  drivers/staging/fsl_qbman/Kconfig | 7
-rw-r--r--  drivers/staging/fsl_qbman/Makefile | 1
-rw-r--r--  drivers/staging/fsl_qbman/bman_config.c | 4
-rw-r--r--  drivers/staging/fsl_qbman/bman_driver.c | 11
-rw-r--r--  drivers/staging/fsl_qbman/bman_high.c | 20
-rw-r--r--  drivers/staging/fsl_qbman/bman_private.h | 8
-rw-r--r--  drivers/staging/fsl_qbman/dpa_sys.h | 5
-rw-r--r--  drivers/staging/fsl_qbman/fsl_usdpaa.c | 56
-rw-r--r--  drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 80
-rw-r--r--  drivers/staging/fsl_qbman/qbman_driver.c | 75
-rw-r--r--  drivers/staging/fsl_qbman/qman_config.c | 4
-rw-r--r--  drivers/staging/fsl_qbman/qman_driver.c | 11
-rw-r--r--  drivers/staging/fsl_qbman/qman_high.c | 22
-rw-r--r--  drivers/staging/fsl_qbman/qman_private.h | 14
-rw-r--r--  drivers/usb/host/ehci-fsl.c | 51
-rw-r--r--  drivers/usb/host/ehci-fsl.h | 3
-rw-r--r--  drivers/usb/host/fsl-mph-dr-of.c | 18
55 files changed, 6079 insertions, 4622 deletions
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 6b07017..1f1630e 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -82,13 +82,13 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
Selecting this will use SEC Queue Interface for sending
& receiving crypto jobs to/from SEC. This gives better
performance than job ring interface when the number of
- number of cores are more than the number of job rings
- assigned to kernel. Also the number of portals assigned
- to kernel should be more than the number of job rings.
+ cores are more than the number of job rings assigned to
+ the kernel. The number of portals assigned to the kernel
+ should also be more than the number of job rings.
- Currently only AEAD algorithms have been implemented to
- use SEC-QI backend interface. Rest of the algorithms
- would use job ring interface.
+ Currently, only AEAD algorithms have been implemented on
+ top of SEC-QI backend interface. The rest of the algorithms
+ use job ring interface.
To compile this as a module, choose M here: the module
will be called caamalg_qi.
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index bf8c77c..98429b8 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -409,7 +409,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 keylen)
{
return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
@@ -418,7 +418,7 @@ static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Digest hash size if it is too large */
-static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
{
struct device *jrdev = ctx->jrdev;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 33b3899..70bdbcc 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -84,55 +84,53 @@ static void build_instantiation_desc(u32 *desc)
OP_ALG_RNG4_SK);
}
-struct instantiate_result {
- struct completion completion;
- int err;
-};
-
-static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
- void *context)
+static int instantiate_rng(struct device *ctrldev)
{
- struct instantiate_result *instantiation = context;
-
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
-
- instantiation->err = err;
- complete(&instantiation->completion);
-}
-
-static int instantiate_rng(struct device *jrdev)
-{
- struct instantiate_result instantiation;
-
- dma_addr_t desc_dma;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_full __iomem *topregs;
+ unsigned int timeout = 100000;
u32 *desc;
- int ret;
+ int i, ret = 0;
desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
if (!desc) {
- dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
+ dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
return -ENOMEM;
}
-
build_instantiation_desc(desc);
- desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
- init_completion(&instantiation.completion);
- ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
- if (!ret) {
- wait_for_completion_interruptible(&instantiation.completion);
- ret = instantiation.err;
- if (ret)
- dev_err(jrdev, "unable to instantiate RNG\n");
+
+ /* Set the bit to request direct access to DECO0 */
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+
+ while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
+ --timeout)
+ cpu_relax();
+
+ if (!timeout) {
+ dev_err(ctrldev, "failed to acquire DECO 0\n");
+ ret = -EIO;
+ goto out;
}
- dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);
+ for (i = 0; i < desc_len(desc); i++)
+ topregs->deco.descbuf[i] = *(desc + i);
- kfree(desc);
+ wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR);
+
+ timeout = 10000000;
+ while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) &&
+ --timeout)
+ cpu_relax();
+ if (!timeout) {
+ dev_err(ctrldev, "failed to instantiate RNG\n");
+ ret = -EIO;
+ }
+
+ clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+out:
+ kfree(desc);
return ret;
}
@@ -318,7 +316,7 @@ static int caam_probe(struct platform_device *pdev)
if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
!(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
kick_trng(pdev);
- ret = instantiate_rng(ctrlpriv->jrdev[0]);
+ ret = instantiate_rng(dev);
if (ret) {
caam_remove(pdev);
return ret;
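
The rewritten instantiate_rng() above replaces the job-ring enqueue/completion path with a direct DECO0 handshake: request access, poll for the grant, copy the descriptor into the DECO descriptor buffer, kick it, then poll the debug register until execution finishes. Below is a minimal, self-contained sketch of that request/poll/timeout idiom; the register name, bit values and accessors are placeholders rather than the CAAM ones, and the driver itself uses setbits32()/rd_reg32() with cpu_relax() instead.

/* Sketch only: bounded polling for a hardware grant bit, mirroring the
 * DECORR_RQD0ENABLE / DECORR_DEN0 handshake in instantiate_rng(). */
#include <stdint.h>
#include <errno.h>

#define REQ_ENABLE 0x00000001u	/* request direct access */
#define GRANTED    0x00010000u	/* engine available */

static int acquire_engine(volatile uint32_t *reg)
{
	unsigned int timeout = 100000;

	*reg |= REQ_ENABLE;
	while (!(*reg & GRANTED) && --timeout)
		;	/* the driver calls cpu_relax() here */

	return timeout ? 0 : -EIO;	/* grant never arrived */
}
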
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 9f155fb..9f74c4f 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -43,6 +43,7 @@ struct caam_jrentry_info {
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
struct device *parentdev; /* points back to controller dev */
+ struct platform_device *jr_pdev;/* points to platform device for JR */
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct napi_struct __percpu *irqtask;
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 7b57d00..0923516 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -441,6 +441,7 @@ int caam_jr_shutdown(struct device *dev)
dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
jrp->outring, outbusaddr);
kfree(jrp->entinfo);
+ of_device_unregister(jrp->jr_pdev);
return ret;
}
@@ -488,6 +489,8 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
kfree(jrpriv);
return -EINVAL;
}
+
+ jrpriv->jr_pdev = jr_pdev;
jrdev = &jr_pdev->dev;
dev_set_drvdata(jrdev, jrpriv);
ctrlpriv->jrdev[ring] = jrdev;
@@ -506,6 +509,7 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error) {
+ of_device_unregister(jr_pdev);
kfree(jrpriv);
return error;
}
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index f6dba10..87138d2 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -44,7 +44,7 @@ Split key generation-----------------------------------------------
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
-u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int split_key_pad_len, const u8 *key_in, u32 keylen,
u32 alg_op)
{
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index d95d290..c5588f6 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@ struct split_key_result {
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
-u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int split_key_pad_len, const u8 *key_in, u32 keylen,
u32 alg_op);
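
The u32-to-int return type changes in caamhash.c, key_gen.c and key_gen.h above all have the same goal: letting negative error codes from the job ring propagate to callers. A small stand-alone illustration (not driver code) of why the signedness matters:

/* Illustration only: with an unsigned return type, "ret < 0" can never
 * be true, so errors are silently missed; the u32 -> int conversion
 * above fixes exactly this class of check. */
#include <stdio.h>

static unsigned int ret_u32(void) { return (unsigned int)-5; }	/* e.g. -EIO */
static int ret_int(void) { return -5; }

int main(void)
{
	printf("u32 looks negative? %d\n", ret_u32() < 0);	/* prints 0 */
	printf("int looks negative? %d\n", ret_int() < 0);	/* prints 1 */
	return 0;
}
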
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index c09142f..4455396 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -341,6 +341,8 @@ struct caam_ctrl {
#define MCFGR_DMA_RESET 0x10000000
#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
#define SCFGR_RDBENABLE 0x00000400
+#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */
+#define DECORR_DEN0 0x00010000 /* DECO0 available for access*/
/* AXI read cache control */
#define MCFGR_ARCACHE_SHIFT 12
@@ -703,9 +705,16 @@ struct caam_deco {
struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */
u32 rsvd29[48];
u32 descbuf[64]; /* DxDESB - Descriptor buffer */
- u32 rsvd30[320];
+ u32 rscvd30[193];
+ u32 desc_dbg; /* DxDDR - DECO Debug Register */
+ u32 rsvd31[126];
};
+/* DECO DBG Register Valid Bit*/
+#define DECO_DBG_VALID 0x80000000
+#define DECO_JQCR_WHL 0x20000000
+#define DECO_JQCR_FOUR 0x10000000
+
/*
* Current top-level view of memory map is:
*
@@ -733,6 +742,7 @@ struct caam_full {
u64 rsvd[512];
struct caam_assurance assure;
struct caam_queue_if qi;
+ struct caam_deco deco;
};
#endif /* REGS_H */
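
One detail worth noting in the struct caam_deco change above: the DECO debug register is carved out of what used to be a single 320-word reserved block, and the split preserves the overall size, so everything after the gap (including struct caam_full) keeps its layout:

/* Size check for the reserved-region split in struct caam_deco:
 *
 *   old: u32 rsvd30[320]                        = 320 words
 *   new: u32 rscvd30[193] + u32 desc_dbg (1)
 *        + u32 rsvd31[126]                      = 193 + 1 + 126 = 320 words
 *
 * desc_dbg therefore sits 193 words past the end of descbuf[], with the
 * rest of the register map unchanged.
 */
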
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 6e46d2f..291bad1 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -24,6 +24,9 @@
#define VENDOR_V_22 0x12
#define VENDOR_V_23 0x13
+
+static u32 svr;
+
static u32 esdhc_readl(struct sdhci_host *host, int reg)
{
u32 ret;
@@ -146,7 +149,7 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
if (!host->pwr || !val)
return;
- if (fsl_svr_is(SVR_T4240)) {
+ if (SVR_SOC_VER(svr) == SVR_T4240) {
u8 vol;
vol = sdhci_be32bs_readb(host, reg);
@@ -202,11 +205,21 @@ static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
* Check for A-004388: eSDHC DMA might not stop if error
* occurs on system transaction
* Impact list:
- * T4240-R1.0 B4860-R1.0 P1010-R1.0
+ * T4240-4160-R1.0 B4860-4420-R1.0 P1010-1014-R1.0
+ * P3041-R1.0-R2.0-R1.1 P2041-2040-R1.0-R1.1-R2.0
+ * P5040-5021-R2.0
*/
- if (!((fsl_svr_is(SVR_T4240) && fsl_svr_rev_is(1, 0)) ||
- (fsl_svr_is(SVR_B4860) && fsl_svr_rev_is(1, 0)) ||
- (fsl_svr_is(SVR_P1010) && fsl_svr_rev_is(1, 0))))
+ if (!(((SVR_SOC_VER(svr) == SVR_T4240) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_T4160) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_B4420) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_P1010) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_P1014) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_P3041) && (SVR_REV(svr) <= 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_P2041) && (SVR_REV(svr) <= 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_P2040) && (SVR_REV(svr) <= 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_P5021) && (SVR_REV(svr) == 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_P5040) && (SVR_REV(svr) == 0x20))))
return;
if (host->flags & SDHCI_USE_ADMA) {
@@ -335,6 +348,7 @@ static void esdhc_of_platform_init(struct sdhci_host *host)
{
u32 vvn;
+ svr = mfspr(SPRN_SVR);
vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
if (vvn == VENDOR_V_22)
@@ -350,14 +364,12 @@ static void esdhc_of_platform_init(struct sdhci_host *host)
* T4240-R1.0 B4860-R1.0 P3041-R1.0 P3041-R2.0 P2041-R1.0
* P2041-R1.1 P2041-R2.0 P1010-R1.0
*/
- if ((fsl_svr_is(SVR_T4240) && fsl_svr_rev_is(1, 0)) ||
- (fsl_svr_is(SVR_B4860) && fsl_svr_rev_is(1, 0)) ||
- (fsl_svr_is(SVR_P3041) && fsl_svr_rev_is(1, 0)) ||
- (fsl_svr_is(SVR_P3041) && fsl_svr_rev_is(2, 0)) ||
- (fsl_svr_is(SVR_P2041) && fsl_svr_rev_is(2, 0)) ||
- (fsl_svr_is(SVR_P2041) && fsl_svr_rev_is(1, 1)) ||
- (fsl_svr_is(SVR_P2041) && fsl_svr_rev_is(1, 0)) ||
- (fsl_svr_is(SVR_P1010) && fsl_svr_rev_is(1, 0)))
+ if (((SVR_SOC_VER(svr) == SVR_T4240) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_P1010) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_P3041) && (SVR_REV(svr) == 0x10)) ||
+ ((SVR_SOC_VER(svr) == SVR_P3041) && (SVR_REV(svr) == 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_P2041) && (SVR_REV(svr) <= 0x20)))
host->quirks2 |= SDHCI_QUIRK2_BROKEN_RESET_ALL;
}
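
The erratum checks above drop the fsl_svr_is()/fsl_svr_rev_is() helpers in favour of decoding a cached mfspr(SPRN_SVR) value with SVR_SOC_VER() and SVR_REV(). The substitutions in the diff imply the revision encoding the comparisons rely on; the summary below is an assumption drawn from those substitutions, not from the macro definitions themselves.

/* Assumed SVR revision encoding, as implied by the replacements above:
 * the revision byte packs major.minor as two nibbles, so
 *
 *   fsl_svr_rev_is(1, 0)  ->  SVR_REV(svr) == 0x10
 *   fsl_svr_rev_is(1, 1)  ->  SVR_REV(svr) == 0x11
 *   fsl_svr_rev_is(2, 0)  ->  SVR_REV(svr) == 0x20
 *
 * which is why checks for "rev 1.0, 1.1 or 2.0" collapse into
 * SVR_REV(svr) <= 0x20 for the P2041/P3041 entries.
 */
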
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 59d79a7..7ade389 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -94,7 +94,11 @@ void sdhci_get_of_property(struct platform_device *pdev)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
}
+ if (of_device_is_compatible(np, "fsl,p5020-esdhc"))
+ host->quirks2 |= SDHCI_QUIRK2_LONG_TIME_CMD_COMPLETE_IRQ;
+
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
+ of_device_is_compatible(np, "fsl,p5020-esdhc") ||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
of_device_is_compatible(np, "fsl,p1020-esdhc"))
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/net/ethernet/freescale/dpa/Kconfig b/drivers/net/ethernet/freescale/dpa/Kconfig
index aa0d1e6..be62a1d 100644
--- a/drivers/net/ethernet/freescale/dpa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpa/Kconfig
@@ -122,9 +122,11 @@ config FSL_DPAA_ETH_REFILL_THRESHOLD
modify this value unless one has very specific performance reasons.
config FSL_DPAA_ETH_UNIT_TESTS
- bool
+ bool "DPAA Ethernet unit tests"
depends on FSL_DPAA_ETH
default n
+ ---help---
+ A series of tests that will verify basic functionality of DPAA Ethernet driver at bootup.
config FSL_DPAA_ETH_DEBUGFS
tristate "DPAA Ethernet debugfs interface"
diff --git a/drivers/net/ethernet/freescale/dpa/Makefile b/drivers/net/ethernet/freescale/dpa/Makefile
index 7d4b9c7..413bd06 100644
--- a/drivers/net/ethernet/freescale/dpa/Makefile
+++ b/drivers/net/ethernet/freescale/dpa/Makefile
@@ -12,11 +12,17 @@ obj-$(CONFIG_FSL_DPAA_1588) += dpaa_1588.o
obj-$(CONFIG_FSL_DPAA_ETH_SG_SUPPORT) += fsl-dpa-sg.o
# dpaa_debugfs needs to be initialized before dpaa_eth
obj-$(CONFIG_FSL_DPAA_ETH_DEBUGFS) += dpaa_debugfs.o
-obj-$(CONFIG_FSL_DPAA_ETH) += fsl-mac.o fsl-dpa.o
+obj-$(CONFIG_FSL_DPAA_ETH) += fsl-mac.o fsl-dpa.o \
+ fsl-dpa-shared.o fsl-dpa-macless.o fsl-dpa-proxy.o
obj-$(CONFIG_FSL_DPAA_OFFLINE_PORTS) += fsl-oh.o
+obj-$(CONFIG_FSL_DPAA_ETH_UNIT_TESTS) += dpaa_eth_unit_test.o
-fsl-dpa-objs := dpa-ethtool.o dpaa_eth.o dpaa_eth_sysfs.o
+fsl-dpa-objs := dpa-ethtool.o dpaa_eth_common.o dpaa_eth_sysfs.o \
+ dpaa_eth.o dpaa_eth_non_sg.o
fsl-dpa-sg-objs := dpaa_eth_sg.o
+fsl-dpa-shared-objs := dpaa_eth_shared.o
+fsl-dpa-macless-objs := dpaa_eth_macless.o
+fsl-dpa-proxy-objs := dpaa_eth_proxy.o
fsl-mac-objs := mac.o mac-api.o
fsl-oh-objs := offline_port.o
diff --git a/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c b/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c
index c5f6225..34681d1 100644
--- a/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c
@@ -191,14 +191,16 @@ int __cold dpa_set_pauseparam(struct net_device *net_dev, struct ethtool_pausepa
}
en = et_pauseparam->rx_pause ? true : false;
- _errno = priv->mac_dev->set_rx_pause(priv->mac_dev, en);
+ _errno = priv->mac_dev->set_rx_pause(
+ priv->mac_dev->get_mac_handle(priv->mac_dev), en);
if (unlikely(_errno < 0)) {
netdev_err(net_dev, "set_rx_pause() = %d\n", _errno);
return _errno;
}
en = et_pauseparam->tx_pause ? true : false;
- _errno = priv->mac_dev->set_tx_pause(priv->mac_dev, en);
+ _errno = priv->mac_dev->set_tx_pause(
+ priv->mac_dev->get_mac_handle(priv->mac_dev), en);
if (unlikely(_errno < 0)) {
netdev_err(net_dev, "set_tx_pause() = %d\n", _errno);
return _errno;
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_1588.c b/drivers/net/ethernet/freescale/dpa/dpaa_1588.c
index bdc4caf..501eacf 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_1588.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_1588.c
@@ -31,6 +31,7 @@
#include <linux/udp.h>
#include <asm/div64.h>
#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
#include "dpaa_1588.h"
static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
@@ -390,18 +391,20 @@ static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
u64 tmp, fiper;
if (mac_dev->fm_rtc_disable)
- mac_dev->fm_rtc_disable(tsu->dpa_priv->net_dev);
+ mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
/* TMR_FIPER1 will pulse every second after ALARM1 expired */
tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
if (mac_dev->fm_rtc_set_alarm)
- mac_dev->fm_rtc_set_alarm(tsu->dpa_priv->net_dev, 0, tmp);
+ mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
+ 0, tmp);
if (mac_dev->fm_rtc_set_fiper)
- mac_dev->fm_rtc_set_fiper(tsu->dpa_priv->net_dev, 0, fiper);
+ mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
+ 0, fiper);
if (mac_dev->fm_rtc_enable)
- mac_dev->fm_rtc_enable(tsu->dpa_priv->net_dev);
+ mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
}
static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
@@ -412,7 +415,8 @@ static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
u32 mod;
if (mac_dev->fm_rtc_get_cnt)
- mac_dev->fm_rtc_get_cnt(tsu->dpa_priv->net_dev, &tmp);
+ mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
+ &tmp);
mod = do_div(tmp, NANOSEC_PER_SECOND);
curr_time->sec = (u32)tmp;
@@ -428,7 +432,8 @@ static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
if (mac_dev->fm_rtc_set_cnt)
- mac_dev->fm_rtc_set_cnt(tsu->dpa_priv->net_dev, tmp);
+ mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
+ tmp);
/* Restart fiper two seconds later */
cnt_time->sec += 2;
@@ -442,7 +447,8 @@ static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
u32 drift;
if (mac_dev->fm_rtc_get_drift)
- mac_dev->fm_rtc_get_drift(tsu->dpa_priv->net_dev, &drift);
+ mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
+ &drift);
*addend = drift;
}
@@ -452,7 +458,8 @@ static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
if (mac_dev->fm_rtc_set_drift)
- mac_dev->fm_rtc_set_drift(tsu->dpa_priv->net_dev, addend);
+ mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
+ addend);
}
static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
@@ -479,16 +486,16 @@ int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
case PTP_ENBL_TXTS_IOCTL:
tsu->hwts_tx_en_ioctl = 1;
if (mac_dev->fm_rtc_enable)
- mac_dev->fm_rtc_enable(dev);
+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
if (mac_dev->ptp_enable)
- mac_dev->ptp_enable(mac_dev);
+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
break;
case PTP_DSBL_TXTS_IOCTL:
tsu->hwts_tx_en_ioctl = 0;
if (mac_dev->fm_rtc_disable)
- mac_dev->fm_rtc_disable(dev);
+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
if (mac_dev->ptp_disable)
- mac_dev->ptp_disable(mac_dev);
+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
break;
case PTP_ENBL_RXTS_IOCTL:
tsu->hwts_rx_en_ioctl = 1;
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c b/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c
index a0ae535..f84b19e 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c
@@ -78,12 +78,8 @@ static int dpa_debugfs_show(struct seq_file *file, void *offset)
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- /* Only private interfaces have an associated counter for bp
- * buffers. Also the counter isn't initialized before the first
- * ifconfig up
- */
- if (!priv->shared && percpu_priv->dpa_bp_count)
- dpa_bp_count = *percpu_priv->dpa_bp_count;
+ if (dpa_bp->percpu_count)
+ dpa_bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
total.in_interrupt += percpu_priv->in_interrupt;
total.stats.rx_packets += percpu_priv->stats.rx_packets;
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h
deleted file mode 100644
index b3b0fd1..0000000
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2008-2012 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __DPA_COMMON_H
-#define __DPA_COMMON_H
-
-#include <linux/kernel.h> /* pr_*() */
-#include <linux/device.h> /* dev_*() */
-#include <linux/smp.h> /* smp_processor_id() */
-
-#define __hot
-
-/* Simple enum of FQ types - used for array indexing */
-enum port_type {RX, TX};
-
-/* More detailed FQ types - used for fine-grained WQ assignments */
-enum dpa_fq_type {
- FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
- FQ_TYPE_RX_ERROR, /* Rx Error FQs */
- FQ_TYPE_RX_PCD, /* User-defined PCDs */
- FQ_TYPE_TX, /* "Real" Tx FQs */
- FQ_TYPE_TX_CONFIRM, /* Tx Confirmation FQs (actually Rx FQs) */
- FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
- FQ_TYPE_TX_RECYCLE, /* Tx FQs for recycleable frames only */
-#endif
-};
-
-/* TODO: This structure should be renamed & moved to the FMD wrapper */
-struct dpa_buffer_layout_s {
- uint16_t priv_data_size;
- bool parse_results;
- bool time_stamp;
- bool hash_results;
- uint8_t manip_extra_space;
- uint16_t data_align;
-};
-
-#define DPA_TX_PRIV_DATA_SIZE 16
-#define DPA_PARSE_RESULTS_SIZE sizeof(t_FmPrsResult)
-#define DPA_TIME_STAMP_SIZE 8
-#define DPA_HASH_RESULTS_SIZE 8
-
-
-#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
- frag_enabled) \
-{ \
- param.errq = errq_id; \
- param.defq = defq_id; \
- param.priv_data_size = buf_layout->priv_data_size; \
- param.parse_results = buf_layout->parse_results; \
- param.hash_results = buf_layout->hash_results; \
- param.frag_enable = frag_enabled; \
- param.time_stamp = buf_layout->time_stamp; \
- param.manip_extra_space = buf_layout->manip_extra_space; \
- param.data_align = buf_layout->data_align; \
- fm_set_##type##_port_params(port, &param); \
-}
-
-#endif /* __DPA_COMMON_H */
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
index 55edca4..5709765 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2012 Freescale Semiconductor Inc.
+ * Copyright 2008-2013 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,13 +36,10 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/sort.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/kthread.h>
#include <linux/io.h>
-#include <linux/etherdevice.h>
#include <linux/if_arp.h> /* arp_hdr_len() */
#include <linux/if_vlan.h> /* VLAN_HLEN */
#include <linux/icmp.h> /* struct icmphdr */
@@ -51,7 +48,6 @@
#include <linux/udp.h> /* struct udphdr */
#include <linux/tcp.h> /* struct tcphdr */
#include <linux/net.h> /* net_ratelimit() */
-#include <linux/net_tstamp.h> /* struct hwtstamp_config */
#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
#include <linux/highmem.h>
#include <linux/percpu.h>
@@ -65,10 +61,13 @@
#include "mac.h"
#include "dpaa_eth.h"
-#include "dpaa_1588.h"
+#include "dpaa_eth_common.h"
#ifdef CONFIG_FSL_DPAA_ETH_DEBUGFS
#include "dpaa_debugfs.h"
#endif /* CONFIG_FSL_DPAA_ETH_DEBUGFS */
+#ifdef CONFIG_FSL_DPAA_ETH_UNIT_TESTS
+#include "dpaa_eth_unit_test.h"
+#endif /* CONFIG_FSL_DPAA_ETH_UNIT_TESTS */
/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
* using trace events only need to #include <trace/events/sched.h>
@@ -76,78 +75,11 @@
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"
-
-#define ARRAY2_SIZE(arr) (ARRAY_SIZE(arr) * ARRAY_SIZE((arr)[0]))
-
-/* DPAA platforms benefit from hardware-assisted queue management */
-#ifdef CONFIG_AS_FASTPATH
-#define DPA_NETIF_FEATURES (NETIF_F_HW_QDISC | NETIF_F_HW_ACCEL_MQ)
-#else
-#define DPA_NETIF_FEATURES NETIF_F_HW_ACCEL_MQ
-#endif
-
-#ifdef CONFIG_FSL_DPAA_ETH_UNIT_TESTS
-#undef CONFIG_FSL_DPAA_ETH_UNIT_TESTS
-#endif
-
#define DPA_NAPI_WEIGHT 64
-/* Size in bytes of the Congestion State notification threshold on 10G ports */
-#define DPA_CS_THRESHOLD_10G 0x10000000
-/*
- * Size in bytes of the Congestion State notification threshold on 1G ports.
-
- * The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
- * (e.g. by sending UDP datagrams at "while(1) speed"),
- * and the larger the frame size, the more acute the problem.
- *
- * So we have to find a balance between these factors:
- * - avoiding the device staying congested for a prolonged time (risking
- * the netdev watchdog to fire - see also the tx_timeout module param);
- * - affecting performance of protocols such as TCP, which otherwise
- * behave well under the congestion notification mechanism;
- * - preventing the Tx cores from tightly-looping (as if the congestion
- * threshold was too low to be effective);
- * - running out of memory if the CS threshold is set too high.
- */
-#define DPA_CS_THRESHOLD_1G 0x06000000
-
-/* Size in bytes of the FQ taildrop threshold */
-#define DPA_FQ_TD 0x200000
-
-/* S/G table requires at least 256 bytes */
-#define sgt_buffer_size(priv) \
- dpa_get_buffer_size(&priv->buf_layout[TX], 256)
-
-/* Maximum frame size on Tx for which skb copying is preferrable to
- * creating a S/G frame */
-#define DPA_SKB_COPY_MAX_SIZE 256
-
/* Valid checksum indication */
#define DPA_CSUM_VALID 0xFFFF
-/* Maximum offset value for a contig or sg FD (represented on 9bits) */
-#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
-
-/*
- * Maximum size of a buffer for which recycling is allowed.
- * We need an upper limit such that forwarded skbs that get reallocated on Tx
- * aren't allowed to grow unboundedly. On the other hand, we need to make sure
- * that skbs allocated by us will not fail to be recycled due to their size.
- *
- * For a requested size, the kernel allocator provides the next power of two
- * sized block, which the stack will use as is, regardless of the actual size
- * it required; since we must acommodate at most 9.6K buffers (L2 maximum
- * supported frame size), set the recycling upper limit to 16K.
- */
-#define DPA_RECYCLE_MAX_SIZE 16384
-
-/* For MAC-based interfaces, we compute the tx needed headroom from the
- * associated Tx port's buffer layout settings.
- * For MACless interfaces just use a default value.
- */
-#define DPA_DEFAULT_TX_HEADROOM 64
-
#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
MODULE_LICENSE("Dual BSD/GPL");
@@ -166,8 +98,8 @@ module_param(tx_timeout, ushort, S_IRUGO);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
/* dpaa_eth mirror for the FMan values */
-static int dpa_rx_extra_headroom;
-static int dpa_max_frm;
+int dpa_rx_extra_headroom;
+int dpa_max_frm;
static const char rtx[][3] = {
[RX] = "RX",
@@ -186,14 +118,10 @@ int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
-static struct dpa_bp *dpa_bp_array[64];
-
-static struct dpa_bp *default_pool;
-static bool default_pool_seeded;
-static uint32_t default_buf_size;
+uint8_t dpa_priv_common_bpid;
/* A set of callbacks for hooking into the fastpath at different points. */
-static struct dpaa_eth_hooks_s dpaa_eth_hooks;
+struct dpaa_eth_hooks_s dpaa_eth_hooks;
/*
* This function should only be called on the probe paths, since it makes no
* effort to guarantee consistency of the destination hooks structure.
@@ -207,1109 +135,6 @@ void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
}
EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
-
-struct dpa_bp *dpa_bpid2pool(int bpid)
-{
- return dpa_bp_array[bpid];
-}
-
-static void dpa_bp_depletion(struct bman_portal *portal,
- struct bman_pool *pool, void *cb_ctx, int depleted)
-{
- if (net_ratelimit())
- pr_err("Invalid Pool depleted notification!\n");
-}
-
-/*
- * Copy from a memory region that requires kmapping to a linear buffer,
- * taking into account page boundaries in the source
- */
-static void
-copy_from_unmapped_area(void *dest, dma_addr_t phys_start, size_t buf_size)
-{
- struct page *page;
- size_t size, offset;
- void *page_vaddr;
-
- while (buf_size > 0) {
- offset = offset_in_page(phys_start);
- size = (offset + buf_size > PAGE_SIZE) ?
- PAGE_SIZE - offset : buf_size;
-
- page = pfn_to_page(phys_start >> PAGE_SHIFT);
- page_vaddr = kmap_atomic(page);
-
- memcpy(dest, page_vaddr + offset, size);
-
- kunmap_atomic(page_vaddr);
-
- phys_start += size;
- dest += size;
- buf_size -= size;
- }
-}
-
-/*
- * Copy to a memory region that requires kmapping from a linear buffer,
- * taking into account page boundaries in the destination
- */
-static void
-copy_to_unmapped_area(dma_addr_t phys_start, void *src, size_t buf_size)
-{
- struct page *page;
- size_t size, offset;
- void *page_vaddr;
-
- while (buf_size > 0) {
- offset = offset_in_page(phys_start);
- size = (offset + buf_size > PAGE_SIZE) ?
- PAGE_SIZE - offset : buf_size;
-
- page = pfn_to_page(phys_start >> PAGE_SHIFT);
- page_vaddr = kmap_atomic(page);
-
- memcpy(page_vaddr + offset, src, size);
-
- kunmap_atomic(page_vaddr);
-
- phys_start += size;
- src += size;
- buf_size -= size;
- }
-}
-
-#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-/* Allocate 8 socket buffers.
- * These buffers are counted for a particular CPU.
- */
-static void dpa_bp_add_8(const struct dpa_bp *dpa_bp, unsigned int cpu)
-{
- struct bm_buffer bmb[8];
- struct sk_buff **skbh;
- dma_addr_t addr;
- int i;
- struct sk_buff *skb;
- int *count_ptr;
-
- count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
-
- for (i = 0; i < 8; i++) {
- /*
- * The buffers tend to be aligned all to the same cache
- * index. A standard dequeue operation pulls in 15 packets.
- * This means that when it stashes, it evicts half of the
- * packets it's stashing. In order to prevent that, we pad
- * by a variable number of cache lines, to reduce collisions.
- * We always pad by at least 1 cache line, because we want
- * a little extra room at the beginning for IPSec and to
- * accommodate NET_IP_ALIGN.
- */
- int pad = (i + 1) * L1_CACHE_BYTES;
-
- skb = dev_alloc_skb(dpa_bp->size + pad);
- if (unlikely(!skb)) {
- printk(KERN_ERR "dev_alloc_skb() failed\n");
- bm_buffer_set64(&bmb[i], 0);
- break;
- }
-
- skbh = (struct sk_buff **)(skb->head + pad);
- *skbh = skb;
-
- /*
- * Here we need to map only for device write (DMA_FROM_DEVICE),
- * but on Tx recycling we may also get buffers in the pool that
- * are mapped bidirectionally.
- * Use DMA_BIDIRECTIONAL here as well to avoid any
- * inconsistencies when unmapping.
- */
- addr = dma_map_single(dpa_bp->dev, skb->head + pad,
- dpa_bp->size, DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dev_err(dpa_bp->dev, "DMA mapping failed");
- break;
- }
-
- bm_buffer_set64(&bmb[i], addr);
- }
-
- /* Avoid releasing a completely null buffer; bman_release() requires
- * at least one buf. */
- if (likely(i)) {
- /*
- * Release the buffers. In case bman is busy, keep trying
- * until successful. bman_release() is guaranteed to succeed
- * in a reasonable amount of time
- */
- while (bman_release(dpa_bp->pool, bmb, i, 0))
- cpu_relax();
-
- *count_ptr += i;
- }
-}
-
-void dpa_make_private_pool(struct dpa_bp *dpa_bp)
-{
- int i;
-
- dpa_bp->percpu_count = alloc_percpu(*dpa_bp->percpu_count);
-
- /* Give each cpu an allotment of "count" buffers */
- for_each_online_cpu(i) {
- int j;
-
- for (j = 0; j < dpa_bp->target_count; j += 8)
- dpa_bp_add_8(dpa_bp, i);
- }
-}
-#endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
-
-static void dpaa_eth_seed_pool(struct dpa_bp *bp)
-{
- int count = bp->target_count;
- size_t addr = bp->paddr;
-
- while (count) {
- struct bm_buffer bufs[8];
- int num_bufs = 0;
-
- do {
- BUG_ON(addr > 0xffffffffffffull);
- bufs[num_bufs].bpid = bp->bpid;
- bm_buffer_set64(&bufs[num_bufs++], addr);
- addr += bp->size;
-
- } while (--count && (num_bufs < 8));
-
- while (bman_release(bp->pool, bufs, num_bufs, 0))
- cpu_relax();
- }
-}
-
-/*
- * Add buffers/pages/skbuffs for Rx processing whenever bpool count falls below
- * REFILL_THRESHOLD.
- */
-static int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv)
-{
- int *countptr = percpu_priv->dpa_bp_count;
- int count = *countptr;
- const struct dpa_bp *dpa_bp = percpu_priv->dpa_bp;
- int new_pages __maybe_unused;
-#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-
- /* this function is called in softirq context;
- * no need to protect smp_processor_id() on RT kernel
- */
- unsigned int cpu = smp_processor_id();
-
- if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
- int i;
-
- for (i = count; i < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; i += 8)
- dpa_bp_add_8(dpa_bp, cpu);
- }
-#else
- /* Add pages to the buffer pool */
- while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT) {
- new_pages = _dpa_bp_add_8_pages(dpa_bp);
- if (unlikely(!new_pages)) {
- /* Avoid looping forever if we've temporarily
- * run out of memory. We'll try again at the next
- * NAPI cycle.
- */
- break;
- }
- count += new_pages;
- }
- *countptr = count;
-
- if (*countptr < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT)
- return -ENOMEM;
-#endif
-
- return 0;
-}
-
-static int dpa_make_shared_port_pool(struct dpa_bp *bp)
-{
- /*
- * In MAC-less and Shared-MAC scenarios the physical
- * address of the buffer pool in device tree is set
- * to 0 to specify that another entity (USDPAA) will
- * allocate and seed the buffers
- */
- if (!bp->paddr)
- return 0;
-
- devm_request_mem_region(bp->dev, bp->paddr,
- bp->size * bp->config_count, KBUILD_MODNAME);
- bp->vaddr = devm_ioremap_prot(bp->dev, bp->paddr,
- bp->size * bp->config_count, 0);
- if (bp->vaddr == NULL) {
- pr_err("Could not map memory for pool %d\n", bp->bpid);
- return -EIO;
- }
-
- if (bp->seed_pool)
- dpaa_eth_seed_pool(bp);
-
- return 0;
-}
-
-static int __must_check __attribute__((nonnull))
-dpa_bp_alloc(struct dpa_bp *dpa_bp)
-{
- int err;
- struct bman_pool_params bp_params;
- struct platform_device *pdev;
-
- BUG_ON(dpa_bp->size == 0);
- BUG_ON(dpa_bp->config_count == 0);
-
- bp_params.flags = BMAN_POOL_FLAG_DEPLETION;
- bp_params.cb = dpa_bp_depletion;
- bp_params.cb_ctx = dpa_bp;
-
- /* We support two options. Either a global shared pool, or
- * a specified pool. If the pool is specified, we only
- * create one per bpid */
- if (dpa_bp->kernel_pool && default_pool) {
- atomic_inc(&default_pool->refs);
- return 0;
- }
-
- if (dpa_bp_array[dpa_bp->bpid]) {
- atomic_inc(&dpa_bp_array[dpa_bp->bpid]->refs);
- return 0;
- }
-
- if (dpa_bp->bpid == 0)
- bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
- else
- bp_params.bpid = dpa_bp->bpid;
-
- dpa_bp->pool = bman_new_pool(&bp_params);
- if (unlikely(dpa_bp->pool == NULL)) {
- pr_err("bman_new_pool() failed\n");
- return -ENODEV;
- }
-
- dpa_bp->bpid = bman_get_params(dpa_bp->pool)->bpid;
-
- pdev = platform_device_register_simple("dpaa_eth_bpool",
- dpa_bp->bpid, NULL, 0);
- if (IS_ERR(pdev)) {
- err = PTR_ERR(pdev);
- goto pdev_register_failed;
- }
-
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
- if (err)
- goto pdev_mask_failed;
-
- dpa_bp->dev = &pdev->dev;
-
- if (dpa_bp->kernel_pool) {
- if (!default_pool)
- default_pool = dpa_bp;
- } else {
- err = dpa_make_shared_port_pool(dpa_bp);
- if (err)
- goto make_shared_pool_failed;
- }
-
- dpa_bp_array[dpa_bp->bpid] = dpa_bp;
-
- atomic_set(&dpa_bp->refs, 1);
-
- return 0;
-
-make_shared_pool_failed:
-pdev_mask_failed:
- platform_device_unregister(pdev);
-pdev_register_failed:
- bman_free_pool(dpa_bp->pool);
-
- return err;
-}
-
-#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-static inline void _dpa_bp_free_buf(void *addr)
-{
- struct sk_buff **skbh = addr;
- struct sk_buff *skb;
-
- skb = *skbh;
- dev_kfree_skb_any(skb);
-}
-#else
-static inline void _dpa_bp_free_buf(void *addr)
-{
- free_page((unsigned long)addr);
-}
-#endif
-
-static void __cold __attribute__((nonnull))
-_dpa_bp_free(struct dpa_bp *dpa_bp)
-{
- struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
-
- if (!atomic_dec_and_test(&bp->refs))
- return;
-
- if (bp->kernel_pool) {
- int num;
-
- do {
- struct bm_buffer bmb[8];
- int i;
-
- num = bman_acquire(bp->pool, bmb, 8, 0);
-
- for (i = 0; i < num; i++) {
- dma_addr_t addr = bm_buf_addr(&bmb[i]);
-
- dma_unmap_single(bp->dev, addr, bp->size,
- DMA_BIDIRECTIONAL);
-
- _dpa_bp_free_buf(phys_to_virt(addr));
- }
- } while (num == 8);
- }
-
- dpa_bp_array[bp->bpid] = 0;
- bman_free_pool(bp->pool);
-}
-
-static void __cold __attribute__((nonnull))
-dpa_bp_free(struct dpa_priv_s *priv, struct dpa_bp *dpa_bp)
-{
- int i;
-
- for (i = 0; i < priv->bp_count; i++)
- _dpa_bp_free(&priv->dpa_bp[i]);
-}
-
-/* QM */
-
-static struct qman_fq *_dpa_get_tx_conf_queue(const struct dpa_priv_s *priv,
- struct qman_fq *tx_fq)
-{
- int i;
-
- for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
- if (priv->egress_fqs[i] == tx_fq)
- return priv->conf_fqs[i];
-
- return NULL;
-}
-
-static int __must_check __attribute__((nonnull))
-_dpa_fq_alloc(struct list_head *list, struct dpa_fq *dpa_fq)
-{
- int _errno;
- const struct dpa_priv_s *priv;
- struct device *dev;
- struct qman_fq *fq;
- struct qm_mcc_initfq initfq;
- struct qman_fq *confq;
-
- priv = netdev_priv(dpa_fq->net_dev);
- dev = dpa_fq->net_dev->dev.parent;
-
- if (dpa_fq->fqid == 0)
- dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
-
- dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
-
- _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
- if (_errno) {
- dev_err(dev, "qman_create_fq() failed\n");
- return _errno;
- }
- fq = &dpa_fq->fq_base;
-
- if (dpa_fq->init) {
- initfq.we_mask = QM_INITFQ_WE_FQCTRL;
- /* FIXME: why would we want to keep an empty FQ in cache? */
- initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
-
-#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
- /* Try to reduce the number of portal interrupts for
- * Tx Confirmation FQs.
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
-#endif
-
- /* FQ placement */
- initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
-
- initfq.fqd.dest.channel = dpa_fq->channel;
- initfq.fqd.dest.wq = dpa_fq->wq;
-
- /*
- * Put all egress queues in a congestion group of their own.
- * Sensu stricto, the Tx confirmation queues are Rx FQs,
- * rather than Tx - but they nonetheless account for the
- * memory footprint on behalf of egress traffic. We therefore
- * place them in the netdev's CGR, along with the Tx FQs.
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX ||
- dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM) {
- initfq.we_mask |= QM_INITFQ_WE_CGID;
- initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
- initfq.fqd.cgid = priv->cgr_data.cgr.cgrid;
- /*
- * Set a fixed overhead accounting, in an attempt to
- * reduce the impact of fixed-size skb shells and the
- * driver's needed headroom on system memory. This is
- * especially the case when the egress traffic is
- * composed of small datagrams.
- * Unfortunately, QMan's OAL value is capped to an
- * insufficient value, but even that is better than
- * no overhead accounting at all.
- */
- initfq.we_mask |= QM_INITFQ_WE_OAC;
- initfq.fqd.oac_init.oac = QM_OAC_CG;
- initfq.fqd.oac_init.oal = min(sizeof(struct sk_buff) +
- priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL);
- }
-
- /*
- * For MAC-less devices we only get here for RX frame queues
- * initialization, which are the TX queues of the other
- * partition.
- * It is safe to rely on one partition to set the FQ taildrop
- * threshold for the TX queues of the other partition
- * because the ERN notifications will be received by the
- * partition doing qman_enqueue.
- */
- if (!priv->mac_dev) {
- initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
- qm_fqd_taildrop_set(&initfq.fqd.td,
- DPA_FQ_TD, 1);
- initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
- }
-
- /*
- * Configure the Tx confirmation queue, now that we know
- * which Tx queue it pairs with.
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX) {
- confq = _dpa_get_tx_conf_queue(priv, &dpa_fq->fq_base);
- if (confq) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA |
- QM_INITFQ_WE_CONTEXTB;
- /* CTXA[OVFQ] = 1 */
- initfq.fqd.context_a.hi = 0x80000000;
- initfq.fqd.context_a.lo = 0x0;
- initfq.fqd.context_b = qman_fq_fqid(confq);
- }
- }
-
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
- /*
- * Configure the Tx queues for recycled frames, such that the
- * buffers are released by FMan and no confirmation is sent
- */
- if (dpa_fq->fq_type == FQ_TYPE_TX_RECYCLE) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA |
- QM_INITFQ_WE_CONTEXTB;
- /*
- * ContextA: OVFQ=1 (use ContextB FQID for confirmation)
- * OVOM=1 (use contextA2 bits instead of ICAD)
- * A2V=1 (contextA A2 field is valid)
- * B0V=1 (contextB field is valid)
- * ContextA A2: EBD=1 (deallocate buffers inside FMan)
- * ContextB: Confirmation FQID = 0
- */
- initfq.fqd.context_a.hi = 0x96000000;
- initfq.fqd.context_a.lo = 0x80000000;
- initfq.fqd.context_b = 0;
- }
-#endif
-
- /* Initialization common to all ingress queues */
- if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
- initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
- initfq.fqd.fq_ctrl |=
- QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
- initfq.fqd.context_a.stashing.exclusive =
- QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
- QM_STASHING_EXCL_ANNOTATION;
- initfq.fqd.context_a.stashing.data_cl = 2;
- initfq.fqd.context_a.stashing.annotation_cl = 1;
- initfq.fqd.context_a.stashing.context_cl =
- DIV_ROUND_UP(sizeof(struct qman_fq), 64);
- };
-
- _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
- if (_errno < 0) {
- dev_err(dev, "qman_init_fq(%u) = %d\n",
- qman_fq_fqid(fq), _errno);
- qman_destroy_fq(fq, 0);
- return _errno;
- }
- }
-
- dpa_fq->fqid = qman_fq_fqid(fq);
- list_add_tail(&dpa_fq->list, list);
-
- return 0;
-}
-
-static int __cold __attribute__((nonnull))
-_dpa_fq_free(struct device *dev, struct qman_fq *fq)
-{
- int _errno, __errno;
- struct dpa_fq *dpa_fq;
- const struct dpa_priv_s *priv;
-
- _errno = 0;
-
- dpa_fq = container_of(fq, struct dpa_fq, fq_base);
- priv = netdev_priv(dpa_fq->net_dev);
-
- if (dpa_fq->init) {
- _errno = qman_retire_fq(fq, NULL);
- if (unlikely(_errno < 0) && netif_msg_drv(priv))
- dev_err(dev, "qman_retire_fq(%u) = %d\n",
- qman_fq_fqid(fq), _errno);
-
- __errno = qman_oos_fq(fq);
- if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
- dev_err(dev, "qman_oos_fq(%u) = %d\n",
- qman_fq_fqid(fq), __errno);
- if (_errno >= 0)
- _errno = __errno;
- }
- }
-
- qman_destroy_fq(fq, 0);
- list_del(&dpa_fq->list);
-
- return _errno;
-}
-
-static int __cold __attribute__((nonnull))
-dpa_fq_free(struct device *dev, struct list_head *list)
-{
- int _errno, __errno;
- struct dpa_fq *dpa_fq, *tmp;
-
- _errno = 0;
- list_for_each_entry_safe(dpa_fq, tmp, list, list) {
- __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
- if (unlikely(__errno < 0) && _errno >= 0)
- _errno = __errno;
- }
-
- return _errno;
-}
-
-static inline void * __must_check __attribute__((nonnull))
-dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr)
-{
- return dpa_bp->vaddr + (addr - dpa_bp->paddr);
-}
-
-static void
-dpa_release_sgt(struct qm_sg_entry *sgt, struct dpa_bp *dpa_bp,
- struct bm_buffer *bmb)
-{
- int i = 0, j;
-
- do {
- dpa_bp = dpa_bpid2pool(sgt[i].bpid);
- BUG_ON(IS_ERR(dpa_bp));
-
- j = 0;
- do {
- BUG_ON(sgt[i].extension);
-
- bmb[j].hi = sgt[i].addr_hi;
- bmb[j].lo = sgt[i].addr_lo;
-
- j++; i++;
- } while (j < ARRAY_SIZE(bmb) &&
- !sgt[i-1].final &&
- sgt[i-1].bpid == sgt[i].bpid);
-
- while (bman_release(dpa_bp->pool, bmb, j, 0))
- cpu_relax();
- } while (!sgt[i-1].final);
-}
-
-static void
-dpa_fd_release_sg(const struct net_device *net_dev,
- const struct qm_fd *fd)
-{
- const struct dpa_priv_s *priv;
- struct qm_sg_entry *sgt;
- struct dpa_bp *_dpa_bp, *dpa_bp;
- struct bm_buffer _bmb, bmb[8];
-
- priv = netdev_priv(net_dev);
-
- _bmb.hi = fd->addr_hi;
- _bmb.lo = fd->addr_lo;
-
- _dpa_bp = dpa_bpid2pool(fd->bpid);
-
- if (_dpa_bp->vaddr) {
- sgt = dpa_phys2virt(_dpa_bp, bm_buf_addr(&_bmb)) +
- dpa_fd_offset(fd);
- dpa_release_sgt(sgt, dpa_bp, bmb);
- } else {
- sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), GFP_ATOMIC);
- if (sgt == NULL) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_err(net_dev,
- "Memory allocation failed\n");
- return;
- }
-
- copy_from_unmapped_area(sgt, bm_buf_addr(&_bmb) +
- dpa_fd_offset(fd),
- min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
- _dpa_bp->size));
- dpa_release_sgt(sgt, dpa_bp, bmb);
- kfree(sgt);
- }
-
- while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
- cpu_relax();
-}
-
-void __attribute__((nonnull))
-dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
-{
- struct qm_sg_entry *sgt;
- struct dpa_bp *_dpa_bp, *dpa_bp;
- struct bm_buffer _bmb, bmb[8];
-
- _bmb.hi = fd->addr_hi;
- _bmb.lo = fd->addr_lo;
-
- _dpa_bp = dpa_bpid2pool(fd->bpid);
- BUG_ON(IS_ERR(_dpa_bp));
-
- if (fd->format == qm_fd_sg) {
- sgt = (phys_to_virt(bm_buf_addr(&_bmb)) + dpa_fd_offset(fd));
- dpa_release_sgt(sgt, dpa_bp, bmb);
- }
-
- while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
- cpu_relax();
-}
-EXPORT_SYMBOL(dpa_fd_release);
-
-#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-/*
- * Cleanup function for outgoing frame descriptors that were built on Tx path,
- * either contiguous frames or scatter/gather ones with a single data buffer.
- * Skb freeing is not handled here.
- *
- * This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
- *
- * Return the skb backpointer, since for S/G frames the buffer containing it
- * gets freed here.
- */
-struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
- const struct qm_fd *fd)
-{
- dma_addr_t addr = qm_fd_addr(fd);
- dma_addr_t sg_addr;
- void *vaddr;
- struct dpa_bp *bp = priv->dpa_bp;
- struct sk_buff **skbh;
- struct sk_buff *skb = NULL;
-
- BUG_ON(!fd);
-
- if (unlikely(!addr))
- return skb;
- vaddr = phys_to_virt(addr);
- skbh = (struct sk_buff **)vaddr;
-
- if (fd->format == qm_fd_contig) {
- /* For contiguous frames, just unmap data buffer;
- * mapping direction depends on whether the frame was
- * meant to be recycled or not */
- if (fd->cmd & FM_FD_CMD_FCO)
- dma_unmap_single(bp->dev, addr, bp->size,
- DMA_BIDIRECTIONAL);
- else
- dma_unmap_single(bp->dev, addr, bp->size,
- DMA_TO_DEVICE);
- /* Retrieve the skb backpointer */
- skb = *skbh;
- } else {
- /* For s/g, we need to unmap both the SGT buffer and the
- * data buffer, and also free the SGT buffer */
- struct qm_sg_entry *sg_entry;
-
- /* Unmap first buffer (contains S/G table) */
- dma_unmap_single(bp->dev, addr, sgt_buffer_size(priv),
- DMA_TO_DEVICE);
-
- /* Unmap data buffer */
- sg_entry = (struct qm_sg_entry *)(vaddr + fd->offset);
- sg_addr = qm_sg_addr(sg_entry);
- if (likely(sg_addr))
- dma_unmap_single(bp->dev, sg_addr, bp->size,
- DMA_TO_DEVICE);
- /* Retrieve the skb backpointer */
- skb = *skbh;
-
- }
-/* on some error paths this might not be necessary: */
-#ifdef CONFIG_FSL_DPAA_1588
- if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
- dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
-#endif
-#ifdef CONFIG_FSL_DPAA_TS
- if (unlikely(priv->ts_tx_en &&
- skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct skb_shared_hwtstamps shhwtstamps;
-
- if (!dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh))
- skb_tstamp_tx(skb, &shhwtstamps);
- }
-#endif /* CONFIG_FSL_DPAA_TS */
-
- /* Free first buffer (which was allocated on Tx) containing the
- * skb backpointer and hardware timestamp information
- */
- if (fd->format != qm_fd_contig)
- kfree(vaddr);
-
- return skb;
-}
-#endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
-
-/* net_device */
-
-/**
- * @param net_dev the device for which statistics are calculated
- * @param stats the function fills this structure with the device's statistics
- * @return the address of the structure containing the statistics
- *
- * Calculates the statistics for the given device by adding the statistics
- * collected by each CPU.
- */
-static struct rtnl_link_stats64 * __cold
-dpa_get_stats64(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- u64 *cpustats;
- u64 *netstats = (u64 *)stats;
- int i, j;
- struct dpa_percpu_priv_s *percpu_priv;
- int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
-
- for_each_online_cpu(i) {
- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-
- cpustats = (u64 *)&percpu_priv->stats;
-
- for (j = 0; j < numstats; j++)
- netstats[j] += cpustats[j];
- }
-
- return stats;
-}
-
-static int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
-{
- const int max_mtu = dpa_get_max_mtu();
- const int min_mtu = dpa_get_min_mtu();
-
- /* Make sure we don't exceed the Ethernet controller's MAXFRM */
- if (new_mtu < min_mtu || new_mtu > max_mtu) {
- netdev_err(net_dev, "Invalid L3 mtu %d "
- "(must be between %d and %d).\n",
- new_mtu, min_mtu, max_mtu);
- return -EINVAL;
- }
- net_dev->mtu = new_mtu;
-
- return 0;
-}
-
-/* .ndo_init callback */
-static int dpa_ndo_init(struct net_device *net_dev)
-{
- /*
- * If fsl_fm_max_frm is set to a higher value than the all-common 1500,
- * we choose conservatively and let the user explicitly set a higher
- * MTU via ifconfig. Otherwise, the user may end up with different MTUs
- * in the same LAN.
- * If on the other hand fsl_fm_max_frm has been chosen below 1500,
- * start with the maximum allowed.
- */
- int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
-
- pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
- net_dev->mtu = init_mtu;
-
- return 0;
-}
-
-static int dpa_set_mac_address(struct net_device *net_dev, void *addr)
-{
- const struct dpa_priv_s *priv;
- int _errno;
-
- priv = netdev_priv(net_dev);
-
- _errno = eth_mac_addr(net_dev, addr);
- if (_errno < 0) {
- if (netif_msg_drv(priv))
- netdev_err(net_dev,
- "eth_mac_addr() = %d\n",
- _errno);
- return _errno;
- }
-
- if (!priv->mac_dev)
- /* MAC-less interface, so nothing more to do here */
- return 0;
-
- _errno = priv->mac_dev->change_addr(priv->mac_dev, net_dev->dev_addr);
- if (_errno < 0) {
- if (netif_msg_drv(priv))
- netdev_err(net_dev,
- "mac_dev->change_addr() = %d\n",
- _errno);
- return _errno;
- }
-
- return 0;
-}
-
-static void dpa_set_rx_mode(struct net_device *net_dev)
-{
- int _errno;
- const struct dpa_priv_s *priv;
-
- priv = netdev_priv(net_dev);
-
- if (!priv->mac_dev)
- return;
-
- if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
- _errno = priv->mac_dev->change_promisc(priv->mac_dev);
- if (unlikely(_errno < 0) && netif_msg_drv(priv))
- netdev_err(net_dev,
- "mac_dev->change_promisc() = %d\n",
- _errno);
- }
-
- _errno = priv->mac_dev->set_multi(net_dev);
- if (unlikely(_errno < 0) && netif_msg_drv(priv))
- netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
-}
-
-#if defined(CONFIG_FSL_DPAA_1588) || defined(CONFIG_FSL_DPAA_TS)
-u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
- const void *data)
-{
- u64 *ts, ns;
-
- ts = FM_PORT_GetBufferTimeStamp(
- fm_port_get_handle(priv->mac_dev->port_dev[rx_tx]), data);
-
- if (!ts || *ts == 0)
- return 0;
-
- /* multiple DPA_PTP_NOMINAL_FREQ_PERIOD_NS for case of non power of 2 */
- ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
-
- return ns;
-}
-#endif
-#ifdef CONFIG_FSL_DPAA_TS
-int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
- struct skb_shared_hwtstamps *shhwtstamps, const void *data)
-{
- u64 ns;
-
- ns = dpa_get_timestamp_ns(priv, rx_tx, data);
-
- if (ns == 0)
- return -EINVAL;
-
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
- shhwtstamps->hwtstamp = ns_to_ktime(ns);
-
- return 0;
-}
-
-static void dpa_ts_tx_enable(struct net_device *dev)
-{
- struct dpa_priv_s *priv = netdev_priv(dev);
- struct mac_device *mac_dev = priv->mac_dev;
-
- if (mac_dev->fm_rtc_enable)
- mac_dev->fm_rtc_enable(dev);
- if (mac_dev->ptp_enable)
- mac_dev->ptp_enable(mac_dev);
-
- priv->ts_tx_en = TRUE;
-}
-
-static void dpa_ts_tx_disable(struct net_device *dev)
-{
- struct dpa_priv_s *priv = netdev_priv(dev);
-
-#if 0
-/*
- * the RTC might be needed by the Rx Ts, cannot disable here
- * no separate ptp_disable API for Rx/Tx, cannot disable here
- */
- struct mac_device *mac_dev = priv->mac_dev;
-
- if (mac_dev->fm_rtc_disable)
- mac_dev->fm_rtc_disable(dev);
-
- if (mac_dev->ptp_disable)
- mac_dev->ptp_disable(mac_dev);
-#endif
-
- priv->ts_tx_en = FALSE;
-}
-
-static void dpa_ts_rx_enable(struct net_device *dev)
-{
- struct dpa_priv_s *priv = netdev_priv(dev);
- struct mac_device *mac_dev = priv->mac_dev;
-
- if (mac_dev->fm_rtc_enable)
- mac_dev->fm_rtc_enable(dev);
- if (mac_dev->ptp_enable)
- mac_dev->ptp_enable(mac_dev);
-
- priv->ts_rx_en = TRUE;
-}
-
-static void dpa_ts_rx_disable(struct net_device *dev)
-{
- struct dpa_priv_s *priv = netdev_priv(dev);
-
-#if 0
-/*
- * the RTC might be needed by the Tx Ts, cannot disable here
- * no separate ptp_disable API for Rx/Tx, cannot disable here
- */
- struct mac_device *mac_dev = priv->mac_dev;
-
- if (mac_dev->fm_rtc_disable)
- mac_dev->fm_rtc_disable(dev);
-
- if (mac_dev->ptp_disable)
- mac_dev->ptp_disable(mac_dev);
-#endif
-
- priv->ts_rx_en = FALSE;
-}
-
-static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct hwtstamp_config config;
-
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- dpa_ts_tx_disable(dev);
- break;
- case HWTSTAMP_TX_ON:
- dpa_ts_tx_enable(dev);
- break;
- default:
- return -ERANGE;
- }
-
- if (config.rx_filter == HWTSTAMP_FILTER_NONE)
- dpa_ts_rx_disable(dev);
- else {
- dpa_ts_rx_enable(dev);
- /* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- }
-
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-#endif /* CONFIG_FSL_DPAA_TS */
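For context, dpa_ts_ioctl() is reached through the standard SIOCSHWTSTAMP request. A minimal userspace sketch of enabling the timestamping it implements, assuming an ordinary AF_INET datagram socket and a placeholder interface name:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_hw_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* serviced by dpa_ts_tx_enable()    */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* driver upgrades any filter to ALL */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* Returns 0 on success; cfg then holds what the driver actually granted */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}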
-
-static int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-#ifdef CONFIG_FSL_DPAA_1588
- struct dpa_priv_s *priv = netdev_priv(dev);
-#endif
- int ret = 0;
-
-/* at least one timestamping feature must be enabled to proceed */
-#if defined(CONFIG_FSL_DPAA_1588) || defined(CONFIG_FSL_DPAA_TS)
- if (!netif_running(dev))
-#endif
- return -EINVAL;
-
-#ifdef CONFIG_FSL_DPAA_TS
- if (cmd == SIOCSHWTSTAMP)
- return dpa_ts_ioctl(dev, rq, cmd);
-#endif /* CONFIG_FSL_DPAA_TS */
-
-#ifdef CONFIG_FSL_DPAA_1588
- if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
- if (priv->tsu && priv->tsu->valid)
- ret = dpa_ioctl_1588(dev, rq, cmd);
- else
- ret = -ENODEV;
- }
-#endif
-
- return ret;
-}
-
-#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-/*
- * When we put the buffer into the pool, we purposefully added
- * some padding to the address so that the buffers wouldn't all
- * be page-aligned. But the skb has been reset to a default state,
- * so it is pointing up to DPAA_ETH_MAX_PAD - L1_CACHE_BYTES bytes
- * before the actual data. We subtract skb->head from the fd addr,
- * and then mask off the translated part to get the actual distance.
- */
-static int dpa_process_one(struct dpa_percpu_priv_s *percpu_priv,
- struct sk_buff *skb, struct dpa_bp *bp, const struct qm_fd *fd)
-{
- dma_addr_t fd_addr = qm_fd_addr(fd);
- unsigned long skb_addr = virt_to_phys(skb->head);
- u32 pad = fd_addr - skb_addr;
- unsigned int data_start;
-
- (*percpu_priv->dpa_bp_count)--;
-
- /*
- * The skb is currently pointed at head + headroom. The packet
- * starts at skb->head + pad + fd offset.
- */
- data_start = pad + dpa_fd_offset(fd) - skb_headroom(skb);
- skb_put(skb, dpa_fd_length(fd) + data_start);
- skb_pull(skb, data_start);
-
- return 0;
-}
-#endif
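The pad/data_start arithmetic in dpa_process_one() is compact; a worked sketch with assumed values (64 bytes of pool padding, a 128-byte FD offset, 64 bytes of default skb headroom) makes the skb_put()/skb_pull() pair concrete. All numbers and names below are illustrative and only mirror the fields the function uses:

static void example_process_one(void)
{
	unsigned long fd_addr   = 0x10000040;	/* qm_fd_addr(fd)               */
	unsigned long skb_head  = 0x10000000;	/* virt_to_phys(skb->head)      */
	unsigned int  headroom  = 64;		/* skb_headroom() after reset   */
	unsigned int  fd_offset = 128;		/* dpa_fd_offset(fd)            */
	unsigned int  fd_length = 1500;		/* dpa_fd_length(fd)            */

	unsigned int pad = fd_addr - skb_head;			/* 64  */
	unsigned int data_start = pad + fd_offset - headroom;	/* 128 */

	/* skb_put(skb, fd_length + data_start) followed by
	 * skb_pull(skb, data_start) leaves skb->data at
	 * skb->head + pad + fd_offset (the frame start) and skb->len == 1500 */
	(void)data_start;
	(void)fd_length;
}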
-
/*
* Checks whether the checksum field in Parse Results array is valid
* (equals 0xFFFF) and increments the .cse counter otherwise
@@ -1346,8 +171,7 @@ static void _dpa_rx_error(struct net_device *net_dev,
* interference with zero-loss convergence benchmark results.
*/
if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
- pr_warn_once("fsl-dpa: non-zero error counters " \
- "in fman statistics (sysfs)\n");
+ pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
else
if (netif_msg_hw(priv) && net_ratelimit())
netdev_err(net_dev, "Err FD status = 0x%08x\n",
@@ -1450,139 +274,6 @@ void __hot _dpa_process_parse_results(const t_FmPrsResult *parse_results,
*use_gro = 0;
}
-#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-void __hot _dpa_rx(struct net_device *net_dev,
- const struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_fd *fd,
- u32 fqid)
-{
- struct dpa_bp *dpa_bp;
- struct sk_buff *skb;
- struct sk_buff **skbh;
- dma_addr_t addr = qm_fd_addr(fd);
- u32 fd_status = fd->status;
- unsigned int skb_len;
- t_FmPrsResult *parse_result;
- int use_gro = net_dev->features & NETIF_F_GRO;
-
- skbh = (struct sk_buff **)phys_to_virt(addr);
-
- if (unlikely(fd_status & FM_FD_STAT_ERRORS) != 0) {
- if (netif_msg_hw(priv) && net_ratelimit())
- netdev_warn(net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_ERRORS);
-
- percpu_priv->stats.rx_errors++;
-
- goto _return_dpa_fd_release;
- }
-
- if (unlikely(fd->format != qm_fd_contig)) {
- percpu_priv->stats.rx_dropped++;
- if (netif_msg_rx_status(priv) && net_ratelimit())
- netdev_warn(net_dev, "Dropping a SG frame\n");
- goto _return_dpa_fd_release;
- }
-
- dpa_bp = dpa_bpid2pool(fd->bpid);
-
- dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
- /* Execute the Rx processing hook, if it exists. */
- if (dpaa_eth_hooks.rx_default && dpaa_eth_hooks.rx_default((void *)fd,
- net_dev, fqid) == DPAA_ETH_STOLEN)
- /* won't count the rx bytes in */
- goto skb_stolen;
-
- skb = *skbh;
- prefetch(skb);
-
- /* Fill the SKB */
- dpa_process_one(percpu_priv, skb, dpa_bp, fd);
-
- prefetch(skb_shinfo(skb));
-
-#ifdef CONFIG_FSL_DPAA_1588
- if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
- dpa_ptp_store_rxstamp(priv, skb, (void *)skbh);
-#endif
-
- skb->protocol = eth_type_trans(skb, net_dev);
-
- if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu))) {
- percpu_priv->stats.rx_dropped++;
- goto drop_large_frame;
- }
-
-
- skb_len = skb->len;
-
- /* Validate the skb csum and figure out whether GRO is appropriate */
- parse_result = (t_FmPrsResult *)((u8 *)skbh + DPA_RX_PRIV_DATA_SIZE);
- _dpa_process_parse_results(parse_result, fd, skb, &use_gro);
-
-#ifdef CONFIG_FSL_DPAA_TS
- if (priv->ts_rx_en)
- dpa_get_ts(priv, RX, skb_hwtstamps(skb), (void *)skbh);
-#endif /* CONFIG_FSL_DPAA_TS */
-
- if (use_gro) {
- gro_result_t gro_result;
-
- gro_result = napi_gro_receive(&percpu_priv->napi, skb);
- if (unlikely(gro_result == GRO_DROP)) {
- percpu_priv->stats.rx_dropped++;
- goto packet_dropped;
- }
- } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
- percpu_priv->stats.rx_dropped++;
- goto packet_dropped;
- }
-
- percpu_priv->stats.rx_packets++;
- percpu_priv->stats.rx_bytes += skb_len;
-
-packet_dropped:
-skb_stolen:
- return;
-
-drop_large_frame:
- dev_kfree_skb(skb);
- return;
-
-_return_dpa_fd_release:
- dpa_fd_release(net_dev, fd);
-}
-#endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
-
-static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
-{
- struct dpa_percpu_priv_s *percpu_priv;
- int i;
-
- if (priv->shared)
- return;
-
- for_each_online_cpu(i) {
- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- napi_disable(&percpu_priv->napi);
- }
-}
-
-static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
-{
- struct dpa_percpu_priv_s *percpu_priv;
- int i;
-
- if (priv->shared)
- return;
-
- for_each_online_cpu(i) {
- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- napi_enable(&percpu_priv->napi);
- }
-}
-
static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{
int cleaned = qman_poll_dqrr(budget);
@@ -1622,9 +313,10 @@ static void __hot _dpa_tx_conf(struct net_device *net_dev,
/* it's the hook that must now perform cleanup */
return;
- /* This might not perfectly reflect the reality, if the core dequeueing
+ /* This might not perfectly reflect the reality, if the core dequeuing
* the Tx confirmation is different from the one that did the enqueue,
- * but at least it'll show up in the total count. */
+ * but at least it'll show up in the total count.
+ */
percpu_priv->tx_confirm++;
skb = _dpa_cleanup_tx_fd(priv, fd);
@@ -1632,743 +324,44 @@ static void __hot _dpa_tx_conf(struct net_device *net_dev,
dev_kfree_skb(skb);
}
-static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size)
-{
- int i;
-
- for (i = 0; i < priv->bp_count; i++)
- if ((size + priv->tx_headroom) <= priv->dpa_bp[i].size)
- return dpa_bpid2pool(priv->dpa_bp[i].bpid);
- return ERR_PTR(-ENODEV);
-}
-
-static void dpa_set_buffer_layout(struct dpa_priv_s *priv, struct fm_port *port,
- struct dpa_buffer_layout_s *layout, int type)
-{
- struct fm_port_params params;
-
- layout->priv_data_size = (type == RX ?
- DPA_RX_PRIV_DATA_SIZE : DPA_TX_PRIV_DATA_SIZE);
- layout->parse_results = true;
- layout->hash_results = true;
-#if defined(CONFIG_FSL_DPAA_1588) || defined(CONFIG_FSL_DPAA_TS)
- layout->time_stamp = true;
-#endif
- fm_port_get_buff_layout_ext_params(port, &params);
- layout->manip_extra_space = params.manip_extra_space;
- layout->data_align = params.data_align;
-}
-
-/**
- * Turn on HW checksum computation for this outgoing frame.
- * If the current protocol is not something we support in this regard
- * (or if the stack has already computed the SW checksum), we do nothing.
- *
- * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
- * otherwise.
- *
- * Note that this function may modify the fd->cmd field and the skb data buffer
- * (the Parse Results area).
- */
-int dpa_enable_tx_csum(struct dpa_priv_s *priv,
- struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
-{
- t_FmPrsResult *parse_result;
- struct iphdr *iph;
- struct ipv6hdr *ipv6h = NULL;
- int l4_proto;
- int ethertype = ntohs(skb->protocol);
- int retval = 0;
-
- if (!priv->mac_dev || skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
-
- /* Note: L3 csum seems to be already computed in sw, but we can't choose
- * L4 alone from the FM configuration anyway. */
-
- /* Fill in some fields of the Parse Results array, so the FMan
- * can find them as if they came from the FMan Parser. */
- parse_result = (t_FmPrsResult *)parse_results;
-
- /* If we're dealing with VLAN, get the real Ethernet type */
- if (ethertype == ETH_P_8021Q) {
- /* We can't always assume the MAC header is set correctly
- * by the stack, so reset to beginning of skb->data */
- skb_reset_mac_header(skb);
- ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
- }
-
- /* Fill in the relevant L3 parse result fields
- * and read the L4 protocol type */
- switch (ethertype) {
- case ETH_P_IP:
- parse_result->l3r = FM_L3_PARSE_RESULT_IPV4;
- iph = ip_hdr(skb);
- BUG_ON(iph == NULL);
- l4_proto = ntohs(iph->protocol);
- break;
- case ETH_P_IPV6:
- parse_result->l3r = FM_L3_PARSE_RESULT_IPV6;
- ipv6h = ipv6_hdr(skb);
- BUG_ON(ipv6h == NULL);
- l4_proto = ntohs(ipv6h->nexthdr);
- break;
- default:
- /* We shouldn't even be here */
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_alert(priv->net_dev, "Can't compute HW csum "
- "for L3 proto 0x%x\n", ntohs(skb->protocol));
- retval = -EIO;
- goto return_error;
- }
-
- /* Fill in the relevant L4 parse result fields */
- switch (l4_proto) {
- case IPPROTO_UDP:
- parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
- break;
- case IPPROTO_TCP:
- parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
- break;
- default:
- /* This can as well be a BUG() */
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_alert(priv->net_dev, "Can't compute HW csum "
- "for L4 proto 0x%x\n", l4_proto);
- retval = -EIO;
- goto return_error;
- }
-
- /* At index 0 is IPOffset_1 as defined in the Parse Results */
- parse_result->ip_off[0] = skb_network_offset(skb);
- parse_result->l4_off = skb_transport_offset(skb);
-
- /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
- fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
-
- /*
- * On P1023 and similar platforms fd->cmd interpretation could
- * be disabled by setting CONTEXT_A bit ICMD; currently this bit
- * is not set so we do not need to check; in the future, if/when
- * using context_a we need to check this bit
- */
-
-return_error:
- return retval;
-}
-
-static int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev)
-{
- struct dpa_bp *dpa_bp;
- struct bm_buffer bmb;
- struct dpa_percpu_priv_s *percpu_priv;
- struct dpa_priv_s *priv;
- struct qm_fd fd;
- int queue_mapping;
- int err;
- void *dpa_bp_vaddr;
- t_FmPrsResult parse_results;
-
- priv = netdev_priv(net_dev);
- percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
-
- memset(&fd, 0, sizeof(fd));
- fd.format = qm_fd_contig;
-
- queue_mapping = smp_processor_id();
-
- dpa_bp = dpa_size2pool(priv, skb_headlen(skb));
- if (unlikely(IS_ERR(dpa_bp))) {
- percpu_priv->stats.tx_errors++;
- err = PTR_ERR(dpa_bp);
- goto bpools_too_small_error;
- }
-
- err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
- if (unlikely(err <= 0)) {
- percpu_priv->stats.tx_errors++;
- if (err == 0)
- err = -ENOMEM;
- goto buf_acquire_failed;
- }
- fd.bpid = dpa_bp->bpid;
-
- fd.length20 = skb_headlen(skb);
- fd.addr_hi = bmb.hi;
- fd.addr_lo = bmb.lo;
- fd.offset = priv->tx_headroom;
-
- /*
- * The virtual address of the buffer pool is expected to be NULL
- * in scenarios like MAC-less or Shared-MAC between Linux and
- * USDPAA. In this case the buffers are dynamically mapped/unmapped.
- */
- if (dpa_bp->vaddr) {
- dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb));
-
- /* Copy the packet payload */
- skb_copy_from_linear_data(skb,
- dpa_bp_vaddr + dpa_fd_offset(&fd),
- dpa_fd_length(&fd));
-
- /* Enable L3/L4 hardware checksum computation, if applicable */
- err = dpa_enable_tx_csum(priv, skb, &fd,
- dpa_bp_vaddr + DPA_TX_PRIV_DATA_SIZE);
- } else {
- err = dpa_enable_tx_csum(priv, skb, &fd,
- (char *)&parse_results);
-
- copy_to_unmapped_area(bm_buf_addr(&bmb) + DPA_TX_PRIV_DATA_SIZE,
- &parse_results,
- DPA_PARSE_RESULTS_SIZE);
-
- copy_to_unmapped_area(bm_buf_addr(&bmb) + dpa_fd_offset(&fd),
- skb->data,
- dpa_fd_length(&fd));
- }
-
- if (unlikely(err < 0)) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_err(net_dev, "Tx HW csum error: %d\n", err);
- percpu_priv->stats.tx_errors++;
- goto l3_l4_csum_failed;
- }
-
- err = dpa_xmit(priv, &percpu_priv->stats, queue_mapping, &fd);
-
-l3_l4_csum_failed:
-bpools_too_small_error:
-buf_acquire_failed:
- /* We're done with the skb */
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-static int skb_to_sg_fd(struct dpa_priv_s *priv,
- struct sk_buff *skb, struct qm_fd *fd)
-{
- struct dpa_bp *dpa_bp = priv->dpa_bp;
- void *vaddr;
- dma_addr_t paddr;
- struct sk_buff **skbh;
- struct qm_sg_entry *sg_entry;
- struct net_device *net_dev = priv->net_dev;
- int err;
-
- /* Allocate the first buffer in the FD (used for storing S/G table) */
- vaddr = kmalloc(sgt_buffer_size(priv), GFP_ATOMIC);
- if (unlikely(vaddr == NULL)) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_err(net_dev, "Memory allocation failed\n");
- return -ENOMEM;
- }
- /* Store skb backpointer at the beginning of the buffer */
- skbh = (struct sk_buff **)vaddr;
- *skbh = skb;
-
- /* Fill in FD */
- fd->format = qm_fd_sg;
- fd->offset = priv->tx_headroom;
- fd->length20 = skb->len;
-
- /* Enable hardware checksum computation */
- err = dpa_enable_tx_csum(priv, skb, fd,
- (char *)vaddr + DPA_TX_PRIV_DATA_SIZE);
- if (unlikely(err < 0)) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_err(net_dev, "HW csum error: %d\n", err);
- kfree(vaddr);
- return err;
- }
-
- /* Map the buffer and store its address in the FD */
- paddr = dma_map_single(dpa_bp->dev, vaddr, sgt_buffer_size(priv),
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dpa_bp->dev, paddr))) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_err(net_dev, "DMA mapping failed\n");
- kfree(vaddr);
- return -EINVAL;
- }
-
- fd->addr_hi = upper_32_bits(paddr);
- fd->addr_lo = lower_32_bits(paddr);
-
- /* Fill in S/G entry */
- sg_entry = (struct qm_sg_entry *)(vaddr + fd->offset);
-
- sg_entry->extension = 0;
- sg_entry->final = 1;
- sg_entry->length = skb->len;
- /*
- * Put the same offset in the data buffer as in the SGT (first) buffer.
- * This is the format for S/G frames generated by FMan; the manual is
- * not clear whether the same is required of Tx S/G frames, but since
- * we know for sure we have at least tx_headroom bytes of skb headroom,
- * let's not take any chances.
- */
- sg_entry->offset = priv->tx_headroom;
-
- paddr = dma_map_single(dpa_bp->dev, skb->data - sg_entry->offset,
- dpa_bp->size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dpa_bp->dev, paddr))) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_err(net_dev, "DMA mapping failed\n");
- return -EINVAL;
- }
- sg_entry->addr_hi = upper_32_bits(paddr);
- sg_entry->addr_lo = lower_32_bits(paddr);
-
-#ifdef CONFIG_FSL_DPAA_TS
- if (unlikely(priv->ts_tx_en &&
- skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
-#endif /* CONFIG_FSL_DPAA_TS */
-
- return 0;
-}
-
-static int skb_to_contig_fd(struct dpa_priv_s *priv,
- struct dpa_percpu_priv_s *percpu_priv,
- struct sk_buff *skb, struct qm_fd *fd)
-{
- struct sk_buff **skbh;
- dma_addr_t addr;
- struct dpa_bp *dpa_bp = priv->dpa_bp;
- struct net_device *net_dev = priv->net_dev;
- enum dma_data_direction dma_dir = DMA_TO_DEVICE;
- bool can_recycle = false;
- int offset, extra_offset;
- int err;
-
- /*
- * We are guaranteed that we have at least tx_headroom bytes.
- * Buffers we allocated are padded to improve cache usage. In order
- * to increase buffer re-use, we aim to keep any such buffers the
- * same. This means the address passed to the FM should be
- * tx_headroom bytes before the data for forwarded frames.
- *
- * However, offer some flexibility in fd layout, to allow originating
- * (termination) buffers to be also recycled when possible.
- *
- * First, see if the conditions needed to recycle the skb are met:
- * - skb not cloned, not shared
- * - buffer size is large enough to accommodate a maximum size Rx frame
- * - buffer size does not exceed the maximum size allowed in the pool
- * (to avoid unbounded increase of buffer size in certain forwarding
- * conditions)
- * - buffer address is 16 byte aligned, as per DPAARM
- * - there's enough room in the buffer pool
- */
- if (likely(skb_is_recycleable(skb, dpa_bp->size) &&
- (skb_end_pointer(skb) - skb->head <= DPA_RECYCLE_MAX_SIZE) &&
- (*percpu_priv->dpa_bp_count < dpa_bp->target_count))) {
- /* Compute the minimum necessary fd offset */
- offset = dpa_bp->size - skb->len - skb_tailroom(skb);
-
- /*
- * And make sure the offset is no lower than the offset
- * required by FMan
- */
- offset = max_t(int, offset, priv->tx_headroom);
-
- /*
- * We also need to align the buffer address to 16, such that
- * Fman will be able to reuse it on Rx.
- * Since the buffer going to FMan starts at (skb->data - offset)
- * this is what we'll try to align. We already know that
- * headroom is at least tx_headroom bytes long, but with
- * the extra offset needed for alignment we may go beyond
- * the beginning of the buffer.
- *
- * Also need to check that we don't go beyond the maximum
- * offset that can be set for a contiguous FD.
- */
- extra_offset = (unsigned long)(skb->data - offset) & 0xF;
- if (likely((offset + extra_offset) <= skb_headroom(skb) &&
- (offset + extra_offset) <= DPA_MAX_FD_OFFSET)) {
- /* We're good to go for recycling */
- offset += extra_offset;
- can_recycle = true;
- }
- }
-
-#ifdef CONFIG_FSL_DPAA_TS
- if (unlikely(priv->ts_tx_en &&
- skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- /* we need the fd back to get the timestamp */
- can_recycle = false;
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
-#endif /* CONFIG_FSL_DPAA_TS */
-
- if (likely(can_recycle)) {
- /* Buffer will get recycled, setup fd accordingly */
- fd->cmd |= FM_FD_CMD_FCO;
- fd->bpid = dpa_bp->bpid;
- /*
- * Since the buffer will get back to the Bman pool
- * and be re-used on Rx, map it for both read and write
- */
- dma_dir = DMA_BIDIRECTIONAL;
- } else {
- /*
- * No recycling here, so we don't care about address alignment.
- * Just use the smallest offset required by FMan
- */
- offset = priv->tx_headroom;
- }
-
- skbh = (struct sk_buff **)(skb->data - offset);
- *skbh = skb;
-
-
- /* Enable L3/L4 hardware checksum computation.
- *
- * We must do this before dma_map_single(), because we may
- * need to write into the skb. */
- err = dpa_enable_tx_csum(priv, skb, fd,
- ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
- if (unlikely(err < 0)) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_err(net_dev, "HW csum error: %d\n", err);
- return err;
- }
-
- fd->format = qm_fd_contig;
- fd->length20 = skb->len;
- fd->offset = offset;
-
- addr = dma_map_single(dpa_bp->dev, skbh, dpa_bp->size, dma_dir);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_err(net_dev, "dma_map_single() failed\n");
- return -EINVAL;
- }
-
- fd->addr_hi = upper_32_bits(addr);
- fd->addr_lo = lower_32_bits(addr);
-
- return 0;
-}
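The recycle-eligibility logic in skb_to_contig_fd() folds several constraints into a few lines. A minimal sketch of just the offset/alignment computation, with an assumed stand-in for DPA_MAX_FD_OFFSET and placeholder helper names:

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MAX_FD_OFFSET	511	/* assumed stand-in for DPA_MAX_FD_OFFSET */

static bool example_recycle_offset(uintptr_t data, unsigned int bp_size,
				   unsigned int skb_len, unsigned int tailroom,
				   unsigned int headroom, unsigned int tx_headroom,
				   unsigned int *offset_out)
{
	/* Smallest offset that still lets the whole pool-sized buffer be reused */
	int offset = (int)bp_size - (int)skb_len - (int)tailroom;

	if (offset < (int)tx_headroom)
		offset = tx_headroom;	/* never below what FMan requires */

	/* Align the address handed to FMan (data - offset) down to 16 bytes */
	unsigned int extra = (unsigned int)((data - offset) & 0xF);

	if (offset + extra > headroom || offset + extra > EXAMPLE_MAX_FD_OFFSET)
		return false;		/* not recyclable */

	*offset_out = offset + extra;
	return true;
}

If the check fails, the driver simply falls back to offset = tx_headroom and skips recycling, as in the else branch of skb_to_contig_fd() above.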
-
-int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
-{
- struct dpa_priv_s *priv;
- struct qm_fd fd;
- struct dpa_percpu_priv_s *percpu_priv;
- struct rtnl_link_stats64 *percpu_stats;
- int queue_mapping;
- int err;
-
- /* If there is a Tx hook, run it. */
- if (dpaa_eth_hooks.tx &&
- dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
- /* won't update any Tx stats */
- goto done;
-
- priv = netdev_priv(net_dev);
- percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
- percpu_stats = &percpu_priv->stats;
-
- clear_fd(&fd);
- queue_mapping = dpa_get_queue_mapping(skb);
-
- if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
- struct sk_buff *skb_new;
-
- skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
- if (unlikely(!skb_new)) {
- percpu_stats->tx_errors++;
- kfree_skb(skb);
- goto done;
- }
- kfree_skb(skb);
- skb = skb_new;
- }
-
-#ifdef CONFIG_FSL_DPAA_1588
- if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
- fd.cmd |= FM_FD_CMD_UPD;
-#endif
-#ifdef CONFIG_FSL_DPAA_TS
- if (unlikely(priv->ts_tx_en &&
- skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- fd.cmd |= FM_FD_CMD_UPD;
-#endif /* CONFIG_FSL_DPAA_TS */
-
- /*
- * We have two paths here:
- *
- * 1.If the skb is cloned, create a S/G frame to avoid unsharing it.
- * The S/G table will contain only one entry, pointing to our skb
- * data buffer.
- * The private data area containing the skb backpointer will reside
- * inside the first buffer, such that it won't risk being overwritten
- * in case a second skb pointing to the same data buffer is being
- * processed concurently.
- * No recycling is possible in this case, as the data buffer is shared.
- *
- * 2.If skb is not cloned, then the private area inside it can be
- * safely used to store the skb backpointer. Simply create a contiguous
- * fd in this case.
- * Recycling can happen if the right conditions are met.
- */
- if (skb_cloned(skb) && (skb->len > DPA_SKB_COPY_MAX_SIZE))
- err = skb_to_sg_fd(priv, skb, &fd);
- else {
- /* If cloned skb, but length is below DPA_SKB_COPY_MAX_SIZE,
- * it's more efficient to unshare it and then use the new skb */
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (unlikely(!skb)) {
- percpu_stats->tx_errors++;
- goto done;
- }
- err = skb_to_contig_fd(priv, percpu_priv, skb, &fd);
- }
- if (unlikely(err < 0)) {
- percpu_stats->tx_errors++;
- goto fd_create_failed;
- }
-
- if (fd.cmd & FM_FD_CMD_FCO) {
- /* This skb is recyclable, and the fd generated from it
- * has been filled in accordingly.
- * NOTE: The recycling mechanism is fragile and dependent on
- * upstream changes. It will be maintained for now, but plans
- * are to remove it altogether from the driver.
- */
- skb_recycle(skb);
- skb = NULL;
- (*percpu_priv->dpa_bp_count)++;
- percpu_priv->tx_returned++;
- }
-
- if (unlikely(dpa_xmit(priv, percpu_stats, queue_mapping,
- &fd) < 0))
- goto xmit_failed;
-
- net_dev->trans_start = jiffies;
- goto done;
-
-xmit_failed:
- if (fd.cmd & FM_FD_CMD_FCO) {
- (*percpu_priv->dpa_bp_count)--;
- percpu_priv->tx_returned--;
- }
-fd_create_failed:
- _dpa_cleanup_tx_fd(priv, &fd);
- dev_kfree_skb(skb);
-
-done:
- return NETDEV_TX_OK;
-}
-#endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
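To restate the frame-descriptor selection policy documented in dpa_tx(): a compact sketch, where the enum and helper names are placeholders and copy_max stands for DPA_SKB_COPY_MAX_SIZE:

#include <stdbool.h>

enum example_fd_kind { EXAMPLE_FD_SG, EXAMPLE_FD_CONTIG };

static enum example_fd_kind example_pick_fd(bool cloned, unsigned int len,
					    unsigned int copy_max)
{
	/* Large cloned skb: leave the shared data buffer alone and wrap it
	 * in a one-entry S/G frame (no recycling possible)                  */
	if (cloned && len > copy_max)
		return EXAMPLE_FD_SG;

	/* Otherwise unshare (cheap for short frames) and build a contiguous
	 * FD, which may additionally qualify for buffer recycling           */
	return EXAMPLE_FD_CONTIG;
}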
-
-/**
- * Congestion group state change notification callback.
- * Stops the device's egress queues while they are congested and
- * wakes them upon exiting congested state.
- * Also updates some CGR-related stats.
- */
-static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
- int congested)
-{
- struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
- struct dpa_priv_s, cgr_data.cgr);
-
- if (congested) {
- priv->cgr_data.congestion_start_jiffies = jiffies;
- netif_tx_stop_all_queues(priv->net_dev);
- priv->cgr_data.cgr_congested_count++;
- } else {
- priv->cgr_data.congested_jiffies +=
- (jiffies - priv->cgr_data.congestion_start_jiffies);
- netif_tx_wake_all_queues(priv->net_dev);
- }
-}
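The CGR callback above accumulates congested time in jiffies and counts congestion episodes; a minimal sketch of turning those two counters into an average episode length, with placeholder names (the real fields live in cgr_data and hz is the kernel's HZ):

static unsigned long example_avg_congestion_ms(unsigned long congested_jiffies,
					       unsigned long congested_count,
					       unsigned long hz)
{
	if (congested_count == 0)
		return 0;

	/* jiffies -> milliseconds, then average over the episodes */
	return (congested_jiffies * 1000UL / hz) / congested_count;
}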
-
static enum qman_cb_dqrr_result
-ingress_rx_error_dqrr(struct qman_portal *portal,
+priv_rx_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
struct net_device *net_dev;
struct dpa_priv_s *priv;
struct dpa_percpu_priv_s *percpu_priv;
- int err;
net_dev = ((struct dpa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
- if (dpaa_eth_napi_schedule(percpu_priv)) {
- percpu_priv->in_interrupt++;
+ if (dpaa_eth_napi_schedule(percpu_priv))
return qman_cb_dqrr_stop;
- }
- err = dpaa_eth_refill_bpools(percpu_priv);
- if (err) {
+ if (unlikely(dpaa_eth_refill_bpools(percpu_priv)))
/* Unable to refill the buffer pool due to insufficient
* system memory. Just release the frame back into the pool,
* otherwise we'll soon end up with an empty buffer pool.
*/
dpa_fd_release(net_dev, &dq->fd);
- return qman_cb_dqrr_consume;
- }
-
- _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
-
- return qman_cb_dqrr_consume;
-}
-
-static enum qman_cb_dqrr_result __hot
-shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
-{
- struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
- const struct qm_fd *fd = &dq->fd;
- struct dpa_bp *dpa_bp;
- struct sk_buff *skb;
- struct qm_sg_entry *sgt;
- int i;
-
- net_dev = ((struct dpa_fq *)fq)->net_dev;
- priv = netdev_priv(net_dev);
-
- percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
-
- dpa_bp = dpa_bpid2pool(fd->bpid);
- BUG_ON(IS_ERR(dpa_bp));
-
- if (unlikely(fd->status & FM_FD_STAT_ERRORS) != 0) {
- if (netif_msg_hw(priv) && net_ratelimit())
- netdev_warn(net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_ERRORS);
-
- percpu_priv->stats.rx_errors++;
-
- goto out;
- }
-
- skb = __netdev_alloc_skb(net_dev,
- priv->tx_headroom + dpa_fd_length(fd),
- GFP_ATOMIC);
- if (unlikely(skb == NULL)) {
- if (netif_msg_rx_err(priv) && net_ratelimit())
- netdev_err(net_dev, "Could not alloc skb\n");
-
- percpu_priv->stats.rx_dropped++;
-
- goto out;
- }
-
- skb_reserve(skb, priv->tx_headroom);
-
- if (fd->format == qm_fd_sg) {
- if (dpa_bp->vaddr) {
- sgt = dpa_phys2virt(dpa_bp,
- qm_fd_addr(fd)) + dpa_fd_offset(fd);
-
- for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
- BUG_ON(sgt[i].extension);
-
- /* copy from sgt[i] */
- memcpy(skb_put(skb, sgt[i].length),
- dpa_phys2virt(dpa_bp,
- qm_sg_addr(&sgt[i]) +
- sgt[i].offset),
- sgt[i].length);
- if (sgt[i].final)
- break;
- }
- } else {
- sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
- GFP_ATOMIC);
- if (unlikely(sgt == NULL)) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_err(net_dev,
- "Memory allocation failed\n");
- return -ENOMEM;
- }
-
- copy_from_unmapped_area(sgt,
- qm_fd_addr(fd) + dpa_fd_offset(fd),
- min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
- dpa_bp->size));
-
- for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
- BUG_ON(sgt[i].extension);
-
- copy_from_unmapped_area(
- skb_put(skb, sgt[i].length),
- qm_sg_addr(&sgt[i]) + sgt[i].offset,
- sgt[i].length);
-
- if (sgt[i].final)
- break;
- }
-
- kfree(sgt);
- }
- goto skb_copied;
- }
-
- /* otherwise fd->format == qm_fd_contig */
- if (dpa_bp->vaddr) {
- /* Fill the SKB */
- memcpy(skb_put(skb, dpa_fd_length(fd)),
- dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) +
- dpa_fd_offset(fd), dpa_fd_length(fd));
- } else {
- copy_from_unmapped_area(skb_put(skb, dpa_fd_length(fd)),
- qm_fd_addr(fd) + dpa_fd_offset(fd),
- dpa_fd_length(fd));
- }
-
-skb_copied:
- skb->protocol = eth_type_trans(skb, net_dev);
-
- /* IP Reassembled frames are allowed to be larger than MTU */
- if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
- !(fd->status & FM_FD_IPR))) {
- percpu_priv->stats.rx_dropped++;
- dev_kfree_skb_any(skb);
- goto out;
- }
-
- if (unlikely(netif_rx(skb) != NET_RX_SUCCESS))
- percpu_priv->stats.rx_dropped++;
- else {
- percpu_priv->stats.rx_packets++;
- percpu_priv->stats.rx_bytes += dpa_fd_length(fd);
- }
-
-out:
- if (fd->format == qm_fd_sg)
- dpa_fd_release_sg(net_dev, fd);
else
- dpa_fd_release(net_dev, fd);
+ _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result __hot
-ingress_rx_default_dqrr(struct qman_portal *portal,
+priv_rx_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
struct net_device *net_dev;
struct dpa_priv_s *priv;
struct dpa_percpu_priv_s *percpu_priv;
- int err;
net_dev = ((struct dpa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
@@ -2379,29 +372,25 @@ ingress_rx_default_dqrr(struct qman_portal *portal,
/* IRQ handler, non-migratable; safe to use __this_cpu_ptr here */
percpu_priv = __this_cpu_ptr(priv->percpu_priv);
- if (unlikely(dpaa_eth_napi_schedule(percpu_priv))) {
- percpu_priv->in_interrupt++;
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv)))
return qman_cb_dqrr_stop;
- }
/* Vale of plenty: make sure we didn't run out of buffers */
- err = dpaa_eth_refill_bpools(percpu_priv);
- if (err) {
+
+ if (unlikely(dpaa_eth_refill_bpools(percpu_priv)))
/* Unable to refill the buffer pool due to insufficient
* system memory. Just release the frame back into the pool,
* otherwise we'll soon end up with an empty buffer pool.
*/
dpa_fd_release(net_dev, &dq->fd);
- return qman_cb_dqrr_consume;
- }
-
- _dpa_rx(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+ else
+ _dpa_rx(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result
-ingress_tx_error_dqrr(struct qman_portal *portal,
+priv_tx_conf_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
@@ -2414,10 +403,8 @@ ingress_tx_error_dqrr(struct qman_portal *portal,
percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
- if (dpaa_eth_napi_schedule(percpu_priv)) {
- percpu_priv->in_interrupt++;
+ if (dpaa_eth_napi_schedule(percpu_priv))
return qman_cb_dqrr_stop;
- }
_dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
@@ -2425,7 +412,7 @@ ingress_tx_error_dqrr(struct qman_portal *portal,
}
static enum qman_cb_dqrr_result __hot
-ingress_tx_default_dqrr(struct qman_portal *portal,
+priv_tx_conf_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
@@ -2442,138 +429,15 @@ ingress_tx_default_dqrr(struct qman_portal *portal,
/* Non-migratable context, safe to use __this_cpu_ptr */
percpu_priv = __this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv)) {
- percpu_priv->in_interrupt++;
+ if (dpaa_eth_napi_schedule(percpu_priv))
return qman_cb_dqrr_stop;
- }
_dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
-static enum qman_cb_dqrr_result
-shared_tx_error_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
-{
- struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
- struct dpa_bp *dpa_bp;
- const struct qm_fd *fd = &dq->fd;
-
- net_dev = ((struct dpa_fq *)fq)->net_dev;
- priv = netdev_priv(net_dev);
-
- dpa_bp = dpa_bpid2pool(fd->bpid);
- BUG_ON(IS_ERR(dpa_bp));
-
- percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
-
- if (netif_msg_hw(priv) && net_ratelimit())
- netdev_warn(net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_ERRORS);
-
- if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
- dpa_fd_release_sg(net_dev, fd);
- else
- dpa_fd_release(net_dev, fd);
-
- percpu_priv->stats.tx_errors++;
-
- return qman_cb_dqrr_consume;
-}
-
-static enum qman_cb_dqrr_result __hot
-shared_tx_default_dqrr(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
-{
- struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
- struct dpa_bp *dpa_bp;
- const struct qm_fd *fd = &dq->fd;
-
- net_dev = ((struct dpa_fq *)fq)->net_dev;
- priv = netdev_priv(net_dev);
-
- dpa_bp = dpa_bpid2pool(fd->bpid);
- BUG_ON(IS_ERR(dpa_bp));
-
- percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
-
- if (unlikely(fd->status & FM_FD_STAT_ERRORS) != 0) {
- if (netif_msg_hw(priv) && net_ratelimit())
- netdev_warn(net_dev, "FD status = 0x%08x\n",
- fd->status & FM_FD_STAT_ERRORS);
-
- percpu_priv->stats.tx_errors++;
- }
-
- if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
- dpa_fd_release_sg(net_dev, fd);
- else
- dpa_fd_release(net_dev, fd);
-
- percpu_priv->tx_confirm++;
-
- return qman_cb_dqrr_consume;
-}
-
-static void count_ern(struct dpa_percpu_priv_s *percpu_priv,
- const struct qm_mr_entry *msg)
-{
- switch (msg->ern.rc & QM_MR_RC_MASK) {
- case QM_MR_RC_CGR_TAILDROP:
- percpu_priv->ern_cnt.cg_tdrop++;
- break;
- case QM_MR_RC_WRED:
- percpu_priv->ern_cnt.wred++;
- break;
- case QM_MR_RC_ERROR:
- percpu_priv->ern_cnt.err_cond++;
- break;
- case QM_MR_RC_ORPWINDOW_EARLY:
- percpu_priv->ern_cnt.early_window++;
- break;
- case QM_MR_RC_ORPWINDOW_LATE:
- percpu_priv->ern_cnt.late_window++;
- break;
- case QM_MR_RC_FQ_TAILDROP:
- percpu_priv->ern_cnt.fq_tdrop++;
- break;
- case QM_MR_RC_ORPWINDOW_RETIRED:
- percpu_priv->ern_cnt.fq_retired++;
- break;
- case QM_MR_RC_ORP_ZERO:
- percpu_priv->ern_cnt.orp_zero++;
- break;
- }
-}
-
-static void shared_ern(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_mr_entry *msg)
-{
- struct net_device *net_dev;
- const struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
- struct dpa_fq *dpa_fq = (struct dpa_fq *)fq;
-
- net_dev = dpa_fq->net_dev;
- priv = netdev_priv(net_dev);
- percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
-
- dpa_fd_release(net_dev, &msg->ern.fd);
-
- percpu_priv->stats.tx_dropped++;
- percpu_priv->stats.tx_fifo_errors++;
- count_ern(percpu_priv, msg);
-}
-
-static void egress_ern(struct qman_portal *portal,
+static void priv_ern(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_mr_entry *msg)
{
@@ -2606,453 +470,75 @@ static void egress_ern(struct qman_portal *portal,
dev_kfree_skb_any(skb);
}
-static const struct qman_fq rx_shared_fq = {
- .cb = { .dqrr = shared_rx_dqrr }
-};
-static const struct qman_fq rx_private_defq = {
- .cb = { .dqrr = ingress_rx_default_dqrr }
-};
-static const struct qman_fq rx_private_errq = {
- .cb = { .dqrr = ingress_rx_error_dqrr }
-};
-static const struct qman_fq tx_private_defq = {
- .cb = { .dqrr = ingress_tx_default_dqrr }
-};
-static const struct qman_fq tx_private_errq = {
- .cb = { .dqrr = ingress_tx_error_dqrr }
-};
-static const struct qman_fq tx_shared_defq = {
- .cb = { .dqrr = shared_tx_default_dqrr }
-};
-static const struct qman_fq tx_shared_errq = {
- .cb = { .dqrr = shared_tx_error_dqrr }
-};
-static const struct qman_fq private_egress_fq = {
- .cb = { .ern = egress_ern }
+static const dpa_fq_cbs_t private_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
+ .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
+ .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
+ .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
+ .egress_ern = { .cb = { .ern = priv_ern } }
};
-static const struct qman_fq shared_egress_fq = {
- .cb = { .ern = shared_ern }
-};
-
-#ifdef CONFIG_FSL_DPAA_ETH_UNIT_TESTS
-static bool tx_unit_test_passed = true;
-
-static void tx_unit_test_ern(struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_mr_entry *msg)
-{
- struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct sk_buff **skbh;
- struct sk_buff *skb;
- const struct qm_fd *fd;
- dma_addr_t addr;
-
- net_dev = ((struct dpa_fq *)fq)->net_dev;
- priv = netdev_priv(net_dev);
-
- tx_unit_test_passed = false;
-
- fd = &msg->ern.fd;
-
- addr = qm_fd_addr(fd);
-
- skbh = (struct sk_buff **)phys_to_virt(addr);
- skb = *skbh;
-
- if (!skb || !is_kernel_addr((unsigned long)skb))
- panic("Corrupt skb in ERN!\n");
-
- kfree_skb(skb);
-}
-
-static unsigned char *tx_unit_skb_head;
-static unsigned char *tx_unit_skb_end;
-static int tx_unit_tested;
-static enum qman_cb_dqrr_result tx_unit_test_dqrr(
- struct qman_portal *portal,
- struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
{
- struct net_device *net_dev;
- struct dpa_priv_s *priv;
- struct sk_buff **skbh;
- struct sk_buff *skb;
- const struct qm_fd *fd;
- dma_addr_t addr;
- unsigned char *startaddr;
struct dpa_percpu_priv_s *percpu_priv;
+ int i;
- tx_unit_test_passed = false;
-
- tx_unit_tested++;
-
- net_dev = ((struct dpa_fq *)fq)->net_dev;
- priv = netdev_priv(net_dev);
-
- percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
-
- fd = &dq->fd;
-
- addr = qm_fd_addr(fd);
-
- skbh = (struct sk_buff **)phys_to_virt(addr);
- startaddr = (unsigned char *)skbh;
- skb = *skbh;
-
- if (!skb || !is_kernel_addr((unsigned long)skb))
- panic("Invalid skb address in TX Unit Test FD\n");
-
- /* Make sure we're dealing with the same skb */
- if (skb->head != tx_unit_skb_head
- || skb_end_pointer(skb) != tx_unit_skb_end)
- goto out;
-
- /*
- * If we recycled, then there must be enough room between fd.addr
- * and skb->end for a new RX buffer
- */
- if (fd->cmd & FM_FD_CMD_FCO) {
- size_t bufsize = skb_end_pointer(skb) - startaddr;
-
- if (bufsize < dpa_get_max_frm())
- goto out;
- } else {
- /*
- * If we didn't recycle, but the buffer was big enough,
- * increment the counter to put it back
- */
- if (skb_end_pointer(skb) - skb->head >=
- dpa_get_max_frm())
- (*percpu_priv->dpa_bp_count)++;
-
- /* If we didn't recycle, the data pointer should be good */
- if (skb->data != startaddr + dpa_fd_offset(fd))
- goto out;
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ napi_enable(&percpu_priv->napi);
}
-
- tx_unit_test_passed = true;
-out:
- /* The skb is no longer needed, and belongs to us */
- kfree_skb(skb);
-
- return qman_cb_dqrr_consume;
}
-static const struct qman_fq tx_unit_test_fq = {
- .cb = { .dqrr = tx_unit_test_dqrr, .ern = tx_unit_test_ern }
-};
-
-static struct dpa_fq unit_fq;
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
-static struct dpa_fq unit_recycle_fq;
-#endif
-static bool tx_unit_test_ran; /* Starts as false */
-
-static int dpa_tx_unit_test(struct net_device *net_dev)
+static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
{
- /* Create a new FQ */
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct qman_fq *oldq;
- int size, headroom;
struct dpa_percpu_priv_s *percpu_priv;
- cpumask_var_t old_cpumask;
- int test_count = 0;
- int err = 0;
- int tests_failed = 0;
- const cpumask_t *cpus = qman_affine_cpus();
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
- struct qman_fq *oldrecycleq;
-#endif
-
- if (!alloc_cpumask_var(&old_cpumask, GFP_KERNEL)) {
- pr_err("UNIT test cpumask allocation failed\n");
- return -ENOMEM;
- }
-
- cpumask_copy(old_cpumask, tsk_cpus_allowed(current));
- set_cpus_allowed_ptr(current, cpus);
- /* disable bottom halves */
- local_bh_disable();
-
- percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
-
- qman_irqsource_remove(QM_PIRQ_DQRI);
- unit_fq.net_dev = net_dev;
- unit_fq.fq_base = tx_unit_test_fq;
-
- /* Save old queue */
- oldq = priv->egress_fqs[smp_processor_id()];
-
- err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &unit_fq.fq_base);
-
- if (err < 0) {
- pr_err("UNIT test FQ create failed: %d\n", err);
- goto fq_create_fail;
- }
-
- err = qman_init_fq(&unit_fq.fq_base,
- QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL);
- if (err < 0) {
- pr_err("UNIT test FQ init failed: %d\n", err);
- goto fq_init_fail;
- }
-
- /* Replace queue 0 with this queue */
- priv->egress_fqs[smp_processor_id()] = &unit_fq.fq_base;
-
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
- oldrecycleq = priv->recycle_fqs[smp_processor_id()];
- unit_recycle_fq.net_dev = net_dev;
- unit_recycle_fq.fq_base = tx_unit_test_fq;
-
- err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID,
- &unit_recycle_fq.fq_base);
-
- if (err < 0) {
- pr_err("UNIT test Recycle FQ create failed: %d\n", err);
- goto recycle_fq_create_fail;
- }
-
- err = qman_init_fq(&unit_recycle_fq.fq_base,
- QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL);
- if (err < 0) {
- pr_err("UNIT test Recycle FQ init failed: %d\n", err);
- goto recycle_fq_init_fail;
- }
-
- priv->recycle_fqs[smp_processor_id()] = &unit_recycle_fq.fq_base;
-
- pr_err("TX Unit Test using FQ: %d - Recycle FQ: %d\n",
- qman_fq_fqid(&unit_fq.fq_base),
- qman_fq_fqid(&unit_recycle_fq.fq_base));
-#else
- pr_err("TX Unit Test using FQ %d\n", qman_fq_fqid(&unit_fq.fq_base));
-#endif
+ int i;
- /* Try packet sizes from 64 bytes to just above the maximum */
- for (size = 64; size <= 9600 + 128; size += 64) {
- for (headroom = priv->tx_headroom; headroom < 0x800;
- headroom += 16) {
- int ret;
- struct sk_buff *skb;
-
- test_count++;
-
- skb = dev_alloc_skb(size + headroom);
-
- if (!skb) {
- pr_err("Failed to allocate skb\n");
- err = -ENOMEM;
- goto end_test;
- }
-
- if (skb_end_pointer(skb) - skb->head >=
- dpa_get_max_frm())
- (*percpu_priv->dpa_bp_count)--;
-
- skb_put(skb, size + headroom);
- skb_pull(skb, headroom);
-
- tx_unit_skb_head = skb->head;
- tx_unit_skb_end = skb_end_pointer(skb);
-
- skb_set_queue_mapping(skb, smp_processor_id());
-
- /* tx */
- ret = net_dev->netdev_ops->ndo_start_xmit(skb, net_dev);
-
- if (ret != NETDEV_TX_OK) {
- pr_err("Failed to TX with err %d\n", ret);
- err = -EIO;
- goto end_test;
- }
-
- /* Wait for it to arrive */
- ret = spin_event_timeout(qman_poll_dqrr(1) != 0,
- 100000, 1);
-
- if (!ret) {
- pr_err("TX Packet never arrived\n");
- /*
- * Count the test as failed.
- */
- tests_failed++;
- }
-
- /* Was it good? */
- if (tx_unit_test_passed == false) {
- pr_err("Test failed:\n");
- pr_err("size: %d pad: %d head: %p end: %p\n",
- size, headroom, tx_unit_skb_head,
- tx_unit_skb_end);
- tests_failed++;
- }
- }
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ napi_disable(&percpu_priv->napi);
}
-
-end_test:
- err = qman_retire_fq(&unit_fq.fq_base, NULL);
- if (unlikely(err < 0))
- pr_err("Could not retire TX Unit Test FQ (%d)\n", err);
-
- err = qman_oos_fq(&unit_fq.fq_base);
- if (unlikely(err < 0))
- pr_err("Could not OOS TX Unit Test FQ (%d)\n", err);
-
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
- err = qman_retire_fq(&unit_recycle_fq.fq_base, NULL);
- if (unlikely(err < 0))
- pr_err("Could not retire Recycle TX Unit Test FQ (%d)\n", err);
-
- err = qman_oos_fq(&unit_recycle_fq.fq_base);
- if (unlikely(err < 0))
- pr_err("Could not OOS Recycle TX Unit Test FQ (%d)\n", err);
-
-recycle_fq_init_fail:
- qman_destroy_fq(&unit_recycle_fq.fq_base, 0);
-
-recycle_fq_create_fail:
- priv->recycle_fqs[smp_processor_id()] = oldrecycleq;
-#endif
-
-fq_init_fail:
- qman_destroy_fq(&unit_fq.fq_base, 0);
-
-fq_create_fail:
- priv->egress_fqs[smp_processor_id()] = oldq;
- local_bh_enable();
- qman_irqsource_add(QM_PIRQ_DQRI);
- tx_unit_test_ran = true;
- set_cpus_allowed_ptr(current, old_cpumask);
- free_cpumask_var(old_cpumask);
-
- pr_err("Tested %d/%d packets. %d failed\n", test_count, tx_unit_tested,
- tests_failed);
-
- if (tests_failed)
- err = -EINVAL;
-
- /* Reset counters */
- memset(&percpu_priv->stats, 0, sizeof(percpu_priv->stats));
-
- return err;
}
-#endif
-static int __cold dpa_start(struct net_device *net_dev)
+static int __cold dpa_eth_priv_start(struct net_device *net_dev)
{
- int err, i;
+ int err;
struct dpa_priv_s *priv;
- struct mac_device *mac_dev;
- struct dpa_percpu_priv_s *percpu_priv;
priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- if (!mac_dev)
- goto no_mac;
+#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
/* Seed the global buffer pool at the first ifconfig up
* of a private port. Update the percpu buffer counters
* of each private interface.
*/
- if (!priv->shared && !default_pool_seeded) {
- default_pool->size = default_buf_size;
- dpa_make_private_pool(default_pool);
- default_pool_seeded = true;
- }
- for_each_online_cpu(i) {
- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- if (!priv->shared && !percpu_priv->dpa_bp) {
- percpu_priv->dpa_bp = priv->dpa_bp;
- percpu_priv->dpa_bp_count =
- per_cpu_ptr(priv->dpa_bp->percpu_count, i);
- }
- }
+ dpa_bp_priv_non_sg_seed(priv->dpa_bp);
+#endif
dpaa_eth_napi_enable(priv);
- err = mac_dev->init_phy(net_dev);
- if (err < 0) {
- if (netif_msg_ifup(priv))
- netdev_err(net_dev, "init_phy() = %d\n", err);
- goto init_phy_failed;
- }
-
- for_each_port_device(i, mac_dev->port_dev)
- fm_port_enable(mac_dev->port_dev[i]);
-
- err = priv->mac_dev->start(mac_dev);
- if (err < 0) {
- if (netif_msg_ifup(priv))
- netdev_err(net_dev, "mac_dev->start() = %d\n", err);
- goto mac_start_failed;
- }
-
-no_mac:
- netif_tx_start_all_queues(net_dev);
-
- return 0;
-
-mac_start_failed:
- for_each_port_device(i, mac_dev->port_dev)
- fm_port_disable(mac_dev->port_dev[i]);
-
-init_phy_failed:
- dpaa_eth_napi_disable(priv);
+ err = dpa_start(net_dev);
+ if (err < 0)
+ dpaa_eth_napi_disable(priv);
return err;
}
-static int __cold dpa_stop(struct net_device *net_dev)
-{
- int _errno, i;
- struct dpa_priv_s *priv;
- struct mac_device *mac_dev;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- netif_tx_stop_all_queues(net_dev);
- if (!mac_dev)
- return 0;
- _errno = mac_dev->stop(mac_dev);
- if (unlikely(_errno < 0))
- if (netif_msg_ifdown(priv))
- netdev_err(net_dev, "mac_dev->stop() = %d\n",
- _errno);
-
- for_each_port_device(i, mac_dev->port_dev)
- fm_port_disable(mac_dev->port_dev[i]);
+static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
+{
+ int _errno;
+ struct dpa_priv_s *priv;
- if (mac_dev->phy_dev)
- phy_disconnect(mac_dev->phy_dev);
- mac_dev->phy_dev = NULL;
+ _errno = dpa_stop(net_dev);
+ priv = netdev_priv(net_dev);
dpaa_eth_napi_disable(priv);
return _errno;
}
-static void __cold dpa_timeout(struct net_device *net_dev)
-{
- const struct dpa_priv_s *priv;
- struct dpa_percpu_priv_s *percpu_priv;
-
- priv = netdev_priv(net_dev);
- percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
-
- if (netif_msg_timer(priv))
- netdev_crit(net_dev, "Transmit timeout latency: %u ms\n",
- jiffies_to_msecs(jiffies - net_dev->trans_start));
-
- percpu_priv->stats.tx_errors++;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void dpaa_eth_poll_controller(struct net_device *net_dev)
{
@@ -3067,285 +553,10 @@ static void dpaa_eth_poll_controller(struct net_device *net_dev)
}
#endif
-static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
-{
- return ((struct dpa_bp *)dpa_bp0)->size -
- ((struct dpa_bp *)dpa_bp1)->size;
-}
-
-static struct dpa_bp * __cold __must_check __attribute__((nonnull))
-dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
-{
- int i, lenp, na, ns;
- struct device *dev;
- struct device_node *dev_node;
- const phandle *phandle_prop;
- const uint32_t *bpid;
- const uint32_t *bpool_cfg;
- struct dpa_bp *dpa_bp;
- int has_kernel_pool = 0;
- int has_shared_pool = 0;
-
- dev = &_of_dev->dev;
-
- /* The default is one, if there's no property */
- *count = 1;
-
- /* There are three types of buffer pool configuration:
- * 1) No bp assignment
- * 2) A static assignment to an empty configuration
- * 3) A static assignment to one or more configured pools
- *
- * We don't support using multiple unconfigured pools.
- */
-
- /* Get the buffer pools to be used */
- phandle_prop = of_get_property(dev->of_node,
- "fsl,bman-buffer-pools", &lenp);
-
- if (phandle_prop)
- *count = lenp / sizeof(phandle);
- else {
- if (default_pool)
- return default_pool;
-
- has_kernel_pool = 1;
- }
-
- dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
- if (unlikely(dpa_bp == NULL)) {
- dev_err(dev, "devm_kzalloc() failed\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dev_node = of_find_node_by_path("/");
- if (unlikely(dev_node == NULL)) {
- dev_err(dev, "of_find_node_by_path(/) failed\n");
- return ERR_PTR(-EINVAL);
- }
-
- na = of_n_addr_cells(dev_node);
- ns = of_n_size_cells(dev_node);
-
- for (i = 0; i < *count && phandle_prop; i++) {
- of_node_put(dev_node);
- dev_node = of_find_node_by_phandle(phandle_prop[i]);
- if (unlikely(dev_node == NULL)) {
- dev_err(dev, "of_find_node_by_phandle() failed\n");
- return ERR_PTR(-EFAULT);
- }
-
- if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
- dev_err(dev,
- "!of_device_is_compatible(%s, fsl,bpool)\n",
- dev_node->full_name);
- dpa_bp = ERR_PTR(-EINVAL);
- goto _return_of_node_put;
- }
-
- bpid = of_get_property(dev_node, "fsl,bpid", &lenp);
- if ((bpid == NULL) || (lenp != sizeof(*bpid))) {
- dev_err(dev, "fsl,bpid property not found.\n");
- dpa_bp = ERR_PTR(-EINVAL);
- goto _return_of_node_put;
- }
- dpa_bp[i].bpid = *bpid;
-
- bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
- &lenp);
- if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
- const uint32_t *seed_pool;
-
- dpa_bp[i].config_count =
- (int)of_read_number(bpool_cfg, ns);
- dpa_bp[i].size = of_read_number(bpool_cfg + ns, ns);
- dpa_bp[i].paddr =
- of_read_number(bpool_cfg + 2 * ns, na);
-
- seed_pool = of_get_property(dev_node,
- "fsl,bpool-ethernet-seeds", &lenp);
- dpa_bp[i].seed_pool = !!seed_pool;
-
- has_shared_pool = 1;
- } else {
- has_kernel_pool = 1;
- }
-
- if (i > 0)
- has_shared_pool = 1;
- }
-
- if (has_kernel_pool && has_shared_pool) {
- dev_err(dev, "Invalid buffer pool configuration "
- "for node %s\n", dev_node->full_name);
- dpa_bp = ERR_PTR(-EINVAL);
- goto _return_of_node_put;
- } else if (has_kernel_pool) {
- dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
- dpa_bp->kernel_pool = 1;
- }
-
- sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
-
- return dpa_bp;
-
-_return_of_node_put:
- if (dev_node)
- of_node_put(dev_node);
-
- return dpa_bp;
-}
-
-static int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
- size_t count)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- int i;
-
- if (dpa_bp->kernel_pool) {
- priv->shared = 0;
-
- if (netif_msg_probe(priv))
- dev_info(net_dev->dev.parent,
- "Using private BM buffer pools\n");
- } else {
- priv->shared = 1;
- }
-
- priv->dpa_bp = dpa_bp;
- priv->bp_count = count;
-
- for (i = 0; i < count; i++) {
- int err;
- err = dpa_bp_alloc(&dpa_bp[i]);
- if (err < 0) {
- dpa_bp_free(priv, dpa_bp);
- priv->dpa_bp = NULL;
- return err;
- }
-
- /* For now, just point to the default pool.
- * We can add support for more pools, later
- */
- if (dpa_bp->kernel_pool)
- priv->dpa_bp = default_pool;
- }
-
- return 0;
-}
-
-static struct mac_device * __cold __must_check
-__attribute__((nonnull))
-dpa_mac_probe(struct platform_device *_of_dev)
-{
- struct device *dpa_dev, *dev;
- struct device_node *mac_node;
- int lenp;
- const phandle *phandle_prop;
- struct platform_device *of_dev;
- struct mac_device *mac_dev;
-#ifdef CONFIG_FSL_DPAA_1588
- struct net_device *net_dev = NULL;
- struct dpa_priv_s *priv = NULL;
- struct device_node *timer_node;
-#endif
-
- phandle_prop = of_get_property(_of_dev->dev.of_node,
- "fsl,fman-mac", &lenp);
- if (phandle_prop == NULL)
- return NULL;
-
- BUG_ON(lenp != sizeof(phandle));
-
- dpa_dev = &_of_dev->dev;
-
- mac_node = of_find_node_by_phandle(*phandle_prop);
- if (unlikely(mac_node == NULL)) {
- dev_err(dpa_dev, "of_find_node_by_phandle() failed\n");
- return ERR_PTR(-EFAULT);
- }
-
- of_dev = of_find_device_by_node(mac_node);
- if (unlikely(of_dev == NULL)) {
- dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
- mac_node->full_name);
- of_node_put(mac_node);
- return ERR_PTR(-EINVAL);
- }
- of_node_put(mac_node);
-
- dev = &of_dev->dev;
-
- mac_dev = dev_get_drvdata(dev);
- if (unlikely(mac_dev == NULL)) {
- dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
- dev_name(dev));
- return ERR_PTR(-EINVAL);
- }
-
-#ifdef CONFIG_FSL_DPAA_1588
- phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
- if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
- ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
- (mac_dev->speed == SPEED_1000)))) {
- timer_node = of_find_node_by_phandle(*phandle_prop);
- if (timer_node && (net_dev = dev_get_drvdata(dpa_dev))) {
- priv = netdev_priv(net_dev);
- if (!dpa_ptp_init(priv))
- dev_info(dev, "%s: ptp 1588 is initialized.\n",
- mac_node->full_name);
- }
- }
-#endif
-
- return mac_dev;
-}
-
-static const char fsl_qman_frame_queues[][25] = {
- [RX] = "fsl,qman-frame-queues-rx",
- [TX] = "fsl,qman-frame-queues-tx"
-};
-
-
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-static u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb)
-{
- return smp_processor_id();
-}
-#endif
-
-static netdev_features_t dpa_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- struct dpa_priv_s *priv = netdev_priv(dev);
- netdev_features_t unsupported_features = 0;
-
- /* In theory we should never be requested to enable features that
- * we didn't set in netdev->features and netdev->hw_features at probe
- * time, but double check just to be on the safe side.
- */
- if (!priv->mac_dev)
- unsupported_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- /* We don't support enabling Rx csum through ethtool yet */
- unsupported_features |= NETIF_F_RXCSUM;
-
- features &= ~unsupported_features;
-
- return features;
-}
-
-static int dpa_set_features(struct net_device *dev, netdev_features_t features)
-{
- /* Not much to do here for now */
- dev->features = features;
- return 0;
-}
-
-
static const struct net_device_ops dpa_private_ops = {
- .ndo_open = dpa_start,
+ .ndo_open = dpa_eth_priv_start,
.ndo_start_xmit = dpa_tx,
- .ndo_stop = dpa_stop,
+ .ndo_stop = dpa_eth_priv_stop,
.ndo_tx_timeout = dpa_timeout,
.ndo_get_stats64 = dpa_get_stats64,
.ndo_set_mac_address = dpa_set_mac_address,
@@ -3364,512 +575,13 @@ static const struct net_device_ops dpa_private_ops = {
#endif
};
-static const struct net_device_ops dpa_shared_ops = {
- .ndo_open = dpa_start,
- .ndo_start_xmit = dpa_shared_tx,
- .ndo_stop = dpa_stop,
- .ndo_tx_timeout = dpa_timeout,
- .ndo_get_stats64 = dpa_get_stats64,
- .ndo_set_mac_address = dpa_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
- .ndo_select_queue = dpa_select_queue,
-#endif
- .ndo_change_mtu = dpa_change_mtu,
- .ndo_set_rx_mode = dpa_set_rx_mode,
- .ndo_init = dpa_ndo_init,
- .ndo_set_features = dpa_set_features,
- .ndo_fix_features = dpa_fix_features,
- .ndo_do_ioctl = dpa_ioctl,
-};
-
-static u32 rx_pool_channel;
-static DEFINE_SPINLOCK(rx_pool_channel_init);
-
-static int dpa_get_channel(struct device *dev,
- struct device_node *dpa_node)
-{
- spin_lock(&rx_pool_channel_init);
- if (!rx_pool_channel) {
- u32 pool;
- int ret = qman_alloc_pool(&pool);
- if (!ret)
- rx_pool_channel = pool;
- }
- spin_unlock(&rx_pool_channel_init);
- if (!rx_pool_channel)
- return -ENOMEM;
- return rx_pool_channel;
-}
-
-struct fqid_cell {
- uint32_t start;
- uint32_t count;
-};
-
-static const struct fqid_cell default_fqids[][3] = {
- [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
- [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
-};
-
-static const struct fqid_cell tx_confirm_fqids[] = {
- {0, DPAA_ETH_TX_QUEUES}
-};
-
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
-static const struct fqid_cell tx_recycle_fqids[] = {
- {0, DPAA_ETH_TX_QUEUES}
-};
-#endif
-
-static int
-dpa_fq_probe(struct platform_device *_of_dev, struct list_head *list,
- struct dpa_fq **defq, struct dpa_fq **errq,
- struct dpa_fq **fqs, struct dpa_fq **txconfq,
- struct dpa_fq **txrecycle, int ptype)
-{
- struct device *dev = &_of_dev->dev;
- struct device_node *np = dev->of_node;
- const struct fqid_cell *fqids;
- int i, j, lenp;
- int num_fqids;
- struct dpa_fq *dpa_fq;
- int err = 0;
-
- /* per-core tx confirmation queues */
- if (txconfq) {
- fqids = tx_confirm_fqids;
- dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids[0].count,
- GFP_KERNEL);
- if (dpa_fq == NULL) {
- dev_err(dev, "devm_kzalloc() failed\n");
- return -ENOMEM;
- }
- *txconfq = dpa_fq;
- for (j = 0; j < fqids[0].count; j++)
- dpa_fq[j].fq_type = FQ_TYPE_TX_CONFIRM;
-
- for (j = 0; j < fqids[0].count; j++) {
- dpa_fq[j].fqid = fqids[0].start ?
- fqids[0].start + j : 0;
- _dpa_assign_wq(dpa_fq + j);
- list_add_tail(&dpa_fq[j].list, list);
- }
- }
-
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
- /* per-core tx queues for recycleable frames (FManv3 only) */
- if (txrecycle) {
- fqids = tx_recycle_fqids;
- dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids[0].count,
- GFP_KERNEL);
- if (dpa_fq == NULL) {
- dev_err(dev, "devm_kzalloc() failed\n");
- return -ENOMEM;
- }
-
- *txrecycle = dpa_fq;
- for (j = 0; j < fqids[0].count; j++)
- dpa_fq[j].fq_type = FQ_TYPE_TX_RECYCLE;
-
- for (j = 0; j < fqids[0].count; j++) {
- dpa_fq[j].fqid = fqids[0].start ?
- fqids[0].start + j : 0;
- _dpa_assign_wq(dpa_fq + j);
- list_add_tail(&dpa_fq[j].list, list);
- }
- }
-#endif
-
- fqids = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
- if (fqids == NULL) {
- fqids = default_fqids[ptype];
- num_fqids = 3;
- } else
- num_fqids = lenp / sizeof(*fqids);
-
- for (i = 0; i < num_fqids; i++) {
- dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids[i].count,
- GFP_KERNEL);
- if (dpa_fq == NULL) {
- dev_err(dev, "devm_kzalloc() failed\n");
- return -ENOMEM;
- }
-
- /* The first queue is the Error queue */
- if (i == 0 && errq) {
- *errq = dpa_fq;
-
- if (fqids[i].count != 1) {
- dev_err(dev, "Too many error queues!\n");
- err = -EINVAL;
- goto invalid_error_queues;
- }
-
- dpa_fq[0].fq_type = (ptype == RX ?
- FQ_TYPE_RX_ERROR : FQ_TYPE_TX_ERROR);
- }
-
- /* The second queue is the the Default queue */
- if (i == 1 && defq) {
- *defq = dpa_fq;
-
- if (fqids[i].count != 1) {
- dev_err(dev, "Too many default queues!\n");
- err = -EINVAL;
- goto invalid_default_queues;
- }
-
- dpa_fq[0].fq_type = (ptype == RX ?
- FQ_TYPE_RX_DEFAULT : FQ_TYPE_TX_CONFIRM);
- }
-
- /*
- * All subsequent queues are gathered together.
- * The first 8 will be used by the private linux interface
- * if these are TX queues
- */
- if (i == 2 || (!errq && i == 0 && fqs)) {
- *fqs = dpa_fq;
-
- for (j = 0; j < fqids[i].count; j++)
- dpa_fq[j].fq_type = (ptype == RX ?
- FQ_TYPE_RX_PCD : FQ_TYPE_TX);
- }
-
- for (j = 0; j < fqids[i].count; j++) {
- dpa_fq[j].fqid = fqids[i].start ?
- fqids[i].start + j : 0;
- _dpa_assign_wq(dpa_fq + j);
- list_add_tail(&dpa_fq[j].list, list);
- }
- }
-
-invalid_default_queues:
-invalid_error_queues:
- return err;
-}
-
-static void dpa_setup_ingress(struct dpa_priv_s *priv, struct dpa_fq *fq,
- const struct qman_fq *template)
-{
- fq->fq_base = *template;
- fq->net_dev = priv->net_dev;
-
- fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
- fq->channel = priv->channel;
-}
-
-static void dpa_setup_egress(struct dpa_priv_s *priv,
- struct list_head *head, struct dpa_fq *fq,
- struct fm_port *port)
-{
- struct list_head *ptr = &fq->list;
- struct dpa_fq *iter;
- int i = 0;
-
- while (true) {
- iter = list_entry(ptr, struct dpa_fq, list);
- if (priv->shared)
- iter->fq_base = shared_egress_fq;
- else
- iter->fq_base = private_egress_fq;
-
- iter->net_dev = priv->net_dev;
- if (port) {
- iter->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
- iter->channel = fm_get_tx_port_channel(port);
- } else
- iter->flags = QMAN_FQ_FLAG_NO_MODIFY;
-
- if (list_is_last(ptr, head))
- break;
-
- ptr = ptr->next;
- }
-
- /* Allocate frame queues to all available CPUs no matter the number of
- * queues specified in device tree.
- */
- for (i = 0, ptr = &fq->list; i < DPAA_ETH_TX_QUEUES; i++) {
- iter = list_entry(ptr, struct dpa_fq, list);
- priv->egress_fqs[i] = &iter->fq_base;
-
- if (list_is_last(ptr, head)) {
- ptr = &fq->list;
- continue;
- }
-
- ptr = ptr->next;
- }
-}
-
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
-static void dpa_setup_recycle_queues(struct dpa_priv_s *priv, struct dpa_fq *fq,
- struct fm_port *port)
-{
- int i = 0;
- struct list_head *ptr = &fq->list;
-
- for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) {
- struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
-
- iter->fq_base = private_egress_fq;
- iter->net_dev = priv->net_dev;
-
- priv->recycle_fqs[i] = &iter->fq_base;
-
- iter->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
- iter->channel = fm_get_tx_port_channel(port);
-
- ptr = ptr->next;
- }
-}
-#endif
-
-static void dpa_setup_conf_queues(struct dpa_priv_s *priv, struct dpa_fq *fq)
-{
- struct list_head *ptr = &fq->list;
- int i;
-
- /*
- * Configure the queues to be core affine.
- * The implicit assumption here is that each cpu has its own Tx queue
- */
- for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) {
- struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
-
- dpa_setup_ingress(priv, iter, &tx_private_defq);
- /* Leave the confirmation queue in the default pool channel */
- priv->conf_fqs[i] = &iter->fq_base;
-
- ptr = ptr->next;
- }
-}
-
-static void dpa_setup_ingress_queues(struct dpa_priv_s *priv,
- struct list_head *head, struct dpa_fq *fq)
-{
- struct list_head *ptr = &fq->list;
- u32 fqid;
- int portals[NR_CPUS];
- int i, cpu, num_portals = 0;
- const cpumask_t *affine_cpus = qman_affine_cpus();
-
- for_each_cpu(cpu, affine_cpus)
- portals[num_portals++] = qman_affine_channel(cpu);
- if (num_portals == 0) {
- dev_err(fq->net_dev->dev.parent,
- "No Qman software (affine) channels found");
- return;
- }
-
- i = 0;
- fqid = 0;
- if (priv->mac_dev)
- fqid = (priv->mac_dev->res->start & 0x1fffff) >> 6;
-
- while (true) {
- struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
-
- if (priv->shared)
- dpa_setup_ingress(priv, iter, &rx_shared_fq);
- else
- dpa_setup_ingress(priv, iter, &rx_private_defq);
-
- if (!iter->fqid)
- iter->fqid = fqid++;
-
- /* Assign the queues to a channel in a round-robin fashion */
- iter->channel = portals[i];
- i = (i + 1) % num_portals;
-
- if (list_is_last(ptr, head))
- break;
-
- ptr = ptr->next;
- }
-}
-
-static void
-dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
- struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
-{
- struct fm_port_params tx_port_param;
- bool frag_enabled = false;
-
- memset(&tx_port_param, 0, sizeof(tx_port_param));
- dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
- buf_layout, frag_enabled);
-}
-
-static void
-dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
- struct dpa_fq *errq, struct dpa_fq *defq,
- struct dpa_buffer_layout_s *buf_layout)
-{
- struct fm_port_params rx_port_param;
- int i;
- bool frag_enabled = false;
-
- memset(&rx_port_param, 0, sizeof(rx_port_param));
- count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
- rx_port_param.num_pools = count;
- for (i = 0; i < count; i++) {
- if (i >= rx_port_param.num_pools)
- break;
- rx_port_param.pool_param[i].id = bp[i].bpid;
- rx_port_param.pool_param[i].size = bp[i].size;
- }
-
- dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
- buf_layout, frag_enabled);
-}
-
-static void dpa_rx_fq_init(struct dpa_priv_s *priv, struct list_head *head,
- struct dpa_fq *defq, struct dpa_fq *errq,
- struct dpa_fq *fqs)
-{
- if (fqs)
- dpa_setup_ingress_queues(priv, head, fqs);
-
- /* Only real devices need default/error queues set up */
- if (!priv->mac_dev)
- return;
-
- if (defq->fqid == 0 && netif_msg_probe(priv))
- pr_info("Using dynamic RX QM frame queues\n");
-
- if (priv->shared) {
- dpa_setup_ingress(priv, defq, &rx_shared_fq);
- dpa_setup_ingress(priv, errq, &rx_shared_fq);
- } else {
- dpa_setup_ingress(priv, defq, &rx_private_defq);
- dpa_setup_ingress(priv, errq, &rx_private_errq);
- }
-}
-
-static void dpa_tx_fq_init(struct dpa_priv_s *priv, struct list_head *head,
- struct dpa_fq *defq, struct dpa_fq *errq,
- struct dpa_fq *fqs, struct dpa_fq *confqs,
- struct dpa_fq *recyclefqs, struct fm_port *port)
-{
- if (fqs)
- dpa_setup_egress(priv, head, fqs, port);
-
- /* Only real devices need default/error queues set up */
- if (!priv->mac_dev)
- return;
-
- if (defq->fqid == 0 && netif_msg_probe(priv))
- pr_info("Using dynamic TX QM frame queues\n");
-
- /* The shared driver doesn't use tx confirmation */
- if (priv->shared) {
- dpa_setup_ingress(priv, defq, &tx_shared_defq);
- dpa_setup_ingress(priv, errq, &tx_shared_errq);
- } else {
- dpa_setup_ingress(priv, defq, &tx_private_defq);
- dpa_setup_ingress(priv, errq, &tx_private_errq);
- if (confqs)
- dpa_setup_conf_queues(priv, confqs);
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
- if (recyclefqs)
- dpa_setup_recycle_queues(priv, recyclefqs, port);
-#endif
-
- }
-}
-
-static int dpa_netdev_init(struct device_node *dpa_node,
- struct net_device *net_dev)
-{
- int err;
- const uint8_t *mac_addr;
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct device *dev = net_dev->dev.parent;
-
- net_dev->hw_features |= DPA_NETIF_FEATURES;
-
- if (!priv->mac_dev) {
- /* Get the MAC address */
- mac_addr = of_get_mac_address(dpa_node);
- if (mac_addr == NULL) {
- if (netif_msg_probe(priv))
- dev_err(dev, "No MAC address found!\n");
- return -EINVAL;
- }
- } else {
- net_dev->mem_start = priv->mac_dev->res->start;
- net_dev->mem_end = priv->mac_dev->res->end;
-
- mac_addr = priv->mac_dev->addr;
- net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_LLTX);
-
- /*
- * Advertise S/G and HIGHDMA support for MAC-ful,
- * private interfaces
- */
- if (!priv->shared) {
-#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
- net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
- /* Recent kernels enable GSO automatically, if
- * we declare NETIF_F_SG. For conformity, we'll
- * still declare GSO explicitly.
- */
- net_dev->features |= NETIF_F_GSO;
-#endif
- /* Advertise GRO support */
- net_dev->features |= NETIF_F_GRO;
- }
- }
-
- net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
-
- net_dev->features |= net_dev->hw_features;
- net_dev->vlan_features = net_dev->features;
-
- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
-
- SET_ETHTOOL_OPS(net_dev, &dpa_ethtool_ops);
-
- net_dev->needed_headroom = priv->tx_headroom;
- net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
-
- err = register_netdev(net_dev);
- if (err < 0) {
- dev_err(dev, "register_netdev() = %d\n", err);
- return err;
- }
-
-#ifdef CONFIG_FSL_DPAA_ETH_DEBUGFS
- /* create debugfs entry for this net_device */
- err = dpa_netdev_debugfs_create(net_dev);
- if (err) {
- unregister_netdev(net_dev);
- return err;
- }
-#endif /* CONFIG_FSL_DPAA_ETH_DEBUGFS */
-
- return 0;
-}
-
-static int dpa_shared_netdev_init(struct device_node *dpa_node,
- struct net_device *net_dev)
-{
- net_dev->netdev_ops = &dpa_shared_ops;
-
- return dpa_netdev_init(dpa_node, net_dev);
-}
-
static int dpa_private_netdev_init(struct device_node *dpa_node,
struct net_device *net_dev)
{
int i;
struct dpa_priv_s *priv = netdev_priv(net_dev);
struct dpa_percpu_priv_s *percpu_priv;
+ const uint8_t *mac_addr;
/*
* Although we access another CPU's private data here
@@ -3885,341 +597,227 @@ static int dpa_private_netdev_init(struct device_node *dpa_node,
net_dev->netdev_ops = &dpa_private_ops;
- return dpa_netdev_init(dpa_node, net_dev);
-}
+ mac_addr = priv->mac_dev->addr;
-int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
- uint8_t alignment, uint32_t *base_fqid)
-{
- dev_crit(dev, "callback not implemented!\n");
- BUG();
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
- return 0;
-}
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX);
-int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
-{
-
- dev_crit(dev, "callback not implemented!\n");
- BUG();
+#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
+ /* Advertise S/G and HIGHDMA support for private interfaces */
+ net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
+	/* Recent kernels enable GSO automatically if
+ * we declare NETIF_F_SG. For conformity, we'll
+ * still declare GSO explicitly.
+ */
+ net_dev->features |= NETIF_F_GSO;
+#endif
+ /* Advertise GRO support */
+ net_dev->features |= NETIF_F_GRO;
- return 0;
+ return dpa_netdev_init(dpa_node, net_dev, mac_addr, tx_timeout);
}
-static int dpaa_eth_add_channel(void *__arg)
+static struct dpa_bp * __cold
+dpa_priv_bp_probe(struct device *dev)
{
- const cpumask_t *cpus = qman_affine_cpus();
- u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u32)(unsigned long)__arg);
- int cpu;
+ struct dpa_bp *dpa_bp;
- for_each_cpu(cpu, cpus) {
- set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
- qman_static_dequeue_add(pool);
+ dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
+ if (unlikely(dpa_bp == NULL)) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return ERR_PTR(-ENOMEM);
}
- return 0;
+
+ dpa_bp->percpu_count = alloc_percpu(*dpa_bp->percpu_count);
+ dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
+ dpa_bp->drain_cb = dpa_bp_drain;
+
+#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
+ dpa_bp->seed_cb = dpa_bp_priv_seed;
+#endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
+
+ return dpa_bp;
}
-static int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
+static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+ size_t count)
{
- struct qm_mcc_initcgr initcgr;
- u32 cs_th;
- int err;
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ int i;
- err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
- if (err < 0) {
- pr_err("Error %d allocating CGR ID\n", err);
- goto out_error;
- }
- priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
+ if (netif_msg_probe(priv))
+ dev_dbg(net_dev->dev.parent,
+ "Using private BM buffer pools\n");
- /* Enable Congestion State Change Notifications and CS taildrop */
- initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
- initcgr.cgr.cscn_en = QM_CGR_EN;
- /*
- * Set different thresholds based on the MAC speed.
- * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
- * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- * In such cases, we ought to reconfigure the threshold, too.
- */
- if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
- cs_th = DPA_CS_THRESHOLD_10G;
- else
- cs_th = DPA_CS_THRESHOLD_1G;
- qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+ priv->bp_count = count;
- initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
- initcgr.cgr.cstd_en = QM_CGR_EN;
+ for (i = 0; i < count; i++) {
+ int err;
+ err = dpa_bp_alloc(&dpa_bp[i]);
+ if (err < 0) {
+ dpa_bp_free(priv, dpa_bp);
+ priv->dpa_bp = NULL;
+ return err;
+ }
- err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
- &initcgr);
- if (err < 0) {
- pr_err("Error %d creating CGR with ID %d\n", err,
- priv->cgr_data.cgr.cgrid);
- qman_release_cgrid(priv->cgr_data.cgr.cgrid);
- goto out_error;
+ priv->dpa_bp = &dpa_bp[i];
}
- pr_debug("Created CGR %d for netdev with hwaddr %pM on "
- "QMan channel %d\n", priv->cgr_data.cgr.cgrid,
- priv->mac_dev->addr, priv->cgr_data.cgr.chan);
-out_error:
- return err;
+ dpa_priv_common_bpid = priv->dpa_bp->bpid;
+ return 0;
}
static const struct of_device_id dpa_match[];
+
static int
-dpaa_eth_probe(struct platform_device *_of_dev)
+dpaa_eth_priv_probe(struct platform_device *_of_dev)
{
int err = 0, i;
struct device *dev;
struct device_node *dpa_node;
struct dpa_bp *dpa_bp;
struct dpa_fq *dpa_fq, *tmp;
- struct list_head rxfqlist;
- struct list_head txfqlist;
- size_t count;
+ size_t count = 1;
struct net_device *net_dev = NULL;
struct dpa_priv_s *priv = NULL;
struct dpa_percpu_priv_s *percpu_priv;
- struct dpa_fq *rxdefault = NULL;
- struct dpa_fq *txdefault = NULL;
- struct dpa_fq *rxerror = NULL;
- struct dpa_fq *txerror = NULL;
- struct dpa_fq *rxextra = NULL;
- struct dpa_fq *txfqs = NULL;
- struct dpa_fq *txconf = NULL;
- struct dpa_fq *txrecycle = NULL;
- struct fm_port *rxport = NULL;
- struct fm_port *txport = NULL;
+ struct fm_port_fqs port_fqs;
struct dpa_buffer_layout_s *buf_layout = NULL;
- bool is_shared = false;
struct mac_device *mac_dev;
- int proxy_enet;
- const struct of_device_id *match;
+ struct task_struct *kth;
dev = &_of_dev->dev;
dpa_node = dev->of_node;
- match = of_match_device(dpa_match, dev);
- if (!match)
- return -EINVAL;
-
if (!of_device_is_available(dpa_node))
return -ENODEV;
- /*
- * If it's not an fsl,dpa-ethernet node, we just serve as a proxy
- * initializer driver, and don't do any linux device setup
+ /* Get the buffer pools assigned to this interface;
+	 * the default pool probing code runs only once
*/
- proxy_enet = strcmp(match->compatible, "fsl,dpa-ethernet");
+ dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
+ dpa_priv_bp_probe(dev);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
/*
* Allocate this early, so we can store relevant information in
- * the private area
+ * the private area (needed by 1588 code in dpa_mac_probe)
*/
- if (!proxy_enet) {
- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
- if (!net_dev) {
- dev_err(dev, "alloc_etherdev_mq() failed\n");
- return -ENOMEM;
- }
-
- /* Do this here, so we can be verbose early */
- SET_NETDEV_DEV(net_dev, dev);
- dev_set_drvdata(dev, net_dev);
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
- priv = netdev_priv(net_dev);
- priv->net_dev = net_dev;
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
- priv->msg_enable = netif_msg_init(debug, -1);
- }
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ strcpy(priv->if_type, "private");
- /* Get the buffer pools assigned to this interface */
- dpa_bp = dpa_bp_probe(_of_dev, &count);
- if (IS_ERR(dpa_bp)) {
- err = PTR_ERR(dpa_bp);
- goto bp_probe_failed;
- }
+ priv->msg_enable = netif_msg_init(debug, -1);
mac_dev = dpa_mac_probe(_of_dev);
- if (IS_ERR(mac_dev)) {
+ if (IS_ERR(mac_dev) || !mac_dev) {
err = PTR_ERR(mac_dev);
goto mac_probe_failed;
- } else if (mac_dev) {
- rxport = mac_dev->port_dev[RX];
- txport = mac_dev->port_dev[TX];
-
- /* We have physical ports, so we need to establish
- * the buffer layout.
- */
- buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
- GFP_KERNEL);
- if (!buf_layout) {
- dev_err(dev, "devm_kzalloc() failed\n");
- goto alloc_failed;
- }
- dpa_set_buffer_layout(priv, rxport, &buf_layout[RX], RX);
- dpa_set_buffer_layout(priv, txport, &buf_layout[TX], TX);
}
- if (!dpa_bp->kernel_pool) {
- is_shared = true;
- } else {
- /* For private ports, need to compute the size of the default
- * buffer pool, based on FMan port buffer layout;also update
- * the maximum buffer size for private ports if necessary
- */
- dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
- if (dpa_bp->size > default_buf_size)
- default_buf_size = dpa_bp->size;
+ /* We have physical ports, so we need to establish
+ * the buffer layout.
+ */
+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
+ GFP_KERNEL);
+ if (!buf_layout) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ goto alloc_failed;
}
+ dpa_set_buffers_layout(mac_dev, buf_layout);
- INIT_LIST_HEAD(&rxfqlist);
- INIT_LIST_HEAD(&txfqlist);
-
- if (rxport)
- err = dpa_fq_probe(_of_dev, &rxfqlist, &rxdefault, &rxerror,
- &rxextra, NULL, NULL, RX);
- else
- err = dpa_fq_probe(_of_dev, &rxfqlist, NULL, NULL,
- &rxextra, NULL, NULL, RX);
-
- if (err < 0)
- goto rx_fq_probe_failed;
-
- if (txport)
-#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
- err = dpa_fq_probe(_of_dev, &txfqlist, &txdefault, &txerror,
- &txfqs, (is_shared ? NULL : &txconf),
- (is_shared ? NULL : &txrecycle), TX);
-#else
- err = dpa_fq_probe(_of_dev, &txfqlist, &txdefault, &txerror,
- &txfqs, (is_shared ? NULL : &txconf), NULL, TX);
-#endif
- else
- err = dpa_fq_probe(_of_dev, &txfqlist, NULL, NULL, &txfqs,
- NULL, NULL, TX);
-
- if (err < 0)
- goto tx_fq_probe_failed;
-
- /*
- * Now we have all of the configuration information.
- * We support a number of configurations:
- * 1) Private interface - An optimized linux ethernet driver with
- * a real network connection.
- * 2) Shared interface - A device intended for virtual connections
- * or for a real interface that is shared between partitions
- * 3) Proxy initializer - Just configures the MAC on behalf of
- * another partition
+ /* For private ports, need to compute the size of the default
+	 * buffer pool, based on FMan port buffer layout; also update
+ * the maximum buffer size for private ports if necessary
*/
+ dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
+#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
+ dpa_bp_default_buf_size_update(dpa_bp->size);
+#endif
- /* bp init */
- if (net_dev) {
- struct task_struct *kth;
-
- err = dpa_bp_create(net_dev, dpa_bp, count);
-
- if (err < 0)
- goto bp_create_failed;
-
- priv->mac_dev = mac_dev;
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
- priv->channel = dpa_get_channel(dev, dpa_node);
+ memset(&port_fqs, 0, sizeof(port_fqs));
- if (priv->channel < 0) {
- err = priv->channel;
- goto get_channel_failed;
- }
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
+ if (!err)
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
+ &port_fqs, true, TX);
- /* Start a thread that will walk the cpus with affine portals
- * and add this pool channel to each's dequeue mask. */
- kth = kthread_run(dpaa_eth_add_channel,
- (void *)(unsigned long)priv->channel,
- "dpaa_%p:%d", net_dev, priv->channel);
- if (!kth) {
- err = -ENOMEM;
- goto add_channel_failed;
- }
+ if (err < 0)
+ goto fq_probe_failed;
- dpa_rx_fq_init(priv, &rxfqlist, rxdefault, rxerror, rxextra);
- dpa_tx_fq_init(priv, &txfqlist, txdefault, txerror, txfqs,
- txconf, txrecycle, txport);
+ /* bp init */
- /*
- * Create a congestion group for this netdev, with
- * dynamically-allocated CGR ID.
- * Must be executed after probing the MAC, but before
- * assigning the egress FQs to the CGRs.
- * Don't create a congestion group for MAC-less interfaces.
- */
- if (priv->mac_dev) {
- err = dpaa_eth_cgr_init(priv);
- if (err < 0) {
- dev_err(dev, "Error initializing CGR\n");
- goto cgr_init_failed;
- }
- }
+ err = dpa_priv_bp_create(net_dev, dpa_bp, count);
- /* Add the FQs to the interface, and make them active */
- INIT_LIST_HEAD(&priv->dpa_fq_list);
+ if (err < 0)
+ goto bp_create_failed;
- list_for_each_entry_safe(dpa_fq, tmp, &rxfqlist, list) {
- err = _dpa_fq_alloc(&priv->dpa_fq_list, dpa_fq);
- if (err < 0)
- goto fq_alloc_failed;
- }
+ priv->mac_dev = mac_dev;
- list_for_each_entry_safe(dpa_fq, tmp, &txfqlist, list) {
- err = _dpa_fq_alloc(&priv->dpa_fq_list, dpa_fq);
- if (err < 0)
- goto fq_alloc_failed;
- }
+ priv->channel = dpa_get_channel(dev, dpa_node);
- if (mac_dev) {
- priv->buf_layout = buf_layout;
- priv->tx_headroom =
- dpa_get_headroom(&priv->buf_layout[TX]);
- } else {
- priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
- }
+ if (priv->channel < 0) {
+ err = priv->channel;
+ goto get_channel_failed;
}
- /* All real interfaces need their ports initialized */
- if (mac_dev) {
- struct fm_port_pcd_param rx_port_pcd_param;
-
- dpaa_eth_init_tx_port(txport, txerror, txdefault,
- &buf_layout[TX]);
- dpaa_eth_init_rx_port(rxport, dpa_bp, count, rxerror,
- rxdefault, &buf_layout[RX]);
-
- rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
- rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
- rx_port_pcd_param.dev = dev;
- fm_port_pcd_bind(rxport, &rx_port_pcd_param);
+ /* Start a thread that will walk the cpus with affine portals
+	 * and add this pool channel to each CPU's dequeue mask.
+ */
+ kth = kthread_run(dpaa_eth_add_channel,
+ (void *)(unsigned long)priv->channel,
+ "dpaa_%p:%d", net_dev, priv->channel);
+	if (IS_ERR(kth)) {
+ err = -ENOMEM;
+ goto add_channel_failed;
}
+ dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
+
/*
- * Proxy interfaces need to be started, and the allocated
- * memory freed
+ * Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+ * Must be executed after probing the MAC, but before
+ * assigning the egress FQs to the CGRs.
*/
- if (!net_dev) {
- devm_kfree(&_of_dev->dev, dpa_bp);
- devm_kfree(&_of_dev->dev, rxdefault);
- devm_kfree(&_of_dev->dev, rxerror);
- devm_kfree(&_of_dev->dev, txdefault);
- devm_kfree(&_of_dev->dev, txerror);
-
- if (mac_dev)
- for_each_port_device(i, mac_dev->port_dev)
- fm_port_enable(mac_dev->port_dev[i]);
+ err = dpaa_eth_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing CGR\n");
+ goto cgr_init_failed;
+ }
- return 0;
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
+ err = dpa_fq_init(dpa_fq, false);
+ if (err < 0)
+ goto fq_alloc_failed;
}
- /* Now we need to initialize either a private or shared interface */
+ priv->buf_layout = buf_layout;
+ priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
+ priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
+
+ /* All real interfaces need their ports initialized */
+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
+ buf_layout, dev);
+
priv->percpu_priv = alloc_percpu(*priv->percpu_priv);
if (priv->percpu_priv == NULL) {
@@ -4230,25 +828,21 @@ dpaa_eth_probe(struct platform_device *_of_dev)
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
memset(percpu_priv, 0, sizeof(*percpu_priv));
+ percpu_priv->dpa_bp = priv->dpa_bp;
}
- if (priv->shared)
- err = dpa_shared_netdev_init(dpa_node, net_dev);
- else
- err = dpa_private_netdev_init(dpa_node, net_dev);
+ err = dpa_private_netdev_init(dpa_node, net_dev);
if (err < 0)
goto netdev_init_failed;
dpaa_eth_sysfs_init(&net_dev->dev);
+ printk(KERN_INFO "fsl_dpa: Probed interface %s\n", net_dev->name);
+
#ifdef CONFIG_FSL_DPAA_ETH_UNIT_TESTS
/* The unit test is designed to test private interfaces */
- if (!priv->shared && !tx_unit_test_ran) {
- err = dpa_tx_unit_test(net_dev);
-
- WARN_ON(err);
- }
+ dpa_unit_tests(net_dev);
#endif
return 0;
@@ -4269,11 +863,10 @@ get_channel_failed:
if (net_dev)
dpa_bp_free(priv, priv->dpa_bp);
bp_create_failed:
-tx_fq_probe_failed:
-rx_fq_probe_failed:
+fq_probe_failed:
+ devm_kfree(dev, buf_layout);
alloc_failed:
mac_probe_failed:
-bp_probe_failed:
dev_set_drvdata(dev, NULL);
if (net_dev)
free_netdev(net_dev);
@@ -4285,57 +878,17 @@ static const struct of_device_id dpa_match[] = {
{
.compatible = "fsl,dpa-ethernet"
},
- {
- .compatible = "fsl,dpa-ethernet-init"
- },
{}
};
MODULE_DEVICE_TABLE(of, dpa_match);
-static int __cold dpa_remove(struct platform_device *of_dev)
-{
- int err;
- struct device *dev;
- struct net_device *net_dev;
- struct dpa_priv_s *priv;
-
- dev = &of_dev->dev;
- net_dev = dev_get_drvdata(dev);
- priv = netdev_priv(net_dev);
-
- dpaa_eth_sysfs_remove(dev);
-
- dev_set_drvdata(dev, NULL);
- unregister_netdev(net_dev);
-
- err = dpa_fq_free(dev, &priv->dpa_fq_list);
-
- free_percpu(priv->percpu_priv);
-
- dpa_bp_free(priv, priv->dpa_bp);
-
-#ifdef CONFIG_FSL_DPAA_ETH_DEBUGFS
- /* remove debugfs entry for this net_device */
- dpa_netdev_debugfs_remove(net_dev);
-#endif /* CONFIG_FSL_DPAA_ETH_DEBUGFS */
-
-#ifdef CONFIG_FSL_DPAA_1588
- if (priv->tsu && priv->tsu->valid)
- dpa_ptp_cleanup(priv);
-#endif
-
- free_netdev(net_dev);
-
- return err;
-}
-
static struct platform_driver dpa_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = dpa_match,
.owner = THIS_MODULE,
},
- .probe = dpaa_eth_probe,
+ .probe = dpaa_eth_priv_probe,
.remove = dpa_remove
};
@@ -4343,7 +896,7 @@ static int __init __cold dpa_load(void)
{
int _errno;
- pr_info(KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n");
+ printk(KERN_INFO KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n");
/* initialise dpaa_eth mirror values */
dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h
index 939bcb1..4f743c3 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h
@@ -45,8 +45,6 @@
#include <linux/fsl_qman.h> /* struct qman_fq */
-#include "dpaa_eth-common.h"
-
#include "lnxwrp_fsl_fman.h"
#include "fm_ext.h"
#include "fm_port_ext.h" /* FM_PORT_FRM_ERR_* */
@@ -57,11 +55,10 @@
#endif /* CONFIG_FSL_DPAA_ETH_DEBUGFS */
#include "dpaa_eth_trace.h"
-#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-#define dpa_get_rx_extra_headroom() fm_get_rx_extra_headroom()
-#else
+extern int dpa_rx_extra_headroom;
+extern int dpa_max_frm;
+
#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
-#endif
#define dpa_get_max_frm() dpa_max_frm
/*
@@ -72,35 +69,85 @@
#define dpa_get_max_mtu() \
(dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
+#define __hot
+
+/* Simple enum of FQ types - used for array indexing */
+enum port_type {RX, TX};
+
+/* TODO: This structure should be renamed & moved to the FMD wrapper */
+struct dpa_buffer_layout_s {
+ uint16_t priv_data_size;
+ bool parse_results;
+ bool time_stamp;
+ bool hash_results;
+ uint8_t manip_extra_space;
+ uint16_t data_align;
+};
+
+#define DPA_TX_PRIV_DATA_SIZE 16
+#define DPA_PARSE_RESULTS_SIZE sizeof(t_FmPrsResult)
+#define DPA_TIME_STAMP_SIZE 8
+#define DPA_HASH_RESULTS_SIZE 8
+#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
+ dpa_get_rx_extra_headroom())
+
+#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+
+
+#define FM_FD_STAT_ERRORS \
+ (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
+ FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
+ FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
+ FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
+ FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-/* We may want this value configurable. Must be <= PAGE_SIZE minus a reserved
- * area where skb shared info can reside
- * A lower value may help with recycling rates, at least on forwarding.
+/* Can't be paranoid enough: we want this cacheline-aligned.
+ * netdev_alloc_frag() will use it as is, so we have to do the
+ * alignment job here.
*/
-#define DPA_SKB_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-#define dpa_bp_size(buffer_layout) (PAGE_SIZE - DPA_SKB_TAILROOM)
-#else
+#define DPA_BP_RAW_SIZE ((PAGE_SIZE >> 1) & ~(SMP_CACHE_BYTES - 1))
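+/* e.g. with 4 KiB pages and 64-byte cache lines this works out to 2048 bytes */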
+/* This is what FMan is ever allowed to use.
+ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
+ * half-page-aligned buffers (can we?), so we reserve some more space
+ * for start-of-buffer alignment.
+ */
+#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
+ SMP_CACHE_BYTES)
+/* We must ensure that skb_shinfo is always cacheline-aligned. */
+#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
+#else /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
/* Default buffer size is based on L2 MAX_FRM value, minus the FCS which
* is stripped down by hardware.
*/
#define dpa_bp_size(buffer_layout) \
dpa_get_buffer_size(buffer_layout, (dpa_get_max_frm() - ETH_FCS_LEN))
-#endif
+#endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
-
-#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
- dpa_get_rx_extra_headroom())
-/* number of Tx queues to FMan */
-#define DPAA_ETH_TX_QUEUES NR_CPUS
-#define DPAA_ETH_RX_QUEUES 128
+/*
+ * Maximum size of a buffer for which recycling is allowed.
+ * We need an upper limit such that forwarded skbs that get reallocated on Tx
+ * aren't allowed to grow unboundedly. On the other hand, we need to make sure
+ * that skbs allocated by us will not fail to be recycled due to their size.
+ *
+ * For a requested size, the kernel allocator provides the next power of two
+ * sized block, which the stack will use as is, regardless of the actual size
+ * it required; since we must accommodate at most 9.6K buffers (L2 maximum
+ * supported frame size), set the recycling upper limit to 16K.
+ */
+#define DPA_RECYCLE_MAX_SIZE 16384
#if defined(CONFIG_FSL_DPAA_FMAN_UNIT_TESTS)
/*TODO: temporary for fman pcd testing */
#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
#endif
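+/* Derive a base FQID for PCD frame queues from a device MMIO address
+ * (low 21 bits of the address, in 64-byte units)
+ */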
+#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
+ (((device_addr) & 0x1fffff) >> 6)
+
/* return codes for the dpaa-eth hooks */
enum dpaa_eth_hook_result {
/* fd/skb was retained by the hook.
@@ -165,8 +212,6 @@ struct dpaa_eth_hooks_s {
void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
-#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-
/*
* Largest value that the FQD's OAL field can hold.
* This is DPAA-1.x specific.
@@ -174,6 +219,12 @@ void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
*/
#define FSL_QMAN_MAX_OAL 127
+/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
+#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
+
+/* Default alignment for start of data in an Rx FD */
+#define DPA_FD_DATA_ALIGNMENT 16
+
/*
* Values for the L3R field of the FM Parse Results
*/
@@ -207,12 +258,6 @@ void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
*/
#define FM_FD_STAT_L4CV 0x00000004
-#define FM_FD_STAT_ERRORS \
- (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
- FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
- FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
- FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
- FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
@@ -238,11 +283,29 @@ void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
#define fm_l4_frame_is_tcp(parse_result_ptr) \
((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
+/* number of Tx queues to FMan */
+#define DPAA_ETH_TX_QUEUES NR_CPUS
+#define DPAA_ETH_RX_QUEUES 128
+
struct pcd_range {
uint32_t base;
uint32_t count;
};
+/* More detailed FQ types - used for fine-grained WQ assignments */
+enum dpa_fq_type {
+ FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
+ FQ_TYPE_RX_ERROR, /* Rx Error FQs */
+ FQ_TYPE_RX_PCD, /* User-defined PCDs */
+ FQ_TYPE_TX, /* "Real" Tx FQs */
+ FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
+ FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
+ FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+	FQ_TYPE_TX_RECYCLE, /* Tx FQs for recyclable frames only */
+#endif
+};
+
struct dpa_fq {
struct qman_fq fq_base;
struct list_head list;
@@ -255,6 +318,19 @@ struct dpa_fq {
enum dpa_fq_type fq_type;
};
+typedef struct dpa_fq_cbs_t {
+ struct qman_fq rx_defq;
+ struct qman_fq tx_defq;
+ struct qman_fq rx_errq;
+ struct qman_fq tx_errq;
+ struct qman_fq egress_ern;
+} dpa_fq_cbs_t;
+
+struct fqid_cell {
+ uint32_t start;
+ uint32_t count;
+};
+
struct dpa_bp {
struct bman_pool *pool;
uint8_t bpid;
@@ -285,10 +361,13 @@ struct dpa_bp {
* the buffers
*/
void *vaddr;
- int kernel_pool;
	/* current number of buffers in the bpool allotted to this CPU */
int *percpu_count;
atomic_t refs;
+ /* some bpools need to be seeded before use by this cb */
+ int (*seed_cb)(struct dpa_bp *);
+ /* some bpools need to be emptied before freeing by this cb */
+ void (*drain_cb)(struct dpa_bp *);
};
struct dpa_rx_errors {
@@ -313,12 +392,6 @@ struct dpa_ern_cnt {
struct dpa_percpu_priv_s {
struct net_device *net_dev;
- /*
- * Pointer to the percpu_count of the shared buffer pool
- * used for the private ports; this assumes there is only
- * one bpool used
- */
- int *dpa_bp_count;
struct dpa_bp *dpa_bp;
struct napi_struct napi;
u64 in_interrupt;
@@ -339,7 +412,6 @@ struct dpa_priv_s {
* (even though it can be computed based on the fields of buf_layout)
*/
uint16_t tx_headroom;
- int shared;
struct net_device *net_dev;
struct mac_device *mac_dev;
struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
@@ -391,37 +463,36 @@ struct dpa_priv_s {
#endif /* CONFIG_FSL_DPAA_TS */
struct dpa_buffer_layout_s *buf_layout;
- u8 macless_idx;
+ uint16_t rx_headroom;
+ char if_type[30];
};
-extern const struct ethtool_ops dpa_ethtool_ops;
-
-void __attribute__((nonnull))
-dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
-
-void dpa_make_private_pool(struct dpa_bp *dpa_bp);
-
-struct dpa_bp *dpa_bpid2pool(int bpid);
+struct fm_port_fqs {
+ struct dpa_fq *tx_defq;
+ struct dpa_fq *tx_errq;
+ struct dpa_fq *rx_defq;
+ struct dpa_fq *rx_errq;
+};
+/* functions with different implementation for SG and non-SG: */
+void dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
+int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv);
void __hot _dpa_rx(struct net_device *net_dev,
const struct dpa_priv_s *priv,
struct dpa_percpu_priv_s *percpu_priv,
const struct qm_fd *fd,
u32 fqid);
-
int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
-
struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
const struct qm_fd *fd);
-
void __hot _dpa_process_parse_results(const t_FmPrsResult *parse_results,
const struct qm_fd *fd,
struct sk_buff *skb,
int *use_gro);
#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-void dpa_bp_add_8_pages(const struct dpa_bp *dpa_bp, int cpu_id);
-int _dpa_bp_add_8_pages(const struct dpa_bp *dpa_bp);
+void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu_id);
+int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp);
#endif
/*
@@ -450,6 +521,7 @@ static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv)
int ret = qman_irqsource_remove(QM_PIRQ_DQRI);
if (likely(!ret)) {
napi_schedule(&percpu_priv->napi);
+ percpu_priv->in_interrupt++;
return 1;
}
}
@@ -507,7 +579,7 @@ static inline uint16_t dpa_get_buffer_size(struct dpa_buffer_layout_s *bl,
return dpa_get_headroom(bl) + data_size;
}
-void fm_mac_dump_regs(struct mac_device *mac_dev);
+void fm_mac_dump_regs(struct fm_mac_dev *fm_mac_dev);
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
@@ -590,6 +662,7 @@ static inline void _dpa_assign_wq(struct dpa_fq *fq)
{
switch (fq->fq_type) {
case FQ_TYPE_TX_CONFIRM:
+ case FQ_TYPE_TX_CONF_MQ:
fq->wq = 1;
break;
case FQ_TYPE_RX_DEFAULT:
@@ -624,14 +697,25 @@ static inline void _dpa_assign_wq(struct dpa_fq *fq)
#define dpa_get_queue_mapping(skb) \
skb_get_queue_mapping(skb)
#endif
-#if defined(CONFIG_FSL_DPAA_1588) || defined(CONFIG_FSL_DPAA_TS)
-u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
- enum port_type rx_tx, const void *data);
+
+#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
+void dpa_bp_default_buf_size_update(uint32_t size);
+uint32_t dpa_bp_default_buf_size_get(void);
+void dpa_bp_priv_non_sg_seed(struct dpa_bp *dpa_bp);
+
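+/* In the non-SG case each buffer stores a back-pointer to its skb at the
+ * start of the buffer; recover that skb and free it.
+ */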
+static inline void _dpa_bp_free_buf(void *addr)
+{
+ struct sk_buff **skbh = addr;
+ struct sk_buff *skb;
+
+ skb = *skbh;
+ dev_kfree_skb_any(skb);
+}
+#else
+static inline void _dpa_bp_free_buf(void *addr)
+{
+ put_page(virt_to_head_page(addr));
+}
#endif
-#ifdef CONFIG_FSL_DPAA_TS
-/* Updates the skb shared hw timestamp from the hardware timestamp */
-int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
- struct skb_shared_hwtstamps *shhwtstamps, const void *data);
-#endif /* CONFIG_FSL_DPAA_TS */
#endif /* __DPA_H */
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c
new file mode 100644
index 0000000..013ff0f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c
@@ -0,0 +1,1770 @@
+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/highmem.h>
+#include <linux/sort.h>
+#include <linux/fsl_qman.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_1588.h"
+
+
+/* DPAA platforms benefit from hardware-assisted queue management */
+#ifdef CONFIG_AS_FASTPATH
+#define DPA_NETIF_FEATURES (NETIF_F_HW_QDISC | NETIF_F_HW_ACCEL_MQ)
+#else
+#define DPA_NETIF_FEATURES NETIF_F_HW_ACCEL_MQ
+#endif
+
+/* Size in bytes of the FQ taildrop threshold */
+#define DPA_FQ_TD 0x200000
+
+static struct dpa_bp *dpa_bp_array[64];
+
+static const struct fqid_cell tx_confirm_fqids[] = {
+ {0, DPAA_ETH_TX_QUEUES}
+};
+
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+static const struct fqid_cell tx_recycle_fqids[] = {
+ {0, DPAA_ETH_TX_QUEUES}
+};
+#endif
+
+static const struct fqid_cell default_fqids[][3] = {
+ [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
+ [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
+};
+
+static const char fsl_qman_frame_queues[][25] = {
+ [RX] = "fsl,qman-frame-queues-rx",
+ [TX] = "fsl,qman-frame-queues-tx"
+};
+
+extern const struct ethtool_ops dpa_ethtool_ops;
+
+int dpa_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev,
+ const uint8_t *mac_addr,
+ uint16_t tx_timeout)
+{
+ int err;
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+
+ net_dev->hw_features |= DPA_NETIF_FEATURES;
+
+ net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+ net_dev->features |= net_dev->hw_features;
+ net_dev->vlan_features = net_dev->features;
+
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+
+ SET_ETHTOOL_OPS(net_dev, &dpa_ethtool_ops);
+
+ net_dev->needed_headroom = priv->tx_headroom;
+ net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() = %d\n", err);
+ return err;
+ }
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUGFS
+ /* create debugfs entry for this net_device */
+ err = dpa_netdev_debugfs_create(net_dev);
+ if (err) {
+ unregister_netdev(net_dev);
+ return err;
+ }
+#endif /* CONFIG_FSL_DPAA_ETH_DEBUGFS */
+
+ return 0;
+}
+
+int __cold dpa_start(struct net_device *net_dev)
+{
+ int err, i;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ err = mac_dev->init_phy(net_dev);
+ if (err < 0) {
+ if (netif_msg_ifup(priv))
+ netdev_err(net_dev, "init_phy() = %d\n", err);
+ return err;
+ }
+
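+	/* enable the FMan Rx/Tx ports before starting the MAC */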
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_enable(mac_dev->port_dev[i]);
+
+ err = priv->mac_dev->start(mac_dev);
+ if (err < 0) {
+ if (netif_msg_ifup(priv))
+ netdev_err(net_dev, "mac_dev->start() = %d\n", err);
+ goto mac_start_failed;
+ }
+
+ netif_tx_start_all_queues(net_dev);
+
+ return 0;
+
+mac_start_failed:
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_disable(mac_dev->port_dev[i]);
+
+ return err;
+}
+
+int __cold dpa_stop(struct net_device *net_dev)
+{
+ int _errno, i;
+ struct dpa_priv_s *priv;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ netif_tx_stop_all_queues(net_dev);
+
+ _errno = mac_dev->stop(mac_dev);
+ if (unlikely(_errno < 0))
+ if (netif_msg_ifdown(priv))
+ netdev_err(net_dev, "mac_dev->stop() = %d\n",
+ _errno);
+
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_disable(mac_dev->port_dev[i]);
+
+ if (mac_dev->phy_dev)
+ phy_disconnect(mac_dev->phy_dev);
+ mac_dev->phy_dev = NULL;
+
+ return _errno;
+}
+
+void __cold dpa_timeout(struct net_device *net_dev)
+{
+ const struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ if (netif_msg_timer(priv))
+ netdev_crit(net_dev, "Transmit timeout latency: %u ms\n",
+ jiffies_to_msecs(jiffies - net_dev->trans_start));
+
+ percpu_priv->stats.tx_errors++;
+}
+
+/* net_device */
+
+/**
+ * @param net_dev the device for which statistics are calculated
+ * @param stats the function fills this structure with the device's statistics
+ * @return the address of the structure containing the statistics
+ *
+ * Calculates the statistics for the given device by adding the statistics
+ * collected by each CPU.
+ */
+struct rtnl_link_stats64 * __cold
+dpa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ u64 *cpustats;
+ u64 *netstats = (u64 *)stats;
+ int i, j;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+ cpustats = (u64 *)&percpu_priv->stats;
+
+ for (j = 0; j < numstats; j++)
+ netstats[j] += cpustats[j];
+ }
+
+ return stats;
+}
+
+int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ const int max_mtu = dpa_get_max_mtu();
+ const int min_mtu = dpa_get_min_mtu();
+
+ /* Make sure we don't exceed the Ethernet controller's MAXFRM */
+ if (new_mtu < min_mtu || new_mtu > max_mtu) {
+ netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
+ new_mtu, min_mtu, max_mtu);
+ return -EINVAL;
+ }
+ net_dev->mtu = new_mtu;
+
+ return 0;
+}
+
+/* .ndo_init callback */
+int dpa_ndo_init(struct net_device *net_dev)
+{
+ /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
+ * we choose conservatively and let the user explicitly set a higher
+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
+ * in the same LAN.
+ * If on the other hand fsl_fm_max_frm has been chosen below 1500,
+ * start with the maximum allowed.
+ */
+ int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
+
+ pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
+ net_dev->mtu = init_mtu;
+
+ return 0;
+}
+
+int dpa_set_features(struct net_device *dev, netdev_features_t features)
+{
+ /* Not much to do here for now */
+ dev->features = features;
+ return 0;
+}
+
+netdev_features_t dpa_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t unsupported_features = 0;
+
+ /* In theory we should never be requested to enable features that
+ * we didn't set in netdev->features and netdev->hw_features at probe
+ * time, but double check just to be on the safe side.
+ * We don't support enabling Rx csum through ethtool yet
+ */
+ unsupported_features |= NETIF_F_RXCSUM;
+
+ features &= ~unsupported_features;
+
+ return features;
+}
+
+#if defined(CONFIG_FSL_DPAA_1588) || defined(CONFIG_FSL_DPAA_TS)
+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
+ const void *data)
+{
+ u64 *ts, ns;
+
+ ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
+ data);
+
+ if (!ts || *ts == 0)
+ return 0;
+
+	/* multiply by DPA_PTP_NOMINAL_FREQ_PERIOD_NS for the non-power-of-2 case */
+ ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
+
+ return ns;
+}
+#endif
+#ifdef CONFIG_FSL_DPAA_TS
+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
+ struct skb_shared_hwtstamps *shhwtstamps, const void *data)
+{
+ u64 ns;
+
+ ns = dpa_get_timestamp_ns(priv, rx_tx, data);
+
+ if (ns == 0)
+ return -EINVAL;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+
+ return 0;
+}
+
+static void dpa_ts_tx_enable(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->fm_rtc_enable)
+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
+ if (mac_dev->ptp_enable)
+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
+
+ priv->ts_tx_en = TRUE;
+}
+
+static void dpa_ts_tx_disable(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+
+#if 0
+/* the RTC might be needed by the Rx Ts, cannot disable here
+ * no separate ptp_disable API for Rx/Tx, cannot disable here
+ */
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->fm_rtc_disable)
+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
+
+ if (mac_dev->ptp_disable)
+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
+#endif
+
+ priv->ts_tx_en = FALSE;
+}
+
+static void dpa_ts_rx_enable(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->fm_rtc_enable)
+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
+ if (mac_dev->ptp_enable)
+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
+
+ priv->ts_rx_en = TRUE;
+}
+
+static void dpa_ts_rx_disable(struct net_device *dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(dev);
+
+#if 0
+/* the RTC might be needed by the Tx Ts, cannot disable here
+ * no separate ptp_disable API for Rx/Tx, cannot disable here
+ */
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->fm_rtc_disable)
+ mac_dev->fm_rtc_disable(get_fm_handle(dev));
+
+ if (mac_dev->ptp_disable)
+ mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
+#endif
+
+ priv->ts_rx_en = FALSE;
+}
+
+static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ dpa_ts_tx_disable(dev);
+ break;
+ case HWTSTAMP_TX_ON:
+ dpa_ts_tx_enable(dev);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE)
+ dpa_ts_rx_disable(dev);
+ else {
+ dpa_ts_rx_enable(dev);
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+#endif /* CONFIG_FSL_DPAA_TS */
+
+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+#ifdef CONFIG_FSL_DPAA_1588
+ struct dpa_priv_s *priv = netdev_priv(dev);
+#endif
+ int ret = 0;
+
+/* at least one timestamping feature must be enabled to proceed */
+#if defined(CONFIG_FSL_DPAA_1588) || defined(CONFIG_FSL_DPAA_TS)
+ if (!netif_running(dev))
+#endif
+ return -EINVAL;
+
+#ifdef CONFIG_FSL_DPAA_TS
+ if (cmd == SIOCSHWTSTAMP)
+ return dpa_ts_ioctl(dev, rq, cmd);
+#endif /* CONFIG_FSL_DPAA_TS */
+
+#ifdef CONFIG_FSL_DPAA_1588
+ if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
+ if (priv->tsu && priv->tsu->valid)
+ ret = dpa_ioctl_1588(dev, rq, cmd);
+ else
+ ret = -ENODEV;
+ }
+#endif
+
+ return ret;
+}
+
+int __cold dpa_remove(struct platform_device *of_dev)
+{
+ int err;
+ struct device *dev;
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+
+ dev = &of_dev->dev;
+ net_dev = dev_get_drvdata(dev);
+
+ priv = netdev_priv(net_dev);
+
+ dpaa_eth_sysfs_remove(dev);
+
+ dev_set_drvdata(dev, NULL);
+ unregister_netdev(net_dev);
+
+ err = dpa_fq_free(dev, &priv->dpa_fq_list);
+
+ free_percpu(priv->percpu_priv);
+
+ dpa_bp_free(priv, priv->dpa_bp);
+ if (priv->buf_layout)
+ devm_kfree(dev, priv->buf_layout);
+
+#ifdef CONFIG_FSL_DPAA_ETH_DEBUGFS
+ /* remove debugfs entry for this net_device */
+ dpa_netdev_debugfs_remove(net_dev);
+#endif /* CONFIG_FSL_DPAA_ETH_DEBUGFS */
+
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid)
+ dpa_ptp_cleanup(priv);
+#endif
+
+ free_netdev(net_dev);
+
+ return err;
+}
+
+struct mac_device * __cold __must_check
+__attribute__((nonnull))
+dpa_mac_probe(struct platform_device *_of_dev)
+{
+ struct device *dpa_dev, *dev;
+ struct device_node *mac_node;
+ int lenp;
+ const phandle *phandle_prop;
+ struct platform_device *of_dev;
+ struct mac_device *mac_dev;
+#ifdef CONFIG_FSL_DPAA_1588
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct device_node *timer_node;
+#endif
+
+ phandle_prop = of_get_property(_of_dev->dev.of_node,
+ "fsl,fman-mac", &lenp);
+ if (phandle_prop == NULL)
+ return NULL;
+
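+	/* resolve the fsl,fman-mac phandle to the MAC platform device and
+	 * fetch the mac_device stored in its driver data
+	 */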
+ BUG_ON(lenp != sizeof(phandle));
+
+ dpa_dev = &_of_dev->dev;
+
+ mac_node = of_find_node_by_phandle(*phandle_prop);
+ if (unlikely(mac_node == NULL)) {
+ dev_err(dpa_dev, "of_find_node_by_phandle() failed\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ of_dev = of_find_device_by_node(mac_node);
+ if (unlikely(of_dev == NULL)) {
+ dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
+ mac_node->full_name);
+ of_node_put(mac_node);
+ return ERR_PTR(-EINVAL);
+ }
+ of_node_put(mac_node);
+
+ dev = &of_dev->dev;
+
+ mac_dev = dev_get_drvdata(dev);
+ if (unlikely(mac_dev == NULL)) {
+ dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
+ dev_name(dev));
+ return ERR_PTR(-EINVAL);
+ }
+
+#ifdef CONFIG_FSL_DPAA_1588
+ phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
+ if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
+ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
+ (mac_dev->speed == SPEED_1000)))) {
+ timer_node = of_find_node_by_phandle(*phandle_prop);
+ if (timer_node)
+ net_dev = dev_get_drvdata(dpa_dev);
+ if (timer_node && net_dev) {
+ priv = netdev_priv(net_dev);
+ if (!dpa_ptp_init(priv))
+ dev_info(dev, "%s: ptp 1588 is initialized.\n",
+ mac_node->full_name);
+ }
+ }
+#endif
+
+ return mac_dev;
+}
+
+int dpa_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpa_priv_s *priv;
+ int _errno;
+ struct mac_device *mac_dev;
+
+ priv = netdev_priv(net_dev);
+
+ _errno = eth_mac_addr(net_dev, addr);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev,
+ "eth_mac_addr() = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ mac_dev = priv->mac_dev;
+
+ _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
+ net_dev->dev_addr);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev,
+ "mac_dev->change_addr() = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ return 0;
+}
+
+void dpa_set_rx_mode(struct net_device *net_dev)
+{
+ int _errno;
+ const struct dpa_priv_s *priv;
+
+ priv = netdev_priv(net_dev);
+
+ if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
+ priv->mac_dev->promisc = !priv->mac_dev->promisc;
+ _errno = priv->mac_dev->set_promisc(
+ priv->mac_dev->get_mac_handle(priv->mac_dev),
+ priv->mac_dev->promisc);
+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
+ netdev_err(net_dev,
+ "mac_dev->set_promisc() = %d\n",
+ _errno);
+ }
+
+ _errno = priv->mac_dev->set_multi(net_dev);
+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
+ netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
+}
+
+void dpa_set_buffers_layout(struct mac_device *mac_dev,
+ struct dpa_buffer_layout_s *layout)
+{
+ struct fm_port_params params;
+
+ /* Rx */
+ layout[RX].priv_data_size = DPA_RX_PRIV_DATA_SIZE;
+ layout[RX].parse_results = true;
+ layout[RX].hash_results = true;
+#if defined(CONFIG_FSL_DPAA_1588) || defined(CONFIG_FSL_DPAA_TS)
+ layout[RX].time_stamp = true;
+#endif
+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], &params);
+ layout[RX].manip_extra_space = params.manip_extra_space;
+ /* a value of zero for data alignment means "don't care", so align to
+ * a non-zero value to prevent FMD from using its own default
+ */
+ layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
+
+ /* Tx */
+ layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
+ layout[TX].parse_results = true;
+ layout[TX].hash_results = true;
+#if defined(CONFIG_FSL_DPAA_1588) || defined(CONFIG_FSL_DPAA_TS)
+ layout[TX].time_stamp = true;
+#endif
+ fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], &params);
+ layout[TX].manip_extra_space = params.manip_extra_space;
+	layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
+}
+
+static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
+{
+ return ((struct dpa_bp *)dpa_bp0)->size -
+ ((struct dpa_bp *)dpa_bp1)->size;
+}
+
+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
+dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
+{
+ int i, lenp, na, ns;
+ struct device *dev;
+ struct device_node *dev_node;
+ const phandle *phandle_prop;
+ const uint32_t *bpid;
+ const uint32_t *bpool_cfg;
+ struct dpa_bp *dpa_bp;
+
+ dev = &_of_dev->dev;
+
+	/* Default to a single buffer pool if the property is absent */
+ *count = 1;
+
+ /* Get the buffer pools to be used */
+ phandle_prop = of_get_property(dev->of_node,
+ "fsl,bman-buffer-pools", &lenp);
+
+ if (phandle_prop)
+ *count = lenp / sizeof(phandle);
+ else {
+ dev_err(dev,
+ "missing fsl,bman-buffer-pools device tree entry\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
+ if (unlikely(dpa_bp == NULL)) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev_node = of_find_node_by_path("/");
+ if (unlikely(dev_node == NULL)) {
+ dev_err(dev, "of_find_node_by_path(/) failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ na = of_n_addr_cells(dev_node);
+ ns = of_n_size_cells(dev_node);
+
+ for (i = 0; i < *count && phandle_prop; i++) {
+ of_node_put(dev_node);
+ dev_node = of_find_node_by_phandle(phandle_prop[i]);
+ if (unlikely(dev_node == NULL)) {
+ dev_err(dev, "of_find_node_by_phandle() failed\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
+ dev_err(dev,
+ "!of_device_is_compatible(%s, fsl,bpool)\n",
+ dev_node->full_name);
+ dpa_bp = ERR_PTR(-EINVAL);
+ goto _return_of_node_put;
+ }
+
+ bpid = of_get_property(dev_node, "fsl,bpid", &lenp);
+ if ((bpid == NULL) || (lenp != sizeof(*bpid))) {
+ dev_err(dev, "fsl,bpid property not found.\n");
+ dpa_bp = ERR_PTR(-EINVAL);
+ goto _return_of_node_put;
+ }
+ dpa_bp[i].bpid = *bpid;
+
+ bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
+ &lenp);
+ if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
+ const uint32_t *seed_pool;
+
+ dpa_bp[i].config_count =
+ (int)of_read_number(bpool_cfg, ns);
+ dpa_bp[i].size = of_read_number(bpool_cfg + ns, ns);
+ dpa_bp[i].paddr =
+ of_read_number(bpool_cfg + 2 * ns, na);
+
+ seed_pool = of_get_property(dev_node,
+ "fsl,bpool-ethernet-seeds", &lenp);
+ dpa_bp[i].seed_pool = !!seed_pool;
+
+ } else {
+ dev_err(dev,
+ "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
+ dev_node->full_name);
+ dpa_bp = ERR_PTR(-EINVAL);
+ goto _return_of_node_put;
+ }
+ }
+
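+	/* Present the pools sorted in ascending order of buffer size; the Rx
+	 * port parameters are later filled in from this array in that order.
+	 */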
+ sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
+
+ return dpa_bp;
+
+_return_of_node_put:
+ if (dev_node)
+ of_node_put(dev_node);
+
+ return dpa_bp;
+}
+
+int dpa_bp_shared_port_seed(struct dpa_bp *bp)
+{
+ /* In MAC-less and Shared-MAC scenarios the physical
+ * address of the buffer pool in device tree is set
+ * to 0 to specify that another entity (USDPAA) will
+ * allocate and seed the buffers
+ */
+ if (!bp->paddr)
+ return 0;
+
+ /* allocate memory region for buffers */
+ devm_request_mem_region(bp->dev, bp->paddr,
+ bp->size * bp->config_count, KBUILD_MODNAME);
+ bp->vaddr = devm_ioremap_prot(bp->dev, bp->paddr,
+ bp->size * bp->config_count, 0);
+ if (bp->vaddr == NULL) {
+ pr_err("Could not map memory for pool %d\n", bp->bpid);
+ return -EIO;
+ }
+
+ /* seed pool with buffers from that memory region */
+ if (bp->seed_pool) {
+ int count = bp->target_count;
+ size_t addr = bp->paddr;
+
+ while (count) {
+ struct bm_buffer bufs[8];
+ int num_bufs = 0;
+
+ do {
+ BUG_ON(addr > 0xffffffffffffull);
+ bufs[num_bufs].bpid = bp->bpid;
+ bm_buffer_set64(&bufs[num_bufs++], addr);
+ addr += bp->size;
+
+ } while (--count && (num_bufs < 8));
+
+ while (bman_release(bp->pool, bufs, num_bufs, 0))
+ cpu_relax();
+ }
+ }
+
+ return 0;
+}
+
+int __attribute__((nonnull))
+dpa_bp_alloc(struct dpa_bp *dpa_bp)
+{
+ int err;
+ struct bman_pool_params bp_params;
+ struct platform_device *pdev;
+
+ BUG_ON(dpa_bp->size == 0);
+ BUG_ON(dpa_bp->config_count == 0);
+
+ bp_params.flags = 0;
+
+ /* If the pool is already specified, we only create one per bpid */
+ if (dpa_bpid2pool_use(dpa_bp->bpid))
+ return 0;
+
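+	/* A bpid of zero means no fixed pool id was provided, so ask BMan for
+	 * a dynamically allocated one.
+	 */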
+ if (dpa_bp->bpid == 0)
+ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
+ else
+ bp_params.bpid = dpa_bp->bpid;
+
+ dpa_bp->pool = bman_new_pool(&bp_params);
+ if (unlikely(dpa_bp->pool == NULL)) {
+ pr_err("bman_new_pool() failed\n");
+ return -ENODEV;
+ }
+
+ dpa_bp->bpid = bman_get_params(dpa_bp->pool)->bpid;
+
+ pdev = platform_device_register_simple("dpaa_eth_bpool",
+ dpa_bp->bpid, NULL, 0);
+ if (IS_ERR(pdev)) {
+ err = PTR_ERR(pdev);
+ goto pdev_register_failed;
+ }
+
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ goto pdev_mask_failed;
+
+ dpa_bp->dev = &pdev->dev;
+
+ if (dpa_bp->seed_cb) {
+ err = dpa_bp->seed_cb(dpa_bp);
+ if (err)
+ goto pool_seed_failed;
+ }
+
+ dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
+
+ return 0;
+
+pool_seed_failed:
+pdev_mask_failed:
+ platform_device_unregister(pdev);
+pdev_register_failed:
+ bman_free_pool(dpa_bp->pool);
+
+ return err;
+}
+
+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+ size_t count)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ int i;
+
+ priv->dpa_bp = dpa_bp;
+ priv->bp_count = count;
+
+ for (i = 0; i < count; i++) {
+ int err;
+ err = dpa_bp_alloc(&dpa_bp[i]);
+ if (err < 0) {
+ dpa_bp_free(priv, dpa_bp);
+ priv->dpa_bp = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void dpa_bp_drain(struct dpa_bp *bp)
+{
+ int num;
+
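+	/* Acquire buffers from the pool, up to 8 at a time, unmapping and
+	 * freeing each of them until the pool is drained.
+	 */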
+ do {
+ struct bm_buffer bmb[8];
+ int i;
+
+ num = bman_acquire(bp->pool, bmb, 8, 0);
+
+ for (i = 0; i < num; i++) {
+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
+
+ dma_unmap_single(bp->dev, addr, bp->size,
+ DMA_BIDIRECTIONAL);
+
+ _dpa_bp_free_buf(phys_to_virt(addr));
+ }
+ } while (num == 8);
+}
+
+static void __cold __attribute__((nonnull))
+_dpa_bp_free(struct dpa_bp *dpa_bp)
+{
+ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
+
+ if (!atomic_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->drain_cb)
+ bp->drain_cb(bp);
+
+	dpa_bp_array[bp->bpid] = NULL;
+ bman_free_pool(bp->pool);
+}
+
+void __cold __attribute__((nonnull))
+dpa_bp_free(struct dpa_priv_s *priv, struct dpa_bp *dpa_bp)
+{
+ int i;
+
+ for (i = 0; i < priv->bp_count; i++)
+ _dpa_bp_free(&priv->dpa_bp[i]);
+}
+
+struct dpa_bp *dpa_bpid2pool(int bpid)
+{
+ return dpa_bp_array[bpid];
+}
+
+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
+{
+ dpa_bp_array[bpid] = dpa_bp;
+ atomic_set(&dpa_bp->refs, 1);
+}
+
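+/* Take an additional reference on the pool already mapped for this bpid.
+ * Returns false if no pool has been mapped yet, in which case the caller
+ * must create one.
+ */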
+bool dpa_bpid2pool_use(int bpid)
+{
+ if (dpa_bpid2pool(bpid)) {
+ atomic_inc(&dpa_bp_array[bpid]->refs);
+ return true;
+ }
+
+ return false;
+}
+
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb)
+{
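+	/* Egress FQs are set up one per CPU, so transmit on this CPU's queue */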
+ return smp_processor_id();
+}
+#endif
+
+struct dpa_fq *dpa_fq_alloc(struct device *dev,
+ const struct fqid_cell *fqids,
+ struct list_head *list,
+ enum dpa_fq_type fq_type)
+{
+ int i;
+ struct dpa_fq *dpa_fq;
+
+ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids->count, GFP_KERNEL);
+ if (dpa_fq == NULL)
+ return NULL;
+
+ for (i = 0; i < fqids->count; i++) {
+ dpa_fq[i].fq_type = fq_type;
+ dpa_fq[i].fqid = fqids->start ? fqids->start + i : 0;
+ _dpa_assign_wq(dpa_fq + i);
+ list_add_tail(&dpa_fq[i].list, list);
+ }
+
+ return dpa_fq;
+}
+
+/* Probing of FQs for MACful ports */
+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs,
+ bool alloc_tx_conf_fqs,
+ enum port_type ptype)
+{
+ const struct fqid_cell *fqids;
+ struct dpa_fq *dpa_fq;
+ struct device_node *np = dev->of_node;
+ int num_ranges;
+ int i, lenp;
+
+ if (ptype == TX && alloc_tx_conf_fqs) {
+ if (!dpa_fq_alloc(dev, tx_confirm_fqids, list,
+ FQ_TYPE_TX_CONF_MQ))
+ goto fq_alloc_failed;
+
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+ /* per-core Tx queues for recyclable frames (FManv3 only) */
+ if (!dpa_fq_alloc(dev, tx_recycle_fqids, list,
+ FQ_TYPE_TX_RECYCLE))
+ goto fq_alloc_failed;
+#endif
+ }
+
+ fqids = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
+ if (fqids == NULL) {
+ /* No dts definition, so use the defaults. */
+ fqids = default_fqids[ptype];
+ num_ranges = 3;
+ } else {
+ num_ranges = lenp / sizeof(*fqids);
+ }
+
+ for (i = 0; i < num_ranges; i++) {
+ switch (i) {
+ case 0:
+ /* The first queue is the error queue */
+ if (fqids[i].count != 1)
+ goto invalid_error_queue;
+
+ dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
+ ptype == RX ?
+ FQ_TYPE_RX_ERROR :
+ FQ_TYPE_TX_ERROR);
+ if (dpa_fq == NULL)
+ goto fq_alloc_failed;
+
+ if (ptype == RX)
+ port_fqs->rx_errq = &dpa_fq[0];
+ else
+ port_fqs->tx_errq = &dpa_fq[0];
+ break;
+ case 1:
+ /* the second queue is the default queue */
+ if (fqids[i].count != 1)
+ goto invalid_default_queue;
+
+ dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
+ ptype == RX ?
+ FQ_TYPE_RX_DEFAULT :
+ FQ_TYPE_TX_CONFIRM);
+ if (dpa_fq == NULL)
+ goto fq_alloc_failed;
+
+ if (ptype == RX)
+ port_fqs->rx_defq = &dpa_fq[0];
+ else
+ port_fqs->tx_defq = &dpa_fq[0];
+ break;
+ default:
+ /* all subsequent queues are either RX PCD or Tx */
+ if (!dpa_fq_alloc(dev, &fqids[i], list, ptype == RX ?
+ FQ_TYPE_RX_PCD : FQ_TYPE_TX))
+ goto fq_alloc_failed;
+ break;
+ }
+ }
+
+ return 0;
+
+fq_alloc_failed:
+ dev_err(dev, "dpa_fq_alloc() failed\n");
+ return -ENOMEM;
+
+invalid_default_queue:
+invalid_error_queue:
+ dev_err(dev, "Too many default or error queues\n");
+ return -EINVAL;
+}
+
+static u32 rx_pool_channel;
+static DEFINE_SPINLOCK(rx_pool_channel_init);
+
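+/* Allocate a single QMan pool channel, shared by all interfaces handled by
+ * this driver; the allocation is done only once, on the first call.
+ */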
+int dpa_get_channel(struct device *dev, struct device_node *dpa_node)
+{
+ spin_lock(&rx_pool_channel_init);
+ if (!rx_pool_channel) {
+ u32 pool;
+ int ret = qman_alloc_pool(&pool);
+ if (!ret)
+ rx_pool_channel = pool;
+ }
+ spin_unlock(&rx_pool_channel_init);
+ if (!rx_pool_channel)
+ return -ENOMEM;
+ return rx_pool_channel;
+}
+
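+/* Runs as a kthread: hop onto each CPU that owns an affine QMan portal and
+ * add the pool channel to that portal's static dequeue mask, so frames
+ * enqueued to this channel can be dequeued on any of those CPUs.
+ */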
+int dpaa_eth_add_channel(void *__arg)
+{
+ const cpumask_t *cpus = qman_affine_cpus();
+ u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u32)(unsigned long)__arg);
+ int cpu;
+
+ for_each_cpu(cpu, cpus) {
+ set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
+ qman_static_dequeue_add(pool);
+ }
+ return 0;
+}
+
+/**
+ * Congestion group state change notification callback.
+ * Stops the device's egress queues while they are congested and
+ * wakes them upon exiting congested state.
+ * Also updates some CGR-related stats.
+ */
+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
+ int congested)
+{
+ struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
+ struct dpa_priv_s, cgr_data.cgr);
+
+ if (congested) {
+ priv->cgr_data.congestion_start_jiffies = jiffies;
+ netif_tx_stop_all_queues(priv->net_dev);
+ priv->cgr_data.cgr_congested_count++;
+ } else {
+ priv->cgr_data.congested_jiffies +=
+ (jiffies - priv->cgr_data.congestion_start_jiffies);
+ netif_tx_wake_all_queues(priv->net_dev);
+ }
+}
+
+/* Size in bytes of the Congestion State notification threshold on 10G ports */
+#define DPA_CS_THRESHOLD_10G 0x10000000
+/* Size in bytes of the Congestion State notification threshold on 1G ports.
+ *
+ * The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
+ * (e.g. by sending UDP datagrams at "while(1) speed"),
+ * and the larger the frame size, the more acute the problem.
+ *
+ * So we have to find a balance between these factors:
+ * - avoiding the device staying congested for a prolonged time (at the risk
+ *   of the netdev watchdog firing - see also the tx_timeout module param);
+ * - affecting performance of protocols such as TCP, which otherwise
+ * behave well under the congestion notification mechanism;
+ * - preventing the Tx cores from tightly-looping (as if the congestion
+ * threshold was too low to be effective);
+ * - running out of memory if the CS threshold is set too high.
+ */
+#define DPA_CS_THRESHOLD_1G 0x06000000
+
+int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
+{
+ struct qm_mcc_initcgr initcgr;
+ u32 cs_th;
+ int err;
+
+ err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
+ if (err < 0) {
+ pr_err("Error %d allocating CGR ID\n", err);
+ goto out_error;
+ }
+ priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
+
+ /* Enable Congestion State Change Notifications and CS taildrop */
+ initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+ initcgr.cgr.cscn_en = QM_CGR_EN;
+
+ /* Set different thresholds based on the MAC speed.
+ * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
+ * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
+ * In such cases, we ought to reconfigure the threshold, too.
+ */
+ if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ cs_th = DPA_CS_THRESHOLD_10G;
+ else
+ cs_th = DPA_CS_THRESHOLD_1G;
+ qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+ initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+ initcgr.cgr.cstd_en = QM_CGR_EN;
+
+ err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
+ &initcgr);
+ if (err < 0) {
+ pr_err("Error %d creating CGR with ID %d\n", err,
+ priv->cgr_data.cgr.cgrid);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ goto out_error;
+ }
+ pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
+ priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
+ priv->cgr_data.cgr.chan);
+
+out_error:
+ return err;
+}
+
+static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
+ struct dpa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
+ struct dpa_fq *fq,
+ struct fm_port *port,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ if (port) {
+ fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ fq->channel = fm_get_tx_port_channel(port);
+ } else {
+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+ }
+}
+
+void dpa_fq_setup(struct dpa_priv_s *priv, const dpa_fq_cbs_t *fq_cbs,
+ struct fm_port *tx_port)
+{
+ struct dpa_fq *fq;
+ int portals[NR_CPUS];
+ int cpu, portal_cnt = 0, num_portals = 0;
+ uint32_t pcd_fqid;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+ int egress_cnt = 0, conf_cnt = 0;
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+ int recycle_cnt = 0;
+#endif
+
+ /* Prepare for PCD FQs init */
+ for_each_cpu(cpu, affine_cpus)
+ portals[num_portals++] = qman_affine_channel(cpu);
+ if (num_portals == 0)
+ dev_err(priv->net_dev->dev.parent,
+ "No Qman software (affine) channels found");
+
+ pcd_fqid = (priv->mac_dev) ?
+ DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
+
+ /* Initialize each FQ in the list */
+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+ switch (fq->fq_type) {
+ case FQ_TYPE_RX_DEFAULT:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ break;
+ case FQ_TYPE_RX_ERROR:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
+ break;
+ case FQ_TYPE_RX_PCD:
+ /* For MACless we can't have dynamic Rx queues */
+ BUG_ON(!priv->mac_dev && !fq->fqid);
+ dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+ if (!fq->fqid)
+ fq->fqid = pcd_fqid++;
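+			/* Distribute the PCD FQs round-robin over the affine
+			 * portal channels gathered above.
+			 */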
+ fq->channel = portals[portal_cnt];
+ portal_cnt = (portal_cnt + 1) % num_portals;
+ break;
+ case FQ_TYPE_TX:
+ dpa_setup_egress(priv, fq, tx_port,
+ &fq_cbs->egress_ern);
+ /* If we have more Tx queues than the number of cores,
+ * just ignore the extra ones.
+ */
+ if (egress_cnt < DPAA_ETH_TX_QUEUES)
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_CONFIRM:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+ priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+ break;
+ case FQ_TYPE_TX_ERROR:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
+ break;
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+ case FQ_TYPE_TX_RECYCLE:
+ BUG_ON(!priv->mac_dev);
+ dpa_setup_egress(priv, fq, tx_port,
+ &fq_cbs->egress_ern);
+ priv->recycle_fqs[recycle_cnt++] = &fq->fq_base;
+ break;
+#endif
+ default:
+ dev_warn(priv->net_dev->dev.parent,
+ "Unknown FQ type detected!\n");
+ break;
+ }
+ }
+
+ /* The number of Tx queues may be smaller than the number of cores, if
+ * the Tx queue range is specified in the device tree instead of being
+ * dynamically allocated.
+ * Make sure all CPUs receive a corresponding Tx queue.
+ */
+ while (egress_cnt < DPAA_ETH_TX_QUEUES) {
+ list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+ if (fq->fq_type != FQ_TYPE_TX)
+ continue;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ if (egress_cnt == DPAA_ETH_TX_QUEUES)
+ break;
+ }
+ }
+}
+
+static struct qman_fq *_dpa_get_tx_conf_queue(const struct dpa_priv_s *priv,
+ struct qman_fq *tx_fq)
+{
+ int i;
+
+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
+ if (priv->egress_fqs[i] == tx_fq)
+ return priv->conf_fqs[i];
+
+ return NULL;
+}
+
+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
+{
+ int _errno;
+ const struct dpa_priv_s *priv;
+ struct device *dev;
+ struct qman_fq *fq;
+ struct qm_mcc_initfq initfq;
+ struct qman_fq *confq;
+
+ priv = netdev_priv(dpa_fq->net_dev);
+ dev = dpa_fq->net_dev->dev.parent;
+
+ if (dpa_fq->fqid == 0)
+ dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+
+ _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
+ if (_errno) {
+ dev_err(dev, "qman_create_fq() failed\n");
+ return _errno;
+ }
+ fq = &dpa_fq->fq_base;
+
+ if (dpa_fq->init) {
+ initfq.we_mask = QM_INITFQ_WE_FQCTRL;
+ /* FIXME: why would we want to keep an empty FQ in cache? */
+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+
+#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
+ /* Try to reduce the number of portal interrupts for
+ * Tx Confirmation FQs.
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+#endif
+
+ /* FQ placement */
+ initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+
+ initfq.fqd.dest.channel = dpa_fq->channel;
+ initfq.fqd.dest.wq = dpa_fq->wq;
+
+ /* Put all egress queues in a congestion group of their own.
+ * Sensu stricto, the Tx confirmation queues are Rx FQs,
+ * rather than Tx - but they nonetheless account for the
+ * memory footprint on behalf of egress traffic. We therefore
+ * place them in the netdev's CGR, along with the Tx FQs.
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX ||
+ dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
+ dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
+ initfq.we_mask |= QM_INITFQ_WE_CGID;
+ initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ initfq.fqd.cgid = priv->cgr_data.cgr.cgrid;
+ /* Set a fixed overhead accounting, in an attempt to
+ * reduce the impact of fixed-size skb shells and the
+ * driver's needed headroom on system memory. This is
+ * especially the case when the egress traffic is
+ * composed of small datagrams.
+ * Unfortunately, QMan's OAL value is capped to an
+ * insufficient value, but even that is better than
+ * no overhead accounting at all.
+ */
+ initfq.we_mask |= QM_INITFQ_WE_OAC;
+ initfq.fqd.oac_init.oac = QM_OAC_CG;
+ initfq.fqd.oac_init.oal = min(sizeof(struct sk_buff) +
+ priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL);
+ }
+
+ if (td_enable) {
+ initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
+ qm_fqd_taildrop_set(&initfq.fqd.td,
+ DPA_FQ_TD, 1);
+ initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
+ }
+
+ /* Configure the Tx confirmation queue, now that we know
+ * which Tx queue it pairs with.
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX) {
+ confq = _dpa_get_tx_conf_queue(priv, &dpa_fq->fq_base);
+ if (confq) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CONTEXTB;
+ /* CTXA[OVFQ] = 1 */
+ initfq.fqd.context_a.hi = 0x80000000;
+ initfq.fqd.context_a.lo = 0x0;
+ initfq.fqd.context_b = qman_fq_fqid(confq);
+ }
+ }
+
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+ /* Configure the Tx queues for recycled frames, such that the
+ * buffers are released by FMan and no confirmation is sent
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX_RECYCLE) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CONTEXTB;
+ /* ContextA: OVFQ=1 (use ContextB FQID for confirmation)
+ * OVOM=1 (use contextA2 bits instead of ICAD)
+ * A2V=1 (contextA A2 field is valid)
+ * B0V=1 (contextB field is valid)
+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+ * ContextB: Confirmation FQID = 0
+ */
+ initfq.fqd.context_a.hi = 0x96000000;
+ initfq.fqd.context_a.lo = 0x80000000;
+ initfq.fqd.context_b = 0;
+ }
+#endif
+
+ /* Initialization common to all ingress queues */
+ if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ initfq.fqd.fq_ctrl |=
+ QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+ initfq.fqd.context_a.stashing.exclusive =
+ QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_ANNOTATION;
+ initfq.fqd.context_a.stashing.data_cl = 2;
+ initfq.fqd.context_a.stashing.annotation_cl = 1;
+ initfq.fqd.context_a.stashing.context_cl =
+ DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+		}
+
+ _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+ if (_errno < 0) {
+ dev_err(dev, "qman_init_fq(%u) = %d\n",
+ qman_fq_fqid(fq), _errno);
+ qman_destroy_fq(fq, 0);
+ return _errno;
+ }
+ }
+
+ dpa_fq->fqid = qman_fq_fqid(fq);
+
+ return 0;
+}
+
+static int __cold __attribute__((nonnull))
+_dpa_fq_free(struct device *dev, struct qman_fq *fq)
+{
+ int _errno, __errno;
+ struct dpa_fq *dpa_fq;
+ const struct dpa_priv_s *priv;
+
+ _errno = 0;
+
+ dpa_fq = container_of(fq, struct dpa_fq, fq_base);
+ priv = netdev_priv(dpa_fq->net_dev);
+
+ if (dpa_fq->init) {
+ _errno = qman_retire_fq(fq, NULL);
+ if (unlikely(_errno < 0) && netif_msg_drv(priv))
+ dev_err(dev, "qman_retire_fq(%u) = %d\n",
+ qman_fq_fqid(fq), _errno);
+
+ __errno = qman_oos_fq(fq);
+ if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
+ dev_err(dev, "qman_oos_fq(%u) = %d\n",
+ qman_fq_fqid(fq), __errno);
+ if (_errno >= 0)
+ _errno = __errno;
+ }
+ }
+
+ qman_destroy_fq(fq, 0);
+ list_del(&dpa_fq->list);
+
+ return _errno;
+}
+
+int __cold __attribute__((nonnull))
+dpa_fq_free(struct device *dev, struct list_head *list)
+{
+ int _errno, __errno;
+ struct dpa_fq *dpa_fq, *tmp;
+
+ _errno = 0;
+ list_for_each_entry_safe(dpa_fq, tmp, list, list) {
+ __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
+ if (unlikely(__errno < 0) && _errno >= 0)
+ _errno = __errno;
+ }
+
+ return _errno;
+}
+
+static void
+dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
+ struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
+{
+ struct fm_port_params tx_port_param;
+ bool frag_enabled = false;
+
+ memset(&tx_port_param, 0, sizeof(tx_port_param));
+ dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
+ buf_layout, frag_enabled);
+}
+
+static void
+dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
+ struct dpa_fq *errq, struct dpa_fq *defq,
+ struct dpa_buffer_layout_s *buf_layout)
+{
+ struct fm_port_params rx_port_param;
+ int i;
+ bool frag_enabled = false;
+
+ memset(&rx_port_param, 0, sizeof(rx_port_param));
+ count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
+ rx_port_param.num_pools = count;
+	for (i = 0; i < count; i++) {
+ rx_port_param.pool_param[i].id = bp[i].bpid;
+ rx_port_param.pool_param[i].size = bp[i].size;
+ }
+
+ dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
+ buf_layout, frag_enabled);
+}
+
+int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
+ uint8_t alignment, uint32_t *base_fqid)
+{
+ dev_crit(dev, "callback not implemented!\n");
+
+ return 0;
+}
+
+int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
+{
+ dev_crit(dev, "callback not implemented!\n");
+
+ return 0;
+}
+
+void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpa_bp *bp, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpa_buffer_layout_s *buf_layout,
+ struct device *dev)
+{
+ struct fm_port_pcd_param rx_port_pcd_param;
+ struct fm_port *rxport = mac_dev->port_dev[RX];
+ struct fm_port *txport = mac_dev->port_dev[TX];
+
+ dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
+ port_fqs->rx_defq, &buf_layout[RX]);
+
+ rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
+ rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
+ rx_port_pcd_param.dev = dev;
+ fm_port_pcd_bind(rxport, &rx_port_pcd_param);
+}
+
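+/* Release the data buffers referenced by an S/G table back to their BMan
+ * pools, batching consecutive entries that share a bpid into a single
+ * bman_release() call; stops after the entry marked as final.
+ */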
+void dpa_release_sgt(struct qm_sg_entry *sgt, struct bm_buffer *bmb)
+{
+ struct dpa_bp *dpa_bp;
+ int i = 0, j;
+
+ do {
+ dpa_bp = dpa_bpid2pool(sgt[i].bpid);
+ BUG_ON(IS_ERR(dpa_bp));
+
+ j = 0;
+ do {
+ BUG_ON(sgt[i].extension);
+
+ bmb[j].hi = sgt[i].addr_hi;
+ bmb[j].lo = sgt[i].addr_lo;
+
+ j++; i++;
+			/* bmb[] provided by the caller holds 8 entries */
+		} while (j < 8 &&
+ !sgt[i-1].final &&
+ sgt[i-1].bpid == sgt[i].bpid);
+
+ while (bman_release(dpa_bp->pool, bmb, j, 0))
+ cpu_relax();
+ } while (!sgt[i-1].final);
+}
+
+void __attribute__((nonnull))
+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
+{
+ struct qm_sg_entry *sgt;
+ struct dpa_bp *_dpa_bp;
+ struct bm_buffer _bmb, bmb[8];
+
+ _bmb.hi = fd->addr_hi;
+ _bmb.lo = fd->addr_lo;
+
+ _dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(IS_ERR(_dpa_bp));
+
+ if (fd->format == qm_fd_sg) {
+ sgt = (phys_to_virt(bm_buf_addr(&_bmb)) + dpa_fd_offset(fd));
+ dpa_release_sgt(sgt, bmb);
+ }
+
+ while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
+ cpu_relax();
+}
+EXPORT_SYMBOL(dpa_fd_release);
+
+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_mr_entry *msg)
+{
+ switch (msg->ern.rc & QM_MR_RC_MASK) {
+ case QM_MR_RC_CGR_TAILDROP:
+ percpu_priv->ern_cnt.cg_tdrop++;
+ break;
+ case QM_MR_RC_WRED:
+ percpu_priv->ern_cnt.wred++;
+ break;
+ case QM_MR_RC_ERROR:
+ percpu_priv->ern_cnt.err_cond++;
+ break;
+ case QM_MR_RC_ORPWINDOW_EARLY:
+ percpu_priv->ern_cnt.early_window++;
+ break;
+ case QM_MR_RC_ORPWINDOW_LATE:
+ percpu_priv->ern_cnt.late_window++;
+ break;
+ case QM_MR_RC_FQ_TAILDROP:
+ percpu_priv->ern_cnt.fq_tdrop++;
+ break;
+ case QM_MR_RC_ORPWINDOW_RETIRED:
+ percpu_priv->ern_cnt.fq_retired++;
+ break;
+ case QM_MR_RC_ORP_ZERO:
+ percpu_priv->ern_cnt.orp_zero++;
+ break;
+ }
+}
+
+
+/**
+ * Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
+{
+ t_FmPrsResult *parse_result;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h = NULL;
+ int l4_proto;
+ int ethertype = ntohs(skb->protocol);
+ int retval = 0;
+
+ if (!priv->mac_dev || skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* Note: L3 csum seems to be already computed in sw, but we can't choose
+ * L4 alone from the FM configuration anyway.
+ */
+
+ /* Fill in some fields of the Parse Results array, so the FMan
+ * can find them as if they came from the FMan Parser.
+ */
+ parse_result = (t_FmPrsResult *)parse_results;
+
+ /* If we're dealing with VLAN, get the real Ethernet type */
+ if (ethertype == ETH_P_8021Q) {
+ /* We can't always assume the MAC header is set correctly
+ * by the stack, so reset to beginning of skb->data
+ */
+ skb_reset_mac_header(skb);
+ ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ }
+
+ /* Fill in the relevant L3 parse result fields
+ * and read the L4 protocol type
+ */
+ switch (ethertype) {
+ case ETH_P_IP:
+ parse_result->l3r = FM_L3_PARSE_RESULT_IPV4;
+ iph = ip_hdr(skb);
+ BUG_ON(iph == NULL);
+		l4_proto = iph->protocol;
+ break;
+ case ETH_P_IPV6:
+ parse_result->l3r = FM_L3_PARSE_RESULT_IPV6;
+ ipv6h = ipv6_hdr(skb);
+ BUG_ON(ipv6h == NULL);
+		l4_proto = ipv6h->nexthdr;
+ break;
+ default:
+ /* We shouldn't even be here */
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_alert(priv->net_dev,
+ "Can't compute HW csum for L3 proto 0x%x\n",
+ ntohs(skb->protocol));
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* Fill in the relevant L4 parse result fields */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+ break;
+ case IPPROTO_TCP:
+ parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+ break;
+ default:
+ /* This can as well be a BUG() */
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_alert(priv->net_dev,
+ "Can't compute HW csum for L4 proto 0x%x\n",
+ l4_proto);
+ retval = -EIO;
+ goto return_error;
+ }
+
+ /* At index 0 is IPOffset_1 as defined in the Parse Results */
+ parse_result->ip_off[0] = skb_network_offset(skb);
+ parse_result->l4_off = skb_transport_offset(skb);
+
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+ fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+
+ /* On P1023 and similar platforms fd->cmd interpretation could
+ * be disabled by setting CONTEXT_A bit ICMD; currently this bit
+ * is not set so we do not need to check; in the future, if/when
+ * using context_a we need to check this bit
+ */
+
+return_error:
+ return retval;
+}
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.h
new file mode 100644
index 0000000..8d11fe9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.h
@@ -0,0 +1,133 @@
+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_ETH_COMMON_H
+#define __DPAA_ETH_COMMON_H
+
+#include <linux/etherdevice.h> /* struct net_device */
+#include <linux/fsl_bman.h> /* struct bm_buffer */
+#include <linux/of_platform.h> /* struct platform_device */
+#include <linux/net_tstamp.h> /* struct hwtstamp_config */
+
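+/* Fill an fm_port_params structure from the FQ ids and the buffer layout,
+ * then apply it to the given Rx or Tx port through
+ * fm_set_rx_port_params()/fm_set_tx_port_params().
+ */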
+#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
+ frag_enabled) \
+{ \
+ param.errq = errq_id; \
+ param.defq = defq_id; \
+ param.priv_data_size = buf_layout->priv_data_size; \
+ param.parse_results = buf_layout->parse_results; \
+ param.hash_results = buf_layout->hash_results; \
+ param.frag_enable = frag_enabled; \
+ param.time_stamp = buf_layout->time_stamp; \
+ param.manip_extra_space = buf_layout->manip_extra_space; \
+ param.data_align = buf_layout->data_align; \
+ fm_set_##type##_port_params(port, &param); \
+}
+
+int dpa_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev,
+ const uint8_t *mac_addr,
+ uint16_t tx_timeout);
+int __cold dpa_start(struct net_device *net_dev);
+int __cold dpa_stop(struct net_device *net_dev);
+void __cold dpa_timeout(struct net_device *net_dev);
+struct rtnl_link_stats64 * __cold
+dpa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats);
+int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
+int dpa_ndo_init(struct net_device *net_dev);
+int dpa_set_features(struct net_device *dev, netdev_features_t features);
+netdev_features_t dpa_fix_features(struct net_device *dev,
+ netdev_features_t features);
+#if defined(CONFIG_FSL_DPAA_1588) || defined(CONFIG_FSL_DPAA_TS)
+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
+ enum port_type rx_tx, const void *data);
+#endif
+#ifdef CONFIG_FSL_DPAA_TS
+/* Updates the skb shared hw timestamp from the hardware timestamp */
+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
+ struct skb_shared_hwtstamps *shhwtstamps, const void *data);
+#endif /* CONFIG_FSL_DPAA_TS */
+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int __cold dpa_remove(struct platform_device *of_dev);
+struct mac_device * __cold __must_check
+__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
+int dpa_set_mac_address(struct net_device *net_dev, void *addr);
+void dpa_set_rx_mode(struct net_device *net_dev);
+void dpa_set_buffers_layout(struct mac_device *mac_dev,
+ struct dpa_buffer_layout_s *layout);
+struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
+dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+ size_t count);
+int dpa_bp_shared_port_seed(struct dpa_bp *bp);
+int __attribute__((nonnull))
+dpa_bp_alloc(struct dpa_bp *dpa_bp);
+void __cold __attribute__((nonnull))
+dpa_bp_free(struct dpa_priv_s *priv, struct dpa_bp *dpa_bp);
+struct dpa_bp *dpa_bpid2pool(int bpid);
+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
+bool dpa_bpid2pool_use(int bpid);
+void dpa_bp_drain(struct dpa_bp *bp);
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb);
+#endif
+struct dpa_fq *dpa_fq_alloc(struct device *dev,
+ const struct fqid_cell *fqids,
+ struct list_head *list,
+ enum dpa_fq_type fq_type);
+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs,
+ bool tx_conf_fqs_per_core,
+ enum port_type ptype);
+int dpa_get_channel(struct device *dev, struct device_node *dpa_node);
+int dpaa_eth_add_channel(void *__arg);
+int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
+void dpa_fq_setup(struct dpa_priv_s *priv, const dpa_fq_cbs_t *fq_cbs,
+ struct fm_port *tx_port);
+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
+int __cold __attribute__((nonnull))
+dpa_fq_free(struct device *dev, struct list_head *list);
+void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpa_bp *bp, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpa_buffer_layout_s *buf_layout,
+ struct device *dev);
+void dpa_release_sgt(struct qm_sg_entry *sgt,
+ struct bm_buffer *bmb);
+void __attribute__((nonnull))
+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_mr_entry *msg);
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
+
+#endif /* __DPAA_ETH_COMMON_H */
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_macless.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_macless.c
new file mode 100644
index 0000000..3dc845e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_macless.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/highmem.h>
+#include <linux/fsl_qman.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
+
+/* For MAC-based interfaces, we compute the tx needed headroom from the
+ * associated Tx port's buffer layout settings.
+ * For MACless interfaces just use a default value.
+ */
+#define DPA_DEFAULT_TX_HEADROOM 64
+
+#define DPA_DESCRIPTION "FSL DPAA MACless Ethernet driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+
+static uint8_t debug = -1;
+module_param(debug, byte, S_IRUGO);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+
+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
+static uint16_t tx_timeout = 1000;
+module_param(tx_timeout, ushort, S_IRUGO);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+
+/* reused from the shared driver */
+extern const dpa_fq_cbs_t shared_fq_cbs;
+int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
+
+/* forward declarations */
+static int __cold dpa_macless_start(struct net_device *net_dev);
+static int __cold dpa_macless_stop(struct net_device *net_dev);
+static int dpa_set_macless_address(struct net_device *net_dev, void *addr);
+static int dpaa_eth_macless_probe(struct platform_device *_of_dev);
+static netdev_features_t
+dpa_macless_fix_features(struct net_device *dev, netdev_features_t features);
+static int dpa_macless_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev);
+
+static const struct net_device_ops dpa_macless_ops = {
+ .ndo_open = dpa_macless_start,
+ .ndo_start_xmit = dpa_shared_tx,
+ .ndo_stop = dpa_macless_stop,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats64 = dpa_get_stats64,
+ .ndo_set_mac_address = dpa_set_macless_address,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+ .ndo_select_queue = dpa_select_queue,
+#endif
+ .ndo_change_mtu = dpa_change_mtu,
+ .ndo_init = dpa_ndo_init,
+ .ndo_set_features = dpa_set_features,
+ .ndo_fix_features = dpa_macless_fix_features,
+};
+
+static const struct of_device_id dpa_macless_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet-macless"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpa_macless_match);
+
+static struct platform_driver dpa_macless_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = dpa_macless_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa_eth_macless_probe,
+ .remove = dpa_remove
+};
+
+static const char macless_frame_queues[][25] = {
+ [RX] = "fsl,qman-frame-queues-rx",
+ [TX] = "fsl,qman-frame-queues-tx"
+};
+
+static int __cold dpa_macless_start(struct net_device *net_dev)
+{
+ netif_tx_start_all_queues(net_dev);
+
+ return 0;
+}
+
+static int __cold dpa_macless_stop(struct net_device *net_dev)
+{
+ netif_tx_stop_all_queues(net_dev);
+
+ return 0;
+}
+
+static int dpa_set_macless_address(struct net_device *net_dev, void *addr)
+{
+ const struct dpa_priv_s *priv;
+ int _errno;
+
+ priv = netdev_priv(net_dev);
+
+ _errno = eth_mac_addr(net_dev, addr);
+ if (_errno < 0) {
+ if (netif_msg_drv(priv))
+ netdev_err(net_dev,
+ "eth_mac_addr() = %d\n",
+ _errno);
+ return _errno;
+ }
+
+ return 0;
+}
+
+static netdev_features_t
+dpa_macless_fix_features(struct net_device *dev, netdev_features_t features)
+{
+ netdev_features_t unsupported_features = 0;
+
+ /* In theory we should never be requested to enable features that
+ * we didn't set in netdev->features and netdev->hw_features at probe
+ * time, but double check just to be on the safe side.
+ */
+ unsupported_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* We don't support enabling Rx csum through ethtool yet */
+ unsupported_features |= NETIF_F_RXCSUM;
+
+ features &= ~unsupported_features;
+
+ return features;
+}
+
+static int dpa_macless_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ const uint8_t *mac_addr;
+
+ net_dev->netdev_ops = &dpa_macless_ops;
+
+ /* Get the MAC address */
+ mac_addr = of_get_mac_address(dpa_node);
+ if (mac_addr == NULL) {
+ if (netif_msg_probe(priv))
+ dev_err(dev, "No MAC address found!\n");
+ return -EINVAL;
+ }
+
+ return dpa_netdev_init(dpa_node, net_dev, mac_addr, tx_timeout);
+}
+
+/* Probing of FQs for MACless ports */
+static int dpa_fq_probe_macless(struct device *dev, struct list_head *list,
+ enum port_type ptype)
+{
+ struct device_node *np = dev->of_node;
+ const struct fqid_cell *fqids;
+ int num_ranges;
+ int i, lenp;
+
+ fqids = of_get_property(np, macless_frame_queues[ptype], &lenp);
+ if (fqids == NULL) {
+ dev_err(dev, "Need FQ definition in dts for MACless devices\n");
+ return -EINVAL;
+ }
+
+ num_ranges = lenp / sizeof(*fqids);
+
+ /* All ranges defined in the device tree are used as Rx/Tx queues */
+ for (i = 0; i < num_ranges; i++) {
+ if (!dpa_fq_alloc(dev, &fqids[i], list, ptype == RX ?
+ FQ_TYPE_RX_PCD : FQ_TYPE_TX)) {
+ dev_err(dev, "_dpa_fq_alloc() failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa_eth_macless_probe(struct platform_device *_of_dev)
+{
+ int err = 0, i;
+ struct device *dev;
+ struct device_node *dpa_node;
+ struct dpa_bp *dpa_bp;
+ struct dpa_fq *dpa_fq, *tmp;
+ size_t count;
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct fm_port_fqs port_fqs;
+ struct task_struct *kth;
+ static u8 macless_idx;
+
+ dev = &_of_dev->dev;
+
+ dpa_node = dev->of_node;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+ /* Get the buffer pools assigned to this interface */
+ dpa_bp = dpa_bp_probe(_of_dev, &count);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
+
+ dpa_bp->seed_cb = dpa_bp_shared_port_seed;
+
+ /* Allocate this early, so we can store relevant information in
+ * the private area (needed by 1588 code in dpa_mac_probe)
+ */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ sprintf(priv->if_type, "macless%d", macless_idx++);
+
+ priv->msg_enable = netif_msg_init(debug, -1);
+
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list, RX);
+ if (!err)
+ err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list,
+ TX);
+ if (err < 0)
+ goto fq_probe_failed;
+
+ /* bp init */
+
+ err = dpa_bp_create(net_dev, dpa_bp, count);
+
+ if (err < 0)
+ goto bp_create_failed;
+
+ priv->mac_dev = NULL;
+
+ priv->channel = dpa_get_channel(dev, dpa_node);
+
+ if (priv->channel < 0) {
+ err = priv->channel;
+ goto get_channel_failed;
+ }
+
+ /* Start a thread that will walk the cpus with affine portals
+ * and add this pool channel to each's dequeue mask.
+ */
+ kth = kthread_run(dpaa_eth_add_channel,
+ (void *)(unsigned long)priv->channel,
+ "dpaa_%p:%d", net_dev, priv->channel);
+	if (IS_ERR(kth)) {
+		err = PTR_ERR(kth);
+		goto add_channel_failed;
+	}
+
+ dpa_fq_setup(priv, &shared_fq_cbs, NULL);
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
+ /* For MAC-less devices we only get here for RX frame queues
+ * initialization, which are the TX queues of the other
+ * partition.
+ * It is safe to rely on one partition to set the FQ taildrop
+ * threshold for the TX queues of the other partition
+ * because the ERN notifications will be received by the
+ * partition doing qman_enqueue.
+ */
+ err = dpa_fq_init(dpa_fq, true);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
+
+ priv->percpu_priv = alloc_percpu(*priv->percpu_priv);
+
+ if (priv->percpu_priv == NULL) {
+ dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+ err = dpa_macless_netdev_init(dpa_node, net_dev);
+ if (err < 0)
+ goto netdev_init_failed;
+
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+ printk(KERN_INFO "fsl_dpa_macless: Probed %s interface as %s\n",
+ priv->if_type, net_dev->name);
+
+ return 0;
+
+netdev_init_failed:
+ if (net_dev)
+ free_percpu(priv->percpu_priv);
+alloc_percpu_failed:
+fq_alloc_failed:
+ if (net_dev) {
+ dpa_fq_free(dev, &priv->dpa_fq_list);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ qman_delete_cgr(&priv->cgr_data.cgr);
+ }
+add_channel_failed:
+get_channel_failed:
+ if (net_dev)
+ dpa_bp_free(priv, priv->dpa_bp);
+bp_create_failed:
+fq_probe_failed:
+ dev_set_drvdata(dev, NULL);
+ if (net_dev)
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static int __init __cold dpa_macless_load(void)
+{
+ int _errno;
+
+ printk(KERN_INFO KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n");
+
+ /* Initialize dpaa_eth mirror values */
+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
+ dpa_max_frm = fm_get_max_frm();
+
+ _errno = platform_driver_register(&dpa_macless_driver);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+module_init(dpa_macless_load);
+
+static void __exit __cold dpa_macless_unload(void)
+{
+ platform_driver_unregister(&dpa_macless_driver);
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+}
+module_exit(dpa_macless_unload);
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c
new file mode 100644
index 0000000..428553a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "dpaa_1588.h"
+
+#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
+
+/* Maximum frame size on Tx for which skb copying is preferable to
+ * creating an S/G frame
+ */
+#define DPA_SKB_COPY_MAX_SIZE 256
+
+/* S/G table requires at least 256 bytes */
+#define sgt_buffer_size(priv) \
+ dpa_get_buffer_size(&priv->buf_layout[TX], 256)
+
+extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
+uint32_t default_buf_size;
+
+/* Allocate 8 socket buffers and release them into the buffer pool.
+ * The buffers are accounted against the given CPU's per-cpu counter.
+ */
+static void dpa_bp_add_8(const struct dpa_bp *dpa_bp, unsigned int cpu)
+{
+ struct bm_buffer bmb[8];
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ int i;
+ struct sk_buff *skb;
+ int *count_ptr;
+
+ count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
+
+ for (i = 0; i < 8; i++) {
+ /* The buffers tend to be aligned all to the same cache
+ * index. A standard dequeue operation pulls in 15 packets.
+ * This means that when it stashes, it evicts half of the
+ * packets it's stashing. In order to prevent that, we pad
+ * by a variable number of cache lines, to reduce collisions.
+ * We always pad by at least 1 cache line, because we want
+ * a little extra room at the beginning for IPSec and to
+ * accommodate NET_IP_ALIGN.
+ */
+ int pad = (i + 1) * L1_CACHE_BYTES;
+
+ skb = dev_alloc_skb(dpa_bp->size + pad);
+ if (unlikely(!skb)) {
+ pr_err("dev_alloc_skb() failed\n");
+ bm_buffer_set64(&bmb[i], 0);
+ break;
+ }
+
+ skbh = (struct sk_buff **)(skb->head + pad);
+ *skbh = skb;
+
+ /* Here we need to map only for device write (DMA_FROM_DEVICE),
+ * but on Tx recycling we may also get buffers in the pool that
+ * are mapped bidirectionally.
+ * Use DMA_BIDIRECTIONAL here as well to avoid any
+ * inconsistencies when unmapping.
+ */
+ addr = dma_map_single(dpa_bp->dev, skb->head + pad,
+ dpa_bp->size, DMA_BIDIRECTIONAL);
+		if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+			dev_err(dpa_bp->dev, "DMA mapping failed\n");
+			/* don't leak the skb we failed to map */
+			dev_kfree_skb_any(skb);
+			break;
+		}
+
+ bm_buffer_set64(&bmb[i], addr);
+ }
+
+ /* Avoid releasing a completely null buffer; bman_release() requires
+ * at least one buf.
+ */
+ if (likely(i)) {
+ /* Release the buffers. In case bman is busy, keep trying
+ * until successful. bman_release() is guaranteed to succeed
+ * in a reasonable amount of time
+ */
+ while (bman_release(dpa_bp->pool, bmb, i, 0))
+ cpu_relax();
+
+ *count_ptr += i;
+ }
+}
+
+void dpa_bp_default_buf_size_update(uint32_t size)
+{
+ if (size > default_buf_size)
+ default_buf_size = size;
+}
+
+uint32_t dpa_bp_default_buf_size_get(void)
+{
+ return default_buf_size;
+}
+
+void dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
+{
+ int i;
+ dpa_bp->size = default_buf_size;
+
+ /* Give each cpu an allotment of "count" buffers */
+ for_each_online_cpu(i) {
+ int j;
+
+ for (j = 0; j < dpa_bp->target_count; j += 8)
+ dpa_bp_add_8(dpa_bp, i);
+ }
+}
+
+void dpa_bp_priv_non_sg_seed(struct dpa_bp *dpa_bp)
+{
+ static bool default_pool_seeded;
+
+ if (default_pool_seeded)
+ return;
+
+ default_pool_seeded = true;
+
+ dpa_bp_priv_seed(dpa_bp);
+}
+
+/* Add buffers/(skbuffs) for Rx processing whenever bpool count falls below
+ * REFILL_THRESHOLD.
+ */
+int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv)
+{
+ const struct dpa_bp *dpa_bp = percpu_priv->dpa_bp;
+ int *countptr = __this_cpu_ptr(dpa_bp->percpu_count);
+ int count = *countptr;
+ /* this function is called in softirq context;
+ * no need to protect smp_processor_id() on RT kernel
+ */
+ unsigned int cpu = smp_processor_id();
+
+ if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+ int i;
+
+ for (i = count; i < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; i += 8)
+ dpa_bp_add_8(dpa_bp, cpu);
+ }
+
+ return 0;
+}
+
+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * either contiguous frames or scatter/gather ones with a single data buffer.
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+ * against cases when not all fd relevant fields were filled in.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ */
+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
+ const struct qm_fd *fd)
+{
+ dma_addr_t addr = qm_fd_addr(fd);
+ dma_addr_t sg_addr;
+ void *vaddr;
+ struct dpa_bp *bp = priv->dpa_bp;
+ struct sk_buff **skbh;
+ struct sk_buff *skb = NULL;
+
+ BUG_ON(!fd);
+
+ if (unlikely(!addr))
+ return skb;
+ vaddr = phys_to_virt(addr);
+ skbh = (struct sk_buff **)vaddr;
+
+ if (fd->format == qm_fd_contig) {
+ /* For contiguous frames, just unmap data buffer;
+ * mapping direction depends on whether the frame was
+ * meant to be recycled or not
+ */
+ if (fd->cmd & FM_FD_CMD_FCO)
+ dma_unmap_single(bp->dev, addr, bp->size,
+ DMA_BIDIRECTIONAL);
+ else
+ dma_unmap_single(bp->dev, addr, bp->size,
+ DMA_TO_DEVICE);
+ /* Retrieve the skb backpointer */
+ skb = *skbh;
+ } else {
+ /* For s/g, we need to unmap both the SGT buffer and the
+ * data buffer, and also free the SGT buffer
+ */
+ struct qm_sg_entry *sg_entry;
+
+ /* Unmap first buffer (contains S/G table) */
+ dma_unmap_single(bp->dev, addr, sgt_buffer_size(priv),
+ DMA_TO_DEVICE);
+
+ /* Unmap data buffer */
+ sg_entry = (struct qm_sg_entry *)(vaddr + fd->offset);
+ sg_addr = qm_sg_addr(sg_entry);
+ if (likely(sg_addr))
+ dma_unmap_single(bp->dev, sg_addr, bp->size,
+ DMA_TO_DEVICE);
+ /* Retrieve the skb backpointer */
+ skb = *skbh;
+
+ }
+/* on some error paths this might not be necessary: */
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
+ dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
+#endif
+#ifdef CONFIG_FSL_DPAA_TS
+ if (unlikely(priv->ts_tx_en &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ if (!dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh))
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ /* Free first buffer (which was allocated on Tx) containing the
+ * skb backpointer and hardware timestamp information
+ */
+ if (fd->format != qm_fd_contig)
+ kfree(vaddr);
+
+ return skb;
+}
+
+/* When we put the buffer into the pool, we purposefully added
+ * some padding to the address so that the buffers wouldn't all
+ * be page-aligned. But the skb has been reset to a default state,
+ * so it is pointing up to DPAA_ETH_MAX_PAD - L1_CACHE_BYTES bytes
+ * before the actual data. We subtract skb->head from the fd addr,
+ * and then mask off the translated part to get the actual distance.
+ */
+static int dpa_process_one(struct dpa_percpu_priv_s *percpu_priv,
+ struct sk_buff *skb, struct dpa_bp *bp, const struct qm_fd *fd)
+{
+ dma_addr_t fd_addr = qm_fd_addr(fd);
+ unsigned long skb_addr = virt_to_phys(skb->head);
+ u32 pad = fd_addr - skb_addr;
+ unsigned int data_start;
+ int *countptr = __this_cpu_ptr(bp->percpu_count);
+
+ (*countptr)--;
+
+ /* The skb is currently pointed at head + headroom. The packet
+ * starts at skb->head + pad + fd offset.
+ */
+ data_start = pad + dpa_fd_offset(fd) - skb_headroom(skb);
+ skb_put(skb, dpa_fd_length(fd) + data_start);
+ skb_pull(skb, data_start);
+
+ return 0;
+}
+
+void __hot _dpa_rx(struct net_device *net_dev,
+ const struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_fd *fd,
+ u32 fqid)
+{
+ struct dpa_bp *dpa_bp;
+ struct sk_buff *skb;
+ struct sk_buff **skbh;
+ dma_addr_t addr = qm_fd_addr(fd);
+ u32 fd_status = fd->status;
+ unsigned int skb_len;
+ t_FmPrsResult *parse_result;
+ int use_gro = net_dev->features & NETIF_F_GRO;
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+
+	if (unlikely(fd_status & FM_FD_STAT_ERRORS)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ goto _return_dpa_fd_release;
+ }
+
+ if (unlikely(fd->format != qm_fd_contig)) {
+ percpu_priv->stats.rx_dropped++;
+ if (netif_msg_rx_status(priv) && net_ratelimit())
+ netdev_warn(net_dev, "Dropping a SG frame\n");
+ goto _return_dpa_fd_release;
+ }
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+
+ dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
+ /* Execute the Rx processing hook, if it exists. */
+ if (dpaa_eth_hooks.rx_default && dpaa_eth_hooks.rx_default((void *)fd,
+ net_dev, fqid) == DPAA_ETH_STOLEN)
+ /* won't count the rx bytes in */
+ goto skb_stolen;
+
+ skb = *skbh;
+ prefetch(skb);
+
+ /* Fill the SKB */
+ dpa_process_one(percpu_priv, skb, dpa_bp, fd);
+
+ prefetch(skb_shinfo(skb));
+
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
+ dpa_ptp_store_rxstamp(priv, skb, (void *)skbh);
+#endif
+
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu))) {
+ percpu_priv->stats.rx_dropped++;
+ goto drop_large_frame;
+ }
+
+
+ skb_len = skb->len;
+
+ /* Validate the skb csum and figure out whether GRO is appropriate */
+ parse_result = (t_FmPrsResult *)((u8 *)skbh + DPA_RX_PRIV_DATA_SIZE);
+ _dpa_process_parse_results(parse_result, fd, skb, &use_gro);
+
+#ifdef CONFIG_FSL_DPAA_TS
+ if (priv->ts_rx_en)
+ dpa_get_ts(priv, RX, skb_hwtstamps(skb), (void *)skbh);
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ if (use_gro) {
+ gro_result_t gro_result;
+
+ gro_result = napi_gro_receive(&percpu_priv->napi, skb);
+ if (unlikely(gro_result == GRO_DROP)) {
+ percpu_priv->stats.rx_dropped++;
+ goto packet_dropped;
+ }
+ } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
+ percpu_priv->stats.rx_dropped++;
+ goto packet_dropped;
+ }
+
+ percpu_priv->stats.rx_packets++;
+ percpu_priv->stats.rx_bytes += skb_len;
+
+packet_dropped:
+skb_stolen:
+ return;
+
+drop_large_frame:
+ dev_kfree_skb(skb);
+ return;
+
+_return_dpa_fd_release:
+ dpa_fd_release(net_dev, fd);
+}
+
+static int skb_to_sg_fd(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd)
+{
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ void *vaddr;
+ dma_addr_t paddr;
+ struct sk_buff **skbh;
+ struct qm_sg_entry *sg_entry;
+ struct net_device *net_dev = priv->net_dev;
+ int err;
+
+ /* Allocate the first buffer in the FD (used for storing S/G table) */
+ vaddr = kmalloc(sgt_buffer_size(priv), GFP_ATOMIC);
+ if (unlikely(vaddr == NULL)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ /* Store skb backpointer at the beginning of the buffer */
+ skbh = (struct sk_buff **)vaddr;
+ *skbh = skb;
+
+ /* Fill in FD */
+ fd->format = qm_fd_sg;
+ fd->offset = priv->tx_headroom;
+ fd->length20 = skb->len;
+
+ /* Enable hardware checksum computation */
+ err = dpa_enable_tx_csum(priv, skb, fd,
+ (char *)vaddr + DPA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "HW csum error: %d\n", err);
+ kfree(vaddr);
+ return err;
+ }
+
+ /* Map the buffer and store its address in the FD */
+ paddr = dma_map_single(dpa_bp->dev, vaddr, sgt_buffer_size(priv),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, paddr))) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "DMA mapping failed\n");
+ kfree(vaddr);
+ return -EINVAL;
+ }
+
+ fd->addr_hi = upper_32_bits(paddr);
+ fd->addr_lo = lower_32_bits(paddr);
+
+ /* Fill in S/G entry */
+ sg_entry = (struct qm_sg_entry *)(vaddr + fd->offset);
+
+ sg_entry->extension = 0;
+ sg_entry->final = 1;
+ sg_entry->length = skb->len;
+ /* Put the same offset in the data buffer as in the SGT (first) buffer.
+ * This is the format for S/G frames generated by FMan; the manual is
+ * not clear whether the same is required of Tx S/G frames, but since
+ * we know for sure we have at least tx_headroom bytes of skb headroom,
+ * let's not take any chances.
+ */
+ sg_entry->offset = priv->tx_headroom;
+
+ paddr = dma_map_single(dpa_bp->dev, skb->data - sg_entry->offset,
+ dpa_bp->size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, paddr))) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "DMA mapping failed\n");
+ return -EINVAL;
+ }
+ sg_entry->addr_hi = upper_32_bits(paddr);
+ sg_entry->addr_lo = lower_32_bits(paddr);
+
+#ifdef CONFIG_FSL_DPAA_TS
+ if (unlikely(priv->ts_tx_en &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ return 0;
+}
+
+static int skb_to_contig_fd(struct dpa_priv_s *priv,
+ struct dpa_percpu_priv_s *percpu_priv,
+ struct sk_buff *skb, struct qm_fd *fd)
+{
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
+ struct net_device *net_dev = priv->net_dev;
+ enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ bool can_recycle = false;
+ int offset, extra_offset;
+ int err;
+ int *countptr = __this_cpu_ptr(dpa_bp->percpu_count);
+
+ /* We are guaranteed that we have at least tx_headroom bytes.
+ * Buffers we allocated are padded to improve cache usage. In order
+ * to increase buffer re-use, we aim to keep any such buffers the
+ * same. This means the address passed to the FM should be
+ * tx_headroom bytes before the data for forwarded frames.
+ *
+ * However, offer some flexibility in fd layout, to allow originating
+ * (termination) buffers to be also recycled when possible.
+ *
+ * First, see if the conditions needed to recycle the skb are met:
+ * - skb not cloned, not shared
+ * - buffer size is large enough to accommodate a maximum size Rx frame
+ * - buffer size does not exceed the maximum size allowed in the pool
+ * (to avoid unbounded increase of buffer size in certain forwarding
+ * conditions)
+ * - buffer address is 16 byte aligned, as per DPAARM
+ * - there's enough room in the buffer pool
+ */
+ if (likely(skb_is_recycleable(skb, dpa_bp->size) &&
+ (skb_end_pointer(skb) - skb->head <=
+ DPA_RECYCLE_MAX_SIZE) &&
+ (*countptr < dpa_bp->target_count))) {
+ /* Compute the minimum necessary fd offset */
+ offset = dpa_bp->size - skb->len - skb_tailroom(skb);
+
+ /* And make sure the offset is no lower than the offset
+ * required by FMan
+ */
+ offset = max_t(int, offset, priv->tx_headroom);
+
+ /* We also need to align the buffer address to 16, such that
+ * Fman will be able to reuse it on Rx.
+ * Since the buffer going to FMan starts at (skb->data - offset)
+ * this is what we'll try to align. We already know that
+ * headroom is at least tx_headroom bytes long, but with
+ * the extra offset needed for alignment we may go beyond
+ * the beginning of the buffer.
+ *
+ * Also need to check that we don't go beyond the maximum
+ * offset that can be set for a contiguous FD.
+ */
+ extra_offset = (unsigned long)(skb->data - offset) & 0xF;
+ if (likely((offset + extra_offset) <= skb_headroom(skb) &&
+ (offset + extra_offset) <= DPA_MAX_FD_OFFSET)) {
+ /* We're good to go for recycling */
+ offset += extra_offset;
+ can_recycle = true;
+ }
+ }
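+ /* Illustration with hypothetical numbers: if the candidate buffer
+ * start (skb->data - offset) ends in 0x8, extra_offset is 8, so the
+ * fd start moves back 8 more bytes onto the previous 16-byte
+ * boundary; the move is only accepted above if the enlarged offset
+ * still fits within the skb headroom and DPA_MAX_FD_OFFSET.
+ */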
+
+#ifdef CONFIG_FSL_DPAA_TS
+ if (unlikely(priv->ts_tx_en &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ /* we need the fd back to get the timestamp */
+ can_recycle = false;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ if (likely(can_recycle)) {
+ /* Buffer will get recycled, setup fd accordingly */
+ fd->cmd |= FM_FD_CMD_FCO;
+ fd->bpid = dpa_bp->bpid;
+ /* Since the buffer will get back to the Bman pool
+ * and be re-used on Rx, map it for both read and write
+ */
+ dma_dir = DMA_BIDIRECTIONAL;
+ } else {
+ /* No recycling here, so we don't care about address alignment.
+ * Just use the smallest offset required by FMan
+ */
+ offset = priv->tx_headroom;
+ }
+
+ skbh = (struct sk_buff **)(skb->data - offset);
+ *skbh = skb;
+
+
+ /* Enable L3/L4 hardware checksum computation.
+ *
+ * We must do this before dma_map_single(), because we may
+ * need to write into the skb.
+ */
+ err = dpa_enable_tx_csum(priv, skb, fd,
+ ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "HW csum error: %d\n", err);
+ return err;
+ }
+
+ fd->format = qm_fd_contig;
+ fd->length20 = skb->len;
+ fd->offset = offset;
+
+ addr = dma_map_single(dpa_bp->dev, skbh, dpa_bp->size, dma_dir);
+ if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "dma_map_single() failed\n");
+ return -EINVAL;
+ }
+
+ fd->addr_hi = upper_32_bits(addr);
+ fd->addr_lo = lower_32_bits(addr);
+
+ return 0;
+}
+
+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
+ struct qm_fd fd;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct rtnl_link_stats64 *percpu_stats;
+ int queue_mapping;
+ int err;
+ int *countptr;
+
+ /* If there is a Tx hook, run it. */
+ if (dpaa_eth_hooks.tx &&
+ dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
+ /* won't update any Tx stats */
+ goto done;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+ percpu_stats = &percpu_priv->stats;
+ countptr = __this_cpu_ptr(priv->dpa_bp->percpu_count);
+
+ clear_fd(&fd);
+ queue_mapping = dpa_get_queue_mapping(skb);
+
+ if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
+ struct sk_buff *skb_new;
+
+ skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
+ if (unlikely(!skb_new)) {
+ percpu_stats->tx_errors++;
+ kfree_skb(skb);
+ goto done;
+ }
+ kfree_skb(skb);
+ skb = skb_new;
+ }
+
+#ifdef CONFIG_FSL_DPAA_1588
+ if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
+ fd.cmd |= FM_FD_CMD_UPD;
+#endif
+#ifdef CONFIG_FSL_DPAA_TS
+ if (unlikely(priv->ts_tx_en &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ fd.cmd |= FM_FD_CMD_UPD;
+#endif /* CONFIG_FSL_DPAA_TS */
+
+ /* We have two paths here:
+ *
+ * 1. If the skb is cloned, create an S/G frame to avoid unsharing it.
+ * The S/G table will contain only one entry, pointing to our skb
+ * data buffer.
+ * The private data area containing the skb backpointer will reside
+ * inside the first buffer, such that it won't risk being overwritten
+ * in case a second skb pointing to the same data buffer is being
+ * processed concurrently.
+ * No recycling is possible in this case, as the data buffer is shared.
+ *
+ * 2. If the skb is not cloned, then the private area inside it can be
+ * safely used to store the skb backpointer. Simply create a contiguous
+ * fd in this case.
+ * Recycling can happen if the right conditions are met.
+ */
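+ /* In short (mirroring the checks right below): a cloned skb longer
+ * than DPA_SKB_COPY_MAX_SIZE takes the S/G path; any other skb is
+ * unshared if needed and takes the contiguous path.
+ */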
+ if (skb_cloned(skb) && (skb->len > DPA_SKB_COPY_MAX_SIZE))
+ err = skb_to_sg_fd(priv, skb, &fd);
+ else {
+ /* If the skb is cloned but its length is below DPA_SKB_COPY_MAX_SIZE,
+ * it's more efficient to unshare it and then use the new skb.
+ */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ percpu_stats->tx_errors++;
+ goto done;
+ }
+ err = skb_to_contig_fd(priv, percpu_priv, skb, &fd);
+ }
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ goto fd_create_failed;
+ }
+
+ if (fd.cmd & FM_FD_CMD_FCO) {
+ /* This skb is recycleable, and the fd generated from it
+ * has been filled in accordingly.
+ * NOTE: The recycling mechanism is fragile and dependent on
+ * upstream changes. It will be maintained for now, but the plan
+ * is to remove it altogether from the driver.
+ */
+ skb_recycle(skb);
+ skb = NULL;
+ (*countptr)++;
+ percpu_priv->tx_returned++;
+ }
+
+ if (unlikely(dpa_xmit(priv, percpu_stats, queue_mapping,
+ &fd) < 0))
+ goto xmit_failed;
+
+ net_dev->trans_start = jiffies;
+ goto done;
+
+xmit_failed:
+ if (fd.cmd & FM_FD_CMD_FCO) {
+ (*countptr)--;
+ percpu_priv->tx_returned--;
+ }
+fd_create_failed:
+ _dpa_cleanup_tx_fd(priv, &fd);
+ dev_kfree_skb(skb);
+
+done:
+ return NETDEV_TX_OK;
+}
+
+#endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_proxy.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_proxy.c
new file mode 100644
index 0000000..22a941a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_proxy.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
+
+#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+
+static uint8_t debug = -1;
+module_param(debug, byte, S_IRUGO);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+
+/* forward declarations */
+static int dpaa_eth_proxy_probe(struct platform_device *_of_dev);
+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
+
+static const struct of_device_id dpa_proxy_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet-init"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpa_proxy_match);
+
+static struct platform_driver dpa_proxy_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME"-proxy",
+ .of_match_table = dpa_proxy_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa_eth_proxy_probe,
+ .remove = dpa_eth_proxy_remove
+};
+
+static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
+{
+ int err = 0, i;
+ struct device *dev;
+ struct device_node *dpa_node;
+ struct dpa_bp *dpa_bp;
+ struct list_head proxy_fq_list;
+ size_t count;
+ struct fm_port_fqs port_fqs;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+ struct mac_device *mac_dev;
+
+ dev = &_of_dev->dev;
+
+ dpa_node = dev->of_node;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+ /* Get the buffer pools assigned to this interface */
+ dpa_bp = dpa_bp_probe(_of_dev, &count);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
+
+ mac_dev = dpa_mac_probe(_of_dev);
+ if (IS_ERR(mac_dev))
+ return PTR_ERR(mac_dev);
+
+ /* We have physical ports, so we need to establish
+ * the buffer layout.
+ */
+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
+ GFP_KERNEL);
+ if (!buf_layout) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+ dpa_set_buffers_layout(mac_dev, buf_layout);
+
+ INIT_LIST_HEAD(&proxy_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
+ if (!err)
+ err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
+ TX);
+ if (err < 0) {
+ devm_kfree(dev, buf_layout);
+ return err;
+ }
+
+ /* Proxy initializer - Just configures the MAC on behalf of
+ * another partition.
+ */
+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
+ buf_layout, dev);
+
+ /* Proxy interfaces need to be started, and the allocated
+ * memory freed
+ */
+ devm_kfree(dev, buf_layout);
+ devm_kfree(dev, dpa_bp);
+
+ /* Free FQ structures */
+ devm_kfree(dev, port_fqs.rx_defq);
+ devm_kfree(dev, port_fqs.rx_errq);
+ devm_kfree(dev, port_fqs.tx_defq);
+ devm_kfree(dev, port_fqs.tx_errq);
+
+ for_each_port_device(i, mac_dev->port_dev)
+ fm_port_enable(mac_dev->port_dev[i]);
+
+ return 0; /* Proxy interface initialization ended */
+}
+
+static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
+{
+ return 0;
+}
+
+static int __init __cold dpa_proxy_load(void)
+{
+ int _errno;
+
+ printk(KERN_INFO KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n");
+
+ /* Initialize dpaa_eth mirror values */
+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
+ dpa_max_frm = fm_get_max_frm();
+
+ _errno = platform_driver_register(&dpa_proxy_driver);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+module_init(dpa_proxy_load);
+
+static void __exit __cold dpa_proxy_unload(void)
+{
+ platform_driver_unregister(&dpa_proxy_driver);
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+}
+module_exit(dpa_proxy_unload);
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
index f93c346..0b5d269 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
@@ -40,27 +40,23 @@
#include <linux/fsl_bman.h>
#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
#include "dpaa_1588.h"
#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-/* DMA map and add a page into the bpool */
-static void dpa_bp_add_page(struct dpa_bp *dpa_bp, unsigned long vaddr)
+/* DMA map and add a page frag back into the bpool.
+ * @vaddr fragment must have been allocated with netdev_alloc_frag(),
+ * specifically for fitting into @dpa_bp.
+ */
+static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr)
{
struct bm_buffer bmb;
int *count_ptr;
dma_addr_t addr;
- int offset;
count_ptr = __this_cpu_ptr(dpa_bp->percpu_count);
-
- /* Make sure we don't map beyond end of page */
- offset = vaddr & (PAGE_SIZE - 1);
- if (unlikely(dpa_bp->size + offset > PAGE_SIZE)) {
- free_page(vaddr);
- return;
- }
addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
@@ -76,24 +72,25 @@ static void dpa_bp_add_page(struct dpa_bp *dpa_bp, unsigned long vaddr)
(*count_ptr)++;
}
-int _dpa_bp_add_8_pages(const struct dpa_bp *dpa_bp)
+int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
{
struct bm_buffer bmb[8];
- unsigned long new_page;
+ void *new_buf;
dma_addr_t addr;
int i;
struct device *dev = dpa_bp->dev;
for (i = 0; i < 8; i++) {
- new_page = __get_free_page(GFP_ATOMIC);
- if (likely(new_page)) {
- addr = dma_map_single(dev, (void *)new_page,
+ new_buf = netdev_alloc_frag(DPA_BP_RAW_SIZE);
+ if (likely(new_buf)) {
+ new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
+ addr = dma_map_single(dev, new_buf,
dpa_bp->size, DMA_BIDIRECTIONAL);
if (likely(!dma_mapping_error(dev, addr))) {
bm_buffer_set64(&bmb[i], addr);
continue;
} else
- free_page(new_page);
+ put_page(virt_to_head_page(new_buf));
}
/* Something went wrong */
@@ -112,7 +109,7 @@ release_bufs:
return i;
bail_out:
- net_err_ratelimited("dpa_bp_add_8_pages() failed\n");
+ net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
WARN_ONCE(1, "Memory allocation failure on Rx\n");
bm_buffer_set64(&bmb[i], 0);
@@ -127,21 +124,19 @@ bail_out:
}
/*
- * Cold path wrapper over _dpa_bp_add_8_pages().
+ * Cold path wrapper over _dpa_bp_add_8_bufs().
*/
-void dpa_bp_add_8_pages(const struct dpa_bp *dpa_bp, int cpu)
+void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
{
int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
- *count_ptr += _dpa_bp_add_8_pages(dpa_bp);
+ *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
}
-void dpa_make_private_pool(struct dpa_bp *dpa_bp)
+void dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
{
int i;
- dpa_bp->percpu_count = alloc_percpu(*dpa_bp->percpu_count);
-
- /* Give each CPU an allotment of "page_count" buffers */
+ /* Give each CPU an allotment of "config_count" buffers */
for_each_online_cpu(i) {
int j;
@@ -150,12 +145,42 @@ void dpa_make_private_pool(struct dpa_bp *dpa_bp)
* we do it at boot time so it is safe
*/
for (j = 0; j < dpa_bp->config_count; j += 8)
- dpa_bp_add_8_pages(dpa_bp, i);
+ dpa_bp_add_8_bufs(dpa_bp, i);
}
}
/*
- * Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * Add buffers (pages) for Rx processing whenever bpool count falls below
+ * REFILL_THRESHOLD.
+ */
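+/* A rough sketch of the behaviour below: the per-CPU count is topped up in
+ * batches of 8 until it reaches CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; if an
+ * allocation batch comes back empty we bail out and report -ENOMEM, leaving
+ * the retry to a later NAPI cycle rather than looping here.
+ */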
+int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv)
+{
+ const struct dpa_bp *dpa_bp = percpu_priv->dpa_bp;
+ int *countptr = __this_cpu_ptr(percpu_priv->dpa_bp->percpu_count);
+ int count = *countptr;
+ int new_bufs;
+
+ /* Add pages to the buffer pool */
+ while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT) {
+ new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
+ if (unlikely(!new_bufs)) {
+ /* Avoid looping forever if we've temporarily
+ * run out of memory. We'll try again at the next
+ * NAPI cycle.
+ */
+ break;
+ }
+ count += new_bufs;
+ }
+ *countptr = count;
+
+ if (unlikely(*countptr < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
* either contiguous frames or scatter/gather ones.
* Skb freeing is not handled here.
*
@@ -214,11 +239,11 @@ struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
BUG_ON(sgt[i].extension);
dma_unmap_page(dpa_bp->dev, sgt[i].addr,
- dpa_bp->size, dma_dir);
+ sgt[i].length, dma_dir);
}
/*
- * TODO: dpa_bp_add_page() ?
+ * TODO: dpa_bp_recycle_frag() ?
* We could put these in the pool, since we allocated them
* and we know they're not used by anyone else
*/
@@ -249,6 +274,57 @@ struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
return skb;
}
+#ifndef CONFIG_FSL_DPAA_TS
+static bool dpa_skb_is_recyclable(struct sk_buff *skb)
+{
+ /* No recycling possible if the skb has a userspace buffer */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+ return false;
+
+ /* or if it's cloned or shared */
+ if (skb_shared(skb) || skb_cloned(skb) ||
+ skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ return false;
+
+ /* or if it's kmalloc'ed */
+ if (skb->head_frag == 0)
+ return false;
+
+ return true;
+}
+
+static bool dpa_buf_is_recyclable(struct sk_buff *skb,
+ uint32_t min_size,
+ uint16_t min_offset,
+ unsigned char **new_buf_start)
+{
+ unsigned char *new;
+
+ /* In order to recycle a buffer, the following conditions must be met:
+ * - buffer size no less than the buffer pool size
+ * - buffer size no higher than an upper limit (to avoid moving too much
+ * system memory to the buffer pools)
+ * - buffer address aligned to cacheline bytes
+ * - offset of data from start of buffer no lower than a minimum value
+ * - offset of data from start of buffer no higher than a maximum value
+ */
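+ /* In other words: pick the latest start address that still leaves
+ * min_size bytes up to skb_end_pointer() and min_offset bytes in
+ * front of skb->data, round it down to a cacheline, then reject it
+ * if it falls before skb->head, too far ahead of skb->data for the
+ * fd offset field, or would yield an oversized buffer.
+ */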
+ new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
+
+ /* left align to the nearest cacheline */
+ new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
+
+ if (likely(new >= skb->head &&
+ new >= (skb->data - DPA_MAX_FD_OFFSET) &&
+ skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
+ *new_buf_start = new;
+ return true;
+ }
+
+ return false;
+}
+#endif /* CONFIG_FSL_DPAA_TS */
+
+
/*
* Build a linear skb around the received buffer.
* We are guaranteed there is enough room at the end of the data buffer to
@@ -258,12 +334,14 @@ static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
const struct qm_fd *fd, int *use_gro)
{
dma_addr_t addr = qm_fd_addr(fd);
+ ssize_t fd_off = dpa_fd_offset(fd);
void *vaddr;
struct dpa_bp *dpa_bp = priv->dpa_bp;
const t_FmPrsResult *parse_results;
struct sk_buff *skb = NULL;
vaddr = phys_to_virt(addr);
+ BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
/* do we need the timestamp for bad frames? */
#ifdef CONFIG_FSL_DPAA_1588
@@ -271,15 +349,21 @@ static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
dpa_ptp_store_rxstamp(priv, skb, vaddr);
#endif
- /* Build the skb and adjust data and tail pointers */
- skb = build_skb(vaddr, dpa_bp->size + DPA_SKB_TAILROOM);
+ /* Build the skb and adjust data and tail pointers, to make sure
+ * forwarded skbs will have enough space on Tx if extra headers
+ * are added.
+ *
+ * Caveat: we must make sure both skb->head and skb->end (hence,
+ * skb_shinfo) are SMP_CACHE_BYTES-aligned. The former is aligned,
+ * thanks to vaddr. We still need to adjust the size accordingly.
+ */
+ skb = build_skb(vaddr, DPA_SKB_SIZE(dpa_bp->size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
if (unlikely(!skb))
return NULL;
- /* Make sure forwarded skbs will have enough space on Tx,
- * if extra headers are added.
- */
- skb_reserve(skb, priv->tx_headroom + dpa_get_rx_extra_headroom());
+ BUG_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
skb_put(skb, dpa_fd_length(fd));
/* Peek at the parse results for csum validation */
@@ -299,17 +383,18 @@ static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
* Build an skb with the data of the first S/G entry in the linear portion and
* the rest of the frame as skb fragments.
*
- * The page holding the S/G Table is recycled here.
+ * The page fragment holding the S/G Table is recycled here.
*/
static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
const struct qm_fd *fd, int *use_gro)
{
const struct qm_sg_entry *sgt;
dma_addr_t addr = qm_fd_addr(fd);
+ ssize_t fd_off = dpa_fd_offset(fd);
dma_addr_t sg_addr;
void *vaddr, *sg_vaddr;
struct dpa_bp *dpa_bp;
- struct page *page;
+ struct page *page, *head_page;
int frag_offset, frag_len;
int page_offset;
int i;
@@ -318,6 +403,7 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
int *count_ptr;
vaddr = phys_to_virt(addr);
+ BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
#ifdef CONFIG_FSL_DPAA_1588
if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
dpa_ptp_store_rxstamp(priv, skb, vaddr);
@@ -329,7 +415,7 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
#endif /* CONFIG_FSL_DPAA_TS */
/* Iterate through the SGT entries and add data buffers to the skb */
- sgt = vaddr + dpa_fd_offset(fd);
+ sgt = vaddr + fd_off;
for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
/* Extension bit is not supported */
BUG_ON(sgt[i].extension);
@@ -340,6 +426,7 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
sg_addr = qm_sg_addr(&sgt[i]);
sg_vaddr = phys_to_virt(sg_addr);
+ BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr, SMP_CACHE_BYTES));
if (i == 0) {
/* Tentatively access the first buffer, but don't unmap
@@ -351,8 +438,8 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
/* This is the first S/G entry, so build the skb
* around its data buffer
*/
- skb = build_skb(sg_vaddr,
- dpa_bp->size + DPA_SKB_TAILROOM);
+ skb = build_skb(sg_vaddr, DPA_SKB_SIZE(dpa_bp->size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
if (unlikely(!skb))
/* dpa_fd_release() will put the current frame
* back into the pool. DMA mapping status has
@@ -375,8 +462,8 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
/* Make sure forwarded skbs will have enough space
* on Tx, if extra headers are added.
*/
- skb_reserve(skb, priv->tx_headroom +
- dpa_get_rx_extra_headroom());
+ BUG_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
skb_put(skb, sgt[i].length);
} else {
dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
@@ -386,16 +473,26 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
* be added in an skb fragment; fragment index is offset
* by one since first S/G entry was incorporated in the
* linear part of the skb.
+ *
+ * Caution: 'page' may be a tail page.
+ */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+ /* Compute offset in (possibly tail) page */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+ /* page_offset only refers to the beginning of sgt[i];
+ * but the buffer itself may have an internal offset.
*/
- page = pfn_to_page(sg_addr >> PAGE_SHIFT);
- page_offset = (unsigned long)sg_vaddr & (PAGE_SIZE - 1);
frag_offset = sgt[i].offset + page_offset;
frag_len = sgt[i].length;
- /* TODO kernel 3.8 fixup; we might want to account for
- * the true-truesize.
+ /* skb_add_rx_frag() does no checking on the page; if
+ * we pass it a tail page, we'll end up with
+ * bad page accounting and eventually with segfaults.
*/
- skb_add_rx_frag(skb, i - 1, page, frag_offset, frag_len,
- frag_len);
+ skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
+ frag_len, dpa_bp->size);
}
/* Update the pool count for the current {cpu x bpool} */
(*count_ptr)--;
@@ -403,11 +500,12 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
if (sgt[i].final)
break;
}
+ WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
- /* recycle the SGT page */
+ /* recycle the SGT fragment */
dpa_bp = dpa_bpid2pool(fd->bpid);
BUG_ON(IS_ERR(dpa_bp));
- dpa_bp_add_page(dpa_bp, (unsigned long)vaddr);
+ dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr);
return skb;
}
@@ -510,16 +608,48 @@ static int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
{
struct sk_buff **skbh;
dma_addr_t addr;
- struct dpa_bp *dpa_bp;
+ struct dpa_bp *dpa_bp = priv->dpa_bp;
struct net_device *net_dev = priv->net_dev;
int err;
+ enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ int *count_ptr = __this_cpu_ptr(dpa_bp->percpu_count);
+ unsigned char *rec_buf_start;
- /* We are guaranteed that we have at least tx_headroom bytes */
+ /* We are guaranteed to have at least tx_headroom bytes */
skbh = (struct sk_buff **)(skb->data - priv->tx_headroom);
+ fd->offset = priv->tx_headroom;
- *skbh = skb;
+#ifndef CONFIG_FSL_DPAA_TS
+ /* Check the recycling conditions, but only if timestamp support is
+ * not enabled; otherwise we need the fd back on Tx confirmation.
+ */
- dpa_bp = priv->dpa_bp;
+ /* We cannot recycle the buffer if the pool is already full */
+ if (unlikely(*count_ptr >= dpa_bp->target_count))
+ goto no_recycle;
+
+ /* ... or if the skb doesn't meet the recycling criteria */
+ if (unlikely(!dpa_skb_is_recyclable(skb)))
+ goto no_recycle;
+
+ /* ... or if buffer recycling conditions are not met */
+ if (unlikely(!dpa_buf_is_recyclable(skb, dpa_bp->size,
+ priv->tx_headroom, &rec_buf_start)))
+ goto no_recycle;
+
+ /* Buffer is recyclable; use the new start address */
+ skbh = (struct sk_buff **)rec_buf_start;
+
+ /* and set fd parameters and DMA mapping direction */
+ fd->cmd |= FM_FD_CMD_FCO;
+ fd->bpid = dpa_bp->bpid;
+ BUG_ON(skb->data - rec_buf_start > DPA_MAX_FD_OFFSET);
+ fd->offset = (uint16_t)(skb->data - rec_buf_start);
+ dma_dir = DMA_BIDIRECTIONAL;
+#endif
+
+no_recycle:
+ *skbh = skb;
/*
* Enable L3/L4 hardware checksum computation.
@@ -538,9 +668,10 @@ static int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
/* Fill in the FD */
fd->format = qm_fd_contig;
fd->length20 = skb->len;
- fd->offset = priv->tx_headroom; /* This is now guaranteed */
- addr = dma_map_single(dpa_bp->dev, skbh, dpa_bp->size, DMA_TO_DEVICE);
+ /* Map the entire buffer size that may be seen by FMan, but no more */
+ addr = dma_map_single(dpa_bp->dev, skbh,
+ skb_end_pointer(skb) - (unsigned char *)skbh, dma_dir);
if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
if (netif_msg_tx_err(priv) && net_ratelimit())
netdev_err(net_dev, "dma_map_single() failed\n");
@@ -618,12 +749,9 @@ static int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
sgt[i].extension = 0;
sgt[i].final = 0;
- /* This shouldn't happen */
- BUG_ON(!frag->page.p);
-
- addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, dpa_bp->size,
+ BUG_ON(!skb_frag_page(frag));
+ addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
dma_dir);
-
if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
dev_err(dpa_bp->dev, "DMA mapping failed");
err = -EINVAL;
@@ -644,7 +772,8 @@ static int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
skbh = (struct sk_buff **)buffer_start;
*skbh = skb;
- addr = dma_map_single(dpa_bp->dev, buffer_start, dpa_bp->size, dma_dir);
+ addr = dma_map_single(dpa_bp->dev, buffer_start,
+ skb_end_pointer(skb) - (unsigned char *)buffer_start, dma_dir);
if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
dev_err(dpa_bp->dev, "DMA mapping failed");
err = -EINVAL;
@@ -659,7 +788,7 @@ sgt_map_failed:
sg_map_failed:
for (j = 0; j < i; j++)
dma_unmap_page(dpa_bp->dev, qm_sg_addr(&sgt[j]),
- dpa_bp->size, dma_dir);
+ sgt[j].length, dma_dir);
sg0_map_failed:
csum_failed:
put_page(virt_to_head_page(sgt_buf));
@@ -676,11 +805,13 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
int err = 0;
const int queue_mapping = dpa_get_queue_mapping(skb);
const bool nonlinear = skb_is_nonlinear(skb);
+ int *countptr;
priv = netdev_priv(net_dev);
/* Non-migratable context, safe to use __this_cpu_ptr */
percpu_priv = __this_cpu_ptr(priv->percpu_priv);
percpu_stats = &percpu_priv->stats;
+ countptr = __this_cpu_ptr(percpu_priv->dpa_bp->percpu_count);
clear_fd(&fd);
@@ -754,14 +885,34 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0))
goto skb_to_fd_failed;
+ if (fd.cmd & FM_FD_CMD_FCO) {
+ /* The buffer contained in this skb will be recycled. Update
+ * the buffer pool percpu count. Also bump up the usage count
+ * of the page containing the recycled buffer to make sure it
+ * doesn't get freed.
+ */
+ (*countptr)++;
+ get_page(virt_to_head_page(skb->head));
+ percpu_priv->tx_returned++;
+ }
+
if (unlikely(dpa_xmit(priv, percpu_stats, queue_mapping, &fd) < 0))
goto xmit_failed;
+ /* If we recycled the buffer, no need to hold on to the skb anymore */
+ if (fd.cmd & FM_FD_CMD_FCO)
+ dev_kfree_skb(skb);
+
net_dev->trans_start = jiffies;
return NETDEV_TX_OK;
xmit_failed:
+ if (fd.cmd & FM_FD_CMD_FCO) {
+ (*countptr)--;
+ put_page(virt_to_head_page(skb->head));
+ percpu_priv->tx_returned--;
+ }
_dpa_cleanup_tx_fd(priv, &fd);
skb_to_fd_failed:
enomem:
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c
new file mode 100644
index 0000000..c0b9cf2
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c
@@ -0,0 +1,786 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/highmem.h>
+#include <linux/fsl_qman.h>
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
+
+/* forward declarations */
+static enum qman_cb_dqrr_result __hot
+shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+static enum qman_cb_dqrr_result __hot
+shared_tx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+static enum qman_cb_dqrr_result
+shared_tx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+static void shared_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg);
+int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
+
+#define DPA_DESCRIPTION "FSL DPAA Shared Ethernet driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+
+static uint8_t debug = -1;
+module_param(debug, byte, S_IRUGO);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+
+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
+static uint16_t tx_timeout = 1000;
+module_param(tx_timeout, ushort, S_IRUGO);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+
+static const struct of_device_id dpa_shared_match[];
+
+static const struct net_device_ops dpa_shared_ops = {
+ .ndo_open = dpa_start,
+ .ndo_start_xmit = dpa_shared_tx,
+ .ndo_stop = dpa_stop,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats64 = dpa_get_stats64,
+ .ndo_set_mac_address = dpa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+ .ndo_select_queue = dpa_select_queue,
+#endif
+ .ndo_change_mtu = dpa_change_mtu,
+ .ndo_set_rx_mode = dpa_set_rx_mode,
+ .ndo_init = dpa_ndo_init,
+ .ndo_set_features = dpa_set_features,
+ .ndo_fix_features = dpa_fix_features,
+ .ndo_do_ioctl = dpa_ioctl,
+};
+
+const dpa_fq_cbs_t shared_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = shared_rx_dqrr } },
+ .tx_defq = { .cb = { .dqrr = shared_tx_default_dqrr } },
+ .rx_errq = { .cb = { .dqrr = shared_rx_dqrr } },
+ .tx_errq = { .cb = { .dqrr = shared_tx_error_dqrr } },
+ .egress_ern = { .cb = { .ern = shared_ern } }
+};
+
+static inline void * __must_check __attribute__((nonnull))
+dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr)
+{
+ return dpa_bp->vaddr + (addr - dpa_bp->paddr);
+}
+
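+/* Pick a buffer pool whose buffers can hold @size bytes of payload plus the
+ * Tx headroom; dpa_shared_tx() uses this to find a pool to copy the skb
+ * into. The first (presumably smallest) matching pool wins.
+ */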
+static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size)
+{
+ int i;
+
+ for (i = 0; i < priv->bp_count; i++)
+ if ((size + priv->tx_headroom) <= priv->dpa_bp[i].size)
+ return dpa_bpid2pool(priv->dpa_bp[i].bpid);
+ return ERR_PTR(-ENODEV);
+}
+
+/* Copy to a memory region that requires kmapping from a linear buffer,
+ * taking into account page boundaries in the destination
+ */
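+/* For illustration (hypothetical sizes, 4 KiB pages): copying 6000 bytes
+ * starting 3000 bytes into a page is done in three kmap_atomic() chunks of
+ * 1096, 4096 and 808 bytes, so no memcpy() ever crosses a page boundary.
+ */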
+static void
+copy_to_unmapped_area(dma_addr_t phys_start, void *src, size_t buf_size)
+{
+ struct page *page;
+ size_t size, offset;
+ void *page_vaddr;
+
+ while (buf_size > 0) {
+ offset = offset_in_page(phys_start);
+ size = (offset + buf_size > PAGE_SIZE) ?
+ PAGE_SIZE - offset : buf_size;
+
+ page = pfn_to_page(phys_start >> PAGE_SHIFT);
+ page_vaddr = kmap_atomic(page);
+
+ memcpy(page_vaddr + offset, src, size);
+
+ kunmap_atomic(page_vaddr);
+
+ phys_start += size;
+ src += size;
+ buf_size -= size;
+ }
+}
+
+/* Copy from a memory region that requires kmapping to a linear buffer,
+ * taking into account page boundaries in the source
+ */
+static void
+copy_from_unmapped_area(void *dest, dma_addr_t phys_start, size_t buf_size)
+{
+ struct page *page;
+ size_t size, offset;
+ void *page_vaddr;
+
+ while (buf_size > 0) {
+ offset = offset_in_page(phys_start);
+ size = (offset + buf_size > PAGE_SIZE) ?
+ PAGE_SIZE - offset : buf_size;
+
+ page = pfn_to_page(phys_start >> PAGE_SHIFT);
+ page_vaddr = kmap_atomic(page);
+
+ memcpy(dest, page_vaddr + offset, size);
+
+ kunmap_atomic(page_vaddr);
+
+ phys_start += size;
+ dest += size;
+ buf_size -= size;
+ }
+}
+
+static void
+dpa_fd_release_sg(const struct net_device *net_dev,
+ const struct qm_fd *fd)
+{
+ const struct dpa_priv_s *priv;
+ struct qm_sg_entry *sgt;
+ struct dpa_bp *_dpa_bp;
+ struct bm_buffer _bmb, bmb[8];
+
+ priv = netdev_priv(net_dev);
+
+ _bmb.hi = fd->addr_hi;
+ _bmb.lo = fd->addr_lo;
+
+ _dpa_bp = dpa_bpid2pool(fd->bpid);
+
+ if (_dpa_bp->vaddr) {
+ sgt = dpa_phys2virt(_dpa_bp, bm_buf_addr(&_bmb)) +
+ dpa_fd_offset(fd);
+ dpa_release_sgt(sgt, bmb);
+ } else {
+ sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), GFP_ATOMIC);
+ if (sgt == NULL) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev,
+ "Memory allocation failed\n");
+ return;
+ }
+
+ copy_from_unmapped_area(sgt, bm_buf_addr(&_bmb) +
+ dpa_fd_offset(fd),
+ min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
+ _dpa_bp->size));
+ dpa_release_sgt(sgt, bmb);
+ kfree(sgt);
+ }
+
+ while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
+ cpu_relax();
+}
+
+static enum qman_cb_dqrr_result __hot
+shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+ struct dpa_bp *dpa_bp;
+ struct sk_buff *skb;
+ struct qm_sg_entry *sgt;
+ int i;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(IS_ERR(dpa_bp));
+
+	if (unlikely(fd->status & FM_FD_STAT_ERRORS)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ goto out;
+ }
+
+ skb = __netdev_alloc_skb(net_dev,
+ priv->tx_headroom + dpa_fd_length(fd),
+ GFP_ATOMIC);
+ if (unlikely(skb == NULL)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "Could not alloc skb\n");
+
+ percpu_priv->stats.rx_dropped++;
+
+ goto out;
+ }
+
+ skb_reserve(skb, priv->tx_headroom);
+
+ if (fd->format == qm_fd_sg) {
+ if (dpa_bp->vaddr) {
+ sgt = dpa_phys2virt(dpa_bp,
+ qm_fd_addr(fd)) + dpa_fd_offset(fd);
+
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+ BUG_ON(sgt[i].extension);
+
+ /* copy from sgt[i] */
+ memcpy(skb_put(skb, sgt[i].length),
+ dpa_phys2virt(dpa_bp,
+ qm_sg_addr(&sgt[i]) +
+ sgt[i].offset),
+ sgt[i].length);
+ if (sgt[i].final)
+ break;
+ }
+ } else {
+ sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
+ GFP_ATOMIC);
+			if (unlikely(sgt == NULL)) {
+				if (netif_msg_rx_err(priv) && net_ratelimit())
+					netdev_err(net_dev,
+						"Memory allocation failed\n");
+				/* Don't leak the skb, and don't return a raw
+				 * errno from a callback that must return a
+				 * qman_cb_dqrr_result.
+				 */
+				dev_kfree_skb_any(skb);
+				percpu_priv->stats.rx_dropped++;
+				goto out;
+			}
+
+ copy_from_unmapped_area(sgt,
+ qm_fd_addr(fd) + dpa_fd_offset(fd),
+ min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
+ dpa_bp->size));
+
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+ BUG_ON(sgt[i].extension);
+
+ copy_from_unmapped_area(
+ skb_put(skb, sgt[i].length),
+ qm_sg_addr(&sgt[i]) + sgt[i].offset,
+ sgt[i].length);
+
+ if (sgt[i].final)
+ break;
+ }
+
+ kfree(sgt);
+ }
+ goto skb_copied;
+ }
+
+ /* otherwise fd->format == qm_fd_contig */
+ if (dpa_bp->vaddr) {
+ /* Fill the SKB */
+ memcpy(skb_put(skb, dpa_fd_length(fd)),
+ dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) +
+ dpa_fd_offset(fd), dpa_fd_length(fd));
+ } else {
+ copy_from_unmapped_area(skb_put(skb, dpa_fd_length(fd)),
+ qm_fd_addr(fd) + dpa_fd_offset(fd),
+ dpa_fd_length(fd));
+ }
+
+skb_copied:
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ /* IP Reassembled frames are allowed to be larger than MTU */
+ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
+ !(fd->status & FM_FD_IPR))) {
+ percpu_priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ if (unlikely(netif_rx(skb) != NET_RX_SUCCESS))
+ percpu_priv->stats.rx_dropped++;
+ else {
+ percpu_priv->stats.rx_packets++;
+ percpu_priv->stats.rx_bytes += dpa_fd_length(fd);
+ }
+
+out:
+ if (fd->format == qm_fd_sg)
+ dpa_fd_release_sg(net_dev, fd);
+ else
+ dpa_fd_release(net_dev, fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+shared_tx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_bp *dpa_bp;
+ const struct qm_fd *fd = &dq->fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(IS_ERR(dpa_bp));
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
+ dpa_fd_release_sg(net_dev, fd);
+ else
+ dpa_fd_release(net_dev, fd);
+
+ percpu_priv->stats.tx_errors++;
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result __hot
+shared_tx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_bp *dpa_bp;
+ const struct qm_fd *fd = &dq->fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(IS_ERR(dpa_bp));
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+	if (unlikely(fd->status & FM_FD_STAT_ERRORS)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+ }
+
+ if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
+ dpa_fd_release_sg(net_dev, fd);
+ else
+ dpa_fd_release(net_dev, fd);
+
+ percpu_priv->tx_confirm++;
+
+ return qman_cb_dqrr_consume;
+}
+
+static void shared_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ const struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_fq *dpa_fq = (struct dpa_fq *)fq;
+
+ net_dev = dpa_fq->net_dev;
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ dpa_fd_release(net_dev, &msg->ern.fd);
+
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+ count_ern(percpu_priv, msg);
+}
+
+int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_priv_s *priv;
+ struct qm_fd fd;
+ int queue_mapping;
+ int err;
+ void *dpa_bp_vaddr;
+ t_FmPrsResult parse_results;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ memset(&fd, 0, sizeof(fd));
+ fd.format = qm_fd_contig;
+
+ queue_mapping = smp_processor_id();
+
+ dpa_bp = dpa_size2pool(priv, skb_headlen(skb));
+ if (unlikely(IS_ERR(dpa_bp))) {
+ percpu_priv->stats.tx_errors++;
+ err = PTR_ERR(dpa_bp);
+ goto bpools_too_small_error;
+ }
+
+ err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
+ if (unlikely(err <= 0)) {
+ percpu_priv->stats.tx_errors++;
+ if (err == 0)
+ err = -ENOMEM;
+ goto buf_acquire_failed;
+ }
+ fd.bpid = dpa_bp->bpid;
+
+ fd.length20 = skb_headlen(skb);
+ fd.addr_hi = bmb.hi;
+ fd.addr_lo = bmb.lo;
+ fd.offset = priv->tx_headroom;
+
+ /* The virtual address of the buffer pool is expected to be NULL
+ * in scenarios like MAC-less or Shared-MAC between Linux and
+ * USDPAA. In this case the buffers are dynamically mapped/unmapped.
+ */
+ if (dpa_bp->vaddr) {
+ dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb));
+
+ /* Copy the packet payload */
+ skb_copy_from_linear_data(skb,
+ dpa_bp_vaddr + dpa_fd_offset(&fd),
+ dpa_fd_length(&fd));
+
+ /* Enable L3/L4 hardware checksum computation, if applicable */
+ err = dpa_enable_tx_csum(priv, skb, &fd,
+ dpa_bp_vaddr + DPA_TX_PRIV_DATA_SIZE);
+ } else {
+ err = dpa_enable_tx_csum(priv, skb, &fd,
+ (char *)&parse_results);
+
+ copy_to_unmapped_area(bm_buf_addr(&bmb) + DPA_TX_PRIV_DATA_SIZE,
+ &parse_results,
+ DPA_PARSE_RESULTS_SIZE);
+
+ copy_to_unmapped_area(bm_buf_addr(&bmb) + dpa_fd_offset(&fd),
+ skb->data,
+ dpa_fd_length(&fd));
+ }
+
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "Tx HW csum error: %d\n", err);
+ percpu_priv->stats.tx_errors++;
+ goto l3_l4_csum_failed;
+ }
+
+ err = dpa_xmit(priv, &percpu_priv->stats, queue_mapping, &fd);
+
+l3_l4_csum_failed:
+bpools_too_small_error:
+buf_acquire_failed:
+ /* We're done with the skb */
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int dpa_shared_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ const uint8_t *mac_addr;
+
+ net_dev->netdev_ops = &dpa_shared_ops;
+
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
+
+ mac_addr = priv->mac_dev->addr;
+
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX);
+
+ return dpa_netdev_init(dpa_node, net_dev, mac_addr, tx_timeout);
+}
+
+static int
+dpaa_eth_shared_probe(struct platform_device *_of_dev)
+{
+ int err = 0, i;
+ struct device *dev;
+ struct device_node *dpa_node;
+ struct dpa_bp *dpa_bp;
+ struct dpa_fq *dpa_fq, *tmp;
+ size_t count;
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct fm_port_fqs port_fqs;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+ struct mac_device *mac_dev;
+ struct task_struct *kth;
+
+ dev = &_of_dev->dev;
+
+ dpa_node = dev->of_node;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+ /* Get the buffer pools assigned to this interface */
+ dpa_bp = dpa_bp_probe(_of_dev, &count);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
+
+ dpa_bp->seed_cb = dpa_bp_shared_port_seed;
+
+ /* Allocate this early, so we can store relevant information in
+ * the private area (needed by 1588 code in dpa_mac_probe)
+ */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ strcpy(priv->if_type, "shared");
+
+ priv->msg_enable = netif_msg_init(debug, -1);
+
+ mac_dev = dpa_mac_probe(_of_dev);
+ if (IS_ERR(mac_dev) || !mac_dev) {
+		err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
+ goto mac_probe_failed;
+ }
+
+ /* We have physical ports, so we need to establish
+ * the buffer layout.
+ */
+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
+ GFP_KERNEL);
+ if (!buf_layout) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ goto alloc_failed;
+ }
+ dpa_set_buffers_layout(mac_dev, buf_layout);
+
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs,
+ false, RX);
+ if (!err)
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
+ &port_fqs, false, TX);
+ if (err < 0)
+ goto fq_probe_failed;
+
+ /* bp init */
+
+ err = dpa_bp_create(net_dev, dpa_bp, count);
+
+ if (err < 0)
+ goto bp_create_failed;
+
+ priv->mac_dev = mac_dev;
+
+ priv->channel = dpa_get_channel(dev, dpa_node);
+
+ if (priv->channel < 0) {
+ err = priv->channel;
+ goto get_channel_failed;
+ }
+
+ /* Start a thread that will walk the cpus with affine portals
+ * and add this pool channel to each's dequeue mask.
+ */
+ kth = kthread_run(dpaa_eth_add_channel,
+ (void *)(unsigned long)priv->channel,
+ "dpaa_%p:%d", net_dev, priv->channel);
+	if (IS_ERR(kth)) {
+		err = PTR_ERR(kth);
+ goto add_channel_failed;
+ }
+
+ dpa_fq_setup(priv, &shared_fq_cbs, priv->mac_dev->port_dev[TX]);
+
+ /* Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+ * Must be executed after probing the MAC, but before
+ * assigning the egress FQs to the CGRs.
+ */
+ err = dpaa_eth_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing CGR\n");
+ goto cgr_init_failed;
+ }
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
+ err = dpa_fq_init(dpa_fq, false);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ priv->buf_layout = buf_layout;
+ priv->tx_headroom =
+ dpa_get_headroom(&priv->buf_layout[TX]);
+
+ /* All real interfaces need their ports initialized */
+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
+ buf_layout, dev);
+
+ /* Now we need to initialize either a private or shared interface */
+ priv->percpu_priv = alloc_percpu(*priv->percpu_priv);
+
+ if (priv->percpu_priv == NULL) {
+ dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+ err = dpa_shared_netdev_init(dpa_node, net_dev);
+
+ if (err < 0)
+ goto netdev_init_failed;
+
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+ printk(KERN_INFO "fsl_dpa_shared: Probed shared interface %s\n", net_dev->name);
+
+ return 0;
+
+netdev_init_failed:
+ if (net_dev)
+ free_percpu(priv->percpu_priv);
+alloc_percpu_failed:
+fq_alloc_failed:
+ if (net_dev) {
+ dpa_fq_free(dev, &priv->dpa_fq_list);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ qman_delete_cgr(&priv->cgr_data.cgr);
+ }
+cgr_init_failed:
+add_channel_failed:
+get_channel_failed:
+ if (net_dev)
+ dpa_bp_free(priv, priv->dpa_bp);
+bp_create_failed:
+fq_probe_failed:
+ devm_kfree(dev, buf_layout);
+alloc_failed:
+mac_probe_failed:
+ dev_set_drvdata(dev, NULL);
+ if (net_dev)
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static const struct of_device_id dpa_shared_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet-shared"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpa_shared_match);
+
+static struct platform_driver dpa_shared_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = dpa_shared_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa_eth_shared_probe,
+ .remove = dpa_remove
+};
+
+static int __init __cold dpa_shared_load(void)
+{
+ int _errno;
+
+ printk(KERN_INFO KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n");
+
+ /* Initialize dpaa_eth mirror values */
+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
+ dpa_max_frm = fm_get_max_frm();
+
+ _errno = platform_driver_register(&dpa_shared_driver);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+module_init(dpa_shared_load);
+
+static void __exit __cold dpa_shared_unload(void)
+{
+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
+ KBUILD_BASENAME".c", __func__);
+
+ platform_driver_unregister(&dpa_shared_driver);
+}
+module_exit(dpa_shared_unload);
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c
index 7b7c103..9e293da 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c
@@ -41,8 +41,6 @@
#include "dpaa_1588.h"
#endif
-static u8 macless_idx;
-
static ssize_t dpaa_eth_show_addr(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -60,13 +58,12 @@ static ssize_t dpaa_eth_show_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+ ssize_t res = 0;
- if (!priv->mac_dev)
- return sprintf(buf, "macless%d", priv->macless_idx);
- else if (priv->shared)
- return sprintf(buf, "shared");
- else
- return sprintf(buf, "private");
+ if (priv)
+ res = sprintf(buf, "%s", priv->if_type);
+
+ return res;
}
static ssize_t dpaa_eth_show_fqids(struct device *dev,
@@ -95,7 +92,10 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
str = "Rx PCD";
break;
case FQ_TYPE_TX_CONFIRM:
- str = "Tx confirmation";
+ str = "Tx default confirmation";
+ break;
+ case FQ_TYPE_TX_CONF_MQ:
+ str = "Tx confirmation (mq)";
break;
case FQ_TYPE_TX_ERROR:
str = "Tx error";
@@ -167,7 +167,7 @@ static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
struct mac_device *mac_dev = priv->mac_dev;
if (mac_dev)
- fm_mac_dump_regs(mac_dev);
+ fm_mac_dump_regs(mac_dev->get_mac_handle(mac_dev));
else
return sprintf(buf, "no mac registers\n");
@@ -228,23 +228,15 @@ static struct device_attribute dpaa_eth_attrs[] = {
void dpaa_eth_sysfs_init(struct device *dev)
{
- struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
int i;
for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
if (device_create_file(dev, &dpaa_eth_attrs[i])) {
dev_err(dev, "Error creating sysfs file\n");
- goto device_create_file_failed;
+ while (i > 0)
+ device_remove_file(dev, &dpaa_eth_attrs[--i]);
+ return;
}
-
- if (!priv->mac_dev)
- priv->macless_idx = macless_idx++;
-
- return;
-
-device_create_file_failed:
- while (i > 0)
- device_remove_file(dev, &dpaa_eth_attrs[--i]);
}
void dpaa_eth_sysfs_remove(struct device *dev)
@@ -259,5 +251,3 @@ void dpaa_eth_sysfs_remove(struct device *dev)
-
-
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c
new file mode 100644
index 0000000..62095a7
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/module.h>
+#include <linux/fsl_bman.h>
+
+#include "dpaa_eth.h"
+#include "dpaa_eth_unit_test.h"
+
+static bool tx_unit_test_passed = true;
+static unsigned char *tx_unit_skb_head;
+static unsigned char *tx_unit_skb_end;
+static int tx_unit_tested;
+static struct dpa_fq unit_fq;
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+static struct dpa_fq unit_recycle_fq;
+#endif
+static bool tx_unit_test_ran; /* initialized as false */
+
+static void tx_unit_test_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct sk_buff **skbh;
+ struct sk_buff *skb;
+ const struct qm_fd *fd;
+ dma_addr_t addr;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ tx_unit_test_passed = false;
+
+ fd = &msg->ern.fd;
+
+ addr = qm_fd_addr(fd);
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+ if (!skb || !is_kernel_addr((unsigned long)skb))
+ panic("Corrupt skb in ERN!\n");
+
+ kfree_skb(skb);
+}
+
+static enum qman_cb_dqrr_result tx_unit_test_dqrr(
+ struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct sk_buff **skbh;
+ struct sk_buff *skb;
+ const struct qm_fd *fd;
+ dma_addr_t addr;
+ unsigned char *startaddr;
+ struct dpa_percpu_priv_s *percpu_priv;
+ int *countptr;
+
+ tx_unit_test_passed = false;
+
+ tx_unit_tested++;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ fd = &dq->fd;
+
+ addr = qm_fd_addr(fd);
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ startaddr = (unsigned char *)skbh;
+ skb = *skbh;
+
+ if (!skb || !is_kernel_addr((unsigned long)skb))
+ panic("Invalid skb address in TX Unit Test FD\n");
+
+ /* Make sure we're dealing with the same skb */
+ if (skb->head != tx_unit_skb_head
+ || skb_end_pointer(skb) != tx_unit_skb_end)
+ goto out;
+
+ /* If we recycled, then there must be enough room between fd.addr
+ * and skb->end for a new RX buffer
+ */
+ if (fd->cmd & FM_FD_CMD_FCO) {
+ size_t bufsize = skb_end_pointer(skb) - startaddr;
+
+ if (bufsize < dpa_get_max_frm())
+ goto out;
+ } else {
+ /*
+ * If we didn't recycle, but the buffer was big enough,
+ * increment the counter to put it back
+ */
+ countptr = __this_cpu_ptr(priv->dpa_bp->percpu_count);
+ if (skb_end_pointer(skb) - skb->head >=
+ dpa_get_max_frm())
+ (*countptr)++;
+
+ /* If we didn't recycle, the data pointer should be good */
+ if (skb->data != startaddr + dpa_fd_offset(fd))
+ goto out;
+ }
+
+ tx_unit_test_passed = true;
+out:
+ /* The skb is no longer needed, and belongs to us */
+ kfree_skb(skb);
+
+ return qman_cb_dqrr_consume;
+}
+
+static const struct qman_fq tx_unit_test_fq = {
+ .cb = { .dqrr = tx_unit_test_dqrr, .ern = tx_unit_test_ern }
+};
+
+static int dpa_tx_unit_test(struct net_device *net_dev)
+{
+ /* Create a new FQ */
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ struct qman_fq *oldq;
+ int size, headroom;
+ struct dpa_percpu_priv_s *percpu_priv;
+ cpumask_var_t old_cpumask;
+ int test_count = 0;
+ int err = 0;
+ int tests_failed = 0;
+ const cpumask_t *cpus = qman_affine_cpus();
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+ struct qman_fq *oldrecycleq;
+#endif
+
+ if (!alloc_cpumask_var(&old_cpumask, GFP_KERNEL)) {
+ pr_err("UNIT test cpumask allocation failed\n");
+ return -ENOMEM;
+ }
+
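+	/* Run the test on a CPU that owns an affine QMan portal, so the
+	 * portal can be polled directly below.
+	 */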
+ cpumask_copy(old_cpumask, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpus);
+ /* disable bottom halves */
+ local_bh_disable();
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
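+	/* The test polls DQRR itself; the IRQ source is restored at the end */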
+ qman_irqsource_remove(QM_PIRQ_DQRI);
+ unit_fq.net_dev = net_dev;
+ unit_fq.fq_base = tx_unit_test_fq;
+
+ /* Save old queue */
+ oldq = priv->egress_fqs[smp_processor_id()];
+
+ err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &unit_fq.fq_base);
+
+ if (err < 0) {
+ pr_err("UNIT test FQ create failed: %d\n", err);
+ goto fq_create_fail;
+ }
+
+ err = qman_init_fq(&unit_fq.fq_base,
+ QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err < 0) {
+ pr_err("UNIT test FQ init failed: %d\n", err);
+ goto fq_init_fail;
+ }
+
+	/* Replace this CPU's egress FQ with the test FQ */
+ priv->egress_fqs[smp_processor_id()] = &unit_fq.fq_base;
+
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+ oldrecycleq = priv->recycle_fqs[smp_processor_id()];
+ unit_recycle_fq.net_dev = net_dev;
+ unit_recycle_fq.fq_base = tx_unit_test_fq;
+
+ err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID,
+ &unit_recycle_fq.fq_base);
+
+ if (err < 0) {
+ pr_err("UNIT test Recycle FQ create failed: %d\n", err);
+ goto recycle_fq_create_fail;
+ }
+
+ err = qman_init_fq(&unit_recycle_fq.fq_base,
+ QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err < 0) {
+ pr_err("UNIT test Recycle FQ init failed: %d\n", err);
+ goto recycle_fq_init_fail;
+ }
+
+ priv->recycle_fqs[smp_processor_id()] = &unit_recycle_fq.fq_base;
+
+ pr_err("TX Unit Test using FQ: %d - Recycle FQ: %d\n",
+ qman_fq_fqid(&unit_fq.fq_base),
+ qman_fq_fqid(&unit_recycle_fq.fq_base));
+#else
+ pr_err("TX Unit Test using FQ %d\n", qman_fq_fqid(&unit_fq.fq_base));
+#endif
+
+	/* Try packet sizes from 64 bytes to just above the maximum frame size */
+ for (size = 64; size <= 9600 + 128; size += 64) {
+ for (headroom = priv->tx_headroom; headroom < 0x800;
+ headroom += 16) {
+ int ret;
+ struct sk_buff *skb;
+ int *countptr =
+ __this_cpu_ptr(priv->dpa_bp->percpu_count);
+
+ test_count++;
+
+ skb = dev_alloc_skb(size + headroom);
+
+ if (!skb) {
+ pr_err("Failed to allocate skb\n");
+ err = -ENOMEM;
+ goto end_test;
+ }
+
+ if (skb_end_pointer(skb) - skb->head >=
+ dpa_get_max_frm())
+ (*countptr)--;
+
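+			/* Leave 'size' bytes of data, starting 'headroom'
+			 * bytes into the buffer
+			 */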
+ skb_put(skb, size + headroom);
+ skb_pull(skb, headroom);
+
+ tx_unit_skb_head = skb->head;
+ tx_unit_skb_end = skb_end_pointer(skb);
+
+ skb_set_queue_mapping(skb, smp_processor_id());
+
+			/* Transmit the test frame through the driver's xmit handler */
+ ret = net_dev->netdev_ops->ndo_start_xmit(skb, net_dev);
+
+ if (ret != NETDEV_TX_OK) {
+ pr_err("Failed to TX with err %d\n", ret);
+ err = -EIO;
+ goto end_test;
+ }
+
+ /* Wait for it to arrive */
+ ret = spin_event_timeout(qman_poll_dqrr(1) != 0,
+ 100000, 1);
+
+ if (!ret) {
+ pr_err("TX Packet never arrived\n");
+				/* Count the test as failed */
+ tests_failed++;
+ }
+
+ /* Was it good? */
+			if (!tx_unit_test_passed) {
+ pr_err("Test failed:\n");
+ pr_err("size: %d pad: %d head: %p end: %p\n",
+ size, headroom, tx_unit_skb_head,
+ tx_unit_skb_end);
+ tests_failed++;
+ }
+ }
+ }
+
+end_test:
+ err = qman_retire_fq(&unit_fq.fq_base, NULL);
+ if (unlikely(err < 0))
+ pr_err("Could not retire TX Unit Test FQ (%d)\n", err);
+
+ err = qman_oos_fq(&unit_fq.fq_base);
+ if (unlikely(err < 0))
+ pr_err("Could not OOS TX Unit Test FQ (%d)\n", err);
+
+#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
+ err = qman_retire_fq(&unit_recycle_fq.fq_base, NULL);
+ if (unlikely(err < 0))
+ pr_err("Could not retire Recycle TX Unit Test FQ (%d)\n", err);
+
+ err = qman_oos_fq(&unit_recycle_fq.fq_base);
+ if (unlikely(err < 0))
+ pr_err("Could not OOS Recycle TX Unit Test FQ (%d)\n", err);
+
+recycle_fq_init_fail:
+ qman_destroy_fq(&unit_recycle_fq.fq_base, 0);
+
+recycle_fq_create_fail:
+ priv->recycle_fqs[smp_processor_id()] = oldrecycleq;
+#endif
+
+fq_init_fail:
+ qman_destroy_fq(&unit_fq.fq_base, 0);
+
+fq_create_fail:
+ priv->egress_fqs[smp_processor_id()] = oldq;
+ local_bh_enable();
+ qman_irqsource_add(QM_PIRQ_DQRI);
+ tx_unit_test_ran = true;
+ set_cpus_allowed_ptr(current, old_cpumask);
+ free_cpumask_var(old_cpumask);
+
+ pr_err("Tested %d/%d packets. %d failed\n", test_count, tx_unit_tested,
+ tests_failed);
+
+ if (tests_failed)
+ err = -EINVAL;
+
+ /* Reset counters */
+ memset(&percpu_priv->stats, 0, sizeof(percpu_priv->stats));
+
+ return err;
+}
+
+extern struct dpa_bp *dpa_bpid2pool(int bpid);
+
+void dpa_unit_test_drain_default_pool(struct net_device *net_dev)
+{
+ int i;
+ int num;
+ struct dpa_priv_s *priv;
+ struct dpa_bp *default_pool = dpa_bpid2pool(dpa_priv_common_bpid);
+
+ priv = netdev_priv(net_dev);
+
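+	/* Acquire buffers from BMan in batches of up to 8, unmapping and
+	 * freeing each one, until the pool runs dry.
+	 */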
+ do {
+ struct bm_buffer bmb[8];
+
+ num = bman_acquire(default_pool->pool, bmb, 8, 0);
+
+ for (i = 0; i < num; i++) {
+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
+
+ dma_unmap_single(default_pool->dev, addr,
+ default_pool->size,
+ DMA_BIDIRECTIONAL);
+
+ _dpa_bp_free_buf(phys_to_virt(addr));
+ }
+ } while (num == 8);
+
+	/* the pool is now empty, so reset the per-cpu buffer counters */
+ for_each_online_cpu(i) {
+ int *countptr = per_cpu_ptr(default_pool->percpu_count, i);
+ *countptr = 0;
+ }
+}
+
+void dpa_unit_test_seed_default_pool(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
+ struct dpa_bp *default_pool = dpa_bpid2pool(dpa_priv_common_bpid);
+
+ priv = netdev_priv(net_dev);
+
+#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
+ default_pool->size = dpa_bp_default_buf_size_get();
+#endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
+ dpa_bp_priv_seed(default_pool);
+}
+
+void dpa_unit_tests(struct net_device *net_dev)
+{
+ int err;
+
+ /* the unit tests use the default pool */
+ if (!dpa_priv_common_bpid)
+ return;
+
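+	/* Run the Tx test only once: seed the default pool, run the test,
+	 * then drain the pool again.
+	 */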
+ if (!tx_unit_test_ran) {
+ dpa_unit_test_seed_default_pool(net_dev);
+
+ err = dpa_tx_unit_test(net_dev);
+ WARN_ON(err);
+
+ dpa_unit_test_drain_default_pool(net_dev);
+ }
+}
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.h
new file mode 100644
index 0000000..0343e43
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPAA_ETH_UNIT_TEST_H_
+#define DPAA_ETH_UNIT_TEST_H_
+
+extern int dpa_max_frm;
+extern uint8_t dpa_priv_common_bpid;
+
+void dpa_unit_tests(struct net_device *net_dev);
+
+#endif /* DPAA_ETH_UNIT_TEST_H_ */
diff --git a/drivers/net/ethernet/freescale/dpa/mac-api.c b/drivers/net/ethernet/freescale/dpa/mac-api.c
index 501b082..2ea6a72 100644
--- a/drivers/net/ethernet/freescale/dpa/mac-api.c
+++ b/drivers/net/ethernet/freescale/dpa/mac-api.c
@@ -40,13 +40,11 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
-#include "dpaa_eth-common.h"
#include "dpaa_eth.h"
#include "mac.h"
+#include "lnxwrp_fsl_fman.h"
#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
-#include "fm_mac_ext.h"
-#include "fm_rtc_ext.h"
#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
@@ -57,7 +55,7 @@ MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
MODULE_DESCRIPTION(MAC_DESCRIPTION);
struct mac_priv_s {
- t_Handle mac;
+ struct fm_mac_dev *fm_mac;
};
const char *mac_driver_description __initconst = MAC_DESCRIPTION;
@@ -108,8 +106,7 @@ static void mac_exception(t_Handle _mac_dev, e_FmMacExceptions exception)
if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
/* don't flag RX FIFO after the first */
- FM_MAC_SetException(
- ((struct mac_priv_s *)macdev_priv(_mac_dev))->mac,
+ fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
printk(KERN_ERR "10G MAC got RX FIFO Error = %x\n", exception);
}
@@ -121,7 +118,6 @@ static void mac_exception(t_Handle _mac_dev, e_FmMacExceptions exception)
static int __cold init(struct mac_device *mac_dev)
{
int _errno;
- t_Error err;
struct mac_priv_s *priv;
t_FmMacParams param;
uint32_t version;
@@ -133,101 +129,71 @@ static int __cold init(struct mac_device *mac_dev)
param.enetMode = macdev2enetinterface(mac_dev);
memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
sizeof(mac_dev->addr)));
- param.macId = mac_dev->cell_index;
- param.h_Fm = (t_Handle)mac_dev->fm;
+ param.macId = mac_dev->cell_index;
+ param.h_Fm = (t_Handle)mac_dev->fm;
param.mdioIrq = NO_IRQ;
param.f_Exception = mac_exception;
param.f_Event = mac_exception;
- param.h_App = mac_dev;
+ param.h_App = mac_dev;
- priv->mac = FM_MAC_Config(&param);
- if (unlikely(priv->mac == NULL)) {
- dev_err(mac_dev->dev, "FM_MAC_Config() failed\n");
+ priv->fm_mac = fm_mac_config(&param);
+ if (unlikely(priv->fm_mac == NULL)) {
_errno = -EINVAL;
goto _return;
}
- fm_mac_set_handle(mac_dev->fm_dev, priv->mac,
+ fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
(macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
- err = FM_MAC_ConfigMaxFrameLength(priv->mac,
+ _errno = fm_mac_config_max_frame_length(priv->fm_mac,
fm_get_max_frm());
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev,
- "FM_MAC_ConfigMaxFrameLength() = 0x%08x\n", err);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
/* 10G always works with pad and CRC */
- err = FM_MAC_ConfigPadAndCrc(priv->mac, true);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev,
- "FM_MAC_ConfigPadAndCrc() = 0x%08x\n", err);
+ _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
- err = FM_MAC_ConfigHalfDuplex(priv->mac, mac_dev->half_duplex);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev,
- "FM_MAC_ConfigHalfDuplex() = 0x%08x\n", err);
+ _errno = fm_mac_config_half_duplex(priv->fm_mac,
+ mac_dev->half_duplex);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
}
else {
- err = FM_MAC_ConfigResetOnInit(priv->mac, true);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev,
- "FM_MAC_ConfigResetOnInit() = 0x%08x\n", err);
+ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
}
- err = FM_MAC_Init(priv->mac);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev, "FM_MAC_Init() = 0x%08x\n", err);
+ _errno = fm_mac_init(priv->fm_mac);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
/* For 1G MAC, disable by default the MIB counters overflow interrupt */
if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
- err = FM_MAC_SetException(priv->mac,
+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev,
- "FM_MAC_SetException() = 0x%08x\n", err);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
}
#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
/* For 10G MAC, disable Tx ECC exception */
if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
- err = FM_MAC_SetException(priv->mac,
+ _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev,
- "FM_MAC_SetException() = 0x%08x\n", err);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
}
- err = FM_MAC_GetVesrion(priv->mac, &version);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev, "FM_MAC_GetVesrion() = 0x%08x\n",
- err);
+ _errno = fm_mac_get_version(priv->fm_mac, &version);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
+
dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
"dTSEC" : "XGEC"), version);
@@ -236,9 +202,8 @@ static int __cold init(struct mac_device *mac_dev)
_return_fm_mac_free:
- err = FM_MAC_Free(priv->mac);
- if (unlikely(-GET_ERROR_TYPE(err) < 0))
- dev_err(mac_dev->dev, "FM_MAC_Free() = 0x%08x\n", err);
+ fm_mac_free(mac_dev->get_mac_handle(mac_dev));
+
_return:
return _errno;
}
@@ -246,7 +211,6 @@ _return:
static int __cold memac_init(struct mac_device *mac_dev)
{
int _errno;
- t_Error err;
struct mac_priv_s *priv;
t_FmMacParams param;
@@ -263,44 +227,31 @@ static int __cold memac_init(struct mac_device *mac_dev)
param.f_Event = mac_exception;
param.h_App = mac_dev;
- priv->mac = FM_MAC_Config(&param);
- if (unlikely(priv->mac == NULL)) {
- dev_err(mac_dev->dev, "FM_MAC_Config() failed\n");
+ priv->fm_mac = fm_mac_config(&param);
+ if (unlikely(priv->fm_mac == NULL)) {
_errno = -EINVAL;
goto _return;
}
- err = FM_MAC_ConfigMaxFrameLength(priv->mac, fm_get_max_frm());
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev,
- "FM_MAC_ConfigMaxFrameLength() = 0x%08x\n", err);
+ _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
- err = FM_MAC_ConfigResetOnInit(priv->mac, true);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev,
- "FM_MAC_ConfigResetOnInit() = 0x%08x\n", err);
+ _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
- err = FM_MAC_Init(priv->mac);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0)) {
- dev_err(mac_dev->dev, "FM_MAC_Init() = 0x%08x\n", err);
+ _errno = fm_mac_init(priv->fm_mac);
+ if (unlikely(_errno < 0))
goto _return_fm_mac_free;
- }
dev_info(mac_dev->dev, "FMan MEMAC\n");
goto _return;
_return_fm_mac_free:
- err = FM_MAC_Free(priv->mac);
- if (unlikely(-GET_ERROR_TYPE(err) < 0))
- dev_err(mac_dev->dev, "FM_MAC_Free() = 0x%08x\n", err);
+ fm_mac_free(priv->fm_mac);
+
_return:
return _errno;
}
@@ -308,16 +259,11 @@ _return:
static int __cold start(struct mac_device *mac_dev)
{
int _errno;
- t_Error err;
struct phy_device *phy_dev = mac_dev->phy_dev;
- err = FM_MAC_Enable(((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
- e_COMM_MODE_RX_AND_TX);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_MAC_Enable() = 0x%08x\n", err);
+ _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
- if (phy_dev) {
+ if (!_errno && phy_dev) {
if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000)
phy_start(phy_dev);
else if (phy_dev->drv->read_status)
@@ -329,36 +275,11 @@ static int __cold start(struct mac_device *mac_dev)
static int __cold stop(struct mac_device *mac_dev)
{
- int _errno;
- t_Error err;
-
if (mac_dev->phy_dev &&
(macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000))
phy_stop(mac_dev->phy_dev);
- err = FM_MAC_Disable(((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
- e_COMM_MODE_RX_AND_TX);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_MAC_Disable() = 0x%08x\n", err);
-
- return _errno;
-}
-
-static int __cold change_promisc(struct mac_device *mac_dev)
-{
- int _errno;
- t_Error err;
-
- err = FM_MAC_SetPromiscuous(
- ((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
- mac_dev->promisc = !mac_dev->promisc);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev,
- "FM_MAC_SetPromiscuous() = 0x%08x\n", err);
-
- return _errno;
+ return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
}
static int __cold set_multi(struct net_device *net_dev)
@@ -369,7 +290,6 @@ static int __cold set_multi(struct net_device *net_dev)
struct mac_address *old_addr, *tmp;
struct netdev_hw_addr *ha;
int _errno;
- t_Error err;
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
@@ -377,28 +297,22 @@ static int __cold set_multi(struct net_device *net_dev)
/* Clear previous address list */
list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
- err = FM_MAC_RemoveHashMacAddr(mac_priv->mac,
- (t_EnetAddr *)old_addr->addr);
- _errno = -GET_ERROR_TYPE(err);
- if (_errno < 0) {
- dev_err(mac_dev->dev,
- "FM_MAC_RemoveHashMacAddr() = 0x%08x\n", err);
+ _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
+ (t_EnetAddr *)old_addr->addr);
+ if (_errno < 0)
return _errno;
- }
+
list_del(&old_addr->list);
kfree(old_addr);
}
/* Add all the addresses from the new list */
netdev_for_each_mc_addr(ha, net_dev) {
- err = FM_MAC_AddHashMacAddr(mac_priv->mac,
+ _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
(t_EnetAddr *)ha->addr);
- _errno = -GET_ERROR_TYPE(err);
- if (_errno < 0) {
- dev_err(mac_dev->dev,
- "FM_MAC_AddHashMacAddr() = 0x%08x\n", err);
+ if (_errno < 0)
return _errno;
- }
+
tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
if (!tmp) {
dev_err(mac_dev->dev, "Out of memory\n");
@@ -410,50 +324,14 @@ static int __cold set_multi(struct net_device *net_dev)
return 0;
}
-static int __cold change_addr(struct mac_device *mac_dev, uint8_t *addr)
-{
- int _errno;
- t_Error err;
-
- err = FM_MAC_ModifyMacAddr(
- ((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
- (t_EnetAddr *)addr);
- _errno = -GET_ERROR_TYPE(err);
- if (_errno < 0)
- dev_err(mac_dev->dev,
- "FM_MAC_ModifyMacAddr() = 0x%08x\n", err);
-
- return _errno;
-}
-
static void adjust_link(struct net_device *net_dev)
{
struct dpa_priv_s *priv = netdev_priv(net_dev);
struct mac_device *mac_dev = priv->mac_dev;
struct phy_device *phy_dev = mac_dev->phy_dev;
-#if (DPAA_VERSION < 11)
- struct mac_priv_s *mac_priv;
-#endif
- int _errno;
- t_Error err;
-
- if (!phy_dev->link) {
-#if (DPAA_VERSION < 11)
- mac_priv = (struct mac_priv_s *)macdev_priv(mac_dev);
- FM_MAC_RestartAutoneg(mac_priv->mac);
-#endif
- return;
- }
- err = FM_MAC_AdjustLink(
- ((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
- phy_dev->speed, phy_dev->duplex);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_MAC_AdjustLink() = 0x%08x\n",
- err);
-
- return;
+ fm_mac_adjust_link(mac_dev->get_mac_handle(mac_dev),
+ phy_dev->link, phy_dev->speed, phy_dev->duplex);
}
/* Initializes driver's PHY state, and attaches to the PHY.
@@ -561,256 +439,25 @@ static int memac_init_phy(struct net_device *net_dev)
return 0;
}
-static int __cold uninit(struct mac_device *mac_dev)
+static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
{
int _errno, __errno;
- t_Error err;
- const struct mac_priv_s *priv;
- priv = macdev_priv(mac_dev);
-
- err = FM_MAC_Disable(priv->mac, e_COMM_MODE_RX_AND_TX);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_MAC_Disable() = 0x%08x\n", err);
+ _errno = fm_mac_disable(fm_mac_dev);
+ __errno = fm_mac_free(fm_mac_dev);
- err = FM_MAC_Free(priv->mac);
- __errno = -GET_ERROR_TYPE(err);
if (unlikely(__errno < 0)) {
- dev_err(mac_dev->dev, "FM_MAC_Free() = 0x%08x\n", err);
- if (_errno < 0)
- _errno = __errno;
+ _errno = __errno;
}
return _errno;
}
-static int __cold ptp_enable(struct mac_device *mac_dev)
+static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
{
- int _errno;
- t_Error err;
const struct mac_priv_s *priv;
-
priv = macdev_priv(mac_dev);
-
- err = FM_MAC_Enable1588TimeStamp(priv->mac);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_MAC_Enable1588TimeStamp()"
- "= 0x%08x\n", err);
- return _errno;
-}
-
-static int __cold ptp_disable(struct mac_device *mac_dev)
-{
- int _errno;
- t_Error err;
- const struct mac_priv_s *priv;
-
- priv = macdev_priv(mac_dev);
-
- err = FM_MAC_Disable1588TimeStamp(priv->mac);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_MAC_Disable1588TimeStamp()"
- "= 0x%08x\n", err);
- return _errno;
-}
-
-static void *get_mac_handle(struct mac_device *mac_dev)
-{
- const struct mac_priv_s *priv;
- priv = macdev_priv(mac_dev);
- return (void*)priv->mac;
-}
-
-static int __cold set_rx_pause(struct mac_device *mac_dev, bool en)
-{
- int _errno;
- t_Error err;
-
- /* if rx pause is enabled, do NOT ignore pause frames */
- err = FM_MAC_SetRxIgnorePauseFrames(
- ((struct mac_priv_s *)macdev_priv(mac_dev))->mac, !en);
-
- _errno = -GET_ERROR_TYPE(err);
- if (_errno < 0)
- dev_err(mac_dev->dev,
- "FM_MAC_SetRxIgnorePauseFrames() = 0x%08x\n", err);
-
- return _errno;
-}
-
-static int __cold set_tx_pause(struct mac_device *mac_dev, bool en)
-{
- int _errno;
- t_Error err;
-
- if (en)
- err = FM_MAC_SetTxPauseFrames(
- ((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
- TX_PAUSE_PRIO_DEFAULT,
- TX_PAUSE_TIME_ENABLE,
- TX_PAUSE_THRESH_DEFAULT);
- else
- err = FM_MAC_SetTxPauseFrames(
- ((struct mac_priv_s *)macdev_priv(mac_dev))->mac,
- TX_PAUSE_PRIO_DEFAULT,
- TX_PAUSE_TIME_DISABLE,
- TX_PAUSE_THRESH_DEFAULT);
-
- _errno = -GET_ERROR_TYPE(err);
- if (_errno < 0)
- dev_err(mac_dev->dev,
- "FM_MAC_SetTxPauseFrames() = 0x%08x\n", err);
-
- return _errno;
-}
-
-static int __cold fm_rtc_enable(struct net_device *net_dev)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct mac_device *mac_dev = priv->mac_dev;
- int _errno;
- t_Error err;
-
- err = FM_RTC_Enable(fm_get_rtc_handle(mac_dev->fm_dev), 0);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_RTC_Enable = 0x%08x\n", err);
-
- return _errno;
-}
-
-static int __cold fm_rtc_disable(struct net_device *net_dev)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct mac_device *mac_dev = priv->mac_dev;
- int _errno;
- t_Error err;
-
- err = FM_RTC_Disable(fm_get_rtc_handle(mac_dev->fm_dev));
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_RTC_Disable = 0x%08x\n", err);
-
- return _errno;
-}
-
-static int __cold fm_rtc_get_cnt(struct net_device *net_dev, uint64_t *ts)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct mac_device *mac_dev = priv->mac_dev;
- int _errno;
- t_Error err;
-
- err = FM_RTC_GetCurrentTime(fm_get_rtc_handle(mac_dev->fm_dev), ts);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_RTC_GetCurrentTime = 0x%08x\n",
- err);
-
- return _errno;
-}
-
-static int __cold fm_rtc_set_cnt(struct net_device *net_dev, uint64_t ts)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct mac_device *mac_dev = priv->mac_dev;
- int _errno;
- t_Error err;
-
- err = FM_RTC_SetCurrentTime(fm_get_rtc_handle(mac_dev->fm_dev), ts);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_RTC_SetCurrentTime = 0x%08x\n",
- err);
-
- return _errno;
-}
-
-static int __cold fm_rtc_get_drift(struct net_device *net_dev, uint32_t *drift)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct mac_device *mac_dev = priv->mac_dev;
- int _errno;
- t_Error err;
-
- err = FM_RTC_GetFreqCompensation(fm_get_rtc_handle(mac_dev->fm_dev),
- drift);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_RTC_GetFreqCompensation ="
- "0x%08x\n", err);
-
- return _errno;
-}
-
-static int __cold fm_rtc_set_drift(struct net_device *net_dev, uint32_t drift)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct mac_device *mac_dev = priv->mac_dev;
- int _errno;
- t_Error err;
-
- err = FM_RTC_SetFreqCompensation(fm_get_rtc_handle(mac_dev->fm_dev),
- drift);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_RTC_SetFreqCompensation ="
- "0x%08x\n", err);
-
- return _errno;
-}
-
-static int __cold fm_rtc_set_alarm(struct net_device *net_dev, uint32_t id,
- uint64_t time)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct mac_device *mac_dev = priv->mac_dev;
- t_FmRtcAlarmParams alarm;
- int _errno;
- t_Error err;
-
- alarm.alarmId = id;
- alarm.alarmTime = time;
- alarm.f_AlarmCallback = NULL;
- err = FM_RTC_SetAlarm(fm_get_rtc_handle(mac_dev->fm_dev),
- &alarm);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_RTC_SetAlarm ="
- "0x%08x\n", err);
-
- return _errno;
-}
-
-static int __cold fm_rtc_set_fiper(struct net_device *net_dev, uint32_t id,
- uint64_t fiper)
-{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
- struct mac_device *mac_dev = priv->mac_dev;
- t_FmRtcPeriodicPulseParams pp;
- int _errno;
- t_Error err;
-
- pp.periodicPulseId = id;
- pp.periodicPulsePeriod = fiper;
- pp.f_PeriodicPulseCallback = NULL;
- err = FM_RTC_SetPeriodicPulse(fm_get_rtc_handle(mac_dev->fm_dev), &pp);
- _errno = -GET_ERROR_TYPE(err);
- if (unlikely(_errno < 0))
- dev_err(mac_dev->dev, "FM_RTC_SetPeriodicPulse ="
- "0x%08x\n", err);
-
- return _errno;
-}
-
-void fm_mac_dump_regs(struct mac_device *mac_dev)
-{
- struct mac_priv_s *mac_priv = macdev_priv(mac_dev);
-
- FM_MAC_DumpRegs(mac_priv->mac);
+ return priv->fm_mac;
}
static void __cold setup_dtsec(struct mac_device *mac_dev)
@@ -819,15 +466,15 @@ static void __cold setup_dtsec(struct mac_device *mac_dev)
mac_dev->init = init;
mac_dev->start = start;
mac_dev->stop = stop;
- mac_dev->change_promisc = change_promisc;
- mac_dev->change_addr = change_addr;
+ mac_dev->set_promisc = fm_mac_set_promiscuous;
+ mac_dev->change_addr = fm_mac_modify_mac_addr;
mac_dev->set_multi = set_multi;
mac_dev->uninit = uninit;
- mac_dev->ptp_enable = ptp_enable;
- mac_dev->ptp_disable = ptp_disable;
+ mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
+ mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
mac_dev->get_mac_handle = get_mac_handle;
- mac_dev->set_tx_pause = set_tx_pause;
- mac_dev->set_rx_pause = set_rx_pause;
+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = fm_mac_set_rx_ignore_pause_frames;
mac_dev->fm_rtc_enable = fm_rtc_enable;
mac_dev->fm_rtc_disable = fm_rtc_disable;
mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
@@ -844,12 +491,13 @@ static void __cold setup_xgmac(struct mac_device *mac_dev)
mac_dev->init = init;
mac_dev->start = start;
mac_dev->stop = stop;
- mac_dev->change_promisc = change_promisc;
- mac_dev->change_addr = change_addr;
+ mac_dev->set_promisc = fm_mac_set_promiscuous;
+ mac_dev->change_addr = fm_mac_modify_mac_addr;
mac_dev->set_multi = set_multi;
mac_dev->uninit = uninit;
- mac_dev->set_tx_pause = set_tx_pause;
- mac_dev->set_rx_pause = set_rx_pause;
+ mac_dev->get_mac_handle = get_mac_handle;
+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = fm_mac_set_rx_ignore_pause_frames;
}
static void __cold setup_memac(struct mac_device *mac_dev)
@@ -858,13 +506,13 @@ static void __cold setup_memac(struct mac_device *mac_dev)
mac_dev->init = memac_init;
mac_dev->start = start;
mac_dev->stop = stop;
- mac_dev->change_promisc = change_promisc;
- mac_dev->change_addr = change_addr;
+ mac_dev->set_promisc = fm_mac_set_promiscuous;
+ mac_dev->change_addr = fm_mac_modify_mac_addr;
mac_dev->set_multi = set_multi;
mac_dev->uninit = uninit;
mac_dev->get_mac_handle = get_mac_handle;
- mac_dev->set_tx_pause = set_tx_pause;
- mac_dev->set_rx_pause = set_rx_pause;
+ mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = fm_mac_set_rx_ignore_pause_frames;
mac_dev->fm_rtc_enable = fm_rtc_enable;
mac_dev->fm_rtc_disable = fm_rtc_disable;
mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
diff --git a/drivers/net/ethernet/freescale/dpa/mac.c b/drivers/net/ethernet/freescale/dpa/mac.c
index 9a18804..73f4532 100644
--- a/drivers/net/ethernet/freescale/dpa/mac.c
+++ b/drivers/net/ethernet/freescale/dpa/mac.c
@@ -40,8 +40,6 @@
#include <linux/device.h>
#include <linux/phy.h>
-#include "dpaa_eth-common.h"
-
#include "lnxwrp_fm_ext.h"
#include "mac.h"
@@ -116,7 +114,7 @@ static int __cold free_macdev(struct mac_device *mac_dev)
{
dev_set_drvdata(mac_dev->dev, NULL);
- return mac_dev->uninit(mac_dev);
+ return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
}
static const struct of_device_id mac_match[] = {
@@ -197,7 +195,7 @@ static int __cold mac_probe(struct platform_device *_of_dev)
goto _return_of_node_put;
}
- mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
+ mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
of_node_put(dev_node);
/* Get the address of the memory mapped registers */
diff --git a/drivers/net/ethernet/freescale/dpa/mac.h b/drivers/net/ethernet/freescale/dpa/mac.h
index 7025ea7..7545a1c 100644
--- a/drivers/net/ethernet/freescale/dpa/mac.h
+++ b/drivers/net/ethernet/freescale/dpa/mac.h
@@ -71,29 +71,28 @@ struct mac_device {
u32 rx_pause;
u32 tx_pause;
+ struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
int (*init_phy)(struct net_device *net_dev);
int (*init)(struct mac_device *mac_dev);
int (*start)(struct mac_device *mac_dev);
int (*stop)(struct mac_device *mac_dev);
- int (*change_promisc)(struct mac_device *mac_dev);
- int (*change_addr)(struct mac_device *mac_dev, uint8_t *addr);
+ int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
+ int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
int (*set_multi)(struct net_device *net_dev);
- int (*uninit)(struct mac_device *mac_dev);
- int (*ptp_enable)(struct mac_device *mac_dev);
- int (*ptp_disable)(struct mac_device *mac_dev);
- void *(*get_mac_handle)(struct mac_device *mac_dev);
- int (*set_rx_pause)(struct mac_device *mac_dev, bool en);
- int (*set_tx_pause)(struct mac_device *mac_dev, bool en);
- int (*fm_rtc_enable)(struct net_device *net_dev);
- int (*fm_rtc_disable)(struct net_device *net_dev);
- int (*fm_rtc_get_cnt)(struct net_device *net_dev, uint64_t *ts);
- int (*fm_rtc_set_cnt)(struct net_device *net_dev, uint64_t ts);
- int (*fm_rtc_get_drift)(struct net_device *net_dev, uint32_t *drift);
- int (*fm_rtc_set_drift)(struct net_device *net_dev, uint32_t drift);
- int (*fm_rtc_set_alarm)(struct net_device *net_dev, uint32_t id,
- uint64_t time);
- int (*fm_rtc_set_fiper)(struct net_device *net_dev, uint32_t id,
- uint64_t fiper);
+ int (*uninit)(struct fm_mac_dev *fm_mac_dev);
+ int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
+ int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
+ int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
+ int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
+ int (*fm_rtc_enable)(struct fm *fm_dev);
+ int (*fm_rtc_disable)(struct fm *fm_dev);
+ int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
+ int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
+ int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
+ int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
+ int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
+ int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
+ uint64_t fiper);
};
struct mac_address {
@@ -101,10 +100,14 @@ struct mac_address {
struct list_head list;
};
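+/* Resolve the FM device that backs a DPAA net_device's MAC */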
+#define get_fm_handle(net_dev) \
+ (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
+
#define for_each_port_device(i, port_dev) \
for (i = 0; i < ARRAY_SIZE(port_dev); i++)
-static inline void * __attribute((nonnull)) macdev_priv(const struct mac_device *mac_dev)
+static inline __attribute((nonnull)) void *macdev_priv(
+ const struct mac_device *mac_dev)
{
return (void *)mac_dev + sizeof(*mac_dev);
}
@@ -113,9 +116,4 @@ extern const char *mac_driver_description;
extern const size_t mac_sizeof_priv[];
extern void (*const mac_setup[])(struct mac_device *mac_dev);
-#define TX_PAUSE_PRIO_DEFAULT 0xff
-#define TX_PAUSE_TIME_ENABLE 0xf000
-#define TX_PAUSE_TIME_DISABLE 0
-#define TX_PAUSE_THRESH_DEFAULT 0
-
#endif /* __MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpa/offline_port.c b/drivers/net/ethernet/freescale/dpa/offline_port.c
index aa30caa..c002dc9 100644
--- a/drivers/net/ethernet/freescale/dpa/offline_port.c
+++ b/drivers/net/ethernet/freescale/dpa/offline_port.c
@@ -44,7 +44,8 @@
#include <linux/of_platform.h>
#include "offline_port.h"
-#include "dpaa_eth-common.h"
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
/*
@@ -128,7 +129,7 @@ oh_port_probe(struct platform_device *_of_dev)
struct platform_device *oh_of_dev;
struct device_node *oh_node, *bpool_node = NULL, *root_node;
struct device *oh_dev;
- struct dpa_oh_config_s *oh_config;
+ struct dpa_oh_config_s *oh_config = NULL;
uint32_t *oh_all_queues;
uint32_t queues_count;
uint32_t crt_fqid_base;
@@ -436,7 +437,7 @@ static int __init __cold oh_port_load(void)
{
int _errno;
- pr_info(KBUILD_MODNAME ": " OH_MOD_DESCRIPTION " (" VERSION ")\n");
+ printk(KERN_INFO KBUILD_MODNAME ": " OH_MOD_DESCRIPTION " (" VERSION ")\n");
_errno = platform_driver_register(&oh_port_driver);
if (_errno < 0) {
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index ec7449f..eaecb1c 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -85,14 +85,16 @@ config FSL_FM_MAX_FRAME_SIZE
config FSL_FM_RX_EXTRA_HEADROOM
int "Add extra headroom at beginning of data buffers"
depends on FSL_FMAN
- range 0 384
+ range 16 384
default "64"
help
Configure this to tell the Frame Manager to reserve some extra
space at the beginning of a data buffer on the receive path,
before Internal Context fields are copied. This is in addition
to the private data area already reserved for driver internal
- use. The option does not affect in any way the layout of
+	  use. The value must be a multiple of 16; other values are
+	  rounded up to the next multiple of 16 when used.
+
+ This option does not affect in any way the layout of
transmitted buffers. You may be required to enable the config
option FMAN_RESOURCE_ALLOCATION_ALGORITHM and also
FMAN_DISABLE_OH_TO_REUSE_RESOURCES to have enough resources
diff --git a/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_fsl_fman.h b/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_fsl_fman.h
index 8adf489..f182e3f 100644
--- a/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_fsl_fman.h
+++ b/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_fsl_fman.h
@@ -44,6 +44,8 @@
#include <linux/fsl_qman.h> /* struct qman_fq */
#include "dpaa_integration_ext.h"
#include "fm_port_ext.h"
+#include "fm_mac_ext.h"
+#include "fm_rtc_ext.h"
/**************************************************************************//**
@Group FM_LnxKern_grp Frame Manager Linux wrapper API
@@ -67,11 +69,16 @@
/*****************************************************************************/
/**************************************************************************//**
- @Description A structure ..,
+ @Description FM device opaque structure used for type checking
*//***************************************************************************/
struct fm;
/**************************************************************************//**
+ @Description FM MAC device opaque structure used for type checking
+*//***************************************************************************/
+struct fm_mac_dev;
+
+/**************************************************************************//**
@Description A structure ..,
*//***************************************************************************/
struct fm_port;
@@ -273,7 +280,10 @@ int fm_port_enable(struct fm_port *port);
*//***************************************************************************/
void fm_port_disable(struct fm_port *port);
-void *fm_port_get_handle(struct fm_port *port);
+void *fm_port_get_handle(const struct fm_port *port);
+
+u64 *fm_port_get_buffer_time_stamp(const struct fm_port *port,
+ const void *data);
/**************************************************************************//**
@Function fm_port_get_base_address
@@ -340,6 +350,85 @@ int fm_port_set_rate_limit(struct fm_port *port,
*//***************************************************************************/
int fm_port_del_rate_limit(struct fm_port *port);
+/**************************************************************************//**
+@Function fm_mac_set_exception
+
+@Description Set MAC exception state.
+
+@Param[in] fm_mac_dev - A handle of the FM MAC device.
+@Param[in] exception - FM MAC exception type.
+@Param[in]     enable - true to enable the exception, false to disable it.
+
+*//***************************************************************************/
+int fm_mac_set_exception(struct fm_mac_dev *fm_mac_dev,
+ e_FmMacExceptions exception, bool enable);
+
+int fm_mac_free(struct fm_mac_dev *fm_mac_dev);
+
+struct fm_mac_dev *fm_mac_config(t_FmMacParams *params);
+
+int fm_mac_config_max_frame_length(struct fm_mac_dev *fm_mac_dev,
+ int len);
+
+int fm_mac_config_pad_and_crc(struct fm_mac_dev *fm_mac_dev, bool enable);
+
+int fm_mac_config_half_duplex(struct fm_mac_dev *fm_mac_dev, bool enable);
+
+int fm_mac_config_reset_on_init(struct fm_mac_dev *fm_mac_dev, bool enable);
+
+int fm_mac_init(struct fm_mac_dev *fm_mac_dev);
+
+int fm_mac_get_version(struct fm_mac_dev *fm_mac_dev, uint32_t *version);
+
+int fm_mac_enable(struct fm_mac_dev *fm_mac_dev);
+
+int fm_mac_disable(struct fm_mac_dev *fm_mac_dev);
+
+int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev,
+ bool enable);
+
+int fm_mac_remove_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
+ t_EnetAddr *mac_addr);
+
+int fm_mac_add_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
+ t_EnetAddr *mac_addr);
+
+int fm_mac_modify_mac_addr(struct fm_mac_dev *fm_mac_dev,
+ uint8_t *addr);
+
+int fm_mac_adjust_link(struct fm_mac_dev *fm_mac_dev,
+ bool link, int speed, bool duplex);
+
+int fm_mac_enable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev);
+
+int fm_mac_disable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev);
+
+int fm_mac_set_rx_ignore_pause_frames(
+ struct fm_mac_dev *fm_mac_dev, bool en);
+
+int fm_mac_set_tx_pause_frames(struct fm_mac_dev *fm_mac_dev,
+ bool en);
+
+int fm_rtc_enable(struct fm *fm_dev);
+
+int fm_rtc_disable(struct fm *fm_dev);
+
+int fm_rtc_get_cnt(struct fm *fm_dev, uint64_t *ts);
+
+int fm_rtc_set_cnt(struct fm *fm_dev, uint64_t ts);
+
+int fm_rtc_get_drift(struct fm *fm_dev, uint32_t *drift);
+
+int fm_rtc_set_drift(struct fm *fm_dev, uint32_t drift);
+
+int fm_rtc_set_alarm(struct fm *fm_dev, uint32_t id,
+ uint64_t time);
+
+int fm_rtc_set_fiper(struct fm *fm_dev, uint32_t id,
+ uint64_t fiper);
+
+void fm_mac_dump_regs(struct fm_mac_dev *fm_mac_dev);
+
/** @} */ /* end of FM_LnxKern_ctrl_grp group */
/** @} */ /* end of FM_LnxKern_grp group */
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c
index 7a56ec0..10de0ad 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c
@@ -108,6 +108,11 @@ extern struct device_node *GetFmPortAdvArgsDevTreeNode (struct device_node *fm_n
/* Maximum value for the fsl_fm_rx_extra_headroom bootarg */
#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384
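+/* Default Tx pause frame parameters used by fm_mac_set_tx_pause_frames() */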
+#define TX_PAUSE_PRIO_DEFAULT 0xff
+#define TX_PAUSE_TIME_ENABLE 0xf000
+#define TX_PAUSE_TIME_DISABLE 0
+#define TX_PAUSE_THRESH_DEFAULT 0
+
/*
* Max frame size, across all interfaces.
* Configurable from Kconfig or bootargs, to avoid allocating
@@ -142,7 +147,7 @@ int fm_get_max_frm()
int fm_get_rx_extra_headroom()
{
- return fsl_fm_rx_extra_headroom;
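+	/* Round up to a multiple of 16, as required by FMan (see Kconfig help) */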
+ return ALIGN(fsl_fm_rx_extra_headroom, 16);
}
static int __init fm_set_max_frm(char *str)
@@ -1203,7 +1208,7 @@ void fm_port_unbind(struct fm_port *port)
}
EXPORT_SYMBOL(fm_port_unbind);
-void * fm_port_get_handle(struct fm_port *port)
+void *fm_port_get_handle(const struct fm_port *port)
{
t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port;
@@ -1211,6 +1216,14 @@ void * fm_port_get_handle(struct fm_port *port)
}
EXPORT_SYMBOL(fm_port_get_handle);
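+
+/* Return a pointer to the hardware time stamp FMan stored for this buffer */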
+u64 *fm_port_get_buffer_time_stamp(const struct fm_port *port,
+ const void *data)
+{
+ return FM_PORT_GetBufferTimeStamp(fm_port_get_handle(port),
+ (void *)data);
+}
+EXPORT_SYMBOL(fm_port_get_buffer_time_stamp);
+
void fm_port_get_base_addr(const struct fm_port *port, uint64_t *base_addr)
{
t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port;
@@ -1315,6 +1328,447 @@ int fm_port_del_rate_limit(struct fm_port *port)
}
EXPORT_SYMBOL(fm_port_del_rate_limit);
+int fm_mac_set_exception(struct fm_mac_dev *fm_mac_dev,
+ e_FmMacExceptions exception, bool enable)
+{
+ int err;
+ int _errno;
+
+ err = FM_MAC_SetException(fm_mac_dev, exception, enable);
+
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_SetException() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_set_exception);
+
+int fm_mac_free(struct fm_mac_dev *fm_mac_dev)
+{
+ int err;
+	int _errno;
+
+	err = FM_MAC_Free(fm_mac_dev);
+	_errno = -GET_ERROR_TYPE(err);
+
+	if (unlikely(_errno < 0))
+		pr_err("FM_MAC_Free() = 0x%08x\n", err);
+
+	return _errno;
+}
+EXPORT_SYMBOL(fm_mac_free);
+
+struct fm_mac_dev *fm_mac_config(t_FmMacParams *params)
+{
+ struct fm_mac_dev *fm_mac_dev;
+
+ fm_mac_dev = FM_MAC_Config(params);
+ if (unlikely(fm_mac_dev == NULL))
+ pr_err("FM_MAC_Config() failed\n");
+
+ return fm_mac_dev;
+}
+EXPORT_SYMBOL(fm_mac_config);
+
+int fm_mac_config_max_frame_length(struct fm_mac_dev *fm_mac_dev,
+ int len)
+{
+ int err;
+ int _errno;
+
+ err = FM_MAC_ConfigMaxFrameLength(fm_mac_dev, len);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_ConfigMaxFrameLength() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_config_max_frame_length);
+
+int fm_mac_config_pad_and_crc(struct fm_mac_dev *fm_mac_dev, bool enable)
+{
+ int err;
+ int _errno;
+
+ err = FM_MAC_ConfigPadAndCrc(fm_mac_dev, enable);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_ConfigPadAndCrc() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_config_pad_and_crc);
+
+int fm_mac_config_half_duplex(struct fm_mac_dev *fm_mac_dev, bool enable)
+{
+ int err;
+ int _errno;
+
+ err = FM_MAC_ConfigHalfDuplex(fm_mac_dev, enable);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_ConfigHalfDuplex() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_config_half_duplex);
+
+int fm_mac_config_reset_on_init(struct fm_mac_dev *fm_mac_dev, bool enable)
+{
+ int err;
+ int _errno;
+
+ err = FM_MAC_ConfigResetOnInit(fm_mac_dev, enable);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_ConfigResetOnInit() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_config_reset_on_init);
+
+int fm_mac_init(struct fm_mac_dev *fm_mac_dev)
+{
+ int err;
+ int _errno;
+
+ err = FM_MAC_Init(fm_mac_dev);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_Init() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_init);
+
+int fm_mac_get_version(struct fm_mac_dev *fm_mac_dev, uint32_t *version)
+{
+ int err;
+ int _errno;
+
+ err = FM_MAC_GetVesrion(fm_mac_dev, version);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_GetVesrion() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_get_version);
+
+int fm_mac_enable(struct fm_mac_dev *fm_mac_dev)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_MAC_Enable(fm_mac_dev, e_COMM_MODE_RX_AND_TX);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_Enable() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_enable);
+
+int fm_mac_disable(struct fm_mac_dev *fm_mac_dev)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_MAC_Disable(fm_mac_dev, e_COMM_MODE_RX_AND_TX);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_Disable() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_disable);
+
+int fm_mac_set_promiscuous(struct fm_mac_dev *fm_mac_dev,
+ bool enable)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_MAC_SetPromiscuous(fm_mac_dev, enable);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_SetPromiscuous() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_set_promiscuous);
+
+int fm_mac_remove_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
+ t_EnetAddr *mac_addr)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_MAC_RemoveHashMacAddr(fm_mac_dev, mac_addr);
+ _errno = -GET_ERROR_TYPE(err);
+ if (_errno < 0) {
+ pr_err("FM_MAC_RemoveHashMacAddr() = 0x%08x\n", err);
+ return _errno;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(fm_mac_remove_hash_mac_addr);
+
+int fm_mac_add_hash_mac_addr(struct fm_mac_dev *fm_mac_dev,
+ t_EnetAddr *mac_addr)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_MAC_AddHashMacAddr(fm_mac_dev, mac_addr);
+ _errno = -GET_ERROR_TYPE(err);
+ if (_errno < 0) {
+ pr_err("FM_MAC_AddHashMacAddr() = 0x%08x\n", err);
+ return _errno;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(fm_mac_add_hash_mac_addr);
+
+int fm_mac_modify_mac_addr(struct fm_mac_dev *fm_mac_dev,
+ uint8_t *addr)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_MAC_ModifyMacAddr(fm_mac_dev, (t_EnetAddr *)addr);
+ _errno = -GET_ERROR_TYPE(err);
+ if (_errno < 0)
+ pr_err("FM_MAC_ModifyMacAddr() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_modify_mac_addr);
+
+int fm_mac_adjust_link(struct fm_mac_dev *fm_mac_dev,
+ bool link, int speed, bool duplex)
+{
+ int _errno;
+ t_Error err;
+
+ if (!link) {
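+		/* Link is down: older DPAA revisions just restart autonegotiation */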
+#if (DPAA_VERSION < 11)
+ FM_MAC_RestartAutoneg(fm_mac_dev);
+#endif
+ return 0;
+ }
+
+ err = FM_MAC_AdjustLink(fm_mac_dev, speed, duplex);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_AdjustLink() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_adjust_link);
+
+int fm_mac_enable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_MAC_Enable1588TimeStamp(fm_mac_dev);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_Enable1588TimeStamp() = 0x%08x\n", err);
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_enable_1588_time_stamp);
+
+int fm_mac_disable_1588_time_stamp(struct fm_mac_dev *fm_mac_dev)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_MAC_Disable1588TimeStamp(fm_mac_dev);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_MAC_Disable1588TimeStamp() = 0x%08x\n", err);
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_disable_1588_time_stamp);
+
+int fm_mac_set_rx_ignore_pause_frames(
+ struct fm_mac_dev *fm_mac_dev, bool en)
+{
+ int _errno;
+ t_Error err;
+
+ /* if rx pause is enabled, do NOT ignore pause frames */
+ err = FM_MAC_SetRxIgnorePauseFrames(fm_mac_dev, !en);
+
+ _errno = -GET_ERROR_TYPE(err);
+ if (_errno < 0)
+ pr_err("FM_MAC_SetRxIgnorePauseFrames() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_set_rx_ignore_pause_frames);
+
+int fm_mac_set_tx_pause_frames(struct fm_mac_dev *fm_mac_dev,
+ bool en)
+{
+ int _errno;
+ t_Error err;
+
+ if (en)
+ err = FM_MAC_SetTxPauseFrames(fm_mac_dev,
+ TX_PAUSE_PRIO_DEFAULT,
+ TX_PAUSE_TIME_ENABLE,
+ TX_PAUSE_THRESH_DEFAULT);
+ else
+ err = FM_MAC_SetTxPauseFrames(fm_mac_dev,
+ TX_PAUSE_PRIO_DEFAULT,
+ TX_PAUSE_TIME_DISABLE,
+ TX_PAUSE_THRESH_DEFAULT);
+
+ _errno = -GET_ERROR_TYPE(err);
+ if (_errno < 0)
+ pr_err("FM_MAC_SetTxPauseFrames() = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_mac_set_tx_pause_frames);
+
+int fm_rtc_enable(struct fm *fm_dev)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_Enable(fm_get_rtc_handle(fm_dev), 0);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_RTC_Enable = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_rtc_enable);
+
+int fm_rtc_disable(struct fm *fm_dev)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_Disable(fm_get_rtc_handle(fm_dev));
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_RTC_Disable = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_rtc_disable);
+
+int fm_rtc_get_cnt(struct fm *fm_dev, uint64_t *ts)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_GetCurrentTime(fm_get_rtc_handle(fm_dev), ts);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_RTC_GetCurrentTime = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_rtc_get_cnt);
+
+int fm_rtc_set_cnt(struct fm *fm_dev, uint64_t ts)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_SetCurrentTime(fm_get_rtc_handle(fm_dev), ts);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_RTC_SetCurrentTime = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_rtc_set_cnt);
+
+int fm_rtc_get_drift(struct fm *fm_dev, uint32_t *drift)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_GetFreqCompensation(fm_get_rtc_handle(fm_dev),
+ drift);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_RTC_GetFreqCompensation = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_rtc_get_drift);
+
+int fm_rtc_set_drift(struct fm *fm_dev, uint32_t drift)
+{
+ int _errno;
+ t_Error err;
+
+ err = FM_RTC_SetFreqCompensation(fm_get_rtc_handle(fm_dev),
+ drift);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_RTC_SetFreqCompensation = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_rtc_set_drift);
+
+int fm_rtc_set_alarm(struct fm *fm_dev, uint32_t id,
+ uint64_t time)
+{
+ t_FmRtcAlarmParams alarm;
+ int _errno;
+ t_Error err;
+
+ alarm.alarmId = id;
+ alarm.alarmTime = time;
+ alarm.f_AlarmCallback = NULL;
+ err = FM_RTC_SetAlarm(fm_get_rtc_handle(fm_dev),
+ &alarm);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_RTC_SetAlarm = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_rtc_set_alarm);
+
+int fm_rtc_set_fiper(struct fm *fm_dev, uint32_t id,
+ uint64_t fiper)
+{
+ t_FmRtcPeriodicPulseParams pp;
+ int _errno;
+ t_Error err;
+
+ pp.periodicPulseId = id;
+ pp.periodicPulsePeriod = fiper;
+ pp.f_PeriodicPulseCallback = NULL;
+ err = FM_RTC_SetPeriodicPulse(fm_get_rtc_handle(fm_dev), &pp);
+ _errno = -GET_ERROR_TYPE(err);
+ if (unlikely(_errno < 0))
+ pr_err("FM_RTC_SetPeriodicPulse = 0x%08x\n", err);
+
+ return _errno;
+}
+EXPORT_SYMBOL(fm_rtc_set_fiper);
+
+void fm_mac_dump_regs(struct fm_mac_dev *fm_mac_dev)
+{
+ FM_MAC_DumpRegs(fm_mac_dev);
+}
+EXPORT_SYMBOL(fm_mac_dump_regs);
+
void fm_mutex_lock(void)
{
mutex_lock(&lnxwrp_mutex);
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.h b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.h
index 7cfe3e6..feab96f 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.h
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.h
@@ -74,7 +74,7 @@
#endif
#ifndef CONFIG_FSL_FM_RX_EXTRA_HEADROOM
-#define CONFIG_FSL_FM_RX_EXTRA_HEADROOM 0
+#define CONFIG_FSL_FM_RX_EXTRA_HEADROOM 16
#endif
typedef enum {
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c
index d896b27..3104385 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c
@@ -77,8 +77,6 @@
#include "lnxwrp_fm.h"
-#include "dpaa_eth.h"
-
#define CMP_IOC_DEFINE(def) (IOC_##def != def)
/* fm_pcd_ioctls.h === fm_pcd_ext.h assertions */
@@ -4173,39 +4171,30 @@ t_Error LnxwrpFmPortIOCTL(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev, unsigned int cmd
if (p_LnxWrpFmPortDev->pcd_owner_params.dev)
{
- struct net_device *net_dev = dev_get_drvdata(p_LnxWrpFmPortDev->pcd_owner_params.dev);
+ int id = -1;
- if (net_dev)
+ switch (p_LnxWrpFmPortDev->settings.param.portType)
{
- struct dpa_priv_s *priv = netdev_priv(net_dev);
-
- if (priv)
- {
- struct mac_device *mac_dev = priv->mac_dev;
-
- if (mac_dev)
- {
- void *mac_handle = mac_dev->get_mac_handle(mac_dev);
-
- err = (cmd == FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR)
- ? FM_MAC_AddHashMacAddr((t_Handle) mac_handle, (t_EnetAddr*) param)
- : FM_MAC_RemoveHashMacAddr((t_Handle) mac_handle, (t_EnetAddr*) param)
- ;
- }
- else
- {
- err = E_NOT_AVAILABLE;
- REPORT_ERROR(MINOR, err, ("Attempt to add/remove hash MAC addr. to/from MAC-less port!"));
- }
- }
- else
- /* Not possible, set err nevertheless: */
+ case e_FM_PORT_TYPE_RX:
+ case e_FM_PORT_TYPE_TX:
+ id = p_LnxWrpFmPortDev->id;
+ break;
+ case e_FM_PORT_TYPE_RX_10G:
+ case e_FM_PORT_TYPE_TX_10G:
+ id = p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_MACS;
+ break;
+ default:
err = E_NOT_AVAILABLE;
+ REPORT_ERROR(MINOR, err, ("Attempt to add/remove hash MAC addr. to/from MAC-less port!"));
}
- else
+ if (id >= 0)
{
- err = E_NOT_AVAILABLE;
- REPORT_ERROR(MINOR, err, ("No net device (and no MAC!) associated to this port!"));
+ t_LnxWrpFmDev *fm = (t_LnxWrpFmDev *)p_LnxWrpFmPortDev->h_LnxWrpFmDev;
+ t_Handle mac_handle = fm->macs[id].h_Dev;
+
+ err = (cmd == FM_PORT_IOC_ADD_RX_HASH_MAC_ADDR)
+ ? FM_MAC_AddHashMacAddr(mac_handle, (t_EnetAddr*) param)
+ : FM_MAC_RemoveHashMacAddr(mac_handle, (t_EnetAddr*) param);
}
}
else
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 0369fb6..5088639 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2794,6 +2794,16 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
#endif
+#ifdef CONFIG_FSL_PCI
+static void quirk_freescale_class(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "Setting PCI class for FSL PCI host bridge\n");
+ dev->class = (PCI_CLASS_BRIDGE_PCI << 8) | (dev->class & 0xff);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
+ quirk_freescale_class);
+#endif
+
static void fixup_ti816x_class(struct pci_dev *dev)
{
/* TI 816x devices do not have class code set when in PCIe boot mode */
diff --git a/drivers/staging/fsl_qbman/Kconfig b/drivers/staging/fsl_qbman/Kconfig
index d5adc5a..dee570f 100644
--- a/drivers/staging/fsl_qbman/Kconfig
+++ b/drivers/staging/fsl_qbman/Kconfig
@@ -218,6 +218,13 @@ config QMAN_CEETM_UPDATE_PERIOD
Unless you know what you are doing, leave this value at its default.
+config FSL_QMAN_INIT_TIMEOUT
+ int "timeout for qman init stage, in seconds"
+ default 10
+ ---help---
+ Timeout, in seconds, after which a non-control partition gives up
+ polling for QMan initialisation if the control partition fails to boot.
+
endif # FSL_QMAN
endmenu
diff --git a/drivers/staging/fsl_qbman/Makefile b/drivers/staging/fsl_qbman/Makefile
index 2975e28..91b01a2 100644
--- a/drivers/staging/fsl_qbman/Makefile
+++ b/drivers/staging/fsl_qbman/Makefile
@@ -1,5 +1,6 @@
# Common
obj-$(CONFIG_FSL_DPA) += dpa_alloc.o
+obj-$(CONFIG_HAS_FSL_QBMAN) += qbman_driver.o
# Bman
obj-$(CONFIG_FSL_BMAN) += bman_high.o
diff --git a/drivers/staging/fsl_qbman/bman_config.c b/drivers/staging/fsl_qbman/bman_config.c
index a2f2ad4..850823a 100644
--- a/drivers/staging/fsl_qbman/bman_config.c
+++ b/drivers/staging/fsl_qbman/bman_config.c
@@ -29,10 +29,6 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CONFIG_SMP
-#include <linux/smp.h> /* get_hard_smp_processor_id() */
-#endif
-
#include <asm/cacheflush.h>
#include "bman_private.h"
diff --git a/drivers/staging/fsl_qbman/bman_driver.c b/drivers/staging/fsl_qbman/bman_driver.c
index 27a5b0f..ed7dde0 100644
--- a/drivers/staging/fsl_qbman/bman_driver.c
+++ b/drivers/staging/fsl_qbman/bman_driver.c
@@ -320,7 +320,7 @@ __setup("bportals=", parse_bportals);
* which are selected in a round-robin fashion.
* Any portal configs left unused are available for USDPAA allocation.
*/
-static __init int bman_init(void)
+__init int bman_init(void)
{
struct cpumask slave_cpus;
struct cpumask unshared_cpus = *cpu_none_mask;
@@ -427,6 +427,14 @@ static __init int bman_init(void)
for_each_cpu(cpu, &slave_cpus)
init_slave(cpu);
pr_info("Bman portals initialised\n");
+ return 0;
+}
+
+__init int bman_resource_init(void)
+{
+ struct device_node *dn;
+ int ret;
+
/* Initialise BPID allocation ranges */
for_each_compatible_node(dn, NULL, "fsl,bpid-range") {
ret = fsl_bpid_range_init(dn);
@@ -435,4 +443,3 @@ static __init int bman_init(void)
}
return 0;
}
-subsys_initcall(bman_init);
diff --git a/drivers/staging/fsl_qbman/bman_high.c b/drivers/staging/fsl_qbman/bman_high.c
index e6e6946..c439e55 100644
--- a/drivers/staging/fsl_qbman/bman_high.c
+++ b/drivers/staging/fsl_qbman/bman_high.c
@@ -844,16 +844,16 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
return -EINVAL;
if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
return -EINVAL;
+#endif
+ /* Without stockpile, this API is a pass-through to the h/w operation */
+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+ return __bman_release(pool, bufs, num, flags);
+#ifdef CONFIG_FSL_DPA_CHECKING
if (!atomic_dec_and_test(&pool->in_use)) {
pr_crit("Parallel attempts to enter bman_released() detected.");
panic("only one instance of bman_released/acquired allowed");
}
#endif
- /* Without stockpile, this API is a pass-through to the h/w operation */
- if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) {
- ret = __bman_release(pool, bufs, num, flags);
- goto release_done;
- }
/* This needs some explanation. Adding the given buffers may take the
* stockpile over the threshold, but in fact the stockpile may already
* *be* over the threshold if a previous release-to-hw attempt had
@@ -930,16 +930,16 @@ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
return -EINVAL;
if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
return -EINVAL;
+#endif
+ /* Without stockpile, this API is a pass-through to the h/w operation */
+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+ return __bman_acquire(pool, bufs, num);
+#ifdef CONFIG_FSL_DPA_CHECKING
if (!atomic_dec_and_test(&pool->in_use)) {
pr_crit("Parallel attempts to enter bman_acquire() detected.");
panic("only one instance of bman_released/acquired allowed");
}
#endif
- /* Without stockpile, this API is a pass-through to the h/w operation */
- if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) {
- ret = __bman_acquire(pool, bufs, num);
- goto acquire_done;
- }
/* Only need a h/w op if we'll hit the low-water thresh */
if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) &&
(pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) {
diff --git a/drivers/staging/fsl_qbman/bman_private.h b/drivers/staging/fsl_qbman/bman_private.h
index 2df9857..7e54701 100644
--- a/drivers/staging/fsl_qbman/bman_private.h
+++ b/drivers/staging/fsl_qbman/bman_private.h
@@ -84,11 +84,6 @@ struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
void bm_put_unused_portal(struct bm_portal_config *pcfg);
void bm_set_liodns(struct bm_portal_config *pcfg);
-/* Lookup a BMan portal associated with an FD */
-struct bm_portal_config *usdpaa_get_bm_portal_config(struct file *filp,
- void *cinh);
-
-
/* Pool logic in the portal driver, during initialisation, needs to know if
* there's access to CCSR or not (if not, it'll cripple the pool allocator). */
#ifdef CONFIG_FSL_BMAN_CONFIG
@@ -157,4 +152,7 @@ int bm_pool_set(u32 bpid, const u32 *thresholds);
/* Read the free buffer count for a given buffer */
u32 bm_pool_free_buffers(u32 bpid);
+__init int bman_init(void);
+__init int bman_resource_init(void);
+
#endif /* CONFIG_FSL_BMAN_CONFIG */
diff --git a/drivers/staging/fsl_qbman/dpa_sys.h b/drivers/staging/fsl_qbman/dpa_sys.h
index 9306d87..2f33351 100644
--- a/drivers/staging/fsl_qbman/dpa_sys.h
+++ b/drivers/staging/fsl_qbman/dpa_sys.h
@@ -321,4 +321,9 @@ err:
return -EINVAL;
}
+/* Hooks from fsl_usdpaa_irq.c to fsl_usdpaa.c */
+int usdpaa_get_portal_config(struct file *filp, void *cinh,
+ enum usdpaa_portal_type ptype, unsigned int *irq,
+ void **iir_reg);
+
#endif /* DPA_SYS_H */
diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c
index 9dc63a2..33b1923 100644
--- a/drivers/staging/fsl_qbman/fsl_usdpaa.c
+++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
@@ -321,7 +321,6 @@ static int usdpaa_open(struct inode *inode, struct file *filp)
return 0;
}
-
#define DQRR_MAXFILL 15
/* Reset a QMan portal to its default state */
@@ -435,7 +434,6 @@ static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
++fq_id;
}
return 0;
-
}
static bool check_channel_device(void *_ctx, u32 channel)
@@ -1368,13 +1366,13 @@ static long usdpaa_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
-
-
static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd,
unsigned long arg)
{
+#ifdef CONFIG_COMPAT
struct ctx *ctx = fp->private_data;
void __user *a = (void __user *)arg;
+#endif
switch (cmd) {
#ifdef CONFIG_COMPAT
case USDPAA_IOCTL_DMA_MAP_COMPAT:
@@ -1441,56 +1439,36 @@ static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd,
return -EINVAL;
}
-struct qm_portal_config *usdpaa_get_qm_portal_config(struct file *filp,
- void *hint)
-{
- /* Walk the list of portals for filp and return the config
- for the portal that matches the hint */
-
- struct ctx *context;
- struct portal_mapping *portal;
-
- /* First sanitize the filp */
- if (filp->f_op->open != usdpaa_open)
- return NULL;
- context = filp->private_data;
- spin_lock(&context->lock);
- list_for_each_entry(portal, &context->portals, list) {
- if (portal->user.type == usdpaa_portal_qman &&
- portal->user.addr.cinh == hint) {
- spin_unlock(&context->lock);
- return portal->qportal;
- }
- }
- spin_unlock(&context->lock);
- return NULL;
-}
-
-struct bm_portal_config *usdpaa_get_bm_portal_config(struct file *filp,
- void *hint)
+int usdpaa_get_portal_config(struct file *filp, void *cinh,
+ enum usdpaa_portal_type ptype, unsigned int *irq,
+ void **iir_reg)
{
/* Walk the list of portals for filp and return the config
for the portal that matches the hint */
-
struct ctx *context;
struct portal_mapping *portal;
/* First sanitize the filp */
if (filp->f_op->open != usdpaa_open)
- return NULL;
-
+ return -ENODEV;
context = filp->private_data;
-
spin_lock(&context->lock);
list_for_each_entry(portal, &context->portals, list) {
- if (portal->user.type == usdpaa_portal_bman &&
- portal->user.addr.cinh == hint) {
+ if (portal->user.type == ptype &&
+ portal->user.addr.cinh == cinh) {
+ if (ptype == usdpaa_portal_qman) {
+ *irq = portal->qportal->public_cfg.irq;
+ *iir_reg = portal->qportal->addr_virt[1] + QM_REG_IIR;
+ } else {
+ *irq = portal->bportal->public_cfg.irq;
+ *iir_reg = portal->bportal->addr_virt[1] + BM_REG_IIR;
+ }
spin_unlock(&context->lock);
- return portal->bportal;
+ return 0;
}
}
spin_unlock(&context->lock);
- return NULL;
+ return -EINVAL;
}
static const struct file_operations usdpaa_fops = {
diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
index a2c609f..54073fb 100644
--- a/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
+++ b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
@@ -61,14 +61,11 @@ struct usdpaa_irq_ctx {
struct file *usdpaa_filp;
};
-
static int usdpaa_irq_open(struct inode *inode, struct file *filp)
{
- struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(struct usdpaa_irq_ctx),
- GFP_KERNEL);
+ struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
-
ctx->irq_set = 0;
ctx->irq_count = 0;
ctx->last_irq_count = 0;
@@ -92,80 +89,58 @@ static int usdpaa_irq_release(struct inode *inode, struct file *filp)
return 0;
}
-
-irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
+static irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
{
unsigned long flags;
struct usdpaa_irq_ctx *ctx = _ctx;
spin_lock_irqsave(&ctx->lock, flags);
++ctx->irq_count;
- wake_up_all(&ctx->wait_queue);
spin_unlock_irqrestore(&ctx->lock, flags);
+ wake_up_all(&ctx->wait_queue);
/* Set the inhibit register. This will be reenabled
once the USDPAA code handles the IRQ */
out_be32(ctx->inhibit_addr, 0x1);
return IRQ_HANDLED;
}
-
-
static int map_irq(struct file *fp, struct usdpaa_ioctl_irq_map *irq_map)
{
struct usdpaa_irq_ctx *ctx = fp->private_data;
- struct qm_portal_config *qportal = NULL;
- struct bm_portal_config *bportal = NULL;
- int irq, ret;
- void *inhibit_reg;
- struct file *old_filp = ctx->usdpaa_filp;
+ int ret;
+
+ if (ctx->irq_set) {
+ pr_debug("Setting USDPAA IRQ when it was already set!\n");
+ return -EBUSY;
+ }
ctx->usdpaa_filp = fget(irq_map->fd);
if (!ctx->usdpaa_filp) {
- pr_err("fget() returned NULL for fd %d\n", irq_map->fd);
+ pr_debug("USDPAA fget(%d) returned NULL\n", irq_map->fd);
return -EINVAL;
}
- if (irq_map->type == usdpaa_portal_qman) {
- qportal = usdpaa_get_qm_portal_config(ctx->usdpaa_filp,
- irq_map->portal_cinh);
- if (!qportal) {
- pr_err("Couldn't associate info to QMan Portal\n");
- fput(ctx->usdpaa_filp);
- return -EINVAL;
- }
- /* Lookup IRQ number for portal */
- irq = qportal->public_cfg.irq;
- inhibit_reg = qportal->addr_virt[1] + QM_REG_IIR;
- } else {
- bportal = usdpaa_get_bm_portal_config(ctx->usdpaa_filp,
- irq_map->portal_cinh);
- if (!bportal) {
- pr_err("Couldn't associate info to BMan Portal\n");
- fput(ctx->usdpaa_filp);
- return -EINVAL;
- }
- /* Lookup IRQ number for portal */
- irq = bportal->public_cfg.irq;
- inhibit_reg = bportal->addr_virt[1] + BM_REG_IIR;
- }
- if (ctx->irq_set) {
- fput(old_filp);
- free_irq(ctx->irq_num, ctx);
+ ret = usdpaa_get_portal_config(ctx->usdpaa_filp, irq_map->portal_cinh,
+ irq_map->type, &ctx->irq_num,
+ &ctx->inhibit_addr);
+ if (ret) {
+ pr_debug("USDPAA IRQ couldn't identify portal\n");
+ fput(ctx->usdpaa_filp);
+ return ret;
}
ctx->irq_set = 1;
- ctx->irq_num = irq;
- ctx->inhibit_addr = inhibit_reg;
- ret = request_irq(irq, usdpaa_irq_handler, 0,
+ ret = request_irq(ctx->irq_num, usdpaa_irq_handler, 0,
"usdpaa_irq", ctx);
if (ret) {
- pr_err("request_irq for irq %d failed, ret= %d\n", irq, ret);
+ pr_err("USDPAA request_irq(%d) failed, ret= %d\n",
+ ctx->irq_num, ret);
ctx->irq_set = 0;
fput(ctx->usdpaa_filp);
return ret;
}
return 0;
-};
+}
static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
@@ -174,7 +149,7 @@ static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
struct usdpaa_ioctl_irq_map irq_map;
if (cmd != USDPAA_IOCTL_PORTAL_IRQ_MAP) {
- pr_err("Unknown command 0x%x\n", cmd);
+ pr_debug("USDPAA IRQ unknown command 0x%x\n", cmd);
return -EINVAL;
}
@@ -183,7 +158,7 @@ static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
if (ret)
return ret;
return map_irq(fp, &irq_map);
-};
+}
static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
size_t count, loff_t *offp)
@@ -192,12 +167,12 @@ static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
int ret;
if (!ctx->irq_set) {
- pr_err("Reading USDPAA IRQ before it was set\n");
+ pr_debug("Reading USDPAA IRQ before it was set\n");
return -EINVAL;
}
if (count < sizeof(ctx->irq_count)) {
- pr_err("USDPAA IRQ Read too small\n");
+ pr_debug("USDPAA IRQ Read too small\n");
return -EINVAL;
}
if (ctx->irq_count == ctx->last_irq_count) {
@@ -216,7 +191,6 @@ static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
sizeof(ctx->last_irq_count)))
return -EFAULT;
return sizeof(ctx->irq_count);
-
}
static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
@@ -240,7 +214,9 @@ static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
unsigned long arg)
{
+#ifdef CONFIG_COMPAT
void __user *a = (void __user *)arg;
+#endif
switch (cmd) {
#ifdef CONFIG_COMPAT
case USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT:
@@ -258,7 +234,7 @@ static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
default:
return usdpaa_irq_ioctl(fp, cmd, arg);
}
-};
+}
static const struct file_operations usdpaa_irq_fops = {
.open = usdpaa_irq_open,
diff --git a/drivers/staging/fsl_qbman/qbman_driver.c b/drivers/staging/fsl_qbman/qbman_driver.c
new file mode 100644
index 0000000..814db34
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qbman_driver.c
@@ -0,0 +1,75 @@
+/* Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/time.h>
+#include "qman_private.h"
+#include "bman_private.h"
+
+static __init int qbman_init(void)
+{
+
+ bman_init();
+ qman_init();
+ if (!qman_have_ccsr()) {
+ struct qman_fq fq = {
+ .fqid = 1
+ };
+ struct qm_mcr_queryfq_np np;
+ int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
+ struct timespec nowts, diffts, startts = current_kernel_time();
+ /* Loop while querying given fqid succeeds or time out */
+ while (1) {
+ err = qman_query_fq_np(&fq, &np);
+ if (!err) {
+ /* success, control-plane has configured QMan */
+ break;
+ } else if (err != -ERANGE) {
+ pr_err("QMan: I/O error, continuing anyway\n");
+ break;
+ }
+ nowts = current_kernel_time();
+ diffts = timespec_sub(nowts, startts);
+ if (diffts.tv_sec > 0) {
+ if (!retry--) {
+ pr_err("QMan: time out, control-plane"
+ " dead?\n");
+ break;
+ }
+ pr_warn("QMan: polling for the control-plane"
+ " (%d)\n", retry);
+ }
+ }
+ }
+ bman_resource_init();
+ qman_resource_init();
+ return 0;
+}
+subsys_initcall(qbman_init);
diff --git a/drivers/staging/fsl_qbman/qman_config.c b/drivers/staging/fsl_qbman/qman_config.c
index 006dd3a..d420c8c 100644
--- a/drivers/staging/fsl_qbman/qman_config.c
+++ b/drivers/staging/fsl_qbman/qman_config.c
@@ -29,10 +29,6 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CONFIG_SMP
-#include <linux/smp.h> /* get_hard_smp_processor_id() */
-#endif
-
#include <asm/cacheflush.h>
#include "qman_private.h"
diff --git a/drivers/staging/fsl_qbman/qman_driver.c b/drivers/staging/fsl_qbman/qman_driver.c
index b706b21..a1aab54 100644
--- a/drivers/staging/fsl_qbman/qman_driver.c
+++ b/drivers/staging/fsl_qbman/qman_driver.c
@@ -634,7 +634,7 @@ static int __init parse_qportals(char *str)
}
__setup("qportals=", parse_qportals);
-static __init int qman_init(void)
+__init int qman_init(void)
{
struct cpumask slave_cpus;
struct cpumask unshared_cpus = *cpu_none_mask;
@@ -757,6 +757,14 @@ static __init int qman_init(void)
for_each_cpu(cpu, &slave_cpus)
init_slave(cpu);
pr_info("Qman portals initialised\n");
+ return 0;
+}
+
+__init int qman_resource_init(void)
+{
+ struct device_node *dn;
+ int ret;
+
/* Initialise FQID allocation ranges */
for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
ret = fsl_fqid_range_init(dn);
@@ -787,4 +795,3 @@ static __init int qman_init(void)
}
return 0;
}
-subsys_initcall(qman_init);
diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c
index c8451f1..f8f0524 100644
--- a/drivers/staging/fsl_qbman/qman_high.c
+++ b/drivers/staging/fsl_qbman/qman_high.c
@@ -114,14 +114,12 @@ struct qman_portal {
struct list_head cgr_cbs;
/* list lock */
spinlock_t cgr_lock;
-
/* 2-element array. ccgrs[0] is mask, ccgrs[1] is snapshot. */
struct qman_ccgrs *ccgrs[QMAN_CEETM_MAX];
/* 256-element array, each is a linked-list of CCSCN handlers. */
struct list_head ccgr_cbs[QMAN_CEETM_MAX];
/* list lock */
spinlock_t ccgr_lock;
-
/* track if memory was allocated by the driver */
u8 alloced;
};
@@ -302,8 +300,8 @@ static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
* as part of the handling of this interrupt source. We mustn't
* clear it a second time in this top-level function.
*/
- u32 clear = QM_DQAVAIL_MASK | (p->irq_sources & ~QM_PIRQ_CSCI) |
- (p->irq_sources & ~QM_PIRQ_CCSCI);
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
/* DQRR-handling if it's interrupt-driven */
if (is & QM_PIRQ_DQRI)
@@ -604,7 +602,6 @@ void qman_destroy_portal(struct qman_portal *qm)
/* Stop dequeues on the portal */
qm_dqrr_sdqcr_set(&qm->p, 0);
-
/* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
* something related to QM_PIRQ_EQCI, this may need fixing.
* Also, due to the prefetching model used for CI updates in the enqueue
@@ -721,7 +718,6 @@ static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
* state change.
*/
qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
-
qm_mc_start(&p->p);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
while (!(mcr = qm_mc_result(&p->p)))
@@ -876,7 +872,6 @@ mr_loop:
mr_done:
qm_mr_cci_consume(&p->p, num);
}
-
/*
* QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
* processing. If that interrupt source has meanwhile been re-asserted,
@@ -1731,7 +1726,9 @@ int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
*np = mcr->queryfq_np;
PORTAL_IRQ_UNLOCK(p, irqflags);
put_affine_portal();
- if (res != QM_MCR_RESULT_OK)
+ if (res == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (res != QM_MCR_RESULT_OK)
return -EIO;
return 0;
}
@@ -2355,8 +2352,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
/* Overwrite TARG */
local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl =
- ~QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
else
local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
~(TARG_MASK(p));
@@ -4249,9 +4245,11 @@ int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg)
list_for_each_entry(i, &p->ccgr_cbs[ccg->parent->dcp_idx], cb_node)
if ((i->idx == ccg->idx) && i->cb)
goto release_lock;
+ config_opts.ccgrid = CEETM_CCGR_CM_CONFIGURE |
+ (ccg->parent->idx << 4) | ccg->idx;
+ config_opts.dcpid = ccg->parent->dcp_idx;
config_opts.we_mask = QM_CCGR_WE_CSCN_TUPD;
- config_opts.cm_config.cscn_tupd =
- ~QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ config_opts.cm_config.cscn_tupd = PORTAL_IDX(p);
ret = qman_ceetm_configure_ccgr(&config_opts);
release_lock:
diff --git a/drivers/staging/fsl_qbman/qman_private.h b/drivers/staging/fsl_qbman/qman_private.h
index 0654cbb..d2bdd1e 100644
--- a/drivers/staging/fsl_qbman/qman_private.h
+++ b/drivers/staging/fsl_qbman/qman_private.h
@@ -238,11 +238,6 @@ int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
int qman_setup_fq_lookup_table(size_t num_entries);
#endif
-/* Lookup a QMan portal associated with an FD */
-struct qm_portal_config *usdpaa_get_qm_portal_config(struct file *filp,
- void *cinh);
-
-
/*************************************************/
/* QMan s/w corenet portal, low-level i/face */
@@ -371,6 +366,15 @@ struct qm_portal_config *usdpaa_get_qm_portal_config(struct file *filp,
#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
+#ifdef CONFIG_FSL_QMAN_CONFIG
+int qman_have_ccsr(void);
+#else
+#define qman_have_ccsr() 0
+#endif
+
+__init int qman_init(void);
+__init int qman_resource_init(void);
+
/* CEETM related */
#define QMAN_CEETM_MAX 2
extern u8 num_ceetms;
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index f1e8547..8d79759 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -52,6 +52,46 @@ struct ehci_fsl {
unsigned hcd_add:1;
};
+static bool usb_phy_clk_valid(struct usb_hcd *hcd,
+ enum fsl_usb2_phy_modes phy_mode)
+{
+ void __iomem *non_ehci = hcd->regs;
+ struct device *dev = hcd->self.controller;
+ struct fsl_usb2_platform_data *pdata = dev->platform_data;
+ bool ret = true;
+ int retry = UTMI_PHY_CLK_VALID_CHK_RETRY;
+
+ if (phy_mode == FSL_USB2_PHY_ULPI) {
+ /* check PHY_CLK_VALID to get phy clk valid */
+ if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+ PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
+ in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
+ ret = false;
+ }
+ } else if (phy_mode == FSL_USB2_PHY_UTMI) {
+ if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID)) {
+ ret = false;
+ if (pdata->controller_ver < FSL_USB_VER_2_4) {
+ while (retry--) {
+ clrbits32(non_ehci + FSL_SOC_USB_CTRL,
+ CTRL_UTMI_PHY_EN);
+ setbits32(non_ehci + FSL_SOC_USB_CTRL,
+ CTRL_UTMI_PHY_EN);
+ /* delay required for Clk to appear */
+ mdelay(FSL_UTMI_PHY_DLY);
+ if ((in_be32(non_ehci +
+ FSL_SOC_USB_CTRL) & PHY_CLK_VALID)) {
+ ret = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -325,14 +365,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
}
if (pdata->have_sysif_regs && pdata->controller_ver &&
- (phy_mode == FSL_USB2_PHY_ULPI)) {
- /* check PHY_CLK_VALID to get phy clk valid */
- if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
- PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
- in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
- printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
- return -EINVAL;
- }
+ !usb_phy_clk_valid(hcd, phy_mode)) {
+ printk(KERN_ERR "fsl-ehci: USB PHY clock invalid\n");
+ return -EINVAL;
}
ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index dbd292e..a032358 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -62,4 +62,7 @@
#define UTMI_PHY_EN (1<<9)
#define ULPI_PHY_CLK_SEL (1<<10)
#define PHY_CLK_VALID (1<<17)
+
+/* Retry count for checking UTMI PHY CLK validity */
+#define UTMI_PHY_CLK_VALID_CHK_RETRY 5
#endif /* _EHCI_FSL_H */
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index b906cd7..74f4fcd 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -131,15 +131,15 @@ static bool has_erratum_a005275(struct device_node *node)
* P5020 and P5010 rev 1.0 and 2.0
* P5040 and P1010 rev 1.0
*/
- if ((fsl_svr_is(SVR_P3041)) || (fsl_svr_is(SVR_P3041_E)) ||
- (fsl_svr_is(SVR_P2041)) || (fsl_svr_is(SVR_P2041_E)))
- flag = (IS_SVR_REV(svr, 1, 0)) || (IS_SVR_REV(svr, 1, 1));
- else if ((fsl_svr_is(SVR_P5020)) || (fsl_svr_is(SVR_P5020_E)) ||
- (fsl_svr_is(SVR_P5010)) || (fsl_svr_is(SVR_P5010_E)))
- flag = (IS_SVR_REV(svr, 1, 0)) || (IS_SVR_REV(svr, 2, 0));
- else if ((fsl_svr_is(SVR_P5040)) || (fsl_svr_is(SVR_P5040_E)) ||
- (fsl_svr_is(SVR_P1010)) || (fsl_svr_is(SVR_P1010_E)))
- flag = IS_SVR_REV(svr, 1, 0);
+ if ((SVR_SOC_VER(svr) == SVR_P2041) ||
+ (SVR_SOC_VER(svr) == SVR_P3041))
+ flag = (SVR_REV(svr) == 0x10) || (SVR_REV(svr) == 0x11);
+ else if ((SVR_SOC_VER(svr) == SVR_P5010) ||
+ (SVR_SOC_VER(svr) == SVR_P5020))
+ flag = (SVR_REV(svr) == 0x10) || (SVR_REV(svr) == 0x20);
+ else if ((SVR_SOC_VER(svr) == SVR_P5040) ||
+ (SVR_SOC_VER(svr) == SVR_P1010))
+ flag = (SVR_REV(svr) == 0x10);
return flag;
}