 drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h |   3
 drivers/net/ethernet/freescale/dpa/dpaa_eth.c        | 106
 drivers/net/ethernet/freescale/dpa/dpaa_eth.h        |  20
 drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c     |   4
 drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c  |   5
 5 files changed, 122 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h
index 8ea1a3e..58a71cf 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth-common.h
@@ -161,6 +161,9 @@ enum dpa_fq_type {
FQ_TYPE_TX, /* "Real" Tx FQs */
FQ_TYPE_TX_CONFIRM, /* Tx Confirmation FQs (actually Rx FQs) */
FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
+#ifdef CONFIG_DPA_TX_RECYCLE
+ FQ_TYPE_TX_RECYCLE, /* Tx FQs for recycleable frames only */
+#endif
};
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
index bbb45dc..d7d0dd8 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
@@ -657,6 +657,28 @@ _dpa_fq_alloc(struct list_head *list, struct dpa_fq *dpa_fq)
}
}
+#ifdef CONFIG_DPA_TX_RECYCLE
+ /*
+ * Configure the Tx queues for recycled frames, such that the
+ * buffers are released by FMan and no confirmation is sent
+ */
+ if (dpa_fq->fq_type == FQ_TYPE_TX_RECYCLE) {
+ initfq.we_mask |= QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CONTEXTB;
+ /*
+ * ContextA: OVFQ=1 (use ContextB FQID for confirmation)
+ * OVOM=1 (use contextA2 bits instead of ICAD)
+ * A2V=1 (contextA A2 field is valid)
+ * B0V=1 (contextB field is valid)
+ * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+ * ContextB: Confirmation FQID = 0
+ */
+ initfq.fqd.context_a.hi = 0x96000000;
+ initfq.fqd.context_a.lo = 0x80000000;
+ initfq.fqd.context_b = 0;
+ }
+#endif
+
/* Initialization common to all ingress queues */
if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
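
Note on the ContextA/ContextB magic numbers above: they pack the override flags
listed in the comment into the FQD context words. A hypothetical decode is
sketched below; the bit positions are assumptions introduced only so the
constants can be written symbolically (the patch documents just the flag names),
so verify them against the FMan reference manual before reuse.

/* Assumed bit positions -- hypothetical names, not taken from this patch */
#define FMAN_CONTEXTA_HI_OVFQ	0x80000000	/* bit 31: take confirmation FQID from ContextB */
#define FMAN_CONTEXTA_HI_OVOM	0x10000000	/* bit 28: use ContextA2 bits instead of ICAD */
#define FMAN_CONTEXTA_HI_A2V	0x04000000	/* bit 26: ContextA2 field is valid */
#define FMAN_CONTEXTA_HI_B0V	0x02000000	/* bit 25: ContextB field is valid */
#define FMAN_CONTEXTA_LO_EBD	0x80000000	/* bit 31: FMan deallocates the buffers itself */

	/* 0x80000000 | 0x10000000 | 0x04000000 | 0x02000000 == 0x96000000 */
	initfq.fqd.context_a.hi = FMAN_CONTEXTA_HI_OVFQ | FMAN_CONTEXTA_HI_OVOM |
				  FMAN_CONTEXTA_HI_A2V | FMAN_CONTEXTA_HI_B0V;
	initfq.fqd.context_a.lo = FMAN_CONTEXTA_LO_EBD;
	initfq.fqd.context_b = 0;	/* confirmation FQID 0: no Tx confirmation */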
@@ -1893,10 +1915,6 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
goto fd_create_failed;
}
-#if (DPAA_VERSION >= 11)
- fd.cmd &= ~FM_FD_CMD_FCO;
-#endif
-
if (fd.cmd & FM_FD_CMD_FCO) {
/* This skb is recycleable, and the fd generated from it
* has been filled in accordingly */
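
With the removed #if (DPAA_VERSION >= 11) block gone, FM_FD_CMD_FCO set during
fd creation is no longer cleared on DPAA_VERSION >= 11 builds, so a frame marked
for recycling now reaches dpa_xmit() with the bit intact. Combined with the
dpa_xmit() change in dpaa_eth.h further down, the resulting fast-path selection
is roughly as follows (paraphrased sketch, not a verbatim excerpt; queue is the
dpa_xmit() queue index):

	if (fd->cmd & FM_FD_CMD_FCO)
		/* frame marked for recycling: recycle FQ, FMan frees the
		 * buffer itself (EBD=1), no Tx confirmation is generated */
		egress_fq = priv->recycle_fqs[queue];
	else
		/* regular frame: normal egress FQ with Tx confirmation */
		egress_fq = priv->egress_fqs[queue];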
@@ -3229,10 +3247,17 @@ static const struct fqid_cell tx_confirm_fqids[] __devinitconst = {
{0, DPAA_ETH_TX_QUEUES}
};
+#ifdef CONFIG_DPA_TX_RECYCLE
+static const struct fqid_cell tx_recycle_fqids[] = {
+ {0, DPAA_ETH_TX_QUEUES}
+};
+#endif
+
static int __devinit
dpa_fq_probe(struct platform_device *_of_dev, struct list_head *list,
struct dpa_fq **defq, struct dpa_fq **errq,
- struct dpa_fq **fqs, struct dpa_fq **txconfq, int ptype)
+ struct dpa_fq **fqs, struct dpa_fq **txconfq,
+ struct dpa_fq **txrecycle, int ptype)
{
struct device *dev = &_of_dev->dev;
struct device_node *np = dev->of_node;
@@ -3263,6 +3288,30 @@ dpa_fq_probe(struct platform_device *_of_dev, struct list_head *list,
}
}
+#ifdef CONFIG_DPA_TX_RECYCLE
+ /* per-core tx queues for recycleable frames (FManv3 only) */
+ if (txrecycle) {
+ fqids = tx_recycle_fqids;
+ dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids[0].count,
+ GFP_KERNEL);
+ if (dpa_fq == NULL) {
+ dpaa_eth_err(dev, "devm_kzalloc() failed\n");
+ return -ENOMEM;
+ }
+
+ *txrecycle = dpa_fq;
+ for (j = 0; j < fqids[0].count; j++)
+ dpa_fq[j].fq_type = FQ_TYPE_TX_RECYCLE;
+
+ for (j = 0; j < fqids[0].count; j++) {
+ dpa_fq[j].fqid = fqids[0].start ?
+ fqids[0].start + j : 0;
+ _dpa_assign_wq(dpa_fq + j);
+ list_add_tail(&dpa_fq[j].list, list);
+ }
+ }
+#endif
+
fqids = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
if (fqids == NULL) {
fqids = default_fqids[ptype];
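
The recycle FQs are driven by the compile-time tx_recycle_fqids template rather
than a device-tree property, and since its start field is 0 every FQID is left at
0 here (presumably picked up later as a dynamically allocated FQID, as with the
other zero-based fqid_cell templates). The two per-element loops in the block
above can also be folded into one; an equivalent sketch:

	*txrecycle = dpa_fq;
	for (j = 0; j < fqids[0].count; j++) {
		dpa_fq[j].fq_type = FQ_TYPE_TX_RECYCLE;
		/* start == 0 in tx_recycle_fqids, so this stays 0 */
		dpa_fq[j].fqid = fqids[0].start ? fqids[0].start + j : 0;
		_dpa_assign_wq(dpa_fq + j);
		list_add_tail(&dpa_fq[j].list, list);
	}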
@@ -3374,6 +3423,29 @@ static void dpa_setup_egress(struct dpa_priv_s *priv,
}
}
+#ifdef CONFIG_DPA_TX_RECYCLE
+static void dpa_setup_recycle_queues(struct dpa_priv_s *priv, struct dpa_fq *fq,
+ struct fm_port *port)
+{
+ int i = 0;
+ struct list_head *ptr = &fq->list;
+
+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) {
+ struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
+
+ iter->fq_base = private_egress_fq;
+ iter->net_dev = priv->net_dev;
+
+ priv->recycle_fqs[i] = &iter->fq_base;
+
+ iter->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ iter->channel = fm_get_tx_port_channel(port);
+
+ ptr = ptr->next;
+ }
+}
+#endif
+
static void dpa_setup_conf_queues(struct dpa_priv_s *priv, struct dpa_fq *fq)
{
const cpumask_t *affine_cpus = qman_affine_cpus();
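
dpa_setup_recycle_queues() walks the list by hand because it only receives the
first recycle FQ, not the list head, so the generic list_for_each_entry()
helpers do not apply directly. The walk is safe because dpa_fq_probe() allocated
the DPAA_ETH_TX_QUEUES recycle FQs as one contiguous array and list_add_tail()-ed
them back to back; under that assumption (fq pointing at the first element of
that array), the loop is equivalent to indexing the array directly:

	for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) {
		fq[i].fq_base = private_egress_fq;
		fq[i].net_dev = priv->net_dev;
		fq[i].flags = QMAN_FQ_FLAG_TO_DCPORTAL;
		fq[i].channel = fm_get_tx_port_channel(port);
		priv->recycle_fqs[i] = &fq[i].fq_base;
	}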
@@ -3498,7 +3570,7 @@ static void dpa_rx_fq_init(struct dpa_priv_s *priv, struct list_head *head,
static void dpa_tx_fq_init(struct dpa_priv_s *priv, struct list_head *head,
struct dpa_fq *defq, struct dpa_fq *errq,
struct dpa_fq *fqs, struct dpa_fq *confqs,
- struct fm_port *port)
+ struct dpa_fq *recyclefqs, struct fm_port *port)
{
if (fqs)
dpa_setup_egress(priv, head, fqs, port);
@@ -3519,6 +3591,11 @@ static void dpa_tx_fq_init(struct dpa_priv_s *priv, struct list_head *head,
dpa_setup_ingress(priv, errq, &tx_private_errq);
if (confqs)
dpa_setup_conf_queues(priv, confqs);
+#ifdef CONFIG_DPA_TX_RECYCLE
+ if (recyclefqs)
+ dpa_setup_recycle_queues(priv, recyclefqs, port);
+#endif
+
}
}
@@ -3744,6 +3821,7 @@ dpaa_eth_probe(struct platform_device *_of_dev)
struct dpa_fq *rxextra = NULL;
struct dpa_fq *txfqs = NULL;
struct dpa_fq *txconf = NULL;
+ struct dpa_fq *txrecycle = NULL;
struct fm_port *rxport = NULL;
struct fm_port *txport = NULL;
bool has_timer = FALSE;
@@ -3813,20 +3891,26 @@ dpaa_eth_probe(struct platform_device *_of_dev)
if (rxport)
err = dpa_fq_probe(_of_dev, &rxfqlist, &rxdefault, &rxerror,
- &rxextra, NULL, RX);
+ &rxextra, NULL, NULL, RX);
else
err = dpa_fq_probe(_of_dev, &rxfqlist, NULL, NULL,
- &rxextra, NULL, RX);
+ &rxextra, NULL, NULL, RX);
if (err < 0)
goto rx_fq_probe_failed;
if (txport)
+#ifdef CONFIG_DPA_TX_RECYCLE
err = dpa_fq_probe(_of_dev, &txfqlist, &txdefault, &txerror,
- &txfqs, (is_shared ? NULL : &txconf), TX);
+ &txfqs, (is_shared ? NULL : &txconf),
+ (is_shared ? NULL : &txrecycle), TX);
+#else
+ err = dpa_fq_probe(_of_dev, &txfqlist, &txdefault, &txerror,
+ &txfqs, (is_shared ? NULL : &txconf), NULL, TX);
+#endif
else
err = dpa_fq_probe(_of_dev, &txfqlist, NULL, NULL, &txfqs,
- NULL, TX);
+ NULL, NULL, TX);
if (err < 0)
goto tx_fq_probe_failed;
@@ -3872,7 +3956,7 @@ dpaa_eth_probe(struct platform_device *_of_dev)
dpa_rx_fq_init(priv, &rxfqlist, rxdefault, rxerror, rxextra);
dpa_tx_fq_init(priv, &txfqlist, txdefault, txerror, txfqs,
- txconf, txport);
+ txconf, txrecycle, txport);
/*
* Create a congestion group for this netdev, with
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h
index 2085be1..10ccf56 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h
@@ -361,6 +361,9 @@ struct dpa_priv_s {
struct list_head dpa_fq_list;
struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
+#ifdef CONFIG_DPA_TX_RECYCLE
+ struct qman_fq *recycle_fqs[DPAA_ETH_TX_QUEUES];
+#endif
struct mac_device *mac_dev;
@@ -500,9 +503,21 @@ static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
struct qm_fd *fd)
{
int err, i;
+ struct qman_fq *egress_fq;
+
+#ifdef CONFIG_DPA_TX_RECYCLE
+ /* Choose egress fq based on whether we want
+ * to recycle the frame or not */
+ if (fd->cmd & FM_FD_CMD_FCO)
+ egress_fq = priv->recycle_fqs[queue];
+ else
+ egress_fq = priv->egress_fqs[queue];
+#else
+ egress_fq = priv->egress_fqs[queue];
+#endif
for (i = 0; i < 100000; i++) {
- err = qman_enqueue(priv->egress_fqs[queue], fd, 0);
+ err = qman_enqueue(egress_fq, fd, 0);
if (err != -EBUSY)
break;
}
@@ -554,6 +569,9 @@ static inline void _dpa_assign_wq(struct dpa_fq *fq)
break;
case FQ_TYPE_RX_DEFAULT:
case FQ_TYPE_TX:
+#ifdef CONFIG_DPA_TX_RECYCLE
+ case FQ_TYPE_TX_RECYCLE:
+#endif
case FQ_TYPE_RX_PCD:
fq->wq = 3;
break;
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
index 09b2928..ca3b72c 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
@@ -767,10 +767,6 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
return NETDEV_TX_OK;
}
-#if (DPAA_VERSION >= 11)
- fd.cmd &= ~FM_FD_CMD_FCO;
-#endif
-
if (unlikely(dpa_xmit(priv, percpu_priv, queue_mapping, &fd) < 0))
goto xmit_failed;
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c
index 1a75354..1aed378 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sysfs.c
@@ -85,6 +85,11 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
case FQ_TYPE_TX:
str = "Tx";
break;
+#ifdef CONFIG_DPA_TX_RECYCLE
+ case FQ_TYPE_TX_RECYCLE:
+ str = "Tx(recycling)";
+ break;
+#endif
default:
str = "Unknown";
}