Diffstat (limited to 'drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c')
-rw-r--r--  drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 65
1 file changed, 43 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
index cac613b..b090db1 100644
--- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
@@ -69,24 +69,27 @@ static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
 static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
		      const struct qm_mr_entry *msg)
 {
-	struct net_device *net_dev;
-	struct ceetm_class *cls;
+	struct dpa_percpu_priv_s *dpa_percpu_priv;
 	struct ceetm_class_stats *cstats = NULL;
 	const struct dpa_priv_s *dpa_priv;
-	struct dpa_percpu_priv_s *dpa_percpu_priv;
-	struct sk_buff *skb;
 	struct qm_fd fd = msg->ern.fd;
+	struct net_device *net_dev;
+	struct ceetm_fq *ceetm_fq;
+	struct ceetm_class *cls;
+	struct sk_buff *skb;
 
-	net_dev = ((struct ceetm_fq *)fq)->net_dev;
+	ceetm_fq = container_of(fq, struct ceetm_fq, fq);
+	net_dev = ceetm_fq->net_dev;
 	dpa_priv = netdev_priv(net_dev);
 	dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
 
 	/* Increment DPA counters */
 	dpa_percpu_priv->stats.tx_dropped++;
 	dpa_percpu_priv->stats.tx_fifo_errors++;
+	count_ern(dpa_percpu_priv, msg);
 
 	/* Increment CEETM counters */
-	cls = ((struct ceetm_fq *)fq)->ceetm_cls;
+	cls = ceetm_fq->ceetm_cls;
 	switch (cls->type) {
 	case CEETM_PRIO:
 		cstats = this_cpu_ptr(cls->prio.cstats);
@@ -99,11 +102,15 @@ static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
 	if (cstats)
 		cstats->ern_drop_count++;
 
+	/* Release the buffers that were supposed to be recycled. */
 	if (fd.bpid != 0xff) {
 		dpa_fd_release(net_dev, &fd);
 		return;
 	}
 
+	/* Release the frames that were supposed to return on the
+	 * confirmation path.
+	 */
 	skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
 	dev_kfree_skb_any(skb);
 }
@@ -125,16 +132,16 @@ static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
 		break;
 	}
 
+	ceetm_fq->congested = congested;
+
 	if (congested) {
 		dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
-		netif_tx_stop_all_queues(dpa_priv->net_dev);
 		dpa_priv->cgr_data.cgr_congested_count++;
 		if (cstats)
 			cstats->congested_count++;
 	} else {
 		dpa_priv->cgr_data.congested_jiffies +=
 			(jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
-		netif_tx_wake_all_queues(dpa_priv->net_dev);
 	}
 }
 
@@ -148,6 +155,7 @@ static int ceetm_alloc_fq(struct ceetm_fq **fq, struct net_device *dev,
 
 	(*fq)->net_dev = dev;
 	(*fq)->ceetm_cls = cls;
+	(*fq)->congested = 0;
 	return 0;
 }
 
@@ -185,9 +193,9 @@ static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
 
 	/* Set the congestion state thresholds according to the link speed */
 	if (dpa_priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
-		cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
+		cs_th = CONFIG_FSL_DPAA_CEETM_CCS_THRESHOLD_10G;
 	else
-		cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
+		cs_th = CONFIG_FSL_DPAA_CEETM_CCS_THRESHOLD_1G;
 
 	qm_cgr_cs_thres_set64(&ccg_params.cs_thres_in, cs_th, 1);
 	qm_cgr_cs_thres_set64(&ccg_params.cs_thres_out,
@@ -1908,17 +1916,22 @@ static struct ceetm_class *ceetm_classify(struct sk_buff *skb,
 
 int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
 {
-	int ret;
-	bool act_drop = false;
+	const int queue_mapping = dpa_get_queue_mapping(skb);
 	struct Qdisc *sch = net_dev->qdisc;
-	struct ceetm_class *cl;
-	struct dpa_priv_s *priv_dpa;
-	struct qman_fq *egress_fq, *conf_fq;
-	struct ceetm_qdisc *priv = qdisc_priv(sch);
-	struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
 	struct ceetm_class_stats *cstats;
-	const int queue_mapping = dpa_get_queue_mapping(skb);
-	spinlock_t *root_lock = qdisc_lock(sch);
+	struct ceetm_qdisc_stats *qstats;
+	struct dpa_priv_s *priv_dpa;
+	struct ceetm_fq *ceetm_fq;
+	struct ceetm_qdisc *priv;
+	struct qman_fq *conf_fq;
+	struct ceetm_class *cl;
+	spinlock_t *root_lock;
+	bool act_drop = false;
+	int ret;
+
+	root_lock = qdisc_lock(sch);
+	priv = qdisc_priv(sch);
+	qstats = this_cpu_ptr(priv->root.qstats);
 
 	spin_lock(root_lock);
 	cl = ceetm_classify(skb, sch, &ret, &act_drop);
@@ -1945,11 +1958,11 @@ int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
 	 */
 	switch (cl->type) {
 	case CEETM_PRIO:
-		egress_fq = &cl->prio.fq->fq;
+		ceetm_fq = cl->prio.fq;
 		cstats = this_cpu_ptr(cl->prio.cstats);
 		break;
 	case CEETM_WBFS:
-		egress_fq = &cl->wbfs.fq->fq;
+		ceetm_fq = cl->wbfs.fq;
 		cstats = this_cpu_ptr(cl->wbfs.cstats);
 		break;
 	default:
@@ -1957,8 +1970,16 @@ int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
 		goto drop;
 	}
 
+	/* If the FQ is congested, avoid enqueuing the frame and dropping it
+	 * when it returns on the ERN path. Drop it here directly instead.
+	 */
+	if (unlikely(ceetm_fq->congested)) {
+		qstats->drops++;
+		goto drop;
+	}
+
 	bstats_update(&cstats->bstats, skb);
-	return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
+	return dpa_tx_extended(skb, net_dev, &ceetm_fq->fq, conf_fq);
 
 drop:
 	dev_kfree_skb_any(skb);