Diffstat (limited to 'drivers/staging/fsl-dpaa2/ethernet')
-rw-r--r-- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 18
-rw-r--r-- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 502
-rw-r--r-- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 61
-rw-r--r-- drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 24
-rw-r--r-- drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 68
-rw-r--r-- drivers/staging/fsl-dpaa2/ethernet/dpni.c | 135
-rw-r--r-- drivers/staging/fsl-dpaa2/ethernet/dpni.h | 68
7 files changed, 770 insertions(+), 106 deletions(-)
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
index 445c5d1..933412e 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
@@ -110,9 +110,9 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
int i, err;
seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
- "VFQID", "CPU", "Type", "Frames", "Pending frames",
- "Congestion");
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "Traffic Class", "Type", "Frames",
+ "Pending frames", "Congestion");
for (i = 0; i < priv->num_fqs; i++) {
fq = &priv->fq[i];
@@ -120,9 +120,10 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
if (err)
fcnt = 0;
- seq_printf(file, "%5d%16d%16s%16llu%16u%16llu\n",
+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n",
fq->fqid,
fq->target_cpu,
+ fq->tc,
fq_type_to_str(fq),
fq->stats.frames,
fcnt,
@@ -158,19 +159,20 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
int i;
seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
"CHID", "CPU", "Deq busy", "Frames", "CDANs",
- "Avg frm/CDAN");
+ "Avg frm/CDAN", "Buf count");
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n",
+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
ch->ch_id,
ch->nctx.desired_cpu,
ch->stats.dequeue_portal_busy,
ch->stats.frames,
ch->stats.cdan,
- ch->stats.frames / ch->stats.cdan);
+ ch->stats.frames / ch->stats.cdan,
+ ch->buf_count);
}
return 0;
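For reference, the widened debugfs tables can be exercised with plain printf() using the exact field widths from the patch; a minimal standalone sketch follows, where the sample row values are made up:

#include <stdio.h>

int main(void)
{
	/* Header and one sample row of the new FQ stats table */
	printf("%s%16s%16s%16s%16s%16s%16s\n",
	       "VFQID", "CPU", "Traffic Class", "Type", "Frames",
	       "Pending frames", "Congestion");
	printf("%5d%16d%16d%16s%16llu%16u%16llu\n",
	       520, 0, 1, "Rx", 1024ULL, 3u, 0ULL);
	return 0;
}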
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
index 452eca5..789a5e6 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -137,6 +137,8 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
u16 fd_offset = dpaa2_fd_get_offset(fd);
u32 fd_length = dpaa2_fd_get_len(fd);
+ ch->buf_count--;
+
skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
if (unlikely(!skb))
return NULL;
@@ -144,8 +146,6 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
skb_reserve(skb, fd_offset);
skb_put(skb, fd_length);
- ch->buf_count--;
-
return skb;
}
@@ -183,7 +183,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* We build the skb around the first data buffer */
skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
if (unlikely(!skb))
- return NULL;
+ goto err_build;
sg_offset = dpaa2_sg_get_offset(sge);
skb_reserve(skb, sg_offset);
@@ -214,6 +214,32 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
ch->buf_count -= i + 2;
return skb;
+
+err_build:
+ /* We still need to subtract the buffers used by this FD from our
+ * software counter
+ */
+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++)
+ if (dpaa2_sg_is_final(&sgt[i]))
+ break;
+ ch->buf_count -= i + 2;
+
+ return NULL;
+}
+
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ /* Same logic as on regular Rx path */
+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ put_page(virt_to_head_page(vaddr));
+ }
}
/* Main Rx frame processing routine */
@@ -521,7 +547,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_len(fd, skb->len);
- fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA;
if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, sgt_buf);
@@ -578,7 +604,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_format(fd, dpaa2_fd_single);
- fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA;
if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, buffer_start);
@@ -597,7 +623,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
*/
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
const struct dpaa2_fd *fd,
- u32 *status)
+ u32 *status, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr;
@@ -676,7 +702,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
kfree(skbh);
/* Move on with skb release */
- dev_kfree_skb(skb);
+ napi_consume_skb(skb, in_napi);
}
static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
@@ -760,7 +786,7 @@ static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- free_tx_fd(priv, &fd, NULL);
+ free_tx_fd(priv, &fd, NULL, false);
} else {
percpu_stats->tx_packets++;
percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
@@ -813,7 +839,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
}
- free_tx_fd(priv, fd, check_fas_errors ? &status : NULL);
+ free_tx_fd(priv, fd, check_fas_errors ? &status : NULL, true);
/* if there are no errors, we're done */
if (likely(!errors))
@@ -883,7 +909,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
void *buf;
dma_addr_t addr;
- int i;
+ int i, err;
for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
/* Allocate buffer visible to WRIOP + skb shared info +
@@ -910,22 +936,25 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
}
release_bufs:
- /* In case the portal is busy, retry until successful.
- * The buffer release function would only fail if the QBMan portal
- * was busy, which implies portal contention (i.e. more CPUs than
- * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
- * there is little we can realistically do, short of giving up -
- * in which case we'd risk depleting the buffer pool and never again
- * receiving the Rx interrupt which would kick-start the refill logic.
- * So just keep retrying, at the risk of being moved to ksoftirqd.
- */
- while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
+ /* In case the portal is busy, retry until successful */
+ while ((err = dpaa2_io_service_release(NULL, bpid,
+ buf_array, i)) == -EBUSY)
cpu_relax();
+
+ /* If release command failed, clean up and bail out; not much
+ * else we can do about it
+ */
+ if (unlikely(err)) {
+ free_bufs(priv, buf_array, i);
+ return 0;
+ }
+
return i;
err_map:
put_page(virt_to_head_page(buf));
err_alloc:
+ /* If we managed to allocate at least some buffers, release them */
if (i)
goto release_bufs;
@@ -968,10 +997,8 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
*/
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
- struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
- void *vaddr;
- int ret, i;
+ int ret;
do {
ret = dpaa2_io_service_acquire(NULL, priv->bpid,
@@ -980,15 +1007,7 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
- for (i = 0; i < ret; i++) {
- /* Same logic as on regular Rx path */
- vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain,
- buf_array[i]);
- dma_unmap_single(dev, buf_array[i],
- DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- put_page(virt_to_head_page(vaddr));
- }
+ free_bufs(priv, buf_array, ret);
} while (ret);
}
@@ -1838,7 +1857,7 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
static void setup_fqs(struct dpaa2_eth_priv *priv)
{
- int i;
+ int i, j;
/* We have one TxConf FQ per Tx flow. Tx queues MUST be at the
* beginning of the queue array.
@@ -1851,11 +1870,13 @@ static void setup_fqs(struct dpaa2_eth_priv *priv)
priv->fq[priv->num_fqs++].flowid = (u16)i;
}
- for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
- priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
- priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
- priv->fq[priv->num_fqs++].flowid = (u16)i;
- }
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
+ for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+ priv->fq[priv->num_fqs].tc = (u8)i;
+ priv->fq[priv->num_fqs++].flowid = (u16)j;
+ }
#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
/* We have exactly one Rx error queue per DPNI */
@@ -2098,9 +2119,6 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
priv->tx_data_offset);
- /* Accommodate software annotation space (SWA) */
- priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
-
/* Enable congestion notifications for Tx queues */
err = setup_tx_congestion(priv);
if (err)
@@ -2156,39 +2174,111 @@ static void free_dpni(struct dpaa2_eth_priv *priv)
kfree(priv->cscn_unaligned);
}
-int setup_fqs_taildrop(struct dpaa2_eth_priv *priv,
- bool enable)
+static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
+ struct dpni_taildrop *td)
{
struct device *dev = priv->net_dev->dev.parent;
- struct dpni_taildrop td;
- int err = 0, i;
+ int err, i;
- td.enable = enable;
- td.threshold = DPAA2_ETH_TAILDROP_THRESH;
-
- if (enable) {
- priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
- priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
- } else {
- priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
- priv->num_channels;
- priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
- }
for (i = 0; i < priv->num_fqs; i++) {
if (priv->fq[i].type != DPAA2_RX_FQ)
continue;
err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
- DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
- priv->fq[i].flowid, &td);
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ priv->fq[i].tc, priv->fq[i].flowid,
+ td);
if (err) {
dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
- break;
+ return err;
}
}
- return err;
+ return 0;
+}
+
+static int set_group_taildrop(struct dpaa2_eth_priv *priv,
+ struct dpni_taildrop *td)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_taildrop disable_td, *tc_td;
+ int i, err;
+
+ memset(&disable_td, 0, sizeof(struct dpni_taildrop));
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
+ /* Do not set taildrop thresholds for PFC-enabled
+ * traffic classes. We will enable congestion
+ * notifications for them.
+ */
+ tc_td = &disable_td;
+ else
+ tc_td = td;
+
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_GROUP, DPNI_QUEUE_RX,
+ i, 0, tc_td);
+ if (err) {
+ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
+ return err;
+ }
+ }
+ return 0;
+}
+
+/* Enable/disable Rx FQ taildrop
+ *
+ * Rx FQ taildrop is mutually exclusive with flow control, so it is
+ * disabled when FC is active. Depending on FC status, the maximum number
+ * of buffers in the pool is computed differently, so take the opportunity
+ * to update it as well.
+ */
+int set_rx_taildrop(struct dpaa2_eth_priv *priv)
+{
+ enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
+ struct dpni_taildrop td_queue, td_group;
+ int err = 0;
+
+ switch (cfg) {
+ case DPAA2_ETH_TD_NONE:
+ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
+ memset(&td_group, 0, sizeof(struct dpni_taildrop));
+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
+ priv->num_channels;
+ break;
+ case DPAA2_ETH_TD_QUEUE:
+ memset(&td_group, 0, sizeof(struct dpni_taildrop));
+ td_queue.enable = 1;
+ td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
+ td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
+ dpaa2_eth_tc_count(priv);
+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
+ break;
+ case DPAA2_ETH_TD_GROUP:
+ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
+ td_group.enable = 1;
+ td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
+ td_group.threshold = NAPI_POLL_WEIGHT *
+ dpaa2_eth_queue_count(priv);
+ priv->num_bufs = NAPI_POLL_WEIGHT *
+ dpaa2_eth_tc_count(priv);
+ break;
+ default:
+ break;
+ }
+
+ err = set_queue_taildrop(priv, &td_queue);
+ if (err)
+ return err;
+
+ err = set_group_taildrop(priv, &td_group);
+ if (err)
+ return err;
+
+ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
+
+ return 0;
}
static int setup_rx_flow(struct dpaa2_eth_priv *priv,
@@ -2201,7 +2291,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
int err;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, 0, fq->flowid, &q, &qid);
+ DPNI_QUEUE_RX, fq->tc, fq->flowid, &q, &qid);
if (err) {
dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
return err;
@@ -2214,7 +2304,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
q.destination.priority = 1;
q.user_context = (u64)fq;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, 0, fq->flowid, q_opt, &q);
+ DPNI_QUEUE_RX, fq->tc, fq->flowid, q_opt, &q);
if (err) {
dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
return err;
@@ -2411,7 +2501,13 @@ static int set_hash(struct dpaa2_eth_priv *priv)
dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
}
- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
+ &dist_cfg);
+ if (err)
+ break;
+ }
+
dma_unmap_single(dev, dist_cfg.key_cfg_iova,
DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
if (err)
@@ -2438,6 +2534,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
+ pools_params.pools[0].priority_mask = 0xff;
pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
if (err) {
@@ -2923,6 +3020,264 @@ static void dpaa2_eth_sysfs_remove(struct device *dev)
device_remove_file(dev, &dpaa2_eth_attrs[i]);
}
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_congestion_notification_cfg notification_cfg;
+ struct dpni_link_state state;
+ int err, i;
+
+ pfc->pfc_cap = dpaa2_eth_tc_count(priv);
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err) {
+ netdev_err(net_dev, "ERROR %d getting link state", err);
+ return err;
+ }
+
+ if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
+ return 0;
+
+ priv->pfc.pfc_en = 0;
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_get_congestion_notification(priv->mc_io, 0,
+ priv->mc_token,
+ DPNI_QUEUE_RX,
+ i, &notification_cfg);
+ if (err) {
+ netdev_err(net_dev, "Error %d getting congestion notif",
+ err);
+ return err;
+ }
+
+ if (notification_cfg.threshold_entry)
+ priv->pfc.pfc_en |= 1 << i;
+ }
+
+ pfc->pfc_en = priv->pfc.pfc_en;
+ pfc->mbc = priv->pfc.mbc;
+ pfc->delay = priv->pfc.delay;
+
+ return 0;
+}
+
+/* Configure ingress classification based on VLAN PCP */
+static int set_vlan_qos(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpkg_profile_cfg kg_cfg = {0};
+ struct dpni_qos_tbl_cfg qos_cfg = {0};
+ struct dpni_rule_cfg key_params;
+ u8 *params_iova;
+ __be16 key, mask = cpu_to_be16(VLAN_PRIO_MASK);
+ int err = 0, i, j = 0;
+
+ if (priv->vlan_clsf_set)
+ return 0;
+
+ params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+ if (!params_iova)
+ return -ENOMEM;
+
+ kg_cfg.num_extracts = 1;
+ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
+ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+
+ err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
+ if (err) {
+ dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err);
+ goto out_free;
+ }
+
+ /* Set QoS table */
+ qos_cfg.default_tc = 0;
+ qos_cfg.discard_on_miss = 0;
+ qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
+ DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
+ dev_err(dev, "%s: DMA mapping failed\n", __func__);
+ err = -ENOMEM;
+ goto out_free;
+ }
+ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
+ dma_unmap_single(dev, qos_cfg.key_cfg_iova,
+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+
+ if (err) {
+ dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
+ goto out_free;
+ }
+
+ key_params.key_size = sizeof(key);
+
+ if (dpaa2_eth_fs_mask_enabled(priv)) {
+ key_params.mask_iova = dma_map_single(dev, &mask, sizeof(mask),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_params.mask_iova)) {
+ dev_err(dev, "DMA mapping failed %s\n", __func__);
+ err = -ENOMEM;
+ goto out_free;
+ }
+ } else {
+ key_params.mask_iova = 0;
+ }
+
+ key_params.key_iova = dma_map_single(dev, &key, sizeof(key),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_params.key_iova)) {
+ dev_err(dev, "%s: DMA mapping failed\n", __func__);
+ err = -ENOMEM;
+ goto out_unmap_mask;
+ }
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
+ dma_sync_single_for_device(dev, key_params.key_iova,
+ sizeof(key), DMA_TO_DEVICE);
+
+ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
+ &key_params, i, j++);
+ if (err) {
+ dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
+ goto out_unmap;
+ }
+ }
+
+ priv->vlan_clsf_set = true;
+
+out_unmap:
+ dma_unmap_single(dev, key_params.key_iova, sizeof(key), DMA_TO_DEVICE);
+out_unmap_mask:
+ if (key_params.mask_iova)
+ dma_unmap_single(dev, key_params.mask_iova, sizeof(mask),
+ DMA_TO_DEVICE);
+out_free:
+ kfree(params_iova);
+ return err;
+}
+
+static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_congestion_notification_cfg notification_cfg = {0};
+ struct dpni_link_state state = {0};
+ struct dpni_link_cfg cfg = {0};
+ int err = 0, i;
+
+ if (priv->pfc.pfc_en == pfc->pfc_en)
+ /* Same enabled mask, nothing to be done */
+ return 0;
+
+ err = set_vlan_qos(priv);
+ if (err)
+ return err;
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err) {
+ netdev_err(net_dev, "ERROR %d getting link state", err);
+ return err;
+ }
+
+ cfg.rate = state.rate;
+ cfg.options = state.options;
+ if (pfc->pfc_en)
+ cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
+
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+ if (err) {
+ netdev_err(net_dev, "ERROR %d setting link cfg", err);
+ return err;
+ }
+
+ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+
+ err = set_rx_taildrop(priv);
+ if (err)
+ return err;
+
+ /* configure congestion notifications */
+ notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
+ notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ notification_cfg.message_iova = 0ULL;
+ notification_cfg.message_ctx = 0ULL;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (dpaa2_eth_is_pfc_enabled(priv, i)) {
+ notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
+ notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
+ } else {
+ notification_cfg.threshold_entry = 0;
+ notification_cfg.threshold_exit = 0;
+ }
+
+ err = dpni_set_congestion_notification(priv->mc_io, 0,
+ priv->mc_token,
+ DPNI_QUEUE_RX,
+ i, &notification_cfg);
+ if (err) {
+ netdev_err(net_dev, "Error %d setting congestion notif",
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return priv->dcbx_mode;
+}
+
+static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ priv->dcbx_mode = mode;
+ return 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 1 << dpaa2_eth_tc_count(priv);
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = priv->dcbx_mode;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
+ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
+ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
+ .getdcbx = dpaa2_eth_dcbnl_getdcbx,
+ .setdcbx = dpaa2_eth_dcbnl_setdcbx,
+ .getcap = dpaa2_eth_dcbnl_getcap,
+};
+#endif
+
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
struct device *dev;
@@ -2951,7 +3306,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
if (err) {
- dev_err(dev, "MC portal allocation failed\n");
+ dev_dbg(dev, "MC portal allocation failed\n");
+ err = -EPROBE_DEFER;
goto err_portal_alloc;
}
@@ -2977,10 +3333,6 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_bind;
- /* Add a NAPI context for each channel */
- add_ch_napi(priv);
- enable_ch_napi(priv);
-
/* Percpu statistics */
priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
if (!priv->percpu_stats) {
@@ -3023,6 +3375,14 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_alloc_rings;
net_dev->ethtool_ops = &dpaa2_ethtool_ops;
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+ net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
+ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+#endif
+
+ /* Add a NAPI context for each channel */
+ add_ch_napi(priv);
+ enable_ch_napi(priv);
err = setup_irqs(dpni_dev);
if (err) {
@@ -3086,6 +3446,9 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#endif
dpaa2_eth_sysfs_remove(&net_dev->dev);
+ disable_ch_napi(priv);
+ del_ch_napi(priv);
+
unregister_netdev(net_dev);
dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
@@ -3097,9 +3460,6 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
free_rings(priv);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
-
- disable_ch_napi(priv);
- del_ch_napi(priv);
free_dpbp(priv);
free_dpio(priv);
free_dpni(priv);
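To make the new Rx queue layout concrete: setup_fqs() now enumerates one Rx FQ per (traffic class, flow) pair instead of one per flow. A minimal standalone sketch of that enumeration (the struct and counts are reduced stand-ins, not driver types):

#include <stdio.h>

enum fq_type { TX_CONF_FQ, RX_FQ };

struct fq {
	enum fq_type type;
	unsigned char tc;
	unsigned short flowid;
};

int main(void)
{
	int num_queues = 2, num_tcs = 4;	/* stand-ins for the DPNI attributes */
	struct fq fq[64];
	int n = 0, i, j;

	/* One TxConf FQ per Tx flow, at the start of the array */
	for (i = 0; i < num_queues; i++) {
		fq[n].type = TX_CONF_FQ;
		fq[n++].flowid = (unsigned short)i;
	}

	/* Then tc_count x queue_count Rx FQs, as in the patched setup_fqs() */
	for (i = 0; i < num_tcs; i++)
		for (j = 0; j < num_queues; j++) {
			fq[n].type = RX_FQ;
			fq[n].tc = (unsigned char)i;
			fq[n++].flowid = (unsigned short)j;
		}

	printf("%d FQs total (%d TxConf + %d Rx)\n",
	       n, num_queues, num_tcs * num_queues);
	return 0;
}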
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
index 86cb12e..62de3c7 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -33,6 +33,7 @@
#define __DPAA2_ETH_H
#include <linux/atomic.h>
+#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include "../../fsl-mc/include/dpaa2-io.h"
@@ -96,7 +97,7 @@
#define DPAA2_ETH_RX_BUF_ALIGN 64
#define DPAA2_ETH_RX_BUF_ALIGN_V1 256
#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
- ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - HH_DATA_MOD)
/* rx_extra_head prevents reallocations in L3 processing. */
#define DPAA2_ETH_SKB_SIZE \
@@ -114,17 +115,19 @@
/* PTP nominal frequency 1GHz */
#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
-/* Leave enough extra space in the headroom to make sure the skb is
- * not realloc'd in forwarding scenarios.
- */
-#define DPAA2_ETH_RX_HEAD_ROOM 192
-
/* We are accommodating a skb backpointer and some S/G info
* in the frame's software annotation. The hardware
* options are either 0 or 64, so we choose the latter.
*/
#define DPAA2_ETH_SWA_SIZE 64
+/* Extra headroom space requested to hardware, in order to make sure there's
+ * no realloc'ing in forwarding scenarios
+ */
+#define DPAA2_ETH_RX_HEAD_ROOM \
+ (DPAA2_ETH_TX_HWA_SIZE - DPAA2_ETH_RX_HWA_SIZE + \
+ DPAA2_ETH_TX_BUF_ALIGN)
+
/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
struct dpaa2_eth_swa {
struct sk_buff *skb;
@@ -301,16 +304,17 @@ struct dpaa2_eth_ch_stats {
__u64 pull_err;
};
+#define DPAA2_ETH_MAX_DPCONS NR_CPUS
+#define DPAA2_ETH_MAX_TCS 8
+
/* Maximum number of queues associated with a DPNI */
-#define DPAA2_ETH_MAX_RX_QUEUES 16
-#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
+#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
+#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS
#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
DPAA2_ETH_MAX_TX_QUEUES + \
DPAA2_ETH_MAX_RX_ERR_QUEUES)
-#define DPAA2_ETH_MAX_DPCONS NR_CPUS
-
enum dpaa2_eth_fq_type {
DPAA2_RX_FQ = 0,
DPAA2_TX_CONF_FQ,
@@ -323,6 +327,7 @@ struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
u16 flowid;
+ u8 tc;
int target_cpu;
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
@@ -429,6 +434,10 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_cls_rule *cls_rule;
struct dpni_tx_shaping_cfg shaping_cfg;
+
+ u8 dcbx_mode;
+ struct ieee_pfc pfc;
+ bool vlan_clsf_set;
};
#define dpaa2_eth_hash_enabled(priv) \
@@ -454,7 +463,37 @@ static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
return priv->dpni_attrs.num_queues;
}
+static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv)
+{
+ return priv->dpni_attrs.num_tcs;
+}
+
+static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
+ int traffic_class)
+{
+ return priv->pfc.pfc_en & (1 << traffic_class);
+}
+
+enum dpaa2_eth_td_cfg {
+ DPAA2_ETH_TD_NONE,
+ DPAA2_ETH_TD_QUEUE,
+ DPAA2_ETH_TD_GROUP
+};
+
+static inline enum dpaa2_eth_td_cfg
+dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
+{
+ bool pfc_enabled = !!(priv->pfc.pfc_en);
+
+ if (pfc_enabled)
+ return DPAA2_ETH_TD_GROUP;
+ else if (priv->tx_pause_frames)
+ return DPAA2_ETH_TD_NONE;
+ else
+ return DPAA2_ETH_TD_QUEUE;
+}
+
void check_cls_support(struct dpaa2_eth_priv *priv);
-int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, bool enable);
+int set_rx_taildrop(struct dpaa2_eth_priv *priv);
#endif /* __DPAA2_H */
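The taildrop policy selection added above reduces to a small decision function; here is a hypothetical standalone mirror of dpaa2_eth_get_td_type(), with pfc_en and tx_pause_frames as plain inputs:

#include <stdbool.h>
#include <stdio.h>

enum td_cfg { TD_NONE, TD_QUEUE, TD_GROUP };

/* PFC wins over plain pause frames, as in dpaa2_eth_get_td_type() */
static enum td_cfg td_type(unsigned char pfc_en, bool tx_pause_frames)
{
	if (pfc_en)
		return TD_GROUP;	/* per-TC group taildrop + congestion notifications */
	if (tx_pause_frames)
		return TD_NONE;		/* flow control provides the backpressure */
	return TD_QUEUE;		/* classic per-queue byte-based taildrop */
}

int main(void)
{
	printf("%d %d %d\n",
	       td_type(0x09, true),	/* PFC on TCs 0 and 3 -> TD_GROUP */
	       td_type(0x00, true),	/* pause frames only  -> TD_NONE  */
	       td_type(0x00, false));	/* neither            -> TD_QUEUE */
	return 0;
}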
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
index 9859814..304a21c 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -230,7 +230,8 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
if (current_tx_pause == pause->tx_pause)
goto out;
- err = setup_fqs_taildrop(priv, !pause->tx_pause);
+ priv->tx_pause_frames = pause->tx_pause;
+ err = set_rx_taildrop(priv);
if (err)
netdev_dbg(net_dev, "ERROR %d configuring taildrop", err);
@@ -676,7 +677,7 @@ static int do_cls(struct net_device *net_dev,
struct dpni_rule_cfg rule_cfg;
struct dpni_fs_action_cfg fs_act = { 0 };
void *dma_mem;
- int err = 0;
+ int err = 0, tc;
if (!dpaa2_eth_fs_enabled(priv)) {
netdev_err(net_dev, "dev does not support steering!\n");
@@ -719,12 +720,19 @@ static int do_cls(struct net_device *net_dev,
else
fs_act.flow_id = fs->ring_cookie;
- if (add)
- err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
- 0, fs->location, &rule_cfg, &fs_act);
- else
- err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token,
- 0, &rule_cfg);
+ for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
+ if (add)
+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
+ tc, fs->location, &rule_cfg,
+ &fs_act);
+ else
+ err = dpni_remove_fs_entry(priv->mc_io, 0,
+ priv->mc_token, tc,
+ &rule_cfg);
+
+ if (err)
+ break;
+ }
dma_unmap_single(dev, rule_cfg.key_iova,
rule_cfg.key_size * 2, DMA_TO_DEVICE);
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
index fa353d7..29f44df 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
@@ -37,9 +37,11 @@
#define DPNI_VER_MAJOR 7
#define DPNI_VER_MINOR 0
#define DPNI_CMD_BASE_VERSION 1
+#define DPNI_CMD_2ND_VERSION 2
#define DPNI_CMD_ID_OFFSET 4
#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
@@ -62,7 +64,7 @@
#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
-#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
@@ -85,6 +87,8 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
@@ -125,13 +129,14 @@ struct dpni_cmd_open {
#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
struct dpni_cmd_set_pools {
- /* cmd word 0 */
u8 num_dpbp;
u8 backup_pool_mask;
__le16 pad;
- /* cmd word 0..4 */
- __le32 dpbp_id[DPNI_MAX_DPBP];
- /* cmd word 4..6 */
+ struct {
+ __le16 dpbp_id;
+ u8 priority_mask;
+ u8 pad;
+ } pool[DPNI_MAX_DPBP];
__le16 buffer_size[DPNI_MAX_DPBP];
};
@@ -510,6 +515,36 @@ struct dpni_cmd_set_queue {
__le64 user_context;
};
+#define DPNI_DISCARD_ON_MISS_SHIFT 0
+#define DPNI_DISCARD_ON_MISS_SIZE 1
+
+struct dpni_cmd_set_qos_table {
+ u32 pad;
+ u8 default_tc;
+ /* only the LSB */
+ u8 discard_on_miss;
+ u16 pad1[21];
+ u64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_qos_entry {
+ u16 pad;
+ u8 tc_id;
+ u8 key_size;
+ u16 index;
+ u16 pad2;
+ u64 key_iova;
+ u64 mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+ u8 pad1[3];
+ u8 key_size;
+ u32 pad2;
+ u64 key_iova;
+ u64 mask_iova;
+};
+
struct dpni_cmd_add_fs_entry {
/* cmd word 0 */
u16 options;
@@ -597,4 +632,27 @@ struct dpni_cmd_set_congestion_notification {
u32 threshold_exit;
};
+struct dpni_cmd_get_congestion_notification {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 tc;
+};
+
+struct dpni_rsp_get_congestion_notification {
+ /* cmd word 0 */
+ u64 pad;
+ /* cmd word 1 */
+ u32 dest_id;
+ u16 notification_mode;
+ u8 dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ u8 type_units;
+ /* cmd word 2 */
+ u64 message_iova;
+ /* cmd word 3 */
+ u64 message_ctx;
+ /* cmd word 4 */
+ u32 threshold_entry;
+ u32 threshold_exit;
+};
#endif /* _FSL_DPNI_CMD_H */
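As a sanity check on the new command versioning, expanding the macros shows the command ID in the upper bits and the version in the low nibble, so SET_POOLS keeps its ID while moving to version 2 (macros copied from the header above):

#include <stdio.h>

#define DPNI_CMD_BASE_VERSION	1
#define DPNI_CMD_2ND_VERSION	2
#define DPNI_CMD_ID_OFFSET	4
#define DPNI_CMD(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
#define DPNI_CMD_V2(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)

int main(void)
{
	printf("SET_POOLS v1: 0x%x\n", DPNI_CMD(0x200));	/* 0x2001 */
	printf("SET_POOLS v2: 0x%x\n", DPNI_CMD_V2(0x200));	/* 0x2002 */
	return 0;
}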
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
index 3c23e4d..5c15ece 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
@@ -198,7 +198,10 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
for (i = 0; i < DPNI_MAX_DPBP; i++) {
- cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].dpbp_id =
+ cpu_to_le16(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].priority_mask =
+ cfg->pools[i].priority_mask;
cmd_params->buffer_size[i] =
cpu_to_le16(cfg->pools[i].buffer_size);
cmd_params->backup_pool_mask |=
@@ -1374,6 +1377,82 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * warning: Before calling this function, call dpkg_prepare_key_cfg() to
+ * prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg)
+{
+ struct dpni_cmd_set_qos_table *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+ cmd_params->default_tc = cfg->default_tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+ dpni_set_field(cmd_params->discard_on_miss,
+ ENABLE,
+ cfg->discard_on_miss);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to add
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the QoS table where to insert the entry.
+ * Only relevant if MASKING is enabled for QoS classification on
+ * this DPNI, it is ignored for exact match.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index)
+{
+ struct dpni_cmd_add_qos_entry *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
/**
* dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
* (to select a flow ID)
@@ -1498,6 +1577,60 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_get_congestion_notification() - Get traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ struct dpni_congestion_notification_cfg *cfg)
+{
+ struct dpni_rsp_get_congestion_notification *rsp_params;
+ struct dpni_cmd_get_congestion_notification *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
+ cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->dest_cfg.priority = rsp_params->dest_priority;
+ cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
+ DEST_TYPE);
+
+ return 0;
+}
+
+/**
* dpni_set_queue() - Set queue parameters
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
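The QoS entries that set_vlan_qos() feeds into dpni_add_qos_entry() key on the PCP bits of the VLAN TCI. A tiny standalone sketch of the key/mask bytes it programs, using VLAN_PRIO_SHIFT (13) and VLAN_PRIO_MASK (0xe000) from <linux/if_vlan.h> and a stand-in for cpu_to_be16():

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT	13
#define VLAN_PRIO_MASK	0xe000	/* top 3 bits of the TCI */

/* Big-endian 16-bit store, standing in for cpu_to_be16() */
static void store_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

int main(void)
{
	uint8_t key[2], mask[2];
	int tc;

	store_be16(mask, VLAN_PRIO_MASK);
	for (tc = 0; tc < 8; tc++) {
		store_be16(key, (uint16_t)(tc << VLAN_PRIO_SHIFT));
		printf("TC %d: key %02x%02x mask %02x%02x\n",
		       tc, key[0], key[1], mask[0], mask[1]);
	}
	return 0;
}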
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
index 600c357..40128ed 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
@@ -52,6 +52,14 @@ struct fsl_mc_io;
* Maximum number of buffer pools per DPNI
*/
#define DPNI_MAX_DPBP 8
+/**
+ * Maximum number of senders
+ */
+#define DPNI_MAX_SENDERS 8
+/**
+ * Maximum distribution size
+ */
+#define DPNI_MAX_DIST_SIZE 8
/**
* All traffic classes considered; see dpni_set_queue()
@@ -123,13 +131,15 @@ struct dpni_pools_cfg {
/**
* struct pools - Buffer pools parameters
* @dpbp_id: DPBP object ID
+ * @priority_mask: priorities served by DPBP
* @buffer_size: Buffer size
* @backup_pool: Backup pool
*/
struct {
- int dpbp_id;
+ u16 dpbp_id;
+ u8 priority_mask;
u16 buffer_size;
- int backup_pool;
+ u8 backup_pool;
} pools[DPNI_MAX_DPBP];
};
@@ -509,6 +519,10 @@ int dpni_reset_statistics(struct fsl_mc_io *mc_io,
* Enable a-symmetric pause frames
*/
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+/**
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
/**
* struct - Structure representing DPNI link configuration
@@ -658,6 +672,26 @@ int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
u8 *key_cfg_buf);
/**
+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * key extractions to be used as the QoS criteria by calling
+ * dpkg_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ * '0' to use the 'default_tc' in such cases
+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
+ */
+struct dpni_qos_tbl_cfg {
+ u64 key_cfg_iova;
+ int discard_on_miss;
+ u8 default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg);
+
+/**
* struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
* @dist_size: Set the distribution size;
* supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
@@ -850,6 +884,12 @@ struct dpni_dest_cfg {
* sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
*/
#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
+/**
+ * This congestion will trigger flow control or priority flow control.
+ * This will have effect only if flow control is enabled with
+ * dpni_set_link_cfg().
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
/**
* struct dpni_congestion_notification_cfg - congestion notification
@@ -883,6 +923,14 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
u8 tc_id,
const struct dpni_congestion_notification_cfg *cfg);
+int dpni_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ struct dpni_congestion_notification_cfg *cfg);
+
/**
* struct dpni_taildrop - Structure representing the taildrop
* @enable: Indicates whether the taildrop is active or not.
@@ -929,6 +977,22 @@ struct dpni_rule_cfg {
u8 key_size;
};
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
/**
* Discard matching traffic. If set, this takes precedence over any other
* configuration and matching traffic is always discarded.
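Finally, the type_units byte that dpni_get_congestion_notification() unpacks carries dest_type in its low 4 bits and units in the next 2, per the comment in dpni-cmd.h. A hypothetical pack/unpack sketch, with the shift/mask values assumed from that layout:

#include <stdint.h>
#include <stdio.h>

/* type_units layout, from LSB: dest_type:4, units:2 */
#define DEST_TYPE_SHIFT	0
#define DEST_TYPE_MASK	0x0f
#define UNITS_SHIFT	4
#define UNITS_MASK	0x03

static uint8_t pack_type_units(uint8_t dest_type, uint8_t units)
{
	return ((dest_type & DEST_TYPE_MASK) << DEST_TYPE_SHIFT) |
	       ((units & UNITS_MASK) << UNITS_SHIFT);
}

int main(void)
{
	uint8_t tu = pack_type_units(1, 1);	/* illustrative values */

	printf("type_units=0x%02x dest_type=%d units=%d\n",
	       (unsigned int)tu,
	       (tu >> DEST_TYPE_SHIFT) & DEST_TYPE_MASK,
	       (tu >> UNITS_SHIFT) & UNITS_MASK);
	return 0;
}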