summaryrefslogtreecommitdiff
path: root/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c')
-rw-r--r--drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c407
1 files changed, 407 insertions, 0 deletions
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c
new file mode 100644
index 0000000..62095a7
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/module.h>
+#include <linux/fsl_bman.h>
+
+#include "dpaa_eth.h"
+#include "dpaa_eth_unit_test.h"
+
/* Aggregate pass/fail flag for the TX test frame currently in flight;
 * reset by the DQRR/ERN callbacks, checked after each transmission. */
static bool tx_unit_test_passed = true;
/* head/end pointers of the most recently transmitted test skb, recorded
 * before TX so the dequeue callback can verify it got the same skb back. */
static unsigned char *tx_unit_skb_head;
static unsigned char *tx_unit_skb_end;
/* Count of test frames that actually came back through the DQRR callback. */
static int tx_unit_tested;
/* Temporary egress FQ installed in place of the real one for the test. */
static struct dpa_fq unit_fq;
#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
/* Temporary recycle FQ, only when buffer recycling on TX is enabled. */
static struct dpa_fq unit_recycle_fq;
#endif
static bool tx_unit_test_ran; /* initialized as false */
+
+static void tx_unit_test_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct sk_buff **skbh;
+ struct sk_buff *skb;
+ const struct qm_fd *fd;
+ dma_addr_t addr;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ tx_unit_test_passed = false;
+
+ fd = &msg->ern.fd;
+
+ addr = qm_fd_addr(fd);
+
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+ if (!skb || !is_kernel_addr((unsigned long)skb))
+ panic("Corrupt skb in ERN!\n");
+
+ kfree_skb(skb);
+}
+
/*
 * DQRR (dequeue) callback for the TX unit-test FQ.
 *
 * Each transmitted test frame loops back here.  The callback recovers the
 * skb from the frame descriptor, verifies it is the same skb that was just
 * sent (by comparing the recorded head/end pointers), and then checks the
 * buffer geometry the TX path produced:
 *  - if the frame was marked for recycling (FM_FD_CMD_FCO), the buffer must
 *    be large enough to serve as a new RX buffer;
 *  - otherwise, skb->data must sit exactly at fd.addr + fd offset, and the
 *    per-cpu buffer counter is re-incremented for buffers big enough to be
 *    pool candidates (mirroring the decrement done before TX).
 * Sets tx_unit_test_passed accordingly and always consumes/frees the skb.
 */
static enum qman_cb_dqrr_result tx_unit_test_dqrr(
		struct qman_portal *portal,
		struct qman_fq *fq,
		const struct qm_dqrr_entry *dq)
{
	struct net_device *net_dev;
	struct dpa_priv_s *priv;
	struct sk_buff **skbh;
	struct sk_buff *skb;
	const struct qm_fd *fd;
	dma_addr_t addr;
	unsigned char *startaddr;
	struct dpa_percpu_priv_s *percpu_priv;
	int *countptr;

	/* Assume failure until all checks below pass. */
	tx_unit_test_passed = false;

	/* Count every frame that actually came back. */
	tx_unit_tested++;

	net_dev = ((struct dpa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	/* NOTE(review): percpu_priv is fetched but not used in this
	 * function - presumably left over from an earlier revision. */
	percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());

	fd = &dq->fd;

	addr = qm_fd_addr(fd);

	/* The skb backpointer was stored at the start of the buffer on TX. */
	skbh = (struct sk_buff **)phys_to_virt(addr);
	startaddr = (unsigned char *)skbh;
	skb = *skbh;

	if (!skb || !is_kernel_addr((unsigned long)skb))
		panic("Invalid skb address in TX Unit Test FD\n");

	/* Make sure we're dealing with the same skb */
	if (skb->head != tx_unit_skb_head
			|| skb_end_pointer(skb) != tx_unit_skb_end)
		goto out;

	/* If we recycled, then there must be enough room between fd.addr
	 * and skb->end for a new RX buffer
	 */
	if (fd->cmd & FM_FD_CMD_FCO) {
		size_t bufsize = skb_end_pointer(skb) - startaddr;

		if (bufsize < dpa_get_max_frm())
			goto out;
	} else {
		/*
		 * If we didn't recycle, but the buffer was big enough,
		 * increment the counter to put it back
		 */
		countptr = __this_cpu_ptr(priv->dpa_bp->percpu_count);
		if (skb_end_pointer(skb) - skb->head >=
				dpa_get_max_frm())
			(*countptr)++;

		/* If we didn't recycle, the data pointer should be good */
		if (skb->data != startaddr + dpa_fd_offset(fd))
			goto out;
	}

	tx_unit_test_passed = true;
out:
	/* The skb is no longer needed, and belongs to us */
	kfree_skb(skb);

	return qman_cb_dqrr_consume;
}
+
/* Template FQ: copied into unit_fq (and unit_recycle_fq) so both test
 * queues get the test's dequeue and rejection callbacks. */
static const struct qman_fq tx_unit_test_fq = {
	.cb = { .dqrr = tx_unit_test_dqrr, .ern = tx_unit_test_ern }
};
+
/*
 * TX loopback unit test.
 *
 * Pins the current task to a qman-affine CPU, temporarily replaces this
 * CPU's egress FQ (and, if enabled, its recycle FQ) with locally-scheduled
 * test FQs whose callbacks land in tx_unit_test_dqrr/tx_unit_test_ern,
 * then transmits skbs across a matrix of sizes and headrooms and polls the
 * DQRR to verify each frame's buffer layout.  Tears everything down and
 * restores the original FQs before returning.
 *
 * Returns 0 on success, a negative errno on setup failure or if any
 * iteration failed.
 */
static int dpa_tx_unit_test(struct net_device *net_dev)
{
	/* Create a new FQ */
	struct dpa_priv_s *priv = netdev_priv(net_dev);
	struct qman_fq *oldq;
	int size, headroom;
	struct dpa_percpu_priv_s *percpu_priv;
	cpumask_var_t old_cpumask;
	int test_count = 0;
	int err = 0;
	int tests_failed = 0;
	const cpumask_t *cpus = qman_affine_cpus();
#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
	struct qman_fq *oldrecycleq;
#endif

	if (!alloc_cpumask_var(&old_cpumask, GFP_KERNEL)) {
		pr_err("UNIT test cpumask allocation failed\n");
		return -ENOMEM;
	}

	/* Pin ourselves to a portal-affine CPU so the locally-scheduled
	 * test FQs dequeue on the CPU we poll from; restored on exit. */
	cpumask_copy(old_cpumask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpus);
	/* disable bottom halves */
	local_bh_disable();

	percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());

	/* Disable DQRR interrupts; we drive dequeues via qman_poll_dqrr(). */
	qman_irqsource_remove(QM_PIRQ_DQRI);
	unit_fq.net_dev = net_dev;
	unit_fq.fq_base = tx_unit_test_fq;

	/* Save old queue */
	oldq = priv->egress_fqs[smp_processor_id()];

	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &unit_fq.fq_base);

	if (err < 0) {
		pr_err("UNIT test FQ create failed: %d\n", err);
		goto fq_create_fail;
	}

	err = qman_init_fq(&unit_fq.fq_base,
			QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL);
	if (err < 0) {
		pr_err("UNIT test FQ init failed: %d\n", err);
		goto fq_init_fail;
	}

	/* Replace queue 0 with this queue */
	priv->egress_fqs[smp_processor_id()] = &unit_fq.fq_base;

#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
	/* Same swap for the per-CPU recycle FQ. */
	oldrecycleq = priv->recycle_fqs[smp_processor_id()];
	unit_recycle_fq.net_dev = net_dev;
	unit_recycle_fq.fq_base = tx_unit_test_fq;

	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID,
			&unit_recycle_fq.fq_base);

	if (err < 0) {
		pr_err("UNIT test Recycle FQ create failed: %d\n", err);
		goto recycle_fq_create_fail;
	}

	err = qman_init_fq(&unit_recycle_fq.fq_base,
			QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL);
	if (err < 0) {
		pr_err("UNIT test Recycle FQ init failed: %d\n", err);
		goto recycle_fq_init_fail;
	}

	priv->recycle_fqs[smp_processor_id()] = &unit_recycle_fq.fq_base;

	/* NOTE(review): pr_err is used for purely informational output
	 * here and below - pr_info would be the appropriate level. */
	pr_err("TX Unit Test using FQ: %d - Recycle FQ: %d\n",
			qman_fq_fqid(&unit_fq.fq_base),
			qman_fq_fqid(&unit_recycle_fq.fq_base));
#else
	pr_err("TX Unit Test using FQ %d\n", qman_fq_fqid(&unit_fq.fq_base));
#endif

	/* Try packet sizes from 64-bytes to just above the maximum */
	for (size = 64; size <= 9600 + 128; size += 64) {
		for (headroom = priv->tx_headroom; headroom < 0x800;
				headroom += 16) {
			int ret;
			struct sk_buff *skb;
			int *countptr =
				__this_cpu_ptr(priv->dpa_bp->percpu_count);

			test_count++;

			skb = dev_alloc_skb(size + headroom);

			if (!skb) {
				pr_err("Failed to allocate skb\n");
				err = -ENOMEM;
				goto end_test;
			}

			/* Pre-decrement the pool counter for buffers big
			 * enough to be recycled; the dequeue callback
			 * re-increments it if the frame was not recycled. */
			if (skb_end_pointer(skb) - skb->head >=
					dpa_get_max_frm())
				(*countptr)--;

			skb_put(skb, size + headroom);
			skb_pull(skb, headroom);

			/* Record identity for the dequeue callback to match. */
			tx_unit_skb_head = skb->head;
			tx_unit_skb_end = skb_end_pointer(skb);

			skb_set_queue_mapping(skb, smp_processor_id());

			/* tx */
			ret = net_dev->netdev_ops->ndo_start_xmit(skb, net_dev);

			if (ret != NETDEV_TX_OK) {
				pr_err("Failed to TX with err %d\n", ret);
				err = -EIO;
				goto end_test;
			}

			/* Wait for it to arrive */
			ret = spin_event_timeout(qman_poll_dqrr(1) != 0,
					100000, 1);

			if (!ret) {
				pr_err("TX Packet never arrived\n");
				/*
				 * Count the test as failed.
				 */
				tests_failed++;
			}

			/* Was it good? */
			if (tx_unit_test_passed == false) {
				pr_err("Test failed:\n");
				pr_err("size: %d pad: %d head: %p end: %p\n",
						size, headroom, tx_unit_skb_head,
						tx_unit_skb_end);
				tests_failed++;
			}
		}
	}

	/* Teardown: the labels below deliberately fall through so a failure
	 * at any setup stage unwinds exactly what was set up before it.
	 * NOTE(review): the retire/oos calls overwrite 'err', so an error
	 * from the test loop can be masked by a successful cleanup call;
	 * a separate variable for cleanup status would be safer - verify
	 * intended semantics before changing. */
end_test:
	err = qman_retire_fq(&unit_fq.fq_base, NULL);
	if (unlikely(err < 0))
		pr_err("Could not retire TX Unit Test FQ (%d)\n", err);

	err = qman_oos_fq(&unit_fq.fq_base);
	if (unlikely(err < 0))
		pr_err("Could not OOS TX Unit Test FQ (%d)\n", err);

#ifdef CONFIG_FSL_DPAA_TX_RECYCLE
	err = qman_retire_fq(&unit_recycle_fq.fq_base, NULL);
	if (unlikely(err < 0))
		pr_err("Could not retire Recycle TX Unit Test FQ (%d)\n", err);

	err = qman_oos_fq(&unit_recycle_fq.fq_base);
	if (unlikely(err < 0))
		pr_err("Could not OOS Recycle TX Unit Test FQ (%d)\n", err);

recycle_fq_init_fail:
	qman_destroy_fq(&unit_recycle_fq.fq_base, 0);

recycle_fq_create_fail:
	priv->recycle_fqs[smp_processor_id()] = oldrecycleq;
#endif

fq_init_fail:
	qman_destroy_fq(&unit_fq.fq_base, 0);

fq_create_fail:
	priv->egress_fqs[smp_processor_id()] = oldq;
	local_bh_enable();
	qman_irqsource_add(QM_PIRQ_DQRI);
	tx_unit_test_ran = true;
	/* Restore the caller's original CPU affinity. */
	set_cpus_allowed_ptr(current, old_cpumask);
	free_cpumask_var(old_cpumask);

	pr_err("Tested %d/%d packets. %d failed\n", test_count, tx_unit_tested,
			tests_failed);

	if (tests_failed)
		err = -EINVAL;

	/* Reset counters */
	memset(&percpu_priv->stats, 0, sizeof(percpu_priv->stats));

	return err;
}
+
+extern struct dpa_bp *dpa_bpid2pool(int bpid);
+
+void dpa_unit_test_drain_default_pool(struct net_device *net_dev)
+{
+ int i;
+ int num;
+ struct dpa_priv_s *priv;
+ struct dpa_bp *default_pool = dpa_bpid2pool(dpa_priv_common_bpid);
+
+ priv = netdev_priv(net_dev);
+
+ do {
+ struct bm_buffer bmb[8];
+
+ num = bman_acquire(default_pool->pool, bmb, 8, 0);
+
+ for (i = 0; i < num; i++) {
+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
+
+ dma_unmap_single(default_pool->dev, addr,
+ default_pool->size,
+ DMA_BIDIRECTIONAL);
+
+ _dpa_bp_free_buf(phys_to_virt(addr));
+ }
+ } while (num == 8);
+
+ /* restore counters to their previous state */
+ for_each_online_cpu(i) {
+ int *countptr = per_cpu_ptr(default_pool->percpu_count, i);
+ *countptr = 0;
+ }
+}
+
+void dpa_unit_test_seed_default_pool(struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
+ struct dpa_bp *default_pool = dpa_bpid2pool(dpa_priv_common_bpid);
+
+ priv = netdev_priv(net_dev);
+
+#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
+ default_pool->size = dpa_bp_default_buf_size_get();
+#endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */
+ dpa_bp_priv_seed(default_pool);
+}
+
+void dpa_unit_tests(struct net_device *net_dev)
+{
+ int err;
+
+ /* the unit tests use the default pool */
+ if (!dpa_priv_common_bpid)
+ return;
+
+ if (!tx_unit_test_ran) {
+ dpa_unit_test_seed_default_pool(net_dev);
+
+ err = dpa_tx_unit_test(net_dev);
+ WARN_ON(err);
+
+ dpa_unit_test_drain_default_pool(net_dev);
+ }
+}