path: root/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c
author		Madalin Bucur <madalin.bucur@freescale.com>	2013-06-14 16:44:58 (GMT)
committer	Fleming Andrew-AFLEMING <AFLEMING@freescale.com>	2013-07-15 21:47:05 (GMT)
commit		bfca638e9bde6c93b56d67c33a251d0b2d3f4710 (patch)
tree		18312a69bf9fa0f5cf040e4be4a1aac7d3c62491 /drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c
parent		2de112cb7c912e54b8f65d1fa0eb948d28f1e1ee (diff)
download	linux-fsl-qoriq-bfca638e9bde6c93b56d67c33a251d0b2d3f4710.tar.xz
dpaa_eth: move shared interface code to a new file
Separated the code related to shared interfaces.

Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
Change-Id: I082a351a7d73a128a9761b3ad245886e00355c53
Reviewed-on: http://git.am.freescale.net:8181/3085
Reviewed-by: Sovaiala Cristian-Constantin-B39531 <Cristian.Sovaiala@freescale.com>
Reviewed-by: Radulescu Ruxandra Ioana-B05472 <ruxandra.radulescu@freescale.com>
Reviewed-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
Tested-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
Diffstat (limited to 'drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c')
-rw-r--r--	drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c	831
1 file changed, 831 insertions, 0 deletions
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c
new file mode 100644
index 0000000..8b20595
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) \
+ KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
+ KBUILD_BASENAME".c", __LINE__, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/highmem.h>
+#include <linux/fsl_bman.h>
+#include <linux/fsl_qman.h>
+#include "dpaa_eth.h"
+#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
+
+static uint8_t debug = -1;
+module_param(debug, byte, S_IRUGO);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+
+/* candidates for dpa_eth_common.c */
+int dpa_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev, const uint8_t *mac_addr);
+int __cold dpa_start(struct net_device *net_dev);
+int __cold dpa_stop(struct net_device *net_dev);
+void __cold dpa_timeout(struct net_device *net_dev);
+struct rtnl_link_stats64 * __cold
+dpa_get_stats64(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats);
+int dpa_set_mac_address(struct net_device *net_dev, void *addr);
+void dpa_set_rx_mode(struct net_device *net_dev);
+int dpa_ndo_init(struct net_device *net_dev);
+int dpa_set_features(struct net_device *dev, netdev_features_t features);
+netdev_features_t dpa_fix_features(struct net_device *dev,
+ netdev_features_t features);
+int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb);
+#endif
+int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
+int __cold dpa_remove(struct platform_device *of_dev);
+struct dpa_bp * __cold __must_check __attribute__((nonnull))
+dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
+struct mac_device * __cold __must_check
+__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
+void dpa_set_buffers_layout(struct mac_device *mac_dev,
+ struct dpa_buffer_layout_s *layout);
+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+ struct fm_port_fqs *port_fqs,
+ bool tx_conf_fqs_per_core,
+ enum port_type ptype);
+int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+ size_t count);
+int dpa_get_channel(struct device *dev, struct device_node *dpa_node);
+void dpa_fq_setup(struct dpa_priv_s *priv, const dpa_fq_cbs_t *fq_cbs,
+ struct fm_port *tx_port);
+int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
+int dpa_fq_init(struct dpa_fq *dpa_fq);
+void dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpa_bp *bp, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpa_buffer_layout_s *buf_layout,
+ struct device *dev);
+void dpaa_eth_sysfs_init(struct device *dev);
+int __cold __attribute__((nonnull))
+dpa_fq_free(struct device *dev, struct list_head *list);
+void __cold __attribute__((nonnull))
+dpa_bp_free(struct dpa_priv_s *priv, struct dpa_bp *dpa_bp);
+struct dpa_bp *dpa_bpid2pool(int bpid);
+void dpa_release_sgt(struct qm_sg_entry *sgt, struct dpa_bp *dpa_bp,
+ struct bm_buffer *bmb);
+void __attribute__((nonnull))
+dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
+ const struct qm_mr_entry *msg);
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+ struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
+int dpaa_eth_add_channel(void *__arg);
+
+/* forward declarations */
+static enum qman_cb_dqrr_result __hot
+shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+static enum qman_cb_dqrr_result __hot
+shared_tx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+static enum qman_cb_dqrr_result
+shared_tx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq);
+static void shared_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg);
+int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
+
+#define DPA_DESCRIPTION "FSL DPAA Shared Ethernet driver"
+
+static const struct of_device_id dpa_shared_match[];
+
+static const struct net_device_ops dpa_shared_ops = {
+ .ndo_open = dpa_start,
+ .ndo_start_xmit = dpa_shared_tx,
+ .ndo_stop = dpa_stop,
+ .ndo_tx_timeout = dpa_timeout,
+ .ndo_get_stats64 = dpa_get_stats64,
+ .ndo_set_mac_address = dpa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+ .ndo_select_queue = dpa_select_queue,
+#endif
+ .ndo_change_mtu = dpa_change_mtu,
+ .ndo_set_rx_mode = dpa_set_rx_mode,
+ .ndo_init = dpa_ndo_init,
+ .ndo_set_features = dpa_set_features,
+ .ndo_fix_features = dpa_fix_features,
+ .ndo_do_ioctl = dpa_ioctl,
+};
+
+const dpa_fq_cbs_t shared_fq_cbs = {
+ .rx_defq = { .cb = { .dqrr = shared_rx_dqrr } },
+ .tx_defq = { .cb = { .dqrr = shared_tx_default_dqrr } },
+ .rx_errq = { .cb = { .dqrr = shared_rx_dqrr } },
+ .tx_errq = { .cb = { .dqrr = shared_tx_error_dqrr } },
+ .egress_ern = { .cb = { .ern = shared_ern } }
+};
+
+static inline void * __must_check __attribute__((nonnull))
+dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr)
+{
+ return dpa_bp->vaddr + (addr - dpa_bp->paddr);
+}
+
+static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size)
+{
+ int i;
+
+ for (i = 0; i < priv->bp_count; i++)
+ if ((size + priv->tx_headroom) <= priv->dpa_bp[i].size)
+ return dpa_bpid2pool(priv->dpa_bp[i].bpid);
+ return ERR_PTR(-ENODEV);
+}
+
+/* Copy to a memory region that requires kmapping from a linear buffer,
+ * taking into account page boundaries in the destination
+ */
+static void
+copy_to_unmapped_area(dma_addr_t phys_start, void *src, size_t buf_size)
+{
+ struct page *page;
+ size_t size, offset;
+ void *page_vaddr;
+
+ while (buf_size > 0) {
+ offset = offset_in_page(phys_start);
+ size = (offset + buf_size > PAGE_SIZE) ?
+ PAGE_SIZE - offset : buf_size;
+
+ page = pfn_to_page(phys_start >> PAGE_SHIFT);
+ page_vaddr = kmap_atomic(page);
+
+ memcpy(page_vaddr + offset, src, size);
+
+ kunmap_atomic(page_vaddr);
+
+ phys_start += size;
+ src += size;
+ buf_size -= size;
+ }
+}
+
+/* Copy from a memory region that requires kmapping to a linear buffer,
+ * taking into account page boundaries in the source
+ */
+static void
+copy_from_unmapped_area(void *dest, dma_addr_t phys_start, size_t buf_size)
+{
+ struct page *page;
+ size_t size, offset;
+ void *page_vaddr;
+
+ while (buf_size > 0) {
+ offset = offset_in_page(phys_start);
+ size = (offset + buf_size > PAGE_SIZE) ?
+ PAGE_SIZE - offset : buf_size;
+
+ page = pfn_to_page(phys_start >> PAGE_SHIFT);
+ page_vaddr = kmap_atomic(page);
+
+ memcpy(dest, page_vaddr + offset, size);
+
+ kunmap_atomic(page_vaddr);
+
+ phys_start += size;
+ dest += size;
+ buf_size -= size;
+ }
+}
+
+static void
+dpa_fd_release_sg(const struct net_device *net_dev,
+ const struct qm_fd *fd)
+{
+ const struct dpa_priv_s *priv;
+ struct qm_sg_entry *sgt;
+ struct dpa_bp *_dpa_bp;
+ struct bm_buffer _bmb, bmb[8];
+
+ priv = netdev_priv(net_dev);
+
+ _bmb.hi = fd->addr_hi;
+ _bmb.lo = fd->addr_lo;
+
+ _dpa_bp = dpa_bpid2pool(fd->bpid);
+
+ if (_dpa_bp->vaddr) {
+ sgt = dpa_phys2virt(_dpa_bp, bm_buf_addr(&_bmb)) +
+ dpa_fd_offset(fd);
+ dpa_release_sgt(sgt, _dpa_bp, bmb);
+ } else {
+ sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), GFP_ATOMIC);
+ if (sgt == NULL) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev,
+ "Memory allocation failed\n");
+ return;
+ }
+
+ copy_from_unmapped_area(sgt, bm_buf_addr(&_bmb) +
+ dpa_fd_offset(fd),
+ min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
+ _dpa_bp->size));
+ dpa_release_sgt(sgt, _dpa_bp, bmb);
+ kfree(sgt);
+ }
+
+ while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
+ cpu_relax();
+}
+
+static enum qman_cb_dqrr_result __hot
+shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+ struct dpa_bp *dpa_bp;
+ struct sk_buff *skb;
+ struct qm_sg_entry *sgt;
+ int i;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(IS_ERR(dpa_bp));
+
+ if (unlikely((fd->status & FM_FD_STAT_ERRORS) != 0)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.rx_errors++;
+
+ goto out;
+ }
+
+ skb = __netdev_alloc_skb(net_dev,
+ priv->tx_headroom + dpa_fd_length(fd),
+ GFP_ATOMIC);
+ if (unlikely(skb == NULL)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "Could not alloc skb\n");
+
+ percpu_priv->stats.rx_dropped++;
+
+ goto out;
+ }
+
+ skb_reserve(skb, priv->tx_headroom);
+
+ if (fd->format == qm_fd_sg) {
+ if (dpa_bp->vaddr) {
+ sgt = dpa_phys2virt(dpa_bp,
+ qm_fd_addr(fd)) + dpa_fd_offset(fd);
+
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+ BUG_ON(sgt[i].extension);
+
+ /* copy from sgt[i] */
+ memcpy(skb_put(skb, sgt[i].length),
+ dpa_phys2virt(dpa_bp,
+ qm_sg_addr(&sgt[i]) +
+ sgt[i].offset),
+ sgt[i].length);
+ if (sgt[i].final)
+ break;
+ }
+ } else {
+ sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
+ GFP_ATOMIC);
+ if (unlikely(sgt == NULL)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_err(net_dev,
+ "Memory allocation failed\n");
+ /* drop this frame; the FD is released at the "out" label */
+ percpu_priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ copy_from_unmapped_area(sgt,
+ qm_fd_addr(fd) + dpa_fd_offset(fd),
+ min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
+ dpa_bp->size));
+
+ for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+ BUG_ON(sgt[i].extension);
+
+ copy_from_unmapped_area(
+ skb_put(skb, sgt[i].length),
+ qm_sg_addr(&sgt[i]) + sgt[i].offset,
+ sgt[i].length);
+
+ if (sgt[i].final)
+ break;
+ }
+
+ kfree(sgt);
+ }
+ goto skb_copied;
+ }
+
+ /* otherwise fd->format == qm_fd_contig */
+ if (dpa_bp->vaddr) {
+ /* Fill the SKB */
+ memcpy(skb_put(skb, dpa_fd_length(fd)),
+ dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) +
+ dpa_fd_offset(fd), dpa_fd_length(fd));
+ } else {
+ copy_from_unmapped_area(skb_put(skb, dpa_fd_length(fd)),
+ qm_fd_addr(fd) + dpa_fd_offset(fd),
+ dpa_fd_length(fd));
+ }
+
+skb_copied:
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ /* IP Reassembled frames are allowed to be larger than MTU */
+ if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
+ !(fd->status & FM_FD_IPR))) {
+ percpu_priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ if (unlikely(netif_rx(skb) != NET_RX_SUCCESS))
+ percpu_priv->stats.rx_dropped++;
+ else {
+ percpu_priv->stats.rx_packets++;
+ percpu_priv->stats.rx_bytes += dpa_fd_length(fd);
+ }
+
+out:
+ if (fd->format == qm_fd_sg)
+ dpa_fd_release_sg(net_dev, fd);
+ else
+ dpa_fd_release(net_dev, fd);
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+shared_tx_error_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_bp *dpa_bp;
+ const struct qm_fd *fd = &dq->fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(IS_ERR(dpa_bp));
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
+ dpa_fd_release_sg(net_dev, fd);
+ else
+ dpa_fd_release(net_dev, fd);
+
+ percpu_priv->stats.tx_errors++;
+
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result __hot
+shared_tx_default_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ struct net_device *net_dev;
+ struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_bp *dpa_bp;
+ const struct qm_fd *fd = &dq->fd;
+
+ net_dev = ((struct dpa_fq *)fq)->net_dev;
+ priv = netdev_priv(net_dev);
+
+ dpa_bp = dpa_bpid2pool(fd->bpid);
+ BUG_ON(IS_ERR(dpa_bp));
+
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ if (unlikely((fd->status & FM_FD_STAT_ERRORS) != 0)) {
+ if (netif_msg_hw(priv) && net_ratelimit())
+ netdev_warn(net_dev, "FD status = 0x%08x\n",
+ fd->status & FM_FD_STAT_ERRORS);
+
+ percpu_priv->stats.tx_errors++;
+ }
+
+ if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
+ dpa_fd_release_sg(net_dev, fd);
+ else
+ dpa_fd_release(net_dev, fd);
+
+ percpu_priv->tx_confirm++;
+
+ return qman_cb_dqrr_consume;
+}
+
+static void shared_ern(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ struct net_device *net_dev;
+ const struct dpa_priv_s *priv;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_fq *dpa_fq = (struct dpa_fq *)fq;
+
+ net_dev = dpa_fq->net_dev;
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ dpa_fd_release(net_dev, &msg->ern.fd);
+
+ percpu_priv->stats.tx_dropped++;
+ percpu_priv->stats.tx_fifo_errors++;
+ count_ern(percpu_priv, msg);
+}
+
+int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpa_bp *dpa_bp;
+ struct bm_buffer bmb;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct dpa_priv_s *priv;
+ struct qm_fd fd;
+ int queue_mapping;
+ int err;
+ void *dpa_bp_vaddr;
+ t_FmPrsResult parse_results;
+
+ priv = netdev_priv(net_dev);
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());
+
+ memset(&fd, 0, sizeof(fd));
+ fd.format = qm_fd_contig;
+
+ queue_mapping = smp_processor_id();
+
+ dpa_bp = dpa_size2pool(priv, skb_headlen(skb));
+ if (unlikely(IS_ERR(dpa_bp))) {
+ percpu_priv->stats.tx_errors++;
+ err = PTR_ERR(dpa_bp);
+ goto bpools_too_small_error;
+ }
+
+ err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
+ if (unlikely(err <= 0)) {
+ percpu_priv->stats.tx_errors++;
+ if (err == 0)
+ err = -ENOMEM;
+ goto buf_acquire_failed;
+ }
+ fd.bpid = dpa_bp->bpid;
+
+ fd.length20 = skb_headlen(skb);
+ fd.addr_hi = bmb.hi;
+ fd.addr_lo = bmb.lo;
+ fd.offset = priv->tx_headroom;
+
+ /* The virtual address of the buffer pool is expected to be NULL
+ * in scenarios like MAC-less or Shared-MAC between Linux and
+ * USDPAA. In this case the buffers are dynamically mapped/unmapped.
+ */
+ if (dpa_bp->vaddr) {
+ dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb));
+
+ /* Copy the packet payload */
+ skb_copy_from_linear_data(skb,
+ dpa_bp_vaddr + dpa_fd_offset(&fd),
+ dpa_fd_length(&fd));
+
+ /* Enable L3/L4 hardware checksum computation, if applicable */
+ err = dpa_enable_tx_csum(priv, skb, &fd,
+ dpa_bp_vaddr + DPA_TX_PRIV_DATA_SIZE);
+ } else {
+ err = dpa_enable_tx_csum(priv, skb, &fd,
+ (char *)&parse_results);
+
+ copy_to_unmapped_area(bm_buf_addr(&bmb) + DPA_TX_PRIV_DATA_SIZE,
+ &parse_results,
+ DPA_PARSE_RESULTS_SIZE);
+
+ copy_to_unmapped_area(bm_buf_addr(&bmb) + dpa_fd_offset(&fd),
+ skb->data,
+ dpa_fd_length(&fd));
+ }
+
+ if (unlikely(err < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_err(net_dev, "Tx HW csum error: %d\n", err);
+ percpu_priv->stats.tx_errors++;
+ goto l3_l4_csum_failed;
+ }
+
+ err = dpa_xmit(priv, &percpu_priv->stats, queue_mapping, &fd);
+
+l3_l4_csum_failed:
+bpools_too_small_error:
+buf_acquire_failed:
+ /* We're done with the skb */
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int dpa_shared_netdev_init(struct device_node *dpa_node,
+ struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv = netdev_priv(net_dev);
+ const uint8_t *mac_addr;
+
+ net_dev->netdev_ops = &dpa_shared_ops;
+
+ net_dev->mem_start = priv->mac_dev->res->start;
+ net_dev->mem_end = priv->mac_dev->res->end;
+
+ mac_addr = priv->mac_dev->addr;
+
+ net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_LLTX);
+
+ return dpa_netdev_init(dpa_node, net_dev, mac_addr);
+}
+
+static int
+dpaa_eth_shared_probe(struct platform_device *_of_dev)
+{
+ int err = 0, i;
+ struct device *dev;
+ struct device_node *dpa_node;
+ struct dpa_bp *dpa_bp;
+ struct dpa_fq *dpa_fq, *tmp;
+ size_t count;
+ struct net_device *net_dev = NULL;
+ struct dpa_priv_s *priv = NULL;
+ struct dpa_percpu_priv_s *percpu_priv;
+ struct fm_port_fqs port_fqs;
+ struct dpa_buffer_layout_s *buf_layout = NULL;
+ struct mac_device *mac_dev;
+ struct task_struct *kth;
+
+ dev = &_of_dev->dev;
+
+ dpa_node = dev->of_node;
+
+ if (!of_device_is_available(dpa_node))
+ return -ENODEV;
+
+ /* Get the buffer pools assigned to this interface */
+ dpa_bp = dpa_bp_probe(_of_dev, &count);
+ if (IS_ERR(dpa_bp))
+ return PTR_ERR(dpa_bp);
+
+ /* Allocate this early, so we can store relevant information in
+ * the private area (needed by 1588 code in dpa_mac_probe)
+ */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Do this here, so we can be verbose early */
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+
+ priv->msg_enable = netif_msg_init(debug, -1);
+
+ mac_dev = dpa_mac_probe(_of_dev);
+ if (IS_ERR(mac_dev)) {
+ err = PTR_ERR(mac_dev);
+ goto mac_probe_failed;
+ }
+
+ /* We have physical ports, so we need to establish
+ * the buffer layout.
+ */
+ buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
+ GFP_KERNEL);
+ if (!buf_layout) {
+ dev_err(dev, "devm_kzalloc() failed\n");
+ err = -ENOMEM;
+ goto alloc_failed;
+ }
+ dpa_set_buffers_layout(mac_dev, buf_layout);
+
+ INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+ memset(&port_fqs, 0, sizeof(port_fqs));
+
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs,
+ false, RX);
+ if (!err)
+ err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
+ &port_fqs, false, TX);
+ if (err < 0)
+ goto fq_probe_failed;
+
+ /* bp init */
+
+ err = dpa_bp_create(net_dev, dpa_bp, count);
+
+ if (err < 0)
+ goto bp_create_failed;
+
+ priv->mac_dev = mac_dev;
+
+ priv->channel = dpa_get_channel(dev, dpa_node);
+
+ if (priv->channel < 0) {
+ err = priv->channel;
+ goto get_channel_failed;
+ }
+
+ /* Start a thread that will walk the cpus with affine portals
+ * and add this pool channel to each's dequeue mask.
+ */
+ kth = kthread_run(dpaa_eth_add_channel,
+ (void *)(unsigned long)priv->channel,
+ "dpaa_%p:%d", net_dev, priv->channel);
+ if (IS_ERR(kth)) {
+ err = PTR_ERR(kth);
+ goto add_channel_failed;
+ }
+
+ dpa_fq_setup(priv, &shared_fq_cbs, priv->mac_dev->port_dev[TX]);
+
+ /* Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+ * Must be executed after probing the MAC, but before
+ * assigning the egress FQs to the CGRs.
+ */
+ err = dpaa_eth_cgr_init(priv);
+ if (err < 0) {
+ dev_err(dev, "Error initializing CGR\n");
+ goto cgr_init_failed;
+ }
+
+ /* Add the FQs to the interface, and make them active */
+ list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
+ err = dpa_fq_init(dpa_fq);
+ if (err < 0)
+ goto fq_alloc_failed;
+ }
+
+ priv->buf_layout = buf_layout;
+ priv->tx_headroom =
+ dpa_get_headroom(&priv->buf_layout[TX]);
+
+ /* All real interfaces need their ports initialized */
+ dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
+ buf_layout, dev);
+
+ /* Now we need to initialize either a private or shared interface */
+ priv->percpu_priv = alloc_percpu(*priv->percpu_priv);
+
+ if (priv->percpu_priv == NULL) {
+ dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto alloc_percpu_failed;
+ }
+ for_each_online_cpu(i) {
+ percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+ memset(percpu_priv, 0, sizeof(*percpu_priv));
+ }
+
+ err = dpa_shared_netdev_init(dpa_node, net_dev);
+
+ if (err < 0)
+ goto netdev_init_failed;
+
+ dpaa_eth_sysfs_init(&net_dev->dev);
+
+ return 0;
+
+netdev_init_failed:
+ if (net_dev)
+ free_percpu(priv->percpu_priv);
+alloc_percpu_failed:
+fq_alloc_failed:
+ if (net_dev) {
+ dpa_fq_free(dev, &priv->dpa_fq_list);
+ qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+ qman_delete_cgr(&priv->cgr_data.cgr);
+ }
+cgr_init_failed:
+add_channel_failed:
+get_channel_failed:
+ if (net_dev)
+ dpa_bp_free(priv, priv->dpa_bp);
+bp_create_failed:
+fq_probe_failed:
+alloc_failed:
+mac_probe_failed:
+ dev_set_drvdata(dev, NULL);
+ if (net_dev)
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static const struct of_device_id dpa_shared_match[] = {
+ {
+ .compatible = "fsl,dpa-ethernet-shared"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpa_shared_match);
+
+static struct platform_driver dpa_shared_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = dpa_shared_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa_eth_shared_probe,
+ .remove = dpa_remove
+};
+
+static int __init __cold dpa_shared_load(void)
+{
+ int _errno;
+
+ pr_info(KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n");
+
+/* Todo: is it safe to remove these?
+ / * Initialize dpaa_eth mirror values * /
+ dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
+ dpa_max_frm = fm_get_max_frm();
+*/
+ _errno = platform_driver_register(&dpa_shared_driver);
+ if (unlikely(_errno < 0)) {
+ pr_err(KBUILD_MODNAME
+ ": %s:%hu:%s(): platform_driver_register() = %d\n",
+ KBUILD_BASENAME".c", __LINE__, __func__, _errno);
+ }
+
+ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
+ KBUILD_BASENAME".c", __func__);
+
+ return _errno;
+}
+module_init(dpa_shared_load);
+
+static void __exit __cold dpa_shared_unload(void)
+{
+ pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
+ KBUILD_BASENAME".c", __func__);
+
+ platform_driver_unregister(&dpa_shared_driver);
+}
+module_exit(dpa_shared_unload);