/*
 * Copyright 2008-2013 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) \
	KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
	KBUILD_BASENAME".c", __LINE__, __func__

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/kthread.h>
#include <linux/io.h>
#include <linux/if_arp.h>	/* arp_hdr_len() */
#include <linux/if_vlan.h>	/* VLAN_HLEN */
#include <linux/icmp.h>		/* struct icmphdr */
#include <linux/ip.h>		/* struct iphdr */
#include <linux/ipv6.h>		/* struct ipv6hdr */
#include <linux/udp.h>		/* struct udphdr */
#include <linux/tcp.h>		/* struct tcphdr */
#include <linux/net.h>		/* net_ratelimit() */
#include <linux/if_ether.h>	/* ETH_P_IP and ETH_P_IPV6 */
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <asm/smp.h>		/* get_hard_smp_processor_id() */
#include <linux/fsl_bman.h>

#include "fsl_fman.h"
#include "fm_ext.h"
#include "fm_port_ext.h"

#include "mac.h"
#include "dpaa_eth.h"
#include "dpaa_eth_common.h"

#ifdef CONFIG_FSL_DPAA_ETH_DEBUGFS
#include "dpaa_debugfs.h"
#endif /* CONFIG_FSL_DPAA_ETH_DEBUGFS */

#ifdef CONFIG_FSL_DPAA_ETH_UNIT_TESTS
#include "dpaa_eth_unit_test.h"
#endif /* CONFIG_FSL_DPAA_ETH_UNIT_TESTS */

/*
 * CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include the trace header.
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

#define DPA_NAPI_WEIGHT		64

/* Valid checksum indication */
#define DPA_CSUM_VALID		0xFFFF

#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"

MODULE_LICENSE("Dual BSD/GPL");

MODULE_AUTHOR("Andy Fleming");

MODULE_DESCRIPTION(DPA_DESCRIPTION);

static uint8_t debug = -1;
module_param(debug, byte, S_IRUGO);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
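/*
 * The default of -1 stored in a byte parameter reads back as 0xff, which is
 * outside the 0..31 range netif_msg_init() accepts as a verbosity level; the
 * probe path therefore ends up with the default message-enable mask it passes
 * in (-1, i.e. all message types) -- see netif_msg_init(debug, -1) in
 * dpaa_eth_priv_probe() below.
 */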
/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
static uint16_t tx_timeout = 1000;
module_param(tx_timeout, ushort, S_IRUGO);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

/* dpaa_eth mirror for the FMan values */
int dpa_rx_extra_headroom;
int dpa_max_frm;

static const char rtx[][3] = {
	[RX] = "RX",
	[TX] = "TX"
};

#if defined(CONFIG_FSL_FMAN_TEST)
/* Defined as weak, to be implemented by the fman pcd tester. */
int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
	__attribute__((weak));
int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
#endif /* CONFIG_FSL_FMAN_TEST */

/* BM */
#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)

struct dpa_bp *default_pool;
bool default_pool_seeded;
uint32_t default_buf_size;

/* A set of callbacks for hooking into the fastpath at different points. */
struct dpaa_eth_hooks_s dpaa_eth_hooks;

/*
 * This function should only be called on the probe paths, since it makes no
 * effort to guarantee consistency of the destination hooks structure.
 */
void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
{
	if (hooks)
		dpaa_eth_hooks = *hooks;
	else
		pr_err("NULL pointer to hooks!\n");
}
EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
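/*
 * Usage sketch (illustrative only, not part of this driver): a separate
 * module could install fastpath hooks from its own probe/init path. The
 * names my_rx_error_hook and my_module_init are hypothetical, and the
 * return type is assumed to be the hook-result enum declared in dpaa_eth.h;
 * the field name, the DPAA_ETH_STOLEN return value and the argument order
 * match the way the hooks are invoked in this file. A hook that returns
 * DPAA_ETH_STOLEN owns the frame and must clean it up itself (here by
 * releasing the FD back to its buffer pool, assuming dpa_fd_release() is
 * visible to the hook's module).
 *
 *	static enum dpaa_eth_hook_result
 *	my_rx_error_hook(struct net_device *net_dev, const struct qm_fd *fd,
 *			 u32 fqid)
 *	{
 *		dpa_fd_release(net_dev, fd);
 *		return DPAA_ETH_STOLEN;
 *	}
 *
 *	static int __init my_module_init(void)
 *	{
 *		struct dpaa_eth_hooks_s hooks = {
 *			.rx_error = my_rx_error_hook,
 *		};
 *
 *		fsl_dpaa_eth_set_hooks(&hooks);
 *		return 0;
 *	}
 */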
/*
 * Checks whether the checksum field in Parse Results array is valid
 * (equals 0xFFFF) and increments the .cse counter otherwise
 */
static inline void
dpa_csum_validation(const struct dpa_priv_s *priv,
		    struct dpa_percpu_priv_s *percpu_priv,
		    const struct qm_fd *fd)
{
	dma_addr_t addr = qm_fd_addr(fd);
	struct dpa_bp *dpa_bp = priv->dpa_bp;
	void *frm = phys_to_virt(addr);
	t_FmPrsResult *parse_result;

	if (unlikely(!frm))
		return;

	dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);

	parse_result = (t_FmPrsResult *)(frm + DPA_RX_PRIV_DATA_SIZE);

	if (parse_result->cksum != DPA_CSUM_VALID)
		percpu_priv->rx_errors.cse++;
}

static void _dpa_rx_error(struct net_device *net_dev,
			  const struct dpa_priv_s *priv,
			  struct dpa_percpu_priv_s *percpu_priv,
			  const struct qm_fd *fd,
			  u32 fqid)
{
	/*
	 * Limit common, possibly innocuous Rx FIFO Overflow errors'
	 * interference with zero-loss convergence benchmark results.
	 */
	if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
		pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
	else if (netif_msg_hw(priv) && net_ratelimit())
		netdev_err(net_dev, "Err FD status = 0x%08x\n",
			   fd->status & FM_FD_STAT_ERRORS);

	if (dpaa_eth_hooks.rx_error &&
	    dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
		/* it's up to the hook to perform resource cleanup */
		return;

	percpu_priv->stats.rx_errors++;

	if (fd->status & FM_PORT_FRM_ERR_DMA)
		percpu_priv->rx_errors.dme++;
	if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
		percpu_priv->rx_errors.fpe++;
	if (fd->status & FM_PORT_FRM_ERR_SIZE)
		percpu_priv->rx_errors.fse++;
	if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
		percpu_priv->rx_errors.phe++;
	if (fd->status & FM_FD_STAT_L4CV)
		dpa_csum_validation(priv, percpu_priv, fd);

	dpa_fd_release(net_dev, fd);
}

static void _dpa_tx_error(struct net_device *net_dev,
			  const struct dpa_priv_s *priv,
			  struct dpa_percpu_priv_s *percpu_priv,
			  const struct qm_fd *fd,
			  u32 fqid)
{
	struct sk_buff *skb;

	if (netif_msg_hw(priv) && net_ratelimit())
		netdev_warn(net_dev, "FD status = 0x%08x\n",
			    fd->status & FM_FD_STAT_ERRORS);

	if (dpaa_eth_hooks.tx_error &&
	    dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
		/* now the hook must ensure proper cleanup */
		return;

	percpu_priv->stats.tx_errors++;

	skb = _dpa_cleanup_tx_fd(priv, fd);
	dev_kfree_skb(skb);
}

/*
 * Helper function to factor out frame validation logic on all Rx paths. Its
 * purpose is to extract from the Parse Results structure information about
 * the integrity of the frame, its checksum and whether the frame is suitable
 * for GRO.
 *
 * Assumes no parser errors, since any error frame is dropped before this
 * function is called.
 *
 * @skb will have its ip_summed field overwritten;
 * @use_gro will only be written with 0 if the frame is definitely not
 * GRO-able; otherwise, it will be left unchanged.
 */
void __hot _dpa_process_parse_results(const t_FmPrsResult *parse_results,
				      const struct qm_fd *fd,
				      struct sk_buff *skb, int *use_gro)
{
	if (fd->status & FM_FD_STAT_L4CV) {
		/*
		 * The parser has run and performed L4 checksum validation.
		 * We know there were no parser errors (and implicitly no
		 * L4 csum error), otherwise we wouldn't be here.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/*
		 * Don't go through GRO for certain types of traffic that
		 * we know are not GRO-able, such as dgram-based protocols.
		 * In the worst-case scenarios, such as small-pkt terminating
		 * UDP, the extra GRO processing would be overkill.
		 *
		 * The only protocol the Parser supports that is also GRO-able
		 * is currently TCP.
		 */
		if (!fm_l4_frame_is_tcp(parse_results))
			*use_gro = 0;

		return;
	}

	/*
	 * We're here because either the parser didn't run or the L4 checksum
	 * was not verified. This may include the case of a UDP frame with
	 * checksum zero or an L4 proto other than TCP/UDP.
	 */
	skb->ip_summed = CHECKSUM_NONE;

	/* Bypass GRO for unknown traffic or if no PCDs are applied */
	*use_gro = 0;
}

static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{
	int cleaned = qman_poll_dqrr(budget);

	if (cleaned < budget) {
		int tmp;

		napi_complete(napi);
		tmp = qman_irqsource_add(QM_PIRQ_DQRI);
		BUG_ON(tmp);
	}

	return cleaned;
}
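/*
 * NAPI/portal interplay: dpaa_eth_napi_schedule(), called from the DQRR
 * callbacks below, is expected to mask the portal's DQRI interrupt source
 * and schedule this poll routine when it runs in interrupt context.
 * dpaa_eth_poll() then drains the portal via qman_poll_dqrr() in softirq
 * context and, once it consumes less than its budget, completes NAPI and
 * re-adds QM_PIRQ_DQRI so the next dequeue raises an interrupt again.
 * (dpaa_eth_napi_schedule() itself is defined outside this file.)
 */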
static void __hot _dpa_tx_conf(struct net_device *net_dev,
			       const struct dpa_priv_s *priv,
			       struct dpa_percpu_priv_s *percpu_priv,
			       const struct qm_fd *fd,
			       u32 fqid)
{
	struct sk_buff *skb;

	/* do we need the timestamp for the error frames? */
	if (unlikely(fd->status & FM_FD_STAT_ERRORS)) {
		if (netif_msg_hw(priv) && net_ratelimit())
			netdev_warn(net_dev, "FD status = 0x%08x\n",
				    fd->status & FM_FD_STAT_ERRORS);
		percpu_priv->stats.tx_errors++;
	}

	/* hopefully we need not get the timestamp before the hook */
	if (dpaa_eth_hooks.tx_confirm &&
	    dpaa_eth_hooks.tx_confirm(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
		/* it's the hook that must now perform cleanup */
		return;

	/*
	 * This might not perfectly reflect the reality, if the core dequeuing
	 * the Tx confirmation is different from the one that did the enqueue,
	 * but at least it'll show up in the total count.
	 */
	percpu_priv->tx_confirm++;

	skb = _dpa_cleanup_tx_fd(priv, fd);
	dev_kfree_skb(skb);
}

static enum qman_cb_dqrr_result
priv_rx_error_dqrr(struct qman_portal *portal, struct qman_fq *fq,
		   const struct qm_dqrr_entry *dq)
{
	struct net_device *net_dev;
	struct dpa_priv_s *priv;
	struct dpa_percpu_priv_s *percpu_priv;

	net_dev = ((struct dpa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());

	if (dpaa_eth_napi_schedule(percpu_priv)) {
		percpu_priv->in_interrupt++;
		return qman_cb_dqrr_stop;
	}

	if (unlikely(dpaa_eth_refill_bpools(percpu_priv)))
		/*
		 * Unable to refill the buffer pool due to insufficient
		 * system memory. Just release the frame back into the pool,
		 * otherwise we'll soon end up with an empty buffer pool.
		 */
		dpa_fd_release(net_dev, &dq->fd);
	else
		_dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result __hot
priv_rx_default_dqrr(struct qman_portal *portal, struct qman_fq *fq,
		     const struct qm_dqrr_entry *dq)
{
	struct net_device *net_dev;
	struct dpa_priv_s *priv;
	struct dpa_percpu_priv_s *percpu_priv;

	net_dev = ((struct dpa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	/* Trace the Rx fd */
	trace_dpa_rx_fd(net_dev, fq, &dq->fd);

	/* IRQ handler, non-migratable; safe to use __this_cpu_ptr here */
	percpu_priv = __this_cpu_ptr(priv->percpu_priv);

	if (unlikely(dpaa_eth_napi_schedule(percpu_priv))) {
		percpu_priv->in_interrupt++;
		return qman_cb_dqrr_stop;
	}

	/* Vale of plenty: make sure we didn't run out of buffers */
	if (unlikely(dpaa_eth_refill_bpools(percpu_priv)))
		/*
		 * Unable to refill the buffer pool due to insufficient
		 * system memory. Just release the frame back into the pool,
		 * otherwise we'll soon end up with an empty buffer pool.
		 */
		dpa_fd_release(net_dev, &dq->fd);
	else
		_dpa_rx(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result
priv_tx_conf_error_dqrr(struct qman_portal *portal, struct qman_fq *fq,
			const struct qm_dqrr_entry *dq)
{
	struct net_device *net_dev;
	struct dpa_priv_s *priv;
	struct dpa_percpu_priv_s *percpu_priv;

	net_dev = ((struct dpa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id());

	if (dpaa_eth_napi_schedule(percpu_priv)) {
		percpu_priv->in_interrupt++;
		return qman_cb_dqrr_stop;
	}

	_dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result __hot
priv_tx_conf_default_dqrr(struct qman_portal *portal, struct qman_fq *fq,
			  const struct qm_dqrr_entry *dq)
{
	struct net_device *net_dev;
	struct dpa_priv_s *priv;
	struct dpa_percpu_priv_s *percpu_priv;

	net_dev = ((struct dpa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	/* Trace the fd */
	trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);

	/* Non-migratable context, safe to use __this_cpu_ptr */
	percpu_priv = __this_cpu_ptr(priv->percpu_priv);

	if (dpaa_eth_napi_schedule(percpu_priv)) {
		percpu_priv->in_interrupt++;
		return qman_cb_dqrr_stop;
	}

	_dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}

static void priv_ern(struct qman_portal *portal, struct qman_fq *fq,
		     const struct qm_mr_entry *msg)
{
	struct net_device *net_dev;
	const struct dpa_priv_s *priv;
	struct sk_buff *skb;
	struct dpa_percpu_priv_s *percpu_priv;
	struct qm_fd fd = msg->ern.fd;

	net_dev = ((struct dpa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);
	/* Non-migratable context, safe to use __this_cpu_ptr */
	percpu_priv = __this_cpu_ptr(priv->percpu_priv);

	percpu_priv->stats.tx_dropped++;
	percpu_priv->stats.tx_fifo_errors++;
	count_ern(percpu_priv, msg);

	/*
	 * If we intended this buffer to go into the pool
	 * when the FM was done, we need to put it in
	 * manually.
	 */
	if (msg->ern.fd.cmd & FM_FD_CMD_FCO) {
		dpa_fd_release(net_dev, &fd);
		return;
	}

	skb = _dpa_cleanup_tx_fd(priv, &fd);
	dev_kfree_skb_any(skb);
}
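/*
 * Per-FQ-type callback table for private interfaces: default and error Rx
 * FQs, default and error Tx-confirmation FQs, plus the ERN (enqueue
 * rejection notification) handler for the egress FQs. dpa_fq_setup(),
 * called from the probe path below, is expected to attach these callbacks
 * to the frame queues created for the interface.
 */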
static const dpa_fq_cbs_t private_fq_cbs = {
	.rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
	.tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
	.rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
	.tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
	.egress_ern = { .cb = { .ern = priv_ern } }
};

static int __cold dpa_eth_priv_start(struct net_device *net_dev)
{
	int i;
	struct dpa_priv_s *priv;
	struct dpa_percpu_priv_s *percpu_priv;

	priv = netdev_priv(net_dev);

	/*
	 * Seed the global buffer pool at the first ifconfig up
	 * of a private port. Update the percpu buffer counters
	 * of each private interface.
	 */
	if (!default_pool_seeded) {
		default_pool->size = default_buf_size;
		dpa_make_private_pool(default_pool);
		default_pool_seeded = true;
	}
	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		if (!percpu_priv->dpa_bp) {
			percpu_priv->dpa_bp = priv->dpa_bp;
			percpu_priv->dpa_bp_count =
				per_cpu_ptr(priv->dpa_bp->percpu_count, i);
		}
	}

	return dpa_start(net_dev);
}

static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
{
	int _errno;
	struct dpa_priv_s *priv;

	_errno = dpa_stop(net_dev);

	priv = netdev_priv(net_dev);
	dpaa_eth_napi_disable(priv);

	return _errno;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void dpaa_eth_poll_controller(struct net_device *net_dev)
{
	struct dpa_priv_s *priv = netdev_priv(net_dev);
	struct dpa_percpu_priv_s *percpu_priv =
		this_cpu_ptr(priv->percpu_priv);
	struct napi_struct napi = percpu_priv->napi;

	qman_irqsource_remove(QM_PIRQ_DQRI);
	qman_poll_dqrr(napi.weight);
	qman_irqsource_add(QM_PIRQ_DQRI);
}
#endif

static const struct net_device_ops dpa_private_ops = {
	.ndo_open = dpa_eth_priv_start,
	.ndo_start_xmit = dpa_tx,
	.ndo_stop = dpa_eth_priv_stop,
	.ndo_tx_timeout = dpa_timeout,
	.ndo_get_stats64 = dpa_get_stats64,
	.ndo_set_mac_address = dpa_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
	.ndo_select_queue = dpa_select_queue,
#endif
	.ndo_change_mtu = dpa_change_mtu,
	.ndo_set_rx_mode = dpa_set_rx_mode,
	.ndo_init = dpa_ndo_init,
	.ndo_set_features = dpa_set_features,
	.ndo_fix_features = dpa_fix_features,
	.ndo_do_ioctl = dpa_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = dpaa_eth_poll_controller,
#endif
};

static int dpa_private_netdev_init(struct device_node *dpa_node,
				   struct net_device *net_dev)
{
	int i;
	struct dpa_priv_s *priv = netdev_priv(net_dev);
	struct dpa_percpu_priv_s *percpu_priv;
	const uint8_t *mac_addr;

	/*
	 * Although we access another CPU's private data here
	 * we do it at initialization so it is safe
	 */
	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		percpu_priv->net_dev = net_dev;

		netif_napi_add(net_dev, &percpu_priv->napi, dpaa_eth_poll,
			       DPA_NAPI_WEIGHT);
	}

	net_dev->netdev_ops = &dpa_private_ops;
	mac_addr = priv->mac_dev->addr;

	net_dev->mem_start = priv->mac_dev->res->start;
	net_dev->mem_end = priv->mac_dev->res->end;

	net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				 NETIF_F_LLTX);

#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
	/* Advertise S/G and HIGHDMA support for private interfaces */
	net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
	/*
	 * Recent kernels enable GSO automatically, if
	 * we declare NETIF_F_SG. For conformity, we'll
	 * still declare GSO explicitly.
	 */
	net_dev->features |= NETIF_F_GSO;
#endif

	/* Advertise GRO support */
	net_dev->features |= NETIF_F_GRO;

	return dpa_netdev_init(dpa_node, net_dev, mac_addr, tx_timeout);
}
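/*
 * A note on the feature flags set above: bits added to hw_features form the
 * user-toggleable set (exposed through ethtool and routed through the
 * .ndo_fix_features/.ndo_set_features hooks of dpa_private_ops), while GSO
 * and GRO are set directly in features and are therefore active from the
 * start.
 */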
static const struct of_device_id dpa_match[];

static int dpaa_eth_priv_probe(struct platform_device *_of_dev)
{
	int err = 0, i;
	struct device *dev;
	struct device_node *dpa_node;
	struct dpa_bp *dpa_bp;
	struct dpa_fq *dpa_fq, *tmp;
	size_t count;
	struct net_device *net_dev = NULL;
	struct dpa_priv_s *priv = NULL;
	struct dpa_percpu_priv_s *percpu_priv;
	struct fm_port_fqs port_fqs;
	struct dpa_buffer_layout_s *buf_layout = NULL;
	struct mac_device *mac_dev;
	struct task_struct *kth;

	dev = &_of_dev->dev;

	dpa_node = dev->of_node;

	if (!of_device_is_available(dpa_node))
		return -ENODEV;

	/* Get the buffer pools assigned to this interface */
	dpa_bp = dpa_bp_probe(_of_dev, &count, default_pool);
	if (IS_ERR(dpa_bp))
		return PTR_ERR(dpa_bp);

	/*
	 * Allocate this early, so we can store relevant information in
	 * the private area (needed by 1588 code in dpa_mac_probe)
	 */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	/* Do this here, so we can be verbose early */
	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->msg_enable = netif_msg_init(debug, -1);

	mac_dev = dpa_mac_probe(_of_dev);
	if (IS_ERR(mac_dev) || !mac_dev) {
		err = mac_dev ? PTR_ERR(mac_dev) : -EINVAL;
		goto mac_probe_failed;
	}

	/*
	 * We have physical ports, so we need to establish
	 * the buffer layout.
	 */
	buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout), GFP_KERNEL);
	if (!buf_layout) {
		dev_err(dev, "devm_kzalloc() failed\n");
		err = -ENOMEM;
		goto alloc_failed;
	}
	dpa_set_buffers_layout(mac_dev, buf_layout);

	/*
	 * For private ports, we need to compute the size of the default
	 * buffer pool, based on the FMan port buffer layout; also update
	 * the maximum buffer size for private ports, if necessary.
	 */
	dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
	if (dpa_bp->size > default_buf_size)
		default_buf_size = dpa_bp->size;
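	/*
	 * The shared default pool itself is only seeded later, at the first
	 * ndo_open of a private interface (see dpa_eth_priv_start()), using
	 * the largest per-port buffer size recorded here.
	 */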
	INIT_LIST_HEAD(&priv->dpa_fq_list);

	memset(&port_fqs, 0, sizeof(port_fqs));

	err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
	if (!err)
		err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs,
				       true, TX);
	if (err < 0)
		goto fq_probe_failed;

	/* bp init */
	err = dpa_bp_create(net_dev, dpa_bp, count, &default_pool);
	if (err < 0)
		goto bp_create_failed;

	priv->mac_dev = mac_dev;

	priv->channel = dpa_get_channel(dev, dpa_node);
	if (priv->channel < 0) {
		err = priv->channel;
		goto get_channel_failed;
	}

	/*
	 * Start a thread that will walk the cpus with affine portals
	 * and add this pool channel to each's dequeue mask.
	 */
	kth = kthread_run(dpaa_eth_add_channel,
			  (void *)(unsigned long)priv->channel,
			  "dpaa_%p:%d", net_dev, priv->channel);
	if (IS_ERR(kth)) {
		err = -ENOMEM;
		goto add_channel_failed;
	}

	dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);

	/*
	 * Create a congestion group for this netdev, with
	 * dynamically-allocated CGR ID.
	 * Must be executed after probing the MAC, but before
	 * assigning the egress FQs to the CGRs.
	 */
	err = dpaa_eth_cgr_init(priv);
	if (err < 0) {
		dev_err(dev, "Error initializing CGR\n");
		goto cgr_init_failed;
	}

	/* Add the FQs to the interface, and make them active */
	list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
		err = dpa_fq_init(dpa_fq, false);
		if (err < 0)
			goto fq_alloc_failed;
	}

	priv->buf_layout = buf_layout;
	priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
	priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);

	/* All real interfaces need their ports initialized */
	dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs, buf_layout,
			    dev);

	priv->percpu_priv = alloc_percpu(*priv->percpu_priv);
	if (priv->percpu_priv == NULL) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto alloc_percpu_failed;
	}
	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		memset(percpu_priv, 0, sizeof(*percpu_priv));
	}

	err = dpa_private_netdev_init(dpa_node, net_dev);
	if (err < 0)
		goto netdev_init_failed;

	dpaa_eth_sysfs_init(&net_dev->dev);

	printk(KERN_INFO "fsl_dpa: Probed interface %s\n", net_dev->name);

#ifdef CONFIG_FSL_DPAA_ETH_UNIT_TESTS
	/* The unit test is designed to test private interfaces */
	dpa_unit_tests(net_dev);
#endif

	return 0;

netdev_init_failed:
	if (net_dev)
		free_percpu(priv->percpu_priv);
alloc_percpu_failed:
fq_alloc_failed:
	if (net_dev) {
		dpa_fq_free(dev, &priv->dpa_fq_list);
		qman_release_cgrid(priv->cgr_data.cgr.cgrid);
		qman_delete_cgr(&priv->cgr_data.cgr);
	}
cgr_init_failed:
add_channel_failed:
get_channel_failed:
	if (net_dev)
		dpa_bp_free(priv, priv->dpa_bp);
bp_create_failed:
fq_probe_failed:
	devm_kfree(dev, buf_layout);
alloc_failed:
mac_probe_failed:
	dev_set_drvdata(dev, NULL);
	if (net_dev)
		free_netdev(net_dev);

	return err;
}

static const struct of_device_id dpa_match[] = {
	{
		.compatible = "fsl,dpa-ethernet"
	},
	{}
};
MODULE_DEVICE_TABLE(of, dpa_match);

static struct platform_driver dpa_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = dpa_match,
		.owner = THIS_MODULE,
	},
	.probe = dpaa_eth_priv_probe,
	.remove = dpa_remove
};

static int __init __cold dpa_load(void)
{
	int _errno;

	printk(KERN_INFO KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n");

	/* initialise dpaa_eth mirror values */
	dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
	dpa_max_frm = fm_get_max_frm();

	_errno = platform_driver_register(&dpa_driver);
	if (unlikely(_errno < 0)) {
		pr_err(KBUILD_MODNAME
			": %s:%hu:%s(): platform_driver_register() = %d\n",
			KBUILD_BASENAME".c", __LINE__, __func__, _errno);
	}

	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
		 KBUILD_BASENAME".c", __func__);

	return _errno;
}
module_init(dpa_load);

static void __exit __cold dpa_unload(void)
{
	pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
		 KBUILD_BASENAME".c", __func__);

	platform_driver_unregister(&dpa_driver);

	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
		 KBUILD_BASENAME".c", __func__);
}
module_exit(dpa_unload);