From 53fb5c007ea674ca4a140973ca6d4dd40e0dea6c Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Wed, 11 Sep 2013 11:24:43 +0300
Subject: dpaa_eth: revert driver code to last known good state

Undo the merge conflict artifacts and the out-of-order, incomplete
patch application code changes.

This reverts the sdk-kernel-3.8 branch to the state at
70c2c840d8ff41359aafd7f510144c235a32fce1.

Signed-off-by: Madalin Bucur
Change-Id: Ib7282b356eeb38884fce716e247620c431bd3c72
Reviewed-on: http://git.am.freescale.net:8181/4642
Tested-by: Review Code-CDREVIEW
Reviewed-by: Bogdan Hamciuc
Reviewed-by: Rivera Jose-B46482

diff --git a/drivers/net/ethernet/freescale/dpa/Makefile b/drivers/net/ethernet/freescale/dpa/Makefile
index c6ca17e..413bd06 100644
--- a/drivers/net/ethernet/freescale/dpa/Makefile
+++ b/drivers/net/ethernet/freescale/dpa/Makefile
@@ -1,26 +1,24 @@ # # Makefile for the Freescale Ethernet controllers # -ccflags-y += -DVERSION=\"\" +EXTRA_CFLAGS += -DVERSION=\"\" # #Include netcomm SW specific definitions include $(srctree)/drivers/net/ethernet/freescale/fman/ncsw_config.mk -ccflags-y += -I$(NET_DPA) +EXTRA_CFLAGS += -I$(NET_DPA) obj-$(CONFIG_FSL_DPAA_1588) += dpaa_1588.o obj-$(CONFIG_FSL_DPAA_ETH_SG_SUPPORT) += fsl-dpa-sg.o # dpaa_debugfs needs to be initialized before dpaa_eth obj-$(CONFIG_FSL_DPAA_ETH_DEBUGFS) += dpaa_debugfs.o -obj-$(CONFIG_FSL_DPAA_ETH) += fsl-mac.o fsl-dpa.o fsl-dpa-common.o \ - fsl-dpa-base.o fsl-dpa-shared.o fsl-dpa-macless.o fsl-dpa-proxy.o +obj-$(CONFIG_FSL_DPAA_ETH) += fsl-mac.o fsl-dpa.o \ + fsl-dpa-shared.o fsl-dpa-macless.o fsl-dpa-proxy.o obj-$(CONFIG_FSL_DPAA_OFFLINE_PORTS) += fsl-oh.o obj-$(CONFIG_FSL_DPAA_ETH_UNIT_TESTS) += dpaa_eth_unit_test.o -fsl-dpa-objs := dpa-ethtool.o dpaa_eth_sysfs.o \ +fsl-dpa-objs := dpa-ethtool.o dpaa_eth_common.o dpaa_eth_sysfs.o \ dpaa_eth.o dpaa_eth_non_sg.o -fsl-dpa-common-objs := dpaa_eth_common.o -fsl-dpa-base-objs := dpaa_eth_base.o fsl-dpa-sg-objs := dpaa_eth_sg.o fsl-dpa-shared-objs := dpaa_eth_shared.o fsl-dpa-macless-objs := dpaa_eth_macless.o
diff --git a/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c b/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c
index 5eece2f..34681d1 100644
--- a/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c
@@ -37,8 +37,7 @@ #include "dpaa_eth.h" -static int __cold dpa_get_settings(struct net_device *net_dev, - struct ethtool_cmd *et_cmd) +static int __cold dpa_get_settings(struct net_device *net_dev, struct ethtool_cmd *et_cmd) { int _errno; struct dpa_priv_s *priv;
@@ -50,8 +49,8 @@ static int __cold dpa_get_settings(struct net_device *net_dev, return -ENODEV; } if (unlikely(priv->mac_dev->phy_dev == NULL)) { - netdev_dbg(net_dev, "phy device not initialized\n"); - return 0; + netdev_err(net_dev, "phy device not initialized\n"); + return -ENODEV; } _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
@@ -61,8 +60,7 @@ static int __cold dpa_set_settings(struct net_device *net_dev, - struct ethtool_cmd *et_cmd) +static int __cold dpa_set_settings(struct net_device *net_dev, struct ethtool_cmd *et_cmd) { int _errno; struct dpa_priv_s *priv;
@@ -85,8 +83,7 @@ static int __cold dpa_set_settings(struct net_device *net_dev, return _errno; } -static void __cold dpa_get_drvinfo(struct net_device *net_dev, - struct ethtool_drvinfo *drvinfo) +static void __cold dpa_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *drvinfo) { int _errno; @@
-94,18 +91,16 @@ static void __cold dpa_get_drvinfo(struct net_device *net_dev, sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0; strncpy(drvinfo->version, VERSION, sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->version)-1] = 0; - _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%X", 0); + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%X", 0); - if (unlikely(_errno >= sizeof(drvinfo->fw_version))) { - /* Truncated output */ + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) { /* Truncated output */ netdev_notice(net_dev, "snprintf() = %d\n", _errno); } else if (unlikely(_errno < 0)) { netdev_warn(net_dev, "snprintf() = %d\n", _errno); memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version)); } strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), - sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0; + sizeof(drvinfo->bus_info) - 1)[sizeof(drvinfo->bus_info)-1] = 0; } uint32_t __cold dpa_get_msglevel(struct net_device *net_dev) @@ -145,8 +140,7 @@ int __cold dpa_nway_reset(struct net_device *net_dev) return _errno; } -void __cold dpa_get_ringparam(struct net_device *net_dev, - struct ethtool_ringparam *et_ringparam) +void __cold dpa_get_ringparam(struct net_device *net_dev, struct ethtool_ringparam *et_ringparam) { et_ringparam->rx_max_pending = 0; et_ringparam->rx_mini_max_pending = 0; @@ -159,8 +153,7 @@ void __cold dpa_get_ringparam(struct net_device *net_dev, et_ringparam->tx_pending = 0; } -void __cold dpa_get_pauseparam(struct net_device *net_dev, - struct ethtool_pauseparam *et_pauseparam) +void __cold dpa_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *et_pauseparam) { struct dpa_priv_s *priv; @@ -180,8 +173,7 @@ void __cold dpa_get_pauseparam(struct net_device *net_dev, et_pauseparam->tx_pause = priv->mac_dev->tx_pause; } -int __cold dpa_set_pauseparam(struct net_device *net_dev, - struct ethtool_pauseparam *et_pauseparam) +int __cold dpa_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *et_pauseparam) { struct dpa_priv_s *priv; int _errno; diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_1588.c b/drivers/net/ethernet/freescale/dpa/dpaa_1588.c index efed56f..501eacf 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_1588.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_1588.c @@ -167,7 +167,8 @@ static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf, return 0; } -/* Parse the PTP packets +/* + * Parse the PTP packets * * The PTP header can be found in an IPv4 packet, IPv6 patcket or in * an IEEE802.3 ethernet frame. 
This function returns the position of @@ -187,8 +188,7 @@ static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type) #ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT /* when we can receive S/G frames we need to check the data we want to - * access is in the linear skb buffer - */ + * access is in the linear skb buffer */ if (!pskb_may_pull(skb, access_len)) return NULL; #endif diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c b/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c index f424f4a..f84b19e 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c @@ -34,6 +34,7 @@ #include /* struct qm_mcr_querycgr */ #include #include +#include /* get_hard_smp_processor_id() if !CONFIG_SMP */ #include "dpaa_debugfs.h" #include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */ @@ -90,8 +91,8 @@ static int dpa_debugfs_show(struct seq_file *file, void *offset) total.stats.rx_errors += percpu_priv->stats.rx_errors; count_total += dpa_bp_count; - seq_printf(file, " %hu %8llu %8llu %8llu %8llu ", - i, + seq_printf(file, " %hu/%hu %8llu %8llu %8llu %8llu ", + get_hard_smp_processor_id(i), i, percpu_priv->in_interrupt, percpu_priv->stats.rx_packets, percpu_priv->stats.tx_packets, @@ -143,8 +144,8 @@ static int dpa_debugfs_show(struct seq_file *file, void *offset) total.rx_errors.phe += percpu_priv->rx_errors.phe; total.rx_errors.cse += percpu_priv->rx_errors.cse; - seq_printf(file, " %hu %8llu %8llu ", - i, + seq_printf(file, " %hu/%hu %8llu %8llu ", + get_hard_smp_processor_id(i), i, percpu_priv->rx_errors.dme, percpu_priv->rx_errors.fpe); seq_printf(file, "%8llu %8llu %8llu\n", @@ -175,8 +176,8 @@ static int dpa_debugfs_show(struct seq_file *file, void *offset) total.ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired; total.ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero; - seq_printf(file, " %hu %8llu %8llu %8llu %8llu ", - i, + seq_printf(file, " %hu/%hu %8llu %8llu %8llu %8llu ", + get_hard_smp_processor_id(i), i, percpu_priv->ern_cnt.cg_tdrop, percpu_priv->ern_cnt.wred, percpu_priv->ern_cnt.err_cond, diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c index 77a3a35..5709765 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c @@ -52,6 +52,7 @@ #include #include #include +#include /* get_hard_smp_processor_id() */ #include #include "fsl_fman.h" @@ -134,7 +135,8 @@ void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks) } EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks); -/* Checks whether the checksum field in Parse Results array is valid +/* + * Checks whether the checksum field in Parse Results array is valid * (equals 0xFFFF) and increments the .cse counter otherwise */ static inline void @@ -145,14 +147,14 @@ dpa_csum_validation(const struct dpa_priv_s *priv, dma_addr_t addr = qm_fd_addr(fd); struct dpa_bp *dpa_bp = priv->dpa_bp; void *frm = phys_to_virt(addr); - fm_prs_result_t *parse_result; + t_FmPrsResult *parse_result; if (unlikely(!frm)) return; dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL); - parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE); + parse_result = (t_FmPrsResult *)(frm + DPA_RX_PRIV_DATA_SIZE); if (parse_result->cksum != DPA_CSUM_VALID) percpu_priv->rx_errors.cse++; @@ -215,19 +217,12 @@ static void _dpa_tx_error(struct net_device *net_dev, percpu_priv->stats.tx_errors++; - /* If we intended the buffers from this frame to go into the bpools - * 
when the FMan transmit was done, we need to put it in manually. - */ - if (fd->cmd & FM_FD_CMD_FCO) { - dpa_fd_release(net_dev, fd); - return; - } - skb = _dpa_cleanup_tx_fd(priv, fd); dev_kfree_skb(skb); } -/* Helper function to factor out frame validation logic on all Rx paths. Its +/* + * Helper function to factor out frame validation logic on all Rx paths. Its * purpose is to extract from the Parse Results structure information about * the integrity of the frame, its checksum, the length of the parsed headers * and whether the frame is suitable for GRO. @@ -241,18 +236,20 @@ static void _dpa_tx_error(struct net_device *net_dev, * @hdr_size will be written with a safe value, at least the size of the * headers' length. */ -void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results, +void __hot _dpa_process_parse_results(const t_FmPrsResult *parse_results, const struct qm_fd *fd, struct sk_buff *skb, int *use_gro) { if (fd->status & FM_FD_STAT_L4CV) { - /* The parser has run and performed L4 checksum validation. + /* + * The parser has run and performed L4 checksum validation. * We know there were no parser errors (and implicitly no * L4 csum error), otherwise we wouldn't be here. */ skb->ip_summed = CHECKSUM_UNNECESSARY; - /* Don't go through GRO for certain types of traffic that + /* + * Don't go through GRO for certain types of traffic that * we know are not GRO-able, such as dgram-based protocols. * In the worst-case scenarios, such as small-pkt terminating * UDP, the extra GRO processing would be overkill. @@ -266,7 +263,8 @@ void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results, return; } - /* We're here because either the parser didn't run or the L4 checksum + /* + * We're here because either the parser didn't run or the L4 checksum * was not verified. This may include the case of a UDP frame with * checksum zero or an L4 proto other than TCP/UDP */ @@ -338,12 +336,12 @@ priv_rx_error_dqrr(struct qman_portal *portal, net_dev = ((struct dpa_fq *)fq)->net_dev; priv = netdev_priv(net_dev); - percpu_priv = __this_cpu_ptr(priv->percpu_priv); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); if (dpaa_eth_napi_schedule(percpu_priv)) return qman_cb_dqrr_stop; - if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp))) + if (unlikely(dpaa_eth_refill_bpools(percpu_priv))) /* Unable to refill the buffer pool due to insufficient * system memory. Just release the frame back into the pool, * otherwise we'll soon end up with an empty buffer pool. @@ -379,7 +377,7 @@ priv_rx_default_dqrr(struct qman_portal *portal, /* Vale of plenty: make sure we didn't run out of buffers */ - if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp))) + if (unlikely(dpaa_eth_refill_bpools(percpu_priv))) /* Unable to refill the buffer pool due to insufficient * system memory. Just release the frame back into the pool, * otherwise we'll soon end up with an empty buffer pool. 
@@ -403,7 +401,7 @@ priv_tx_conf_error_dqrr(struct qman_portal *portal, net_dev = ((struct dpa_fq *)fq)->net_dev; priv = netdev_priv(net_dev); - percpu_priv = __this_cpu_ptr(priv->percpu_priv); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); if (dpaa_eth_napi_schedule(percpu_priv)) return qman_cb_dqrr_stop; @@ -458,7 +456,8 @@ static void priv_ern(struct qman_portal *portal, percpu_priv->stats.tx_fifo_errors++; count_ern(percpu_priv, msg); - /* If we intended this buffer to go into the pool + /* + * If we intended this buffer to go into the pool * when the FM was done, we need to put it in * manually. */ @@ -545,7 +544,7 @@ static void dpaa_eth_poll_controller(struct net_device *net_dev) { struct dpa_priv_s *priv = netdev_priv(net_dev); struct dpa_percpu_priv_s *percpu_priv = - __this_cpu_ptr(priv->percpu_priv); + this_cpu_ptr(priv->percpu_priv); struct napi_struct napi = percpu_priv->napi; qman_irqsource_remove(QM_PIRQ_DQRI); @@ -584,7 +583,8 @@ static int dpa_private_netdev_init(struct device_node *dpa_node, struct dpa_percpu_priv_s *percpu_priv; const uint8_t *mac_addr; - /* Although we access another CPU's private data here + /* + * Although we access another CPU's private data here * we do it at initialization so it is safe */ for_each_online_cpu(i) { @@ -633,12 +633,10 @@ dpa_priv_bp_probe(struct device *dev) dpa_bp->percpu_count = alloc_percpu(*dpa_bp->percpu_count); dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; + dpa_bp->drain_cb = dpa_bp_drain; #ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT dpa_bp->seed_cb = dpa_bp_priv_seed; - dpa_bp->free_buf_cb = _dpa_bp_free_pf; -#else - dpa_bp->free_buf_cb = _dpa_bp_free_skb; #endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */ return dpa_bp; @@ -706,13 +704,14 @@ dpaa_eth_priv_probe(struct platform_device *_of_dev) if (IS_ERR(dpa_bp)) return PTR_ERR(dpa_bp); - /* Allocate this early, so we can store relevant information in + /* + * Allocate this early, so we can store relevant information in * the private area (needed by 1588 code in dpa_mac_probe) */ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES); if (!net_dev) { dev_err(dev, "alloc_etherdev_mq() failed\n"); - goto alloc_etherdev_mq_failed; + return -ENOMEM; } /* Do this here, so we can be verbose early */ @@ -792,7 +791,8 @@ dpaa_eth_priv_probe(struct platform_device *_of_dev) dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]); - /* Create a congestion group for this netdev, with + /* + * Create a congestion group for this netdev, with * dynamically-allocated CGR ID. * Must be executed after probing the MAC, but before * assigning the egress FQs to the CGRs. 
@@ -828,6 +828,7 @@ dpaa_eth_priv_probe(struct platform_device *_of_dev) for_each_online_cpu(i) { percpu_priv = per_cpu_ptr(priv->percpu_priv, i); memset(percpu_priv, 0, sizeof(*percpu_priv)); + percpu_priv->dpa_bp = priv->dpa_bp; } err = dpa_private_netdev_init(dpa_node, net_dev); @@ -869,8 +870,6 @@ mac_probe_failed: dev_set_drvdata(dev, NULL); if (net_dev) free_netdev(net_dev); -alloc_etherdev_mq_failed: - devm_kfree(dev, dpa_bp); return err; } @@ -897,8 +896,7 @@ static int __init __cold dpa_load(void) { int _errno; - printk(KERN_INFO KBUILD_MODNAME ": " - DPA_DESCRIPTION " (" VERSION ")\n"); + printk(KERN_INFO KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n"); /* initialise dpaa_eth mirror values */ dpa_rx_extra_headroom = fm_get_rx_extra_headroom(); diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h index f33ff0a..4f743c3 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h @@ -61,7 +61,8 @@ extern int dpa_max_frm; #define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom #define dpa_get_max_frm() dpa_max_frm -/* Currently we have the same max_frm on all interfaces, so these macros +/* + * Currently we have the same max_frm on all interfaces, so these macros * don't get a net_device argument. This will change in the future. */ #define dpa_get_min_mtu() 64 @@ -84,7 +85,7 @@ struct dpa_buffer_layout_s { }; #define DPA_TX_PRIV_DATA_SIZE 16 -#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t) +#define DPA_PARSE_RESULTS_SIZE sizeof(t_FmPrsResult) #define DPA_TIME_STAMP_SIZE 8 #define DPA_HASH_RESULTS_SIZE 8 #define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \ @@ -126,7 +127,8 @@ struct dpa_buffer_layout_s { dpa_get_buffer_size(buffer_layout, (dpa_get_max_frm() - ETH_FCS_LEN)) #endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */ -/* Maximum size of a buffer for which recycling is allowed. +/* + * Maximum size of a buffer for which recycling is allowed. * We need an upper limit such that forwarded skbs that get reallocated on Tx * aren't allowed to grow unboundedly. On the other hand, we need to make sure * that skbs allocated by us will not fail to be recycled due to their size. @@ -163,7 +165,8 @@ enum dpaa_eth_hook_result { * perform any fd cleanup, nor update the interface statistics. */ DPAA_ETH_STOLEN, - /* fd/skb was returned to the Ethernet driver for regular processing. + /* + * fd/skb was returned to the Ethernet driver for regular processing. * The hook is not allowed to, for instance, reallocate the skb (as if * by linearizing, copying, cloning or reallocating the headroom). */ @@ -177,16 +180,19 @@ typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)( typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)( struct net_device *net_dev, const struct qm_fd *fd, u32 fqid); -/* Various hooks used for unit-testing and/or fastpath optimizations. +/* + * Various hooks used for unit-testing and/or fastpath optimizations. * Currently only one set of such hooks is supported. */ struct dpaa_eth_hooks_s { - /* Invoked on the Tx private path, immediately after receiving the skb + /* + * Invoked on the Tx private path, immediately after receiving the skb * from the stack. */ dpaa_eth_egress_hook_t tx; - /* Invoked on the Rx private path, right before passing the skb + /* + * Invoked on the Rx private path, right before passing the skb * up the stack. At that point, the packet's protocol id has already * been set. 
The skb's data pointer is now at the L3 header, and * skb->mac_header points to the L2 header. skb->len has been adjusted @@ -206,7 +212,8 @@ struct dpaa_eth_hooks_s { void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks); -/* Largest value that the FQD's OAL field can hold. +/* + * Largest value that the FQD's OAL field can hold. * This is DPAA-1.x specific. * TODO: This rather belongs in fsl_qman.h */ @@ -218,21 +225,24 @@ void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks); /* Default alignment for start of data in an Rx FD */ #define DPA_FD_DATA_ALIGNMENT 16 -/* Values for the L3R field of the FM Parse Results +/* + * Values for the L3R field of the FM Parse Results */ /* L3 Type field: First IP Present IPv4 */ #define FM_L3_PARSE_RESULT_IPV4 0x8000 /* L3 Type field: First IP Present IPv6 */ #define FM_L3_PARSE_RESULT_IPV6 0x4000 -/* Values for the L4R field of the FM Parse Results +/* + * Values for the L4R field of the FM Parse Results * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual. */ /* L4 Type field: UDP */ #define FM_L4_PARSE_RESULT_UDP 0x40 /* L4 Type field: TCP */ #define FM_L4_PARSE_RESULT_TCP 0x20 -/* This includes L4 checksum errors, but also other errors that the Hard Parser +/* + * This includes L4 checksum errors, but also other errors that the Hard Parser * can detect, such as invalid combinations of TCP control flags, or bad UDP * lengths. */ @@ -240,7 +250,8 @@ void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks); /* Check if the hardware parser has run */ #define FM_L4_HXS_RUN 0xE0 -/* FD status field indicating whether the FM Parser has attempted to validate +/* + * FD status field indicating whether the FM Parser has attempted to validate * the L4 csum of the frame. * Note that having this bit set doesn't necessarily imply that the checksum * is valid. One would have to check the parse results to find that out. @@ -250,21 +261,24 @@ void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks); #define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL -/* Check if the FMan Hardware Parser has run for L4 protocols. +/* + * Check if the FMan Hardware Parser has run for L4 protocols. * - * @parse_result_ptr must be of type (fm_prs_result_t *). + * @parse_result_ptr must be of type (t_FmPrsResult *). */ #define fm_l4_hxs_has_run(parse_result_ptr) \ ((parse_result_ptr)->l4r & FM_L4_HXS_RUN) -/* Iff the FMan Hardware Parser has run for L4 protocols, check error status. +/* + * Iff the FMan Hardware Parser has run for L4 protocols, check error status. * - * @parse_result_ptr must be of type (fm_prs_result_t *). + * @parse_result_ptr must be of type (t_FmPrsResult *). */ #define fm_l4_hxs_error(parse_result_ptr) \ ((parse_result_ptr)->l4r & FM_L4_PARSE_ERROR) -/* Check if the parsed frame was found to be a TCP segment. +/* + * Check if the parsed frame was found to be a TCP segment. * - * @parse_result_ptr must be of type (fm_prs_result_t *). + * @parse_result_ptr must be of type (t_FmPrsResult *). 
*/ #define fm_l4_frame_is_tcp(parse_result_ptr) \ ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP) @@ -322,24 +336,28 @@ struct dpa_bp { uint8_t bpid; struct device *dev; union { - /* The buffer pools used for the private ports are initialized + /* + * The buffer pools used for the private ports are initialized * with target_count buffers for each CPU; at runtime the * number of buffers per CPU is constantly brought back to this * level */ int target_count; - /* The configured value for the number of buffers in the pool, + /* + * The configured value for the number of buffers in the pool, * used for shared port buffer pools */ int config_count; }; size_t size; bool seed_pool; - /* physical address of the contiguous memory used by the pool to store + /* + * physical address of the contiguous memory used by the pool to store * the buffers */ dma_addr_t paddr; - /* virtual address of the contiguous memory used by the pool to store + /* + * virtual address of the contiguous memory used by the pool to store * the buffers */ void *vaddr; @@ -348,10 +366,8 @@ struct dpa_bp { atomic_t refs; /* some bpools need to be seeded before use by this cb */ int (*seed_cb)(struct dpa_bp *); - /* some bpools need to be emptied before freeing; this cb is used - * for freeing of individual buffers taken from the pool - */ - void (*free_buf_cb)(void *addr); + /* some bpools need to be emptied before freeing by this cb */ + void (*drain_cb)(struct dpa_bp *); }; struct dpa_rx_errors { @@ -376,6 +392,7 @@ struct dpa_ern_cnt { struct dpa_percpu_priv_s { struct net_device *net_dev; + struct dpa_bp *dpa_bp; struct napi_struct napi; u64 in_interrupt; u64 tx_returned; @@ -390,7 +407,8 @@ struct dpa_percpu_priv_s { struct dpa_priv_s { struct dpa_percpu_priv_s *percpu_priv; struct dpa_bp *dpa_bp; - /* Store here the needed Tx headroom for convenience and speed + /* + * Store here the needed Tx headroom for convenience and speed * (even though it can be computed based on the fields of buf_layout) */ uint16_t tx_headroom; @@ -457,8 +475,8 @@ struct fm_port_fqs { }; /* functions with different implementation for SG and non-SG: */ -int dpa_bp_priv_seed(struct dpa_bp *dpa_bp); -int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp); +void dpa_bp_priv_seed(struct dpa_bp *dpa_bp); +int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv); void __hot _dpa_rx(struct net_device *net_dev, const struct dpa_priv_s *priv, struct dpa_percpu_priv_s *percpu_priv, @@ -467,7 +485,7 @@ void __hot _dpa_rx(struct net_device *net_dev, int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev); struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, const struct qm_fd *fd); -void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results, +void __hot _dpa_process_parse_results(const t_FmPrsResult *parse_results, const struct qm_fd *fd, struct sk_buff *skb, int *use_gro); @@ -477,7 +495,8 @@ void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu_id); int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp); #endif -/* Turn on HW checksum computation for this outgoing frame. +/* + * Turn on HW checksum computation for this outgoing frame. * If the current protocol is not something we support in this regard * (or if the stack has already computed the SW checksum), we do nothing. 
* @@ -492,7 +511,8 @@ int dpa_enable_tx_csum(struct dpa_priv_s *priv, static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv) { - /* In case of threaded ISR for RT enable kernel, + /* + * In case of threaded ISR for RT enable kernel, * in_irq() does not return appropriate value, so use * in_serving_softirq to distinguish softirq or irq context. */ @@ -581,8 +601,7 @@ static inline int __hot dpa_xmit(struct dpa_priv_s *priv, #ifdef CONFIG_FSL_DPAA_TX_RECYCLE /* Choose egress fq based on whether we want - * to recycle the frame or not - */ + * to recycle the frame or not */ if (fd->cmd & FM_FD_CMD_FCO) egress_fq = priv->recycle_fqs[queue]; else @@ -615,7 +634,8 @@ static inline int __hot dpa_xmit(struct dpa_priv_s *priv, #if defined CONFIG_FSL_DPAA_ETH_WQ_LEGACY #define DPA_NUM_WQS 8 -/* Older WQ assignment: statically-defined FQIDs (such as PCDs) are assigned +/* + * Older WQ assignment: statically-defined FQIDs (such as PCDs) are assigned * round-robin to all WQs available. Dynamically-allocated FQIDs go to WQ7. * * Not necessarily the best scheme, but worked fine so far, so we might want @@ -626,7 +646,8 @@ static inline void _dpa_assign_wq(struct dpa_fq *fq) fq->wq = fq->fqid ? fq->fqid % DPA_NUM_WQS : DPA_NUM_WQS - 1; } #elif defined CONFIG_FSL_DPAA_ETH_WQ_MULTI -/* Use multiple WQs for FQ assignment: +/* + * Use multiple WQs for FQ assignment: * - Tx Confirmation queues go to WQ1. * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between * Rx and Tx traffic, or between Rx Default and Rx PCD frames). @@ -682,7 +703,7 @@ void dpa_bp_default_buf_size_update(uint32_t size); uint32_t dpa_bp_default_buf_size_get(void); void dpa_bp_priv_non_sg_seed(struct dpa_bp *dpa_bp); -static inline void _dpa_bp_free_skb(void *addr) +static inline void _dpa_bp_free_buf(void *addr) { struct sk_buff **skbh = addr; struct sk_buff *skb; @@ -691,7 +712,7 @@ static inline void _dpa_bp_free_skb(void *addr) dev_kfree_skb_any(skb); } #else -static inline void _dpa_bp_free_pf(void *addr) +static inline void _dpa_bp_free_buf(void *addr) { put_page(virt_to_head_page(addr)); } diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c deleted file mode 100644 index 95da2a3..0000000 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c +++ /dev/null @@ -1,219 +0,0 @@ -/* Copyright 2008-2013 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. 
- * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "dpaa_eth.h" -#include "dpaa_eth_common.h" - -static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1) -{ - return ((struct dpa_bp *)dpa_bp0)->size - - ((struct dpa_bp *)dpa_bp1)->size; -} - -struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */ -dpa_bp_probe(struct platform_device *_of_dev, size_t *count) -{ - int i, lenp, na, ns; - struct device *dev; - struct device_node *dev_node; - const phandle *phandle_prop; - const uint32_t *bpid; - const uint32_t *bpool_cfg; - struct dpa_bp *dpa_bp; - - dev = &_of_dev->dev; - - /* The default is one, if there's no property */ - *count = 1; - - /* Get the buffer pools to be used */ - phandle_prop = of_get_property(dev->of_node, - "fsl,bman-buffer-pools", &lenp); - - if (phandle_prop) - *count = lenp / sizeof(phandle); - else { - dev_err(dev, - "missing fsl,bman-buffer-pools device tree entry\n"); - return ERR_PTR(-EINVAL); - } - - dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL); - if (unlikely(dpa_bp == NULL)) { - dev_err(dev, "devm_kzalloc() failed\n"); - return ERR_PTR(-ENOMEM); - } - - dev_node = of_find_node_by_path("/"); - if (unlikely(dev_node == NULL)) { - dev_err(dev, "of_find_node_by_path(/) failed\n"); - return ERR_PTR(-EINVAL); - } - - na = of_n_addr_cells(dev_node); - ns = of_n_size_cells(dev_node); - - for (i = 0; i < *count && phandle_prop; i++) { - of_node_put(dev_node); - dev_node = of_find_node_by_phandle(phandle_prop[i]); - if (unlikely(dev_node == NULL)) { - dev_err(dev, "of_find_node_by_phandle() failed\n"); - return ERR_PTR(-EFAULT); - } - - if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) { - dev_err(dev, - "!of_device_is_compatible(%s, fsl,bpool)\n", - dev_node->full_name); - dpa_bp = ERR_PTR(-EINVAL); - goto _return_of_node_put; - } - - bpid = of_get_property(dev_node, "fsl,bpid", &lenp); - if ((bpid == NULL) || (lenp != sizeof(*bpid))) { - dev_err(dev, "fsl,bpid property not found.\n"); - dpa_bp = ERR_PTR(-EINVAL); - goto _return_of_node_put; - } - dpa_bp[i].bpid = *bpid; - - bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg", - &lenp); - if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) { - const uint32_t *seed_pool; - - dpa_bp[i].config_count = - (int)of_read_number(bpool_cfg, ns); - dpa_bp[i].size = of_read_number(bpool_cfg + ns, ns); - dpa_bp[i].paddr = - of_read_number(bpool_cfg + 2 * ns, na); - - seed_pool = of_get_property(dev_node, - "fsl,bpool-ethernet-seeds", &lenp); - dpa_bp[i].seed_pool = !!seed_pool; - - } else { - dev_err(dev, - "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n", - dev_node->full_name); - dpa_bp = ERR_PTR(-EINVAL); - goto 
_return_of_node_put; - } - } - - sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL); - - return dpa_bp; - -_return_of_node_put: - if (dev_node) - of_node_put(dev_node); - - return dpa_bp; -} - -int dpa_bp_shared_port_seed(struct dpa_bp *bp) -{ - /* In MAC-less and Shared-MAC scenarios the physical - * address of the buffer pool in device tree is set - * to 0 to specify that another entity (USDPAA) will - * allocate and seed the buffers - */ - if (!bp->paddr) - return 0; - - /* allocate memory region for buffers */ - devm_request_mem_region(bp->dev, bp->paddr, - bp->size * bp->config_count, KBUILD_MODNAME); - bp->vaddr = devm_ioremap_prot(bp->dev, bp->paddr, - bp->size * bp->config_count, 0); - if (bp->vaddr == NULL) { - pr_err("Could not map memory for pool %d\n", bp->bpid); - return -EIO; - } - - /* seed pool with buffers from that memory region */ - if (bp->seed_pool) { - int count = bp->target_count; - size_t addr = bp->paddr; - - while (count) { - struct bm_buffer bufs[8]; - int num_bufs = 0; - - do { - BUG_ON(addr > 0xffffffffffffull); - bufs[num_bufs].bpid = bp->bpid; - bm_buffer_set64(&bufs[num_bufs++], addr); - addr += bp->size; - - } while (--count && (num_bufs < 8)); - - while (bman_release(bp->pool, bufs, num_bufs, 0)) - cpu_relax(); - } - } - - return 0; -} - -int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, - size_t count) -{ - struct dpa_priv_s *priv = netdev_priv(net_dev); - int i; - - priv->dpa_bp = dpa_bp; - priv->bp_count = count; - - for (i = 0; i < count; i++) { - int err; - err = dpa_bp_alloc(&dpa_bp[i]); - if (err < 0) { - dpa_bp_free(priv, dpa_bp); - priv->dpa_bp = NULL; - return err; - } - } - - return 0; -} - diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.h deleted file mode 100644 index 5b5ef1e..0000000 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.h +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright 2008-2013 Freescale Semiconductor, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Freescale Semiconductor nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") as published by the Free Software - * Foundation, either version 2 of that License or (at your option) any - * later version. - * - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __DPAA_ETH_BASE_H -#define __DPAA_ETH_BASE_H - -#include /* struct net_device */ -#include /* struct bm_buffer */ -#include /* struct platform_device */ -#include /* struct hwtstamp_config */ - -struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */ -dpa_bp_probe(struct platform_device *_of_dev, size_t *count); -int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, - size_t count); -int dpa_bp_shared_port_seed(struct dpa_bp *bp); - -#endif /* __DPAA_ETH_BASE_H */ diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c index fc7433e..013ff0f 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c @@ -190,7 +190,7 @@ void __cold dpa_timeout(struct net_device *net_dev) struct dpa_percpu_priv_s *percpu_priv; priv = netdev_priv(net_dev); - percpu_priv = __this_cpu_ptr(priv->percpu_priv); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); if (netif_msg_timer(priv)) netdev_crit(net_dev, "Transmit timeout latency: %u ms\n", @@ -335,7 +335,7 @@ static void dpa_ts_tx_enable(struct net_device *dev) if (mac_dev->ptp_enable) mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev)); - priv->ts_tx_en = true; + priv->ts_tx_en = TRUE; } static void dpa_ts_tx_disable(struct net_device *dev) @@ -355,7 +355,7 @@ static void dpa_ts_tx_disable(struct net_device *dev) mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev)); #endif - priv->ts_tx_en = false; + priv->ts_tx_en = FALSE; } static void dpa_ts_rx_enable(struct net_device *dev) @@ -368,7 +368,7 @@ static void dpa_ts_rx_enable(struct net_device *dev) if (mac_dev->ptp_enable) mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev)); - priv->ts_rx_en = true; + priv->ts_rx_en = TRUE; } static void dpa_ts_rx_disable(struct net_device *dev) @@ -388,7 +388,7 @@ static void dpa_ts_rx_disable(struct net_device *dev) mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev)); #endif - priv->ts_rx_en = false; + priv->ts_rx_en = FALSE; } static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -474,8 +474,6 @@ int __cold dpa_remove(struct platform_device *of_dev) free_percpu(priv->percpu_priv); dpa_bp_free(priv, priv->dpa_bp); - devm_kfree(dev, priv->dpa_bp); - if (priv->buf_layout) devm_kfree(dev, priv->buf_layout); @@ -649,6 +647,159 @@ void dpa_set_buffers_layout(struct mac_device *mac_dev, layout[RX].data_align = params.data_align ? 
: DPA_FD_DATA_ALIGNMENT; } +static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1) +{ + return ((struct dpa_bp *)dpa_bp0)->size - + ((struct dpa_bp *)dpa_bp1)->size; +} + +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */ +dpa_bp_probe(struct platform_device *_of_dev, size_t *count) +{ + int i, lenp, na, ns; + struct device *dev; + struct device_node *dev_node; + const phandle *phandle_prop; + const uint32_t *bpid; + const uint32_t *bpool_cfg; + struct dpa_bp *dpa_bp; + + dev = &_of_dev->dev; + + /* The default is one, if there's no property */ + *count = 1; + + /* Get the buffer pools to be used */ + phandle_prop = of_get_property(dev->of_node, + "fsl,bman-buffer-pools", &lenp); + + if (phandle_prop) + *count = lenp / sizeof(phandle); + else { + dev_err(dev, + "missing fsl,bman-buffer-pools device tree entry\n"); + return ERR_PTR(-EINVAL); + } + + dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL); + if (unlikely(dpa_bp == NULL)) { + dev_err(dev, "devm_kzalloc() failed\n"); + return ERR_PTR(-ENOMEM); + } + + dev_node = of_find_node_by_path("/"); + if (unlikely(dev_node == NULL)) { + dev_err(dev, "of_find_node_by_path(/) failed\n"); + return ERR_PTR(-EINVAL); + } + + na = of_n_addr_cells(dev_node); + ns = of_n_size_cells(dev_node); + + for (i = 0; i < *count && phandle_prop; i++) { + of_node_put(dev_node); + dev_node = of_find_node_by_phandle(phandle_prop[i]); + if (unlikely(dev_node == NULL)) { + dev_err(dev, "of_find_node_by_phandle() failed\n"); + return ERR_PTR(-EFAULT); + } + + if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) { + dev_err(dev, + "!of_device_is_compatible(%s, fsl,bpool)\n", + dev_node->full_name); + dpa_bp = ERR_PTR(-EINVAL); + goto _return_of_node_put; + } + + bpid = of_get_property(dev_node, "fsl,bpid", &lenp); + if ((bpid == NULL) || (lenp != sizeof(*bpid))) { + dev_err(dev, "fsl,bpid property not found.\n"); + dpa_bp = ERR_PTR(-EINVAL); + goto _return_of_node_put; + } + dpa_bp[i].bpid = *bpid; + + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg", + &lenp); + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) { + const uint32_t *seed_pool; + + dpa_bp[i].config_count = + (int)of_read_number(bpool_cfg, ns); + dpa_bp[i].size = of_read_number(bpool_cfg + ns, ns); + dpa_bp[i].paddr = + of_read_number(bpool_cfg + 2 * ns, na); + + seed_pool = of_get_property(dev_node, + "fsl,bpool-ethernet-seeds", &lenp); + dpa_bp[i].seed_pool = !!seed_pool; + + } else { + dev_err(dev, + "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n", + dev_node->full_name); + dpa_bp = ERR_PTR(-EINVAL); + goto _return_of_node_put; + } + } + + sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL); + + return dpa_bp; + +_return_of_node_put: + if (dev_node) + of_node_put(dev_node); + + return dpa_bp; +} + +int dpa_bp_shared_port_seed(struct dpa_bp *bp) +{ + /* In MAC-less and Shared-MAC scenarios the physical + * address of the buffer pool in device tree is set + * to 0 to specify that another entity (USDPAA) will + * allocate and seed the buffers + */ + if (!bp->paddr) + return 0; + + /* allocate memory region for buffers */ + devm_request_mem_region(bp->dev, bp->paddr, + bp->size * bp->config_count, KBUILD_MODNAME); + bp->vaddr = devm_ioremap_prot(bp->dev, bp->paddr, + bp->size * bp->config_count, 0); + if (bp->vaddr == NULL) { + pr_err("Could not map memory for pool %d\n", bp->bpid); + return -EIO; + } + + /* seed pool with buffers from that memory region */ + if (bp->seed_pool) { + int count = 
bp->target_count; + size_t addr = bp->paddr; + + while (count) { + struct bm_buffer bufs[8]; + int num_bufs = 0; + + do { + BUG_ON(addr > 0xffffffffffffull); + bufs[num_bufs].bpid = bp->bpid; + bm_buffer_set64(&bufs[num_bufs++], addr); + addr += bp->size; + + } while (--count && (num_bufs < 8)); + + while (bman_release(bp->pool, bufs, num_bufs, 0)) + cpu_relax(); + } + } + + return 0; +} + int __attribute__((nonnull)) dpa_bp_alloc(struct dpa_bp *dpa_bp) { @@ -710,27 +861,37 @@ pdev_register_failed: return err; } +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, + size_t count) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + int i; + + priv->dpa_bp = dpa_bp; + priv->bp_count = count; + + for (i = 0; i < count; i++) { + int err; + err = dpa_bp_alloc(&dpa_bp[i]); + if (err < 0) { + dpa_bp_free(priv, dpa_bp); + priv->dpa_bp = NULL; + return err; + } + } + + return 0; +} + void dpa_bp_drain(struct dpa_bp *bp) { - int ret, num = 8; + int num; do { struct bm_buffer bmb[8]; int i; - ret = bman_acquire(bp->pool, bmb, 8, 0); - if (ret < 0) { - if (num == 8) { - /* we have less than 8 buffers left; - * drain them one by one - */ - num = 1; - continue; - } else { - /* Pool is fully drained */ - break; - } - } + num = bman_acquire(bp->pool, bmb, 8, 0); for (i = 0; i < num; i++) { dma_addr_t addr = bm_buf_addr(&bmb[i]); @@ -738,9 +899,9 @@ void dpa_bp_drain(struct dpa_bp *bp) dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL); - bp->free_buf_cb(phys_to_virt(addr)); + _dpa_bp_free_buf(phys_to_virt(addr)); } - } while (ret > 0); + } while (num == 8); } static void __cold __attribute__((nonnull)) @@ -751,14 +912,11 @@ _dpa_bp_free(struct dpa_bp *dpa_bp) if (!atomic_dec_and_test(&bp->refs)) return; - if (bp->free_buf_cb) - dpa_bp_drain(bp); + if (bp->drain_cb) + bp->drain_cb(bp); dpa_bp_array[bp->bpid] = 0; bman_free_pool(bp->pool); - - if (bp->dev) - platform_device_unregister(to_platform_device(bp->dev)); } void __cold __attribute__((nonnull)) @@ -1522,7 +1680,7 @@ void count_ern(struct dpa_percpu_priv_s *percpu_priv, int dpa_enable_tx_csum(struct dpa_priv_s *priv, struct sk_buff *skb, struct qm_fd *fd, char *parse_results) { - fm_prs_result_t *parse_result; + t_FmPrsResult *parse_result; struct iphdr *iph; struct ipv6hdr *ipv6h = NULL; int l4_proto; @@ -1539,7 +1697,7 @@ int dpa_enable_tx_csum(struct dpa_priv_s *priv, /* Fill in some fields of the Parse Results array, so the FMan * can find them as if they came from the FMan Parser. 
*/ - parse_result = (fm_prs_result_t *)parse_results; + parse_result = (t_FmPrsResult *)parse_results; /* If we're dealing with VLAN, get the real Ethernet type */ if (ethertype == ETH_P_8021Q) { diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.h index 6ee9825..8d11fe9 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.h +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.h @@ -84,6 +84,11 @@ int dpa_set_mac_address(struct net_device *net_dev, void *addr); void dpa_set_rx_mode(struct net_device *net_dev); void dpa_set_buffers_layout(struct mac_device *mac_dev, struct dpa_buffer_layout_s *layout); +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */ +dpa_bp_probe(struct platform_device *_of_dev, size_t *count); +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, + size_t count); +int dpa_bp_shared_port_seed(struct dpa_bp *bp); int __attribute__((nonnull)) dpa_bp_alloc(struct dpa_bp *dpa_bp); void __cold __attribute__((nonnull)) diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_macless.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_macless.c index 0e89aed..3dc845e 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_macless.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_macless.c @@ -45,7 +45,6 @@ #include #include "dpaa_eth.h" #include "dpaa_eth_common.h" -#include "dpaa_eth_base.h" #include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */ /* For MAC-based interfaces, we compute the tx needed headroom from the @@ -378,8 +377,7 @@ static int __init __cold dpa_macless_load(void) { int _errno; - printk(KERN_INFO KBUILD_MODNAME ": " - DPA_DESCRIPTION " (" VERSION ")\n"); + printk(KERN_INFO KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n"); /* Initialize dpaa_eth mirror values */ dpa_rx_extra_headroom = fm_get_rx_extra_headroom(); diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c index 3cf0b1d..428553a 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c @@ -132,7 +132,7 @@ uint32_t dpa_bp_default_buf_size_get(void) return default_buf_size; } -int dpa_bp_priv_seed(struct dpa_bp *dpa_bp) +void dpa_bp_priv_seed(struct dpa_bp *dpa_bp) { int i; dpa_bp->size = default_buf_size; @@ -144,7 +144,6 @@ int dpa_bp_priv_seed(struct dpa_bp *dpa_bp) for (j = 0; j < dpa_bp->target_count; j += 8) dpa_bp_add_8(dpa_bp, i); } - return 0; } void dpa_bp_priv_non_sg_seed(struct dpa_bp *dpa_bp) @@ -162,8 +161,9 @@ void dpa_bp_priv_non_sg_seed(struct dpa_bp *dpa_bp) /* Add buffers/(skbuffs) for Rx processing whenever bpool count falls below * REFILL_THRESHOLD. 
*/ -int dpaa_eth_refill_bpools(struct dpa_bp* dpa_bp) +int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv) { + const struct dpa_bp *dpa_bp = percpu_priv->dpa_bp; int *countptr = __this_cpu_ptr(dpa_bp->percpu_count); int count = *countptr; /* this function is called in softirq context; @@ -305,7 +305,7 @@ void __hot _dpa_rx(struct net_device *net_dev, dma_addr_t addr = qm_fd_addr(fd); u32 fd_status = fd->status; unsigned int skb_len; - fm_prs_result_t *parse_result; + t_FmPrsResult *parse_result; int use_gro = net_dev->features & NETIF_F_GRO; skbh = (struct sk_buff **)phys_to_virt(addr); @@ -360,7 +360,7 @@ void __hot _dpa_rx(struct net_device *net_dev, skb_len = skb->len; /* Validate the skb csum and figure out whether GRO is appropriate */ - parse_result = (fm_prs_result_t *)((u8 *)skbh + DPA_RX_PRIV_DATA_SIZE); + parse_result = (t_FmPrsResult *)((u8 *)skbh + DPA_RX_PRIV_DATA_SIZE); _dpa_process_parse_results(parse_result, fd, skb, &use_gro); #ifdef CONFIG_FSL_DPAA_TS @@ -619,7 +619,7 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) goto done; priv = netdev_priv(net_dev); - percpu_priv = __this_cpu_ptr(priv->percpu_priv); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); percpu_stats = &percpu_priv->stats; countptr = __this_cpu_ptr(priv->dpa_bp->percpu_count); diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_proxy.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_proxy.c index adb1125..22a941a 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_proxy.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_proxy.c @@ -39,7 +39,6 @@ #include #include "dpaa_eth.h" #include "dpaa_eth_common.h" -#include "dpaa_eth_base.h" #include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */ #define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver" @@ -159,8 +158,7 @@ static int __init __cold dpa_proxy_load(void) { int _errno; - printk(KERN_INFO KBUILD_MODNAME ": " - DPA_DESCRIPTION " (" VERSION ")\n"); + printk(KERN_INFO KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n"); /* Initialize dpaa_eth mirror values */ dpa_rx_extra_headroom = fm_get_rx_extra_headroom(); diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c index e6fd10d..0b5d269 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c @@ -98,7 +98,8 @@ int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp) } release_bufs: - /* Release the buffers. In case bman is busy, keep trying + /* + * Release the buffers. In case bman is busy, keep trying * until successful. bman_release() is guaranteed to succeed * in a reasonable amount of time */ @@ -112,7 +113,8 @@ bail_out: WARN_ONCE(1, "Memory allocation failure on Rx\n"); bm_buffer_set64(&bmb[i], 0); - /* Avoid releasing a completely null buffer; bman_release() requires + /* + * Avoid releasing a completely null buffer; bman_release() requires * at least one buffer. 
*/ if (likely(i)) @@ -130,7 +132,7 @@ void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu) *count_ptr += _dpa_bp_add_8_bufs(dpa_bp); } -int dpa_bp_priv_seed(struct dpa_bp *dpa_bp) +void dpa_bp_priv_seed(struct dpa_bp *dpa_bp) { int i; @@ -138,41 +140,42 @@ int dpa_bp_priv_seed(struct dpa_bp *dpa_bp) for_each_online_cpu(i) { int j; - /* Although we access another CPU's counters here + /* + * Although we access another CPU's counters here * we do it at boot time so it is safe */ for (j = 0; j < dpa_bp->config_count; j += 8) dpa_bp_add_8_bufs(dpa_bp, i); } - return 0; } -/* Add buffers/(pages) for Rx processing whenever bpool count falls below +/* + * Add buffers/(pages) for Rx processing whenever bpool count falls below * REFILL_THRESHOLD. */ -int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp) +int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv) { - int *countptr = __this_cpu_ptr(dpa_bp->percpu_count); + const struct dpa_bp *dpa_bp = percpu_priv->dpa_bp; + int *countptr = __this_cpu_ptr(percpu_priv->dpa_bp->percpu_count); int count = *countptr; int new_bufs; - if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) { - do { - new_bufs = _dpa_bp_add_8_bufs(dpa_bp); - if (unlikely(!new_bufs)) { - /* Avoid looping forever if we've temporarily - * run out of memory. We'll try again at the - * next NAPI cycle. - */ - break; - } - count += new_bufs; - } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT); - - *countptr = count; - if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT)) - return -ENOMEM; + /* Add pages to the buffer pool */ + while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT) { + new_bufs = _dpa_bp_add_8_bufs(dpa_bp); + if (unlikely(!new_bufs)) { + /* Avoid looping forever if we've temporarily + * run out of memory. We'll try again at the next + * NAPI cycle. + */ + break; + } + count += new_bufs; } + *countptr = count; + + if (unlikely(*countptr < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT)) + return -ENOMEM; return 0; } @@ -207,7 +210,8 @@ struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, nr_frags = skb_shinfo(skb)->nr_frags; if (fd->format == qm_fd_sg) { - /* The sgt buffer has been allocated with netdev_alloc_frag(), + /* + * The sgt buffer has been allocated with netdev_alloc_frag(), * it's from lowmem. */ sgt = phys_to_virt(addr + dpa_fd_offset(fd)); @@ -238,7 +242,8 @@ struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, sgt[i].length, dma_dir); } - /* TODO: dpa_bp_recycle_frag() ? + /* + * TODO: dpa_bp_recycle_frag() ? * We could put these in the pool, since we allocated them * and we know they're not used by anyone else */ @@ -272,11 +277,7 @@ struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, #ifndef CONFIG_FSL_DPAA_TS static bool dpa_skb_is_recyclable(struct sk_buff *skb) { - /* No recycling possible if skb buffer is kmalloc'ed */ - if (skb->head_frag == 0) - return false; - - /* or if it's an userspace buffer */ + /* No recycling possible if skb has an userspace buffer */ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) return false; @@ -285,6 +286,10 @@ static bool dpa_skb_is_recyclable(struct sk_buff *skb) skb->fclone != SKB_FCLONE_UNAVAILABLE) return false; + /* or if it's kmalloc'ed */ + if (skb->head_frag == 0) + return false; + return true; } @@ -320,7 +325,8 @@ static bool dpa_buf_is_recyclable(struct sk_buff *skb, #endif /* CONFIG_FSL_DPAA_TS */ -/* Build a linear skb around the received buffer. +/* + * Build a linear skb around the received buffer. 
* We are guaranteed there is enough room at the end of the data buffer to * accomodate the shared info area of the skb. */ @@ -331,7 +337,7 @@ static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv, ssize_t fd_off = dpa_fd_offset(fd); void *vaddr; struct dpa_bp *dpa_bp = priv->dpa_bp; - const fm_prs_result_t *parse_results; + const t_FmPrsResult *parse_results; struct sk_buff *skb = NULL; vaddr = phys_to_virt(addr); @@ -361,8 +367,7 @@ static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv, skb_put(skb, dpa_fd_length(fd)); /* Peek at the parse results for csum validation */ - parse_results = (const fm_prs_result_t *)(vaddr + - DPA_RX_PRIV_DATA_SIZE); + parse_results = (const t_FmPrsResult *)(vaddr + DPA_RX_PRIV_DATA_SIZE); _dpa_process_parse_results(parse_results, fd, skb, use_gro); #ifdef CONFIG_FSL_DPAA_TS @@ -374,7 +379,8 @@ static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv, } -/* Build an skb with the data of the first S/G entry in the linear portion and +/* + * Build an skb with the data of the first S/G entry in the linear portion and * the rest of the frame as skb fragments. * * The page fragment holding the S/G Table is recycled here. @@ -392,7 +398,7 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv, int frag_offset, frag_len; int page_offset; int i; - const fm_prs_result_t *parse_results; + const t_FmPrsResult *parse_results; struct sk_buff *skb = NULL; int *count_ptr; @@ -448,7 +454,7 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv, * Context in the buffer containing the sgt. * Inspect the parse results before anything else. */ - parse_results = (const fm_prs_result_t *)(vaddr + + parse_results = (const t_FmPrsResult *)(vaddr + DPA_RX_PRIV_DATA_SIZE); _dpa_process_parse_results(parse_results, fd, skb, use_gro); @@ -462,7 +468,8 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv, } else { dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size, DMA_BIDIRECTIONAL); - /* Not the first S/G entry; all data from buffer will + /* + * Not the first S/G entry; all data from buffer will * be added in an skb fragment; fragment index is offset * by one since first S/G entry was incorporated in the * linear part of the skb. 
@@ -570,11 +577,17 @@ void __hot _dpa_rx(struct net_device *net_dev, skb_len = skb->len; if (use_gro) { - if (unlikely(napi_gro_receive(&percpu_priv->napi, skb) == - GRO_DROP)) + gro_result_t gro_result; + + gro_result = napi_gro_receive(&percpu_priv->napi, skb); + if (unlikely(gro_result == GRO_DROP)) { + percpu_stats->rx_dropped++; goto packet_dropped; - } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) + } + } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { + percpu_stats->rx_dropped++; goto packet_dropped; + } percpu_stats->rx_packets++; percpu_stats->rx_bytes += skb_len; @@ -602,43 +615,44 @@ static int __hot skb_to_contig_fd(struct dpa_priv_s *priv, int *count_ptr = __this_cpu_ptr(dpa_bp->percpu_count); unsigned char *rec_buf_start; + /* We are guaranteed to have at least tx_headroom bytes */ + skbh = (struct sk_buff **)(skb->data - priv->tx_headroom); + fd->offset = priv->tx_headroom; + #ifndef CONFIG_FSL_DPAA_TS /* Check recycling conditions; only if timestamp support is not * enabled, otherwise we need the fd back on tx confirmation */ - /* We can recycle the buffer if: - * - the pool is not full - * - the buffer meets the skb recycling conditions - * - the buffer meets our own (size, offset, align) conditions - */ - if (likely((*count_ptr < dpa_bp->target_count) && - dpa_skb_is_recyclable(skb) && - dpa_buf_is_recyclable(skb, dpa_bp->size, - priv->tx_headroom, &rec_buf_start))) { - /* Buffer is recyclable; use the new start address */ - skbh = (struct sk_buff **)rec_buf_start; - - /* and set fd parameters and DMA mapping direction */ - fd->cmd |= FM_FD_CMD_FCO; - fd->bpid = dpa_bp->bpid; - BUG_ON(skb->data - rec_buf_start > DPA_MAX_FD_OFFSET); - fd->offset = (uint16_t)(skb->data - rec_buf_start); - dma_dir = DMA_BIDIRECTIONAL; - } else + /* We cannot recycle the buffer if the pool is already full */ + if (unlikely(*count_ptr >= dpa_bp->target_count)) + goto no_recycle; + + /* ... or if the skb doesn't meet the recycling criteria */ + if (unlikely(!dpa_skb_is_recyclable(skb))) + goto no_recycle; + + /* ... or if buffer recycling conditions are not met */ + if (unlikely(!dpa_buf_is_recyclable(skb, dpa_bp->size, + priv->tx_headroom, &rec_buf_start))) + goto no_recycle; + + /* Buffer is recyclable; use the new start address */ + skbh = (struct sk_buff **)rec_buf_start; + + /* and set fd parameters and DMA mapping direction */ + fd->cmd |= FM_FD_CMD_FCO; + fd->bpid = dpa_bp->bpid; + BUG_ON(skb->data - rec_buf_start > DPA_MAX_FD_OFFSET); + fd->offset = (uint16_t)(skb->data - rec_buf_start); + dma_dir = DMA_BIDIRECTIONAL; #endif - { - /* Not recyclable. - * We are guaranteed to have at least tx_headroom bytes - * available, so just use that for offset. - */ - skbh = (struct sk_buff **)(skb->data - priv->tx_headroom); - fd->offset = priv->tx_headroom; - } +no_recycle: *skbh = skb; - /* Enable L3/L4 hardware checksum computation. + /* + * Enable L3/L4 hardware checksum computation. * * We must do this before dma_map_single(DMA_TO_DEVICE), because we may * need to write into the skb. @@ -651,7 +665,7 @@ static int __hot skb_to_contig_fd(struct dpa_priv_s *priv, return err; } - /* Fill in the rest of the FD fields */ + /* Fill in the FD */ fd->format = qm_fd_contig; fd->length20 = skb->len; @@ -696,7 +710,8 @@ static int __hot skb_to_sg_fd(struct dpa_priv_s *priv, return -ENOMEM; } - /* Enable L3/L4 hardware checksum computation. + /* + * Enable L3/L4 hardware checksum computation. 
* * We must do this before dma_map_single(DMA_TO_DEVICE), because we may * need to write into the skb. @@ -796,7 +811,7 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) /* Non-migratable context, safe to use __this_cpu_ptr */ percpu_priv = __this_cpu_ptr(priv->percpu_priv); percpu_stats = &percpu_priv->stats; - countptr = __this_cpu_ptr(priv->dpa_bp->percpu_count); + countptr = __this_cpu_ptr(percpu_priv->dpa_bp->percpu_count); clear_fd(&fd); @@ -811,7 +826,8 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; #endif /* CONFIG_FSL_DPAA_TS */ - /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure + /* + * MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure * we don't feed FMan with more fragments than it supports. * Btw, we're using the first sgt entry to store the linear part of * the skb, so we're one extra frag short. @@ -822,7 +838,8 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) err = skb_to_sg_fd(priv, skb, &fd); percpu_priv->tx_frag_skbuffs++; } else { - /* Make sure we have enough headroom to accomodate private + /* + * Make sure we have enough headroom to accomodate private * data, parse results, etc. Normally this shouldn't happen if * we're here via the standard kernel stack. */ @@ -839,7 +856,8 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) skb = skb_new; } - /* We're going to store the skb backpointer at the beginning + /* + * We're going to store the skb backpointer at the beginning * of the data buffer, so we need a privately owned skb */ @@ -850,7 +868,8 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) skb = nskb; /* skb_copy() has now linearized the skbuff. */ } else if (unlikely(nonlinear)) { - /* We are here because the egress skb contains + /* + * We are here because the egress skb contains * more fragments than we support. In this case, * we have no choice but to linearize it ourselves. 
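The dpa_tx() changes above revolve around three preconditions that must hold before a frame descriptor can be built: enough headroom for the private words written in front of the data, a fragment count the S/G table can describe, and exclusive ownership of the buffer. The sketch below shows one way to enforce them with standard skb helpers; the ex_ name and parameters are illustrative, and the real driver interleaves these checks with its FD construction rather than calling a single helper.

#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *ex_tx_prepare(struct sk_buff *skb,
                                     unsigned int needed_headroom,
                                     unsigned int max_sg_entries)
{
        struct sk_buff *nskb;

        /* Make room in front of skb->data for the driver's private words */
        if (unlikely(skb_headroom(skb) < needed_headroom)) {
                nskb = skb_realloc_headroom(skb, needed_headroom);
                dev_kfree_skb(skb);
                if (unlikely(!nskb))
                        return NULL;
                skb = nskb;
        }

        /* One S/G entry holds the linear part, so at most
         * max_sg_entries - 1 page fragments fit; flatten otherwise */
        if (unlikely(skb_shinfo(skb)->nr_frags + 1 > max_sg_entries)) {
                if (unlikely(skb_linearize(skb))) {
                        dev_kfree_skb(skb);
                        return NULL;
                }
        }

        /* A backpointer is written into the buffer itself, so it must not
         * be shared; skb_unshare() copies and frees the original if
         * needed, or returns NULL on allocation failure */
        return skb_unshare(skb, GFP_ATOMIC);
}

A NULL return means the frame was consumed and dropped, mirroring the error handling in the hunk above where the original skb is freed once a private copy has been made.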
*/ diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c index c43473e..c0b9cf2 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c @@ -44,7 +44,6 @@ #include #include "dpaa_eth.h" #include "dpaa_eth_common.h" -#include "dpaa_eth_base.h" #include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */ /* forward declarations */ @@ -237,7 +236,7 @@ shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq, net_dev = ((struct dpa_fq *)fq)->net_dev; priv = netdev_priv(net_dev); - percpu_priv = __this_cpu_ptr(priv->percpu_priv); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); dpa_bp = dpa_bpid2pool(fd->bpid); BUG_ON(IS_ERR(dpa_bp)); @@ -371,7 +370,7 @@ shared_tx_error_dqrr(struct qman_portal *portal, dpa_bp = dpa_bpid2pool(fd->bpid); BUG_ON(IS_ERR(dpa_bp)); - percpu_priv = __this_cpu_ptr(priv->percpu_priv); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); if (netif_msg_hw(priv) && net_ratelimit()) netdev_warn(net_dev, "FD status = 0x%08x\n", @@ -404,7 +403,7 @@ shared_tx_default_dqrr(struct qman_portal *portal, dpa_bp = dpa_bpid2pool(fd->bpid); BUG_ON(IS_ERR(dpa_bp)); - percpu_priv = __this_cpu_ptr(priv->percpu_priv); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); if (unlikely(fd->status & FM_FD_STAT_ERRORS) != 0) { if (netif_msg_hw(priv) && net_ratelimit()) @@ -435,7 +434,7 @@ static void shared_ern(struct qman_portal *portal, net_dev = dpa_fq->net_dev; priv = netdev_priv(net_dev); - percpu_priv = __this_cpu_ptr(priv->percpu_priv); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); dpa_fd_release(net_dev, &msg->ern.fd); @@ -454,10 +453,10 @@ int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev) int queue_mapping; int err; void *dpa_bp_vaddr; - fm_prs_result_t parse_results; + t_FmPrsResult parse_results; priv = netdev_priv(net_dev); - percpu_priv = __this_cpu_ptr(priv->percpu_priv); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); memset(&fd, 0, sizeof(fd)); fd.format = qm_fd_contig; @@ -704,8 +703,7 @@ dpaa_eth_shared_probe(struct platform_device *_of_dev) dpaa_eth_sysfs_init(&net_dev->dev); - printk(KERN_INFO "fsl_dpa_shared: Probed shared interface %s\n", - net_dev->name); + printk(KERN_INFO "fsl_dpa_shared: Probed shared interface %s\n", net_dev->name); return 0; @@ -758,8 +756,7 @@ static int __init __cold dpa_shared_load(void) { int _errno; - printk(KERN_INFO KBUILD_MODNAME ": " - DPA_DESCRIPTION " (" VERSION ")\n"); + printk(KERN_INFO KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n"); /* Initialize dpaa_eth mirror values */ dpa_rx_extra_headroom = fm_get_rx_extra_headroom(); diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c index 08f9001..62095a7 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c @@ -283,7 +283,7 @@ static int dpa_tx_unit_test(struct net_device *net_dev) } /* Was it good? 
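The dpaa_eth_shared.c hunks above replace __this_cpu_ptr() with an explicit per_cpu_ptr(..., smp_processor_id()) lookup. Both reach the current CPU's slice of a per-CPU allocation, as the sketch below illustrates with a made-up structure (__this_cpu_ptr() is the spelling on this kernel generation); either form assumes the caller cannot migrate between CPUs, e.g. because it runs from NAPI/softirq context or between get_cpu() and put_cpu().

#include <linux/percpu.h>
#include <linux/smp.h>

struct ex_percpu_priv {
        unsigned long in_interrupt;     /* illustrative counter */
};

static void ex_count_event(struct ex_percpu_priv __percpu *percpu_priv)
{
        struct ex_percpu_priv *pp;

        /* Explicit form used in the shared-interface handlers above */
        pp = per_cpu_ptr(percpu_priv, smp_processor_id());
        pp->in_interrupt++;

        /* Shorthand used on the private Rx/Tx fast path; same slice here */
        pp = __this_cpu_ptr(percpu_priv);
        pp->in_interrupt++;
}

The per-CPU area itself would come from alloc_percpu(struct ex_percpu_priv) at probe time and be released with free_percpu() on removal.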
*/ - if (!tx_unit_test_passed) { + if (tx_unit_test_passed == false) { pr_err("Test failed:\n"); pr_err("size: %d pad: %d head: %p end: %p\n", size, headroom, tx_unit_skb_head, @@ -364,7 +364,7 @@ void dpa_unit_test_drain_default_pool(struct net_device *net_dev) default_pool->size, DMA_BIDIRECTIONAL); - dpa_bp->free_buf_cb(phys_to_virt(addr)); + _dpa_bp_free_buf(phys_to_virt(addr)); } } while (num == 8); diff --git a/drivers/net/ethernet/freescale/dpa/mac-api.c b/drivers/net/ethernet/freescale/dpa/mac-api.c index 328d55a..2ea6a72 100644 --- a/drivers/net/ethernet/freescale/dpa/mac-api.c +++ b/drivers/net/ethernet/freescale/dpa/mac-api.c @@ -65,12 +65,14 @@ const size_t mac_sizeof_priv[] = { [MEMAC] = sizeof(struct mac_priv_s) }; -static const enet_mode_t _100[] = { +static const e_EnetMode _100[] = +{ [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100, [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100 }; -static const enet_mode_t _1000[] = { +static const e_EnetMode _1000[] = +{ [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000, [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000, [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000, @@ -81,7 +83,7 @@ static const enet_mode_t _1000[] = { [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000 }; -static enet_mode_t __cold __attribute__((nonnull)) +static e_EnetMode __cold __attribute__((nonnull)) macdev2enetinterface(const struct mac_device *mac_dev) { switch (mac_dev->max_speed) { @@ -96,7 +98,7 @@ macdev2enetinterface(const struct mac_device *mac_dev) } } -static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception) +static void mac_exception(t_Handle _mac_dev, e_FmMacExceptions exception) { struct mac_device *mac_dev; @@ -106,8 +108,7 @@ static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception) /* don't flag RX FIFO after the first */ fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev), e_FM_MAC_EX_10G_RX_FIFO_OVFL, false); - dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", - exception); + printk(KERN_ERR "10G MAC got RX FIFO Error = %x\n", exception); } dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__, @@ -129,7 +130,7 @@ static int __cold init(struct mac_device *mac_dev) memcpy(¶m.addr, mac_dev->addr, min(sizeof(param.addr), sizeof(mac_dev->addr))); param.macId = mac_dev->cell_index; - param.h_Fm = (handle_t)mac_dev->fm; + param.h_Fm = (t_Handle)mac_dev->fm; param.mdioIrq = NO_IRQ; param.f_Exception = mac_exception; param.f_Event = mac_exception; @@ -160,7 +161,8 @@ static int __cold init(struct mac_device *mac_dev) mac_dev->half_duplex); if (unlikely(_errno < 0)) goto _return_fm_mac_free; - } else { + } + else { _errno = fm_mac_config_reset_on_init(priv->fm_mac, true); if (unlikely(_errno < 0)) goto _return_fm_mac_free; @@ -219,7 +221,7 @@ static int __cold memac_init(struct mac_device *mac_dev) param.enetMode = macdev2enetinterface(mac_dev); memcpy(¶m.addr, mac_dev->addr, sizeof(mac_dev->addr)); param.macId = mac_dev->cell_index; - param.h_Fm = (handle_t)mac_dev->fm; + param.h_Fm = (t_Handle)mac_dev->fm; param.mdioIrq = NO_IRQ; param.f_Exception = mac_exception; param.f_Event = mac_exception; @@ -282,12 +284,12 @@ static int __cold stop(struct mac_device *mac_dev) static int __cold set_multi(struct net_device *net_dev) { - struct dpa_priv_s *priv; - struct mac_device *mac_dev; - struct mac_priv_s *mac_priv; + struct dpa_priv_s *priv; + struct mac_device *mac_dev; + struct mac_priv_s *mac_priv; struct mac_address *old_addr, *tmp; struct netdev_hw_addr *ha; - int _errno; + int _errno; priv 
= netdev_priv(net_dev); mac_dev = priv->mac_dev; @@ -444,8 +446,9 @@ static int __cold uninit(struct fm_mac_dev *fm_mac_dev) _errno = fm_mac_disable(fm_mac_dev); __errno = fm_mac_free(fm_mac_dev); - if (unlikely(__errno < 0)) + if (unlikely(__errno < 0)) { _errno = __errno; + } return _errno; } diff --git a/drivers/net/ethernet/freescale/dpa/mac.c b/drivers/net/ethernet/freescale/dpa/mac.c index 24d351c..73f4532 100644 --- a/drivers/net/ethernet/freescale/dpa/mac.c +++ b/drivers/net/ethernet/freescale/dpa/mac.c @@ -52,7 +52,8 @@ | SUPPORTED_Autoneg \ | SUPPORTED_MII) -static const char phy_str[][11] = { +static const char phy_str[][11] = +{ [PHY_INTERFACE_MODE_MII] = "mii", [PHY_INTERFACE_MODE_GMII] = "gmii", [PHY_INTERFACE_MODE_SGMII] = "sgmii", @@ -77,7 +78,8 @@ static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str) return PHY_INTERFACE_MODE_MII; } -static const uint16_t phy2speed[] = { +static const uint16_t phy2speed[] = +{ [PHY_INTERFACE_MODE_MII] = SPEED_100, [PHY_INTERFACE_MODE_GMII] = SPEED_1000, [PHY_INTERFACE_MODE_SGMII] = SPEED_1000, @@ -92,8 +94,7 @@ static const uint16_t phy2speed[] = { }; static struct mac_device * __cold -alloc_macdev(struct device *dev, size_t sizeof_priv, - void (*setup)(struct mac_device *mac_dev)) +alloc_macdev(struct device *dev, size_t sizeof_priv, void (*setup)(struct mac_device *mac_dev)) { struct mac_device *mac_dev; @@ -142,18 +143,16 @@ static int __cold mac_probe(struct platform_device *_of_dev) const char *char_prop; const phandle *phandle_prop; const uint32_t *uint32_prop; - const struct of_device_id *match; + const struct of_device_id *match; dev = &_of_dev->dev; mac_node = dev->of_node; - match = of_match_device(mac_match, dev); - if (!match) - return -EINVAL; + match = of_match_device(mac_match, dev); + if (!match) + return -EINVAL; - for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i; - i++) - ; + for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i; i++); BUG_ON(i >= ARRAY_SIZE(mac_match) - 1); mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]); @@ -218,15 +217,15 @@ static int __cold mac_probe(struct platform_device *_of_dev) } mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start, - mac_dev->res->end + 1 - - mac_dev->res->start); + mac_dev->res->end + 1 - mac_dev->res->start); if (unlikely(mac_dev->vaddr == NULL)) { dev_err(dev, "devm_ioremap() failed\n"); _errno = -EIO; goto _return_dev_set_drvdata; } - /* XXX: Warning, future versions of Linux will most likely not even + /* + * XXX: Warning, future versions of Linux will most likely not even * call the driver code to allow us to override the TBIPA value, * we'll need to address this when we move to newer kernel rev */ @@ -417,7 +416,8 @@ static int __init __cold mac_load(void) _errno = platform_driver_register(&mac_driver); if (unlikely(_errno < 0)) { - pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n", + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): " \ + "platform_driver_register() = %d\n", KBUILD_BASENAME".c", __LINE__, __func__, _errno); goto _return; } diff --git a/drivers/net/ethernet/freescale/dpa/offline_port.c b/drivers/net/ethernet/freescale/dpa/offline_port.c index 95dd28a..c002dc9 100644 --- a/drivers/net/ethernet/freescale/dpa/offline_port.c +++ b/drivers/net/ethernet/freescale/dpa/offline_port.c @@ -134,7 +134,7 @@ oh_port_probe(struct platform_device *_of_dev) uint32_t queues_count; uint32_t crt_fqid_base; uint32_t crt_fq_count; - bool frag_enabled = false; + bool frag_enabled = FALSE; 
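mac_probe() above matches the device against mac_match and then walks the table to recover the index of the matching entry, which in turn selects the per-type private size and setup routine. The sketch below shows the same idea with a hypothetical two-entry table, using pointer arithmetic instead of the loop to recover the index; the names, compatible strings and sizes are made up for illustration.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id ex_mac_match[] = {
        { .compatible = "example,mac-type-a" },  /* hypothetical */
        { .compatible = "example,mac-type-b" },  /* hypothetical */
        {}                                       /* sentinel */
};

static const size_t ex_sizeof_priv[] = { 64, 128 };     /* per-type sizes */

static int ex_mac_probe(struct platform_device *_of_dev)
{
        const struct of_device_id *match;
        size_t i;

        match = of_match_device(ex_mac_match, &_of_dev->dev);
        if (!match)
                return -EINVAL;

        /* match points into ex_mac_match[], so the entry index is just
         * its distance from the start of the table */
        i = match - ex_mac_match;
        if (WARN_ON(i >= ARRAY_SIZE(ex_mac_match) - 1))
                return -EINVAL;

        dev_info(&_of_dev->dev, "matched entry %zu, priv size %zu\n",
                 i, ex_sizeof_priv[i]);
        return 0;
}

The bounds check against the sentinel entry mirrors the BUG_ON in mac_probe(); it guards the lookup tables that are sized to the match table minus its terminator.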
struct fm_port_params oh_port_tx_params; struct fm_port_pcd_param oh_port_pcd_params; struct dpa_buffer_layout_s buf_layout; @@ -369,7 +369,7 @@ oh_port_probe(struct platform_device *_of_dev) buf_layout.manip_extra_space != FRAG_MANIP_SPACE) goto init_port; - frag_enabled = true; + frag_enabled = TRUE; dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d", *port_id);