diff options
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- | drivers/net/ethernet/freescale/dpa/dpa-ethtool.c | 4
-rw-r--r-- | drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c | 13
-rw-r--r-- | drivers/net/ethernet/freescale/dpa/dpaa_eth.c | 10
-rw-r--r-- | drivers/net/ethernet/freescale/dpa/dpaa_eth.h | 13
-rw-r--r-- | drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c | 9
-rw-r--r-- | drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c | 14
-rw-r--r-- | drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c | 115
-rw-r--r-- | drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c | 2
-rw-r--r-- | drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c | 2
9 files changed, 91 insertions, 91 deletions
diff --git a/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c b/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c index 37b8cae..5eece2f 100644 --- a/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c +++ b/drivers/net/ethernet/freescale/dpa/dpa-ethtool.c @@ -50,8 +50,8 @@ static int __cold dpa_get_settings(struct net_device *net_dev, return -ENODEV; } if (unlikely(priv->mac_dev->phy_dev == NULL)) { - netdev_err(net_dev, "phy device not initialized\n"); - return -ENODEV; + netdev_dbg(net_dev, "phy device not initialized\n"); + return 0; } _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd); diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c b/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c index 2a58724..f424f4a 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_debugfs.c @@ -34,7 +34,6 @@ #include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */ #include <linux/debugfs.h> #include <asm/debug.h> -#include <linux/smp.h> /* get_hard_smp_processor_id() if !CONFIG_SMP */ #include "dpaa_debugfs.h" #include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */ @@ -91,8 +90,8 @@ static int dpa_debugfs_show(struct seq_file *file, void *offset) total.stats.rx_errors += percpu_priv->stats.rx_errors; count_total += dpa_bp_count; - seq_printf(file, " %hu/%hu %8llu %8llu %8llu %8llu ", - get_hard_smp_processor_id(i), i, + seq_printf(file, " %hu %8llu %8llu %8llu %8llu ", + i, percpu_priv->in_interrupt, percpu_priv->stats.rx_packets, percpu_priv->stats.tx_packets, @@ -144,8 +143,8 @@ static int dpa_debugfs_show(struct seq_file *file, void *offset) total.rx_errors.phe += percpu_priv->rx_errors.phe; total.rx_errors.cse += percpu_priv->rx_errors.cse; - seq_printf(file, " %hu/%hu %8llu %8llu ", - get_hard_smp_processor_id(i), i, + seq_printf(file, " %hu %8llu %8llu ", + i, percpu_priv->rx_errors.dme, percpu_priv->rx_errors.fpe); seq_printf(file, "%8llu %8llu %8llu\n", @@ -176,8 +175,8 @@ static int 
dpa_debugfs_show(struct seq_file *file, void *offset) total.ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired; total.ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero; - seq_printf(file, " %hu/%hu %8llu %8llu %8llu %8llu ", - get_hard_smp_processor_id(i), i, + seq_printf(file, " %hu %8llu %8llu %8llu %8llu ", + i, percpu_priv->ern_cnt.cg_tdrop, percpu_priv->ern_cnt.wred, percpu_priv->ern_cnt.err_cond, diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c index bcb51fb..77a3a35 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c @@ -52,7 +52,6 @@ #include <linux/highmem.h> #include <linux/percpu.h> #include <linux/dma-mapping.h> -#include <linux/smp.h> /* get_hard_smp_processor_id() */ #include <linux/fsl_bman.h> #include "fsl_fman.h" @@ -344,7 +343,7 @@ priv_rx_error_dqrr(struct qman_portal *portal, if (dpaa_eth_napi_schedule(percpu_priv)) return qman_cb_dqrr_stop; - if (unlikely(dpaa_eth_refill_bpools(percpu_priv))) + if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp))) /* Unable to refill the buffer pool due to insufficient * system memory. Just release the frame back into the pool, * otherwise we'll soon end up with an empty buffer pool. @@ -380,7 +379,7 @@ priv_rx_default_dqrr(struct qman_portal *portal, /* Vale of plenty: make sure we didn't run out of buffers */ - if (unlikely(dpaa_eth_refill_bpools(percpu_priv))) + if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp))) /* Unable to refill the buffer pool due to insufficient * system memory. Just release the frame back into the pool, * otherwise we'll soon end up with an empty buffer pool. 
@@ -634,10 +633,12 @@ dpa_priv_bp_probe(struct device *dev) dpa_bp->percpu_count = alloc_percpu(*dpa_bp->percpu_count); dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; - dpa_bp->drain_cb = dpa_bp_drain; #ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT dpa_bp->seed_cb = dpa_bp_priv_seed; + dpa_bp->free_buf_cb = _dpa_bp_free_pf; +#else + dpa_bp->free_buf_cb = _dpa_bp_free_skb; #endif /* CONFIG_FSL_DPAA_ETH_SG_SUPPORT */ return dpa_bp; @@ -827,7 +828,6 @@ dpaa_eth_priv_probe(struct platform_device *_of_dev) for_each_online_cpu(i) { percpu_priv = per_cpu_ptr(priv->percpu_priv, i); memset(percpu_priv, 0, sizeof(*percpu_priv)); - percpu_priv->dpa_bp = priv->dpa_bp; } err = dpa_private_netdev_init(dpa_node, net_dev); diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h index 611e5d8..f33ff0a 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.h @@ -348,8 +348,10 @@ struct dpa_bp { atomic_t refs; /* some bpools need to be seeded before use by this cb */ int (*seed_cb)(struct dpa_bp *); - /* some bpools need to be emptied before freeing by this cb */ - void (*drain_cb)(struct dpa_bp *); + /* some bpools need to be emptied before freeing; this cb is used + * for freeing of individual buffers taken from the pool + */ + void (*free_buf_cb)(void *addr); }; struct dpa_rx_errors { @@ -374,7 +376,6 @@ struct dpa_ern_cnt { struct dpa_percpu_priv_s { struct net_device *net_dev; - struct dpa_bp *dpa_bp; struct napi_struct napi; u64 in_interrupt; u64 tx_returned; @@ -457,7 +458,7 @@ struct fm_port_fqs { /* functions with different implementation for SG and non-SG: */ int dpa_bp_priv_seed(struct dpa_bp *dpa_bp); -int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv); +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp); void __hot _dpa_rx(struct net_device *net_dev, const struct dpa_priv_s *priv, struct dpa_percpu_priv_s *percpu_priv, @@ -681,7 +682,7 @@ void 
dpa_bp_default_buf_size_update(uint32_t size); uint32_t dpa_bp_default_buf_size_get(void); void dpa_bp_priv_non_sg_seed(struct dpa_bp *dpa_bp); -static inline void _dpa_bp_free_buf(void *addr) +static inline void _dpa_bp_free_skb(void *addr) { struct sk_buff **skbh = addr; struct sk_buff *skb; @@ -690,7 +691,7 @@ static inline void _dpa_bp_free_buf(void *addr) dev_kfree_skb_any(skb); } #else -static inline void _dpa_bp_free_buf(void *addr) +static inline void _dpa_bp_free_pf(void *addr) { put_page(virt_to_head_page(addr)); } diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c index 6906c07..fc7433e 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_common.c @@ -738,7 +738,7 @@ void dpa_bp_drain(struct dpa_bp *bp) dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL); - _dpa_bp_free_buf(phys_to_virt(addr)); + bp->free_buf_cb(phys_to_virt(addr)); } } while (ret > 0); } @@ -751,11 +751,14 @@ _dpa_bp_free(struct dpa_bp *dpa_bp) if (!atomic_dec_and_test(&bp->refs)) return; - if (bp->drain_cb) - bp->drain_cb(bp); + if (bp->free_buf_cb) + dpa_bp_drain(bp); dpa_bp_array[bp->bpid] = 0; bman_free_pool(bp->pool); + + if (bp->dev) + platform_device_unregister(to_platform_device(bp->dev)); } void __cold __attribute__((nonnull)) diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c index 2cfaaa8..b4d26ce 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_non_sg.c @@ -162,9 +162,8 @@ void dpa_bp_priv_non_sg_seed(struct dpa_bp *dpa_bp) /* Add buffers/(skbuffs) for Rx processing whenever bpool count falls below * REFILL_THRESHOLD. 
*/ -int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv) +int dpaa_eth_refill_bpools(struct dpa_bp* dpa_bp) { - const struct dpa_bp *dpa_bp = percpu_priv->dpa_bp; int *countptr = __this_cpu_ptr(dpa_bp->percpu_count); int count = *countptr; /* this function is called in softirq context; @@ -373,14 +372,13 @@ void __hot _dpa_rx(struct net_device *net_dev, gro_result_t gro_result; gro_result = napi_gro_receive(&percpu_priv->napi, skb); - if (unlikely(gro_result == GRO_DROP)) { - percpu_priv->stats.rx_dropped++; + /* If frame is dropped by the stack, rx_dropped counter is + * incremented automatically, so no need for us to update it + */ + if (unlikely(gro_result == GRO_DROP)) goto packet_dropped; - } - } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { - percpu_priv->stats.rx_dropped++; + } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) goto packet_dropped; - } percpu_priv->stats.rx_packets++; percpu_priv->stats.rx_bytes += skb_len; diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c index cc76dda..eb300b4 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c @@ -150,29 +150,29 @@ int dpa_bp_priv_seed(struct dpa_bp *dpa_bp) /* Add buffers/(pages) for Rx processing whenever bpool count falls below * REFILL_THRESHOLD. */ -int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv) +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp) { - const struct dpa_bp *dpa_bp = percpu_priv->dpa_bp; - int *countptr = __this_cpu_ptr(percpu_priv->dpa_bp->percpu_count); + int *countptr = __this_cpu_ptr(dpa_bp->percpu_count); int count = *countptr; int new_bufs; - /* Add pages to the buffer pool */ - while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT) { - new_bufs = _dpa_bp_add_8_bufs(dpa_bp); - if (unlikely(!new_bufs)) { - /* Avoid looping forever if we've temporarily - * run out of memory. 
We'll try again at the next - * NAPI cycle. - */ - break; - } - count += new_bufs; - } - *countptr = count; + if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) { + do { + new_bufs = _dpa_bp_add_8_bufs(dpa_bp); + if (unlikely(!new_bufs)) { + /* Avoid looping forever if we've temporarily + * run out of memory. We'll try again at the + * next NAPI cycle. + */ + break; + } + count += new_bufs; + } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT); - if (unlikely(*countptr < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT)) - return -ENOMEM; + *countptr = count; + if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT)) + return -ENOMEM; + } return 0; } @@ -272,7 +272,11 @@ struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv, #ifndef CONFIG_FSL_DPAA_TS static bool dpa_skb_is_recyclable(struct sk_buff *skb) { - /* No recycling possible if skb has an userspace buffer */ + /* No recycling possible if skb buffer is kmalloc'ed */ + if (skb->head_frag == 0) + return false; + + /* or if it's an userspace buffer */ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) return false; @@ -281,10 +285,6 @@ static bool dpa_skb_is_recyclable(struct sk_buff *skb) skb->fclone != SKB_FCLONE_UNAVAILABLE) return false; - /* or if it's kmalloc'ed */ - if (skb->head_frag == 0) - return false; - return true; } @@ -573,14 +573,13 @@ void __hot _dpa_rx(struct net_device *net_dev, gro_result_t gro_result; gro_result = napi_gro_receive(&percpu_priv->napi, skb); - if (unlikely(gro_result == GRO_DROP)) { - percpu_stats->rx_dropped++; + /* If frame is dropped by the stack, rx_dropped counter is + * incremented automatically, so no need for us to update it + */ + if (unlikely(gro_result == GRO_DROP)) goto packet_dropped; - } - } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { - percpu_stats->rx_dropped++; + } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) goto packet_dropped; - } percpu_stats->rx_packets++; percpu_stats->rx_bytes += skb_len; @@ -608,40 +607,40 @@ static 
int __hot skb_to_contig_fd(struct dpa_priv_s *priv, int *count_ptr = __this_cpu_ptr(dpa_bp->percpu_count); unsigned char *rec_buf_start; - /* We are guaranteed to have at least tx_headroom bytes */ - skbh = (struct sk_buff **)(skb->data - priv->tx_headroom); - fd->offset = priv->tx_headroom; - #ifndef CONFIG_FSL_DPAA_TS /* Check recycling conditions; only if timestamp support is not * enabled, otherwise we need the fd back on tx confirmation */ - /* We cannot recycle the buffer if the pool is already full */ - if (unlikely(*count_ptr >= dpa_bp->target_count)) - goto no_recycle; - - /* ... or if the skb doesn't meet the recycling criteria */ - if (unlikely(!dpa_skb_is_recyclable(skb))) - goto no_recycle; - - /* ... or if buffer recycling conditions are not met */ - if (unlikely(!dpa_buf_is_recyclable(skb, dpa_bp->size, - priv->tx_headroom, &rec_buf_start))) - goto no_recycle; - - /* Buffer is recyclable; use the new start address */ - skbh = (struct sk_buff **)rec_buf_start; - - /* and set fd parameters and DMA mapping direction */ - fd->cmd |= FM_FD_CMD_FCO; - fd->bpid = dpa_bp->bpid; - BUG_ON(skb->data - rec_buf_start > DPA_MAX_FD_OFFSET); - fd->offset = (uint16_t)(skb->data - rec_buf_start); - dma_dir = DMA_BIDIRECTIONAL; + /* We can recycle the buffer if: + * - the pool is not full + * - the buffer meets the skb recycling conditions + * - the buffer meets our own (size, offset, align) conditions + */ + if (likely((*count_ptr < dpa_bp->target_count) && + dpa_skb_is_recyclable(skb) && + dpa_buf_is_recyclable(skb, dpa_bp->size, + priv->tx_headroom, &rec_buf_start))) { + /* Buffer is recyclable; use the new start address */ + skbh = (struct sk_buff **)rec_buf_start; + + /* and set fd parameters and DMA mapping direction */ + fd->cmd |= FM_FD_CMD_FCO; + fd->bpid = dpa_bp->bpid; + BUG_ON(skb->data - rec_buf_start > DPA_MAX_FD_OFFSET); + fd->offset = (uint16_t)(skb->data - rec_buf_start); + dma_dir = DMA_BIDIRECTIONAL; + } else #endif + { + /* Not recyclable. 
+ * We are guaranteed to have at least tx_headroom bytes + * available, so just use that for offset. + */ + skbh = (struct sk_buff **)(skb->data - priv->tx_headroom); + fd->offset = priv->tx_headroom; + } -no_recycle: *skbh = skb; /* Enable L3/L4 hardware checksum computation. @@ -657,7 +656,7 @@ no_recycle: return err; } - /* Fill in the FD */ + /* Fill in the rest of the FD fields */ fd->format = qm_fd_contig; fd->length20 = skb->len; @@ -802,7 +801,7 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) /* Non-migratable context, safe to use __this_cpu_ptr */ percpu_priv = __this_cpu_ptr(priv->percpu_priv); percpu_stats = &percpu_priv->stats; - countptr = __this_cpu_ptr(percpu_priv->dpa_bp->percpu_count); + countptr = __this_cpu_ptr(priv->dpa_bp->percpu_count); clear_fd(&fd); diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c index c43473e..31e4f0c 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_shared.c @@ -339,7 +339,7 @@ skb_copied: } if (unlikely(netif_rx(skb) != NET_RX_SUCCESS)) - percpu_priv->stats.rx_dropped++; + goto out; else { percpu_priv->stats.rx_packets++; percpu_priv->stats.rx_bytes += dpa_fd_length(fd); diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c index 4856e15..08f9001 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_unit_test.c @@ -364,7 +364,7 @@ void dpa_unit_test_drain_default_pool(struct net_device *net_dev) default_pool->size, DMA_BIDIRECTIONAL); - _dpa_bp_free_buf(phys_to_virt(addr)); + dpa_bp->free_buf_cb(phys_to_virt(addr)); } } while (num == 8); |