author	Madalin Bucur <madalin.bucur@freescale.com>	2013-07-15 14:29:59 (GMT)
committer	Fleming Andrew-AFLEMING <AFLEMING@freescale.com>	2013-07-15 21:06:06 (GMT)
commit	8a665e8ac3b040a1f2763627068c4e0229250b9c (patch)
tree	742f69703e479123e440e91956ecdb404c23b8a3
parent	66425bae51b45f3cf4a41605b8a3cfa8c9fc1de9 (diff)
download	linux-fsl-qoriq-8a665e8ac3b040a1f2763627068c4e0229250b9c.tar.xz
dpaa_eth: undo merge on master branch
One of the merged patches was no longer needed and the rest break a large
set of patches already prepared for review. The required patches are
available on top of the large patch set.

Revert "dpaa_eth: Handle buffer pool depletion"
This reverts commit 1fd9018f9c6a10932e86285c256bfdf50da8c0c1.

Revert "dpaa_eth: Fix pool counters on error path"
This reverts commit e8a66453a9b71d4d5290bd4962cc6ec2fda1b848.

Revert "dpaa_eth: Switch compile-time default optimization"
This reverts commit 47a79e454a7f54b96b3133e139e707944375edca.

Revert "fmd,dpaa_eth: Selectively reenable FMan allocation algorithm"
This reverts commit 3cc98fd0b85bdb61f83672fdc4609b68f0c7a75f.

Revert "dpaa_eth: Fix endless loop in case of memory depletion"
This reverts commit b676b243d7ccdd4879ea8c5fb74a6d3e2a7f2e5e.

Revert "dpaa_eth: Set a fixed upper limit for recycleable buffer size"
This reverts commit 252b622b8058a309d2429a33c2e0de089be896ac.

Revert "dpaa_eth: Fix computation of cache padding"
This reverts commit a3b310dbe6d25132698a88618e7ef52bc6749177.

Revert "dpaa_eth: Fix Tx/TxConfirm initialization bug"
This reverts commit 429d9f28709eb7c916d4c6da4cccfae33906c510.

Change-Id: I22765f8406f43bb2c7d84fbf5fdb67c74697a008
Reviewed-on: http://git.am.freescale.net:8181/3305
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
-rw-r--r--	drivers/net/ethernet/freescale/dpa/Kconfig	3
-rw-r--r--	drivers/net/ethernet/freescale/dpa/dpaa_eth.c	80
-rw-r--r--	drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c	60
-rw-r--r--	drivers/net/ethernet/freescale/fman/Kconfig	2
4 files changed, 41 insertions(+), 104 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpa/Kconfig b/drivers/net/ethernet/freescale/dpa/Kconfig
index aa0d1e6..801e8b2 100644
--- a/drivers/net/ethernet/freescale/dpa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpa/Kconfig
@@ -28,7 +28,7 @@ config FSL_DPAA_ETH_SG_SUPPORT
choice FSL_DPAA_ETH_OPTIMIZE
prompt "Optimization choices for the DPAA Ethernet driver"
depends on FSL_DPAA_ETH
- default FSL_DPAA_ETH_OPTIMIZE_FOR_IPFWD
+ default FSL_DPAA_ETH_OPTIMIZE_FOR_TERM
---help---
Compile-time switch between driver optimizations for forwarding use-cases and
termination scenarios.
@@ -36,7 +36,6 @@ choice FSL_DPAA_ETH_OPTIMIZE
config FSL_DPAA_ETH_OPTIMIZE_FOR_IPFWD
bool "Optimize for forwarding"
select FSL_DPAA_TX_RECYCLE if FMAN_T4240
- select FMAN_RESOURCE_ALLOCATION_ALGORITHM if (FMAN_P3040_P4080_P5020 || FMAN_P1023)
---help---
Optimize the DPAA-Ethernet driver for IP/IPSec forwarding use-cases.
	  This option disables SG support in the DPAA Ethernet driver.
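
The choice above selects the driver's compile-time profile; dependent code is
then gated on the resulting CONFIG_ symbol. A minimal sketch of that pattern,
using an illustrative macro rather than anything taken from the driver:

	/* Sketch: compile-time branching on the Kconfig optimization choice.
	 * CONFIG_FSL_DPAA_ETH_OPTIMIZE_FOR_TERM is the real symbol set by the
	 * default above; EXAMPLE_RX_BATCH and its values are made up here.
	 */
	#ifdef CONFIG_FSL_DPAA_ETH_OPTIMIZE_FOR_TERM
	#define EXAMPLE_RX_BATCH	64	/* termination: batch more work per poll */
	#else	/* CONFIG_FSL_DPAA_ETH_OPTIMIZE_FOR_IPFWD */
	#define EXAMPLE_RX_BATCH	16	/* forwarding: keep per-poll latency low */
	#endif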
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
index 55edca4..2d47c8a 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth.c
@@ -130,17 +130,13 @@
#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
/*
- * Maximum size of a buffer for which recycling is allowed.
- * We need an upper limit such that forwarded skbs that get reallocated on Tx
- * aren't allowed to grow unboundedly. On the other hand, we need to make sure
- * that skbs allocated by us will not fail to be recycled due to their size.
- *
- * For a requested size, the kernel allocator provides the next power of two
- * sized block, which the stack will use as is, regardless of the actual size
- * it required; since we must acommodate at most 9.6K buffers (L2 maximum
- * supported frame size), set the recycling upper limit to 16K.
+ * Extra size of a buffer (beyond the size of the buffers that are seeded into
+ * the global pool) for which recycling is allowed.
+ * The value is arbitrary, but tries to reach a balance such that originating
+ * frames may get recycled, while forwarded skbs that get reallocated on Tx
+ * aren't allowed to grow unboundedly.
*/
-#define DPA_RECYCLE_MAX_SIZE 16384
+#define DPA_RECYCLE_EXTRA_SIZE 1024
/* For MAC-based interfaces, we compute the tx needed headroom from the
* associated Tx port's buffer layout settings.
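
The new ceiling is applied in skb_to_contig_fd() further down; pulled out of
context, the eligibility test amounts to the following sketch (simplified
signature, not the driver's exact code):

	#include <linux/skbuff.h>

	#define DPA_RECYCLE_EXTRA_SIZE	1024	/* as defined above */

	/* Sketch: recycling is allowed only while the skb's allocated span
	 * stays within DPA_RECYCLE_EXTRA_SIZE of the seeded buffer size, so
	 * forwarded skbs that were reallocated on Tx cannot grow the pool's
	 * buffers without bound.
	 */
	static bool example_recycle_fits(const struct sk_buff *skb, size_t bp_size)
	{
		return skb_end_pointer(skb) - skb->head <=
			bp_size + DPA_RECYCLE_EXTRA_SIZE;
	}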
@@ -390,14 +386,12 @@ static void dpaa_eth_seed_pool(struct dpa_bp *bp)
* Add buffers/pages/skbuffs for Rx processing whenever bpool count falls below
* REFILL_THRESHOLD.
*/
-static int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv)
+static void dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv)
{
int *countptr = percpu_priv->dpa_bp_count;
int count = *countptr;
const struct dpa_bp *dpa_bp = percpu_priv->dpa_bp;
- int new_pages __maybe_unused;
#ifndef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
-
/* this function is called in softirq context;
* no need to protect smp_processor_id() on RT kernel
*/
@@ -411,24 +405,10 @@ static int dpaa_eth_refill_bpools(struct dpa_percpu_priv_s *percpu_priv)
}
#else
/* Add pages to the buffer pool */
- while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT) {
- new_pages = _dpa_bp_add_8_pages(dpa_bp);
- if (unlikely(!new_pages)) {
- /* Avoid looping forever if we've temporarily
- * run out of memory. We'll try again at the next
- * NAPI cycle.
- */
- break;
- }
- count += new_pages;
- }
+ while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT)
+ count += _dpa_bp_add_8_pages(dpa_bp);
*countptr = count;
-
- if (*countptr < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT)
- return -ENOMEM;
#endif
-
- return 0;
}
static int dpa_make_shared_port_pool(struct dpa_bp *bp)
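
After the revert, the refill path is again unconditional: it loops until the
per-CPU count reaches CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT and has no error path.
A standalone sketch of that batch-refill pattern, with add8() standing in for
_dpa_bp_add_8_pages():

	/* Sketch of the restored refill loop: seed pages in batches of 8
	 * until the target count is reached. add8() is assumed to return the
	 * number of pages actually placed into the pool. If the allocator
	 * keeps returning 0 this spins; that is precisely the behavior the
	 * reverted "Fix endless loop in case of memory depletion" patch had
	 * removed, and which this revert deliberately restores.
	 */
	static void example_refill(int *countptr, int target, int (*add8)(void))
	{
		int count = *countptr;

		while (count < target)
			count += add8();

		*countptr = count;
	}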
@@ -1291,9 +1271,10 @@ static int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int dpa_process_one(struct dpa_percpu_priv_s *percpu_priv,
struct sk_buff *skb, struct dpa_bp *bp, const struct qm_fd *fd)
{
- dma_addr_t fd_addr = qm_fd_addr(fd);
- unsigned long skb_addr = virt_to_phys(skb->head);
- u32 pad = fd_addr - skb_addr;
+ dma_addr_t addr = qm_fd_addr(fd);
+ u32 addrlo = lower_32_bits(addr);
+ u32 skblo = lower_32_bits((unsigned long)skb->head);
+ u32 pad = (addrlo - skblo) & (PAGE_SIZE - 1);
unsigned int data_start;
(*percpu_priv->dpa_bp_count)--;
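
The restored pad computation works on the low 32 bits of both addresses and
masks the difference to the page size, which keeps the subtraction
well-defined even where dma_addr_t is wider than unsigned long. As a
standalone sketch:

	#include <linux/kernel.h>	/* lower_32_bits() */
	#include <asm/page.h>		/* PAGE_SIZE */

	/* Sketch: recover the frame's offset from the start of the skb data
	 * buffer. Only the low 32 bits are compared, and the mask assumes
	 * the frame starts in the same page as the buffer holding it.
	 */
	static inline u32 example_fd_pad(dma_addr_t fd_addr, const void *skb_head)
	{
		u32 addrlo = lower_32_bits(fd_addr);
		u32 skblo = lower_32_bits((unsigned long)skb_head);

		return (addrlo - skblo) & (PAGE_SIZE - 1);
	}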
@@ -1964,7 +1945,8 @@ static int skb_to_contig_fd(struct dpa_priv_s *priv,
* - there's enough room in the buffer pool
*/
if (likely(skb_is_recycleable(skb, dpa_bp->size) &&
- (skb_end_pointer(skb) - skb->head <= DPA_RECYCLE_MAX_SIZE) &&
+ (skb_end_pointer(skb) - skb->head <=
+ dpa_bp->size + DPA_RECYCLE_EXTRA_SIZE) &&
(*percpu_priv->dpa_bp_count < dpa_bp->target_count))) {
/* Compute the minimum necessary fd offset */
offset = dpa_bp->size - skb->len - skb_tailroom(skb);
@@ -2199,7 +2181,6 @@ ingress_rx_error_dqrr(struct qman_portal *portal,
struct net_device *net_dev;
struct dpa_priv_s *priv;
struct dpa_percpu_priv_s *percpu_priv;
- int err;
net_dev = ((struct dpa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
@@ -2211,16 +2192,7 @@ ingress_rx_error_dqrr(struct qman_portal *portal,
return qman_cb_dqrr_stop;
}
- err = dpaa_eth_refill_bpools(percpu_priv);
- if (err) {
- /* Unable to refill the buffer pool due to insufficient
- * system memory. Just release the frame back into the pool,
- * otherwise we'll soon end up with an empty buffer pool.
- */
- dpa_fd_release(net_dev, &dq->fd);
- return qman_cb_dqrr_consume;
- }
-
+ dpaa_eth_refill_bpools(percpu_priv);
_dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
@@ -2368,7 +2340,6 @@ ingress_rx_default_dqrr(struct qman_portal *portal,
struct net_device *net_dev;
struct dpa_priv_s *priv;
struct dpa_percpu_priv_s *percpu_priv;
- int err;
net_dev = ((struct dpa_fq *)fq)->net_dev;
priv = netdev_priv(net_dev);
@@ -2385,16 +2356,7 @@ ingress_rx_default_dqrr(struct qman_portal *portal,
}
/* Vale of plenty: make sure we didn't run out of buffers */
- err = dpaa_eth_refill_bpools(percpu_priv);
- if (err) {
- /* Unable to refill the buffer pool due to insufficient
- * system memory. Just release the frame back into the pool,
- * otherwise we'll soon end up with an empty buffer pool.
- */
- dpa_fd_release(net_dev, &dq->fd);
- return qman_cb_dqrr_consume;
- }
-
+ dpaa_eth_refill_bpools(percpu_priv);
_dpa_rx(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
@@ -3591,16 +3553,12 @@ static void dpa_setup_egress(struct dpa_priv_s *priv,
/* Allocate frame queues to all available CPUs no matter the number of
* queues specified in device tree.
*/
- for (i = 0, ptr = &fq->list; i < DPAA_ETH_TX_QUEUES; i++) {
+ for (i = 0; i < DPAA_ETH_TX_QUEUES; i++) {
iter = list_entry(ptr, struct dpa_fq, list);
priv->egress_fqs[i] = &iter->fq_base;
- if (list_is_last(ptr, head)) {
+ if (list_is_last(ptr, head))
ptr = &fq->list;
- continue;
- }
-
- ptr = ptr->next;
}
}
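
The intent of the simplified loop above is to walk the frame-queue list and
wrap back to its head at the last entry, so all DPAA_ETH_TX_QUEUES slots are
filled even when the device tree provides fewer queues. The same wrap-around
distribution, sketched with a plain singly linked list instead of the kernel's
list_head API (hypothetical types; the driver does this with struct dpa_fq and
list_is_last()):

	/* Sketch: assign list entries to a fixed number of slots, cycling
	 * back to the head whenever the tail is reached.
	 */
	struct example_fq {
		struct example_fq *next;	/* NULL marks the tail */
	};

	static void example_assign(struct example_fq *head,
				   struct example_fq *slots[], int nslots)
	{
		struct example_fq *iter = head;
		int i;

		for (i = 0; i < nslots; i++) {
			slots[i] = iter;
			iter = iter->next ? iter->next : head;	/* wrap at tail */
		}
	}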
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
index f93c346..0f0f726 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
@@ -45,6 +45,22 @@
#ifdef CONFIG_FSL_DPAA_ETH_SG_SUPPORT
#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+/*
+ * It does not return a page, as the caller gets the page from the fd;
+ * this helper exists only for refcounting and DMA unmapping.
+ */
+static inline void dpa_bp_removed_one_page(struct dpa_bp *dpa_bp,
+ dma_addr_t dma_addr)
+{
+ int *count_ptr;
+
+ count_ptr = __this_cpu_ptr(dpa_bp->percpu_count);
+ (*count_ptr)--;
+
+ dma_unmap_single(dpa_bp->dev, dma_addr, dpa_bp->size,
+ DMA_BIDIRECTIONAL);
+}
+
/* DMA map and add a page into the bpool */
static void dpa_bp_add_page(struct dpa_bp *dpa_bp, unsigned long vaddr)
{
@@ -112,9 +128,7 @@ release_bufs:
return i;
bail_out:
- net_err_ratelimited("dpa_bp_add_8_pages() failed\n");
- WARN_ONCE(1, "Memory allocation failure on Rx\n");
-
+ dev_err(dpa_bp->dev, "dpa_bp_add_8_pages() failed\n");
bm_buffer_set64(&bmb[i], 0);
/*
* Avoid releasing a completely null buffer; bman_release() requires
@@ -315,7 +329,6 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
int i;
const t_FmPrsResult *parse_results;
struct sk_buff *skb = NULL;
- int *count_ptr;
vaddr = phys_to_virt(addr);
#ifdef CONFIG_FSL_DPAA_1588
@@ -336,33 +349,21 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
dpa_bp = dpa_bpid2pool(sgt[i].bpid);
BUG_ON(IS_ERR(dpa_bp));
- count_ptr = __this_cpu_ptr(dpa_bp->percpu_count);
sg_addr = qm_sg_addr(&sgt[i]);
sg_vaddr = phys_to_virt(sg_addr);
- if (i == 0) {
- /* Tentatively access the first buffer, but don't unmap
- * it until we're certain the skb allocation succeeds.
- */
- dma_sync_single_for_cpu(dpa_bp->dev, sg_addr,
- dpa_bp->size, DMA_BIDIRECTIONAL);
+ dpa_bp_removed_one_page(dpa_bp, sg_addr);
+ if (i == 0) {
/* This is the first S/G entry, so build the skb
* around its data buffer
*/
skb = build_skb(sg_vaddr,
dpa_bp->size + DPA_SKB_TAILROOM);
if (unlikely(!skb))
- /* dpa_fd_release() will put the current frame
- * back into the pool. DMA mapping status has
- * not changed, nor have the pool counts.
- */
return NULL;
- dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
- DMA_BIDIRECTIONAL);
-
/* In the case of a SG frame, FMan stores the Internal
* Context in the buffer containing the sgt.
* Inspect the parse results before anything else.
@@ -379,8 +380,6 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
dpa_get_rx_extra_headroom());
skb_put(skb, sgt[i].length);
} else {
- dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
- DMA_BIDIRECTIONAL);
/*
* Not the first S/G entry; all data from buffer will
* be added in an skb fragment; fragment index is offset
@@ -397,8 +396,6 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
skb_add_rx_frag(skb, i - 1, page, frag_offset, frag_len,
frag_len);
}
- /* Update the pool count for the current {cpu x bpool} */
- (*count_ptr)--;
if (sgt[i].final)
break;
@@ -424,7 +421,6 @@ void __hot _dpa_rx(struct net_device *net_dev,
unsigned int skb_len;
struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
int use_gro = net_dev->features & NETIF_F_GRO;
- int *count_ptr;
if (unlikely(fd_status & FM_FD_STAT_ERRORS) != 0) {
if (netif_msg_hw(priv) && net_ratelimit())
@@ -436,13 +432,7 @@ void __hot _dpa_rx(struct net_device *net_dev,
}
dpa_bp = dpa_bpid2pool(fd->bpid);
- count_ptr = __this_cpu_ptr(dpa_bp->percpu_count);
- /* Prepare to read from the buffer, but don't unmap it until
- * we know the skb allocation succeeded. At this point we already
- * own the buffer - i.e. FMan won't access it anymore.
- */
- dma_sync_single_for_cpu(dpa_bp->dev, addr, dpa_bp->size,
- DMA_BIDIRECTIONAL);
+ dpa_bp_removed_one_page(dpa_bp, addr);
/* prefetch the first 64 bytes of the frame or the SGT start */
prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
@@ -455,18 +445,8 @@ void __hot _dpa_rx(struct net_device *net_dev,
/* The only FD types that we may receive are contig and S/G */
BUG();
if (unlikely(!skb))
- /* We haven't yet touched the DMA mapping or the pool count;
- * dpa_fd_release() will just put the buffer back in the pool
- */
goto _release_frame;
- /* Account for either the contig buffer or the SGT buffer (depending on
- * which case we were in) having been removed from the pool.
- * Also, permanently unmap the buffer.
- */
- (*count_ptr)--;
- dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
-
skb->protocol = eth_type_trans(skb, net_dev);
/* IP Reassembled frames are allowed to be larger than MTU */
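
With dpa_bp_removed_one_page() in place, the Rx paths unmap the buffer and
drop the per-CPU pool count as soon as ownership passes from FMan to the
driver, and only then build the skb around the raw buffer. The shape of that
sequence, reduced to a sketch (EXAMPLE_TAILROOM stands in for
DPA_SKB_TAILROOM):

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	#define EXAMPLE_TAILROOM	128	/* illustrative, not the driver's value */

	/* Sketch of the restored Rx ownership sequence: eagerly unmap and
	 * uncount the buffer, then wrap it with build_skb(). The pre-revert
	 * code deferred the unmap until build_skb() succeeded; the revert
	 * returns to this simpler eager form.
	 */
	static struct sk_buff *example_buf_to_skb(struct device *dev,
						  dma_addr_t addr, void *vaddr,
						  size_t size, int *count_ptr)
	{
		dma_unmap_single(dev, addr, size, DMA_BIDIRECTIONAL);
		(*count_ptr)--;

		return build_skb(vaddr, size + EXAMPLE_TAILROOM); /* NULL on OOM */
	}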
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index ec7449f..a6d52e5 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -34,7 +34,7 @@ endmenu
config FMAN_RESOURCE_ALLOCATION_ALGORITHM
bool "Enable FMan dynamic resource allocation algorithm"
- default y if ((FMAN_P3040_P4080_P5020 || FMAN_P1023) && FSL_DPAA_ETH_OPTIMIZE_FOR_IPFWD)
+ default n
---help---
Enables algorithm for dynamic resource allocation