author    Marian Rotariu <marian.rotariu@freescale.com>  2014-06-03 15:57:37 (GMT)
committer Jose Rivera <German.Rivera@freescale.com>      2014-06-03 17:42:18 (GMT)
commit    043fa544a260d1884fccfaa908e588be8f1db1fc (patch)
tree      2b9d45b1ff96f9e9579dc22a0f6f47266d226e57 /drivers/net/ethernet
parent    21885b41ac57b5e932840351cd53375460008248 (diff)
download  linux-fsl-qoriq-043fa544a260d1884fccfaa908e588be8f1db1fc.tar.xz
onic: increase frequency of draining
The draining function used to run only once a CPU had accumulated more
than 8 buffers in the draining pool. Under some conditions, a socket must
wait for the Ethernet driver to release its transmitted buffers before it
can continue transmitting. To avoid depleting the socket send buffers,
the Ethernet driver now releases the skbs received from the stack sooner:
this patch makes onic call the drain procedure on the RX path as well,
even if there is only one buffer in the bpool.

Change-Id: I19f800bbd17c23fda5739b7613fab3da355c6fbc
Signed-off-by: Marian Rotariu <marian.rotariu@freescale.com>
Reviewed-on: http://git.am.freescale.net:8181/13095
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Bogdan Hamciuc <bogdan.hamciuc@freescale.com>
Reviewed-by: Jose Rivera <German.Rivera@freescale.com>
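As background to the stall the commit message describes, here is a minimal
sketch (not code from this patch; dpa_bp and the drain helper belong to the
driver, everything else is generic kernel API, and the helper name below is
hypothetical): a TX skb stays charged to its owning socket until the driver
frees it, so parking skbs in the draining pool can exhaust a socket's send
budget and stall TCP.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Background sketch, not part of the patch: a TX skb is charged to
 * its owning socket (sk->sk_wmem_alloc) for its whole lifetime in
 * the driver.  Only freeing it runs skb->destructor (sock_wfree()
 * or, for TCP, tcp_wfree()), which uncharges the socket and may
 * wake a sender blocked waiting for send-buffer space.
 */
static void tx_buffer_release_sketch(struct sk_buff *skb)
{
	/* The old drain could hold up to 7 skbs per CPU in the pool;
	 * each of them kept some socket's send budget pinned.
	 */
	dev_kfree_skb_any(skb);	/* uncharges the socket via the destructor */
}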
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_generic.c  34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_generic.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_generic.c
index b31f5f1..6b1fd0a 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_generic.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_generic.c
@@ -89,6 +89,7 @@ static void dpa_generic_ern(struct qman_portal *portal,
 			    const struct qm_mr_entry *msg);
 static int __hot dpa_generic_tx(struct sk_buff *skb,
 				struct net_device *netdev);
+static void dpa_generic_drain_bp(struct dpa_bp *bp);
 
 static const struct net_device_ops dpa_generic_ops = {
 	.ndo_open = dpa_generic_start,
@@ -361,6 +362,11 @@ dpa_generic_rx_dqrr(struct qman_portal *portal,
 	percpu_priv = __this_cpu_ptr(priv->percpu_priv);
 	countptr = __this_cpu_ptr(priv->rx_bp->percpu_count);
 
+	/* This is needed for TCP traffic as draining only on TX is not
+	 * enough
+	 */
+	dpa_generic_drain_bp(priv->draining_tx_bp);
+
 	if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
 		return qman_cb_dqrr_stop;
@@ -440,30 +446,26 @@ qman_consume:
 static void dpa_generic_drain_bp(struct dpa_bp *bp)
 {
-	int i, num;
-	struct bm_buffer bmb[8];
+	int ret;
+	struct bm_buffer bmb;
 	dma_addr_t addr;
 	int *countptr = __this_cpu_ptr(bp->percpu_count);
 	int count = *countptr;
 	struct sk_buff **skbh;
 
-	while (count >= 8) {
-		num = bman_acquire(bp->pool, bmb, 8, 0);
-		/* There may still be up to 7 buffers in the pool;
-		 * just leave them there until more arrive
-		 */
-		if (num < 0)
-			break;
-		for (i = 0; i < num; i++) {
-			addr = bm_buf_addr(&bmb[i]);
-			/* bp->free_buf_cb(phys_to_virt(addr)); */
+	do {
+		/* most likely, the bpool has only one buffer in it */
+		ret = bman_acquire(bp->pool, &bmb, 1, 0);
+		if (ret > 0) {
+			addr = bm_buf_addr(&bmb);
 			skbh = (struct sk_buff **)phys_to_virt(addr);
 			dma_unmap_single(bp->dev, addr, bp->size,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb(*skbh);
+					DMA_TO_DEVICE);
+			dev_kfree_skb_any(*skbh);
+			count--;
 		}
-		count -= num;
-	}
+	} while (ret > 0);
+
 	*countptr = count;
 }