author     Bogdan Hamciuc <bogdan.hamciuc@freescale.com>    2013-03-01 00:25:52 (GMT)
committer  Fleming Andrew-AFLEMING <AFLEMING@freescale.com> 2013-04-08 23:24:59 (GMT)
commit     536b2f172a871f260eeaba0715c9ac9dbc49f9dd (patch)
tree       bc71c4f7c17e65ff2f5e702c92b578c91638c0bc /drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
parent     d41438c80e48af11871c3863ea55d46959835c6b (diff)
download   linux-fsl-qoriq-536b2f172a871f260eeaba0715c9ac9dbc49f9dd.tar.xz
dpaa_eth: Optimize buffer allocation code
Reduce the code size and the number of jumps generated for dpa_bp_add_8_pages(), which is one of the hottest functions on the Rx data path.

Remove the per_cpu_ptr dependency of dpaa_eth_refill_bpools() by splitting it into a "this_cpu_ptr" hot function and a "per_cpu_ptr" cold wrapper function.

Change-Id: If1bf9a29e9c43bf2345a36488f15846b1481a095
Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc@freescale.com>
(cherry picked from commit 345457577cf48216ffe6e883c7792f5496734b41)
Reviewed-on: http://git.am.freescale.net:8181/1058
Reviewed-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
Tested-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
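The diff below contains only the cold half of this split (the dpa_bp_add_8_pages() wrapper); the hot half lives in the refill path. A minimal sketch of the pattern as the commit message describes it: the example_* names are hypothetical, and only dpa_bp, percpu_count and _dpa_bp_add_8_pages() come from the driver.

#include <linux/percpu.h>

/*
 * Hot path: the caller already runs on the CPU whose pool it refills,
 * so this_cpu_ptr() avoids the smp_processor_id()/per_cpu_ptr()
 * indirection on every Rx refill.
 */
static void example_refill_this_cpu(const struct dpa_bp *dpa_bp)
{
	int *count_ptr = this_cpu_ptr(dpa_bp->percpu_count);

	*count_ptr += _dpa_bp_add_8_pages(dpa_bp);
}

/*
 * Cold path: may account buffers to any CPU (e.g. when seeding pools),
 * so it pays for the explicit per-CPU lookup; this mirrors the
 * dpa_bp_add_8_pages() wrapper added in the diff below.
 */
static void example_refill_remote_cpu(const struct dpa_bp *dpa_bp, int cpu)
{
	int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);

	*count_ptr += _dpa_bp_add_8_pages(dpa_bp);
}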
Diffstat (limited to 'drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c')
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c | 65
1 file changed, 38 insertions(+), 27 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
index cc269a3..1acf303 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
@@ -88,50 +88,61 @@ static void dpa_bp_add_page(struct dpa_bp *dpa_bp, unsigned long vaddr)
(*count_ptr)++;
}
-void dpa_bp_add_8_pages(struct dpa_bp *dpa_bp, int cpu_id)
+int _dpa_bp_add_8_pages(const struct dpa_bp *dpa_bp)
{
struct bm_buffer bmb[8];
unsigned long new_page;
- int *count_ptr;
dma_addr_t addr;
int i;
-
- count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu_id);
+ struct device *dev = dpa_bp->dev;
for (i = 0; i < 8; i++) {
new_page = __get_free_page(GFP_ATOMIC);
- if (unlikely(!new_page)) {
- dpaa_eth_err(dpa_bp->dev, "__get_free_page() failed\n");
- bm_buffer_set64(&bmb[i], 0);
- break;
+ if (likely(new_page)) {
+ addr = dma_map_single(dev, (void *)new_page,
+ dpa_bp->size, DMA_BIDIRECTIONAL);
+ if (likely(!dma_mapping_error(dev, addr))) {
+ bm_buffer_set64(&bmb[i], addr);
+ continue;
+ } else
+ free_page(new_page);
}
- addr = dma_map_single(dpa_bp->dev, (void *)new_page,
- dpa_bp->size, DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
- dpaa_eth_err(dpa_bp->dev, "DMA mapping failed");
- free_page(new_page);
- break;
- }
-
- bm_buffer_set64(&bmb[i], addr);
+ /* Something went wrong */
+ goto bail_out;
}
+release_bufs:
+ /*
+ * Release the buffers. In case bman is busy, keep trying
+ * until successful. bman_release() is guaranteed to succeed
+ * in a reasonable amount of time
+ */
+ while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
+ cpu_relax();
+
+ return i;
+
+bail_out:
+ dev_err(dpa_bp->dev, "dpa_bp_add_8_pages() failed\n");
+ bm_buffer_set64(&bmb[i], 0);
/*
* Avoid releasing a completely null buffer; bman_release() requires
* at least one buffer.
*/
- if (likely(i)) {
- /*
- * Release the buffers. In case bman is busy, keep trying
- * until successful. bman_release() is guaranteed to succeed
- * in a reasonable amount of time
- */
- while (bman_release(dpa_bp->pool, bmb, i, 0))
- cpu_relax();
+ if (likely(i))
+ goto release_bufs;
- *count_ptr += i;
- }
+ return 0;
+}
+
+/*
+ * Cold path wrapper over _dpa_bp_add_8_pages().
+ */
+void dpa_bp_add_8_pages(const struct dpa_bp *dpa_bp, int cpu)
+{
+ int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
+ *count_ptr += _dpa_bp_add_8_pages(dpa_bp);
}
void dpa_list_add_skb(struct dpa_percpu_priv_s *cpu_priv,
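The new return-value contract (_dpa_bp_add_8_pages() returns the number of buffers actually released to bman, 0 on total failure) lets callers account buffers themselves instead of the callee touching per-CPU state. A hypothetical hot-path caller sketch follows; EXAMPLE_REFILL_THRESHOLD and the loop shape are assumptions, not taken from the driver.

#include <linux/percpu.h>

#define EXAMPLE_REFILL_THRESHOLD 64	/* assumed watermark, not from the driver */

static void example_refill_bpools(const struct dpa_bp *dpa_bp)
{
	int *count_ptr = this_cpu_ptr(dpa_bp->percpu_count);

	while (*count_ptr < EXAMPLE_REFILL_THRESHOLD) {
		int added = _dpa_bp_add_8_pages(dpa_bp);

		/*
		 * Page allocation or DMA mapping failed; retry on a
		 * later Rx poll instead of spinning here. A busy bman
		 * is already handled inside _dpa_bp_add_8_pages(),
		 * which loops on bman_release() until it succeeds.
		 */
		if (unlikely(!added))
			break;
		*count_ptr += added;
	}
}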