author     Arnd Bergmann <arnd@arndb.de>  2013-03-14 22:55:59 (GMT)
committer  Arnd Bergmann <arnd@arndb.de>  2013-03-14 22:55:59 (GMT)
commit     7ac6c89189c669fd5f883937f291f18d324d470b (patch)
tree       5f0712bdedac45a40148ede0064d4e6eddb4260e /drivers/net/ethernet/sfc/rx.c
parent     6fdd496e07f511a94ba27e4a1433038b32d6af05 (diff)
parent     0ed66befaae893e82c9f016238282d73ef9fd6c7 (diff)
Merge tag 'at91-fixes' of git://github.com/at91linux/linux-at91 into fixes
From Nicolas Ferre <nicolas.ferre@atmel.com>:

Two patches for Device Tree on at91sam9x5/NAND. Two more for fixing
PM suspend/resume IRQ on AIC5 and GPIO used with pinctrl.

* tag 'at91-fixes' of git://github.com/at91linux/linux-at91:
  ARM: at91: fix infinite loop in at91_irq_suspend/resume
  ARM: at91: add gpio suspend/resume support when using pinctrl
  ARM: at91: dt: at91sam9x5: complete NAND pinctrl
  ARM: at91: dt: at91sam9x5: correct NAND pins comments

Includes an update to -rc2

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
-rw-r--r--  drivers/net/ethernet/sfc/rx.c  25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d780a0d..879ff58 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
struct efx_rx_buffer *buf)
{
- /* Offset is always within one page, so we don't need to consider
- * the page order.
- */
- return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
- efx->type->rx_buffer_hash_size;
+ return buf->page_offset + efx->type->rx_buffer_hash_size;
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
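The deleted lines above derived the intra-page offset by masking buf->dma_addr with (PAGE_SIZE - 1), which silently assumes the DMA address keeps the same low bits as the CPU address within the page. With a bounce-buffering DMA layer such as swiotlb, the device-visible address can land at a different offset, so the buffer now carries an explicit page_offset recorded at allocation time. A minimal sketch of the failure mode, with hypothetical addresses not taken from this driver:

	/* Sketch: a bounce buffer may place the mapping at an address
	 * whose low bits differ from the buffer's real page offset. */
	unsigned int cpu_off = 1536;               /* true offset within the page */
	dma_addr_t dma = 0xbf000200;               /* hypothetical bounce address */
	unsigned int bad  = dma & (PAGE_SIZE - 1); /* yields 512, not 1536 */
	unsigned int good = cpu_off;               /* what page_offset now records */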
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
struct page *page;
+ unsigned int page_offset;
struct efx_rx_page_state *state;
dma_addr_t dma_addr;
unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
state->dma_addr = dma_addr;
dma_addr += sizeof(struct efx_rx_page_state);
+ page_offset = sizeof(struct efx_rx_page_state);
split:
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->u.page = page;
+ rx_buf->page_offset = page_offset;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
rx_buf->flags = EFX_RX_BUF_PAGE;
++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
/* Use the second half of the page */
get_page(page);
dma_addr += (PAGE_SIZE >> 1);
+ page_offset += (PAGE_SIZE >> 1);
++count;
goto split;
}
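Taken together, the three hunks above keep a CPU-side page_offset in lockstep with dma_addr: both start just past the efx_rx_page_state header, and both advance by half a page when a second buffer is carved out of the same page. A rough layout sketch under that reading (inferred from the hunks; the EFX_PAGE_IP_ALIGN padding applied to the data pointer is omitted):

	/*
	 * +-------------------+--------------------+--------------------+
	 * | efx_rx_page_state | RX buffer 0        | RX buffer 1        |
	 * +-------------------+--------------------+--------------------+
	 * page_offset:          sizeof(state)        sizeof(state)
	 *                                            + (PAGE_SIZE >> 1)
	 */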
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
}
static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+ struct efx_rx_buffer *rx_buf,
+ unsigned int used_len)
{
if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
state->dma_addr,
efx_rx_buf_size(efx),
DMA_FROM_DEVICE);
+ } else if (used_len) {
+ dma_sync_single_for_cpu(&efx->pci_dev->dev,
+ rx_buf->dma_addr, used_len,
+ DMA_FROM_DEVICE);
}
} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
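The new used_len parameter splits the old unmap operation into two cases: when the last reference to the page is dropped, the whole mapping is unmapped as before, but while the page is still shared with another RX buffer, a nonzero used_len triggers only a partial sync of the bytes the device actually wrote. A condensed sketch of the two paths (simplified from the hunk above; the refcount test sits in context lines not shown here):

	if (--state->refcnt == 0)          /* last user: drop the mapping */
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       efx_rx_buf_size(efx), DMA_FROM_DEVICE);
	else if (used_len)                 /* page still shared: sync only */
		dma_sync_single_for_cpu(&efx->pci_dev->dev,
					rx_buf->dma_addr, used_len,
					DMA_FROM_DEVICE);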
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
goto out;
}
- /* Release card resources - assumes all RX buffers consumed in-order
- * per RX queue
+ /* Release and/or sync DMA mapping - assumes all RX buffers
+ * consumed in-order per RX queue
*/
- efx_unmap_rx_buffer(efx, rx_buf);
+ efx_unmap_rx_buffer(efx, rx_buf, len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
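With the received length threaded through from efx_rx_packet(), the hot receive path gets a bounded cache sync instead of a full-page unmap, while efx_fini_rx_buffer() passes 0 because a buffer being torn down is never read by the CPU. The two call sites after this patch, as they appear in the hunks above:

	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);  /* teardown: skip the sync */
	efx_unmap_rx_buffer(efx, rx_buf, len);          /* delivery: sync len bytes */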