author     Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>   2015-09-30 06:15:53 (GMT)
committer  David S. Miller <davem@davemloft.net>                2015-10-03 12:05:19 (GMT)
commit     e2dbb33ad9545de6859747b92556a74a81bed8f5
tree       ac1fc3345a72e6e3e11117481a48b988b63ba5dd
parent     f3a6bd393c2c5d0e6b16624ba99a1c5fa07bdb0b
ravb: Provide dev parameter to DMA API
This patch is in preparation for using this driver on arm64 where the
implementation of __dma_alloc_coherent fails if a device parameter is not
provided.
Signed-off-by: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Masaru Nagai <masaru.nagai.vx@renesas.com>
[horms: squashed into a single patch]
Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ethernet/renesas/ravb_main.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 450899e..4ca093d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -201,7 +201,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	if (priv->rx_ring[q]) {
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
-		dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
+		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
 				  priv->rx_desc_dma[q]);
 		priv->rx_ring[q] = NULL;
 	}
@@ -209,7 +209,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	if (priv->tx_ring[q]) {
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
-		dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
+		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
@@ -240,13 +240,13 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 		rx_desc = &priv->rx_ring[q][i];
 		/* The size of the buffer should be on 16-byte boundary. */
 		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
+		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  ALIGN(PKT_BUF_SZ, 16),
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
 		 */
-		if (dma_mapping_error(&ndev->dev, dma_addr))
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 			rx_desc->ds_cc = cpu_to_le16(0);
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
@@ -309,7 +309,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
-	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->rx_desc_dma[q],
 					      GFP_KERNEL);
 	if (!priv->rx_ring[q])
@@ -320,7 +320,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	/* Allocate all TX descriptors. */
 	ring_size = sizeof(struct ravb_tx_desc) *
 		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
-	priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
 	if (!priv->tx_ring[q])
@@ -443,7 +443,7 @@ static int ravb_tx_free(struct net_device *ndev, int q)
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
 		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
 			/* Last packet descriptor? */
 			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
@@ -546,7 +546,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
-			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 ALIGN(PKT_BUF_SZ, 16),
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
@@ -586,14 +586,14 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 			if (!skb)
 				break;	/* Better luck next round. */
 			ravb_set_buffer_align(skb);
-			dma_addr = dma_map_single(&ndev->dev, skb->data,
+			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 						  le16_to_cpu(desc->ds_cc),
 						  DMA_FROM_DEVICE);
 			skb_checksum_none_assert(skb);
 			/* We just set the data size to 0 for a failed mapping
 			 * which should prevent DMA from happening...
 			 */
-			if (dma_mapping_error(&ndev->dev, dma_addr))
+			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 				desc->ds_cc = cpu_to_le16(0);
 			desc->dptr = cpu_to_le32(dma_addr);
 			priv->rx_skb[q][entry] = skb;
@@ -1300,8 +1300,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		 entry / NUM_TX_DESC * DPTR_ALIGN;
 	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
 	memcpy(buffer, skb->data, len);
-	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&ndev->dev, dma_addr))
+	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ndev->dev.parent, dma_addr))
 		goto drop;

 	desc = &priv->tx_ring[q][entry];
@@ -1310,8 +1310,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	buffer = skb->data + len;
 	len = skb->len - len;
-	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&ndev->dev, dma_addr))
+	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ndev->dev.parent, dma_addr))
 		goto unmap;

 	desc++;
@@ -1323,7 +1323,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
 			desc--;
-			dma_unmap_single(&ndev->dev, dma_addr, len,
+			dma_unmap_single(ndev->dev.parent, dma_addr, len,
 					 DMA_TO_DEVICE);
 			goto unmap;
 		}
@@ -1358,7 +1358,7 @@ exit:
 	return NETDEV_TX_OK;

 unmap:
-	dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
@@ -1708,7 +1708,7 @@ static int ravb_probe(struct platform_device *pdev)
 	/* Allocate descriptor base address table */
 	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
-	priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
+	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
 					    &priv->desc_bat_dma, GFP_KERNEL);
 	if (!priv->desc_bat) {
 		dev_err(&ndev->dev,
@@ -1763,7 +1763,7 @@ out_napi_del:
 	netif_napi_del(&priv->napi[RAVB_BE]);
 	ravb_mdio_release(priv);
 out_dma_free:
-	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
 			  priv->desc_bat_dma);
 out_release:
 	if (ndev)
@@ -1779,7 +1779,7 @@ static int ravb_remove(struct platform_device *pdev)
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct ravb_private *priv = netdev_priv(ndev);

-	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
 			  priv->desc_bat_dma);
 	/* Set reset mode */
 	ravb_write(ndev, CCC_OPC_RESET, CCC);
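As a rough illustration of the pattern the patch applies throughout the driver, the sketch below shows DMA API calls taking the parent platform device (ndev->dev.parent) instead of NULL or &ndev->dev. The helper names example_alloc_ring and example_map_tx are hypothetical and are not part of ravb_main.c.

/* Hypothetical helpers sketching the dev-parameter pattern; not from ravb_main.c. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

/* Allocate a coherent descriptor ring.  ndev->dev.parent is the platform
 * device that carries the DMA configuration; passing NULL here (as the
 * driver did before this patch) makes __dma_alloc_coherent fail on arm64.
 */
static void *example_alloc_ring(struct net_device *ndev, size_t ring_size,
				dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(ndev->dev.parent, ring_size, dma_handle,
				  GFP_KERNEL);
}

/* Map a TX buffer for streaming DMA, again against the parent device. */
static int example_map_tx(struct net_device *ndev, void *buf, size_t len,
			  dma_addr_t *dma_addr)
{
	*dma_addr = dma_map_single(ndev->dev.parent, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, *dma_addr))
		return -ENOMEM;
	return 0;
}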