author     Marian-Cristian Rotariu <marian.rotariu@freescale.com>    2014-10-29 13:56:19 (GMT)
committer  Madalin Bucur <madalin.bucur@freescale.com>               2015-03-11 16:05:52 (GMT)
commit     36f096f59e4804b37ddc200d7770380f622fd4ca (patch)
tree       ea6dbcfa1aabc93629cbe924033110aea68790b7 /drivers/net/ethernet/freescale/dpa
parent     2e7cdc3c7b1787f7843173aa553ec1858311d3ca (diff)
download   linux-fsl-qoriq-36f096f59e4804b37ddc200d7770380f622fd4ca.tar.xz
dpaa_eth: convert sg entry to be endian portable
The S/G entry resides in main memory and is used directly by the FMan; therefore it needs proper endianness conversion on both the Rx and Tx paths.

Change-Id: I4d85d80589fb8b72e98a13b9665ff2ebffcfc387
Reviewed-on: http://git.am.freescale.net:8181/22443
Reviewed-by: Madalin-Cristian Bucur <madalin.bucur@freescale.com>
Reviewed-by: Marian Cristian Rotariu <marian.rotariu@freescale.com>
Tested-by: Marian Cristian Rotariu <marian.rotariu@freescale.com>
Reviewed-on: http://git.am.freescale.net:8181/32501
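For readers unfamiliar with the idiom, below is a minimal userspace sketch of the conversion pattern the patch applies. The struct and helper names (demo_sg_entry, demo_sg_fill, demo_sg_read) are hypothetical stand-ins, not the driver's real struct qm_sg_entry, and htonl()/ntohl() stand in for the kernel's cpu_to_be32()/be32_to_cpu() used in the diff below.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl()/ntohl(): userspace stand-ins for cpu_to_be32()/be32_to_cpu() */

/* Hypothetical, simplified stand-in for the hardware S/G entry; the real
 * struct qm_sg_entry uses bitfields and carries more fields (bpid,
 * extension, final, ...). The fields below are kept big-endian in memory,
 * because that is how the FMan reads them. */
struct demo_sg_entry {
        uint32_t addr_lo;  /* low 32 bits of the buffer address */
        uint32_t length;   /* buffer length */
        uint16_t offset;   /* data offset inside the buffer */
};

/* Tx direction: convert CPU-order values to big-endian before the entry
 * is handed to the hardware (cpu_to_be32()/cpu_to_be16() in kernel code). */
static void demo_sg_fill(struct demo_sg_entry *sg, uint32_t addr_lo,
                         uint32_t len, uint16_t off)
{
        sg->addr_lo = htonl(addr_lo);
        sg->length  = htonl(len);
        sg->offset  = htons(off);
}

/* Rx direction: convert big-endian fields back to CPU order before the
 * driver uses them (be32_to_cpu()/be16_to_cpu() in kernel code). */
static void demo_sg_read(const struct demo_sg_entry *sg, uint32_t *addr_lo,
                         uint32_t *len, uint16_t *off)
{
        *addr_lo = ntohl(sg->addr_lo);
        *len     = ntohl(sg->length);
        *off     = ntohs(sg->offset);
}

int main(void)
{
        struct demo_sg_entry sg;
        uint32_t addr, len;
        uint16_t off;

        demo_sg_fill(&sg, 0x12345678, 1500, 64);
        demo_sg_read(&sg, &addr, &len, &off);
        printf("addr_lo=0x%" PRIx32 " len=%" PRIu32 " off=%" PRIu16 "\n",
               addr, len, off);
        return 0;
}

On a big-endian core these conversions compile to no-ops; on a little-endian core they perform the byte swap, which is what makes the S/G entry layout portable across the Rx and Tx paths touched by this patch.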
Diffstat (limited to 'drivers/net/ethernet/freescale/dpa')
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c  31
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
index aa315c7..f9644a8 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
@@ -231,6 +231,7 @@ struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
int i;
struct dpa_bp *dpa_bp = priv->dpa_bp;
dma_addr_t addr = qm_fd_addr(fd);
+ dma_addr_t sg_addr;
struct sk_buff **skbh;
struct sk_buff *skb = NULL;
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
@@ -263,15 +264,18 @@ struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
#endif /* CONFIG_FSL_DPAA_TS */
/* sgt[0] is from lowmem, was dma_map_single()-ed */
- dma_unmap_single(dpa_bp->dev, (dma_addr_t)sgt[0].addr,
- sgt[0].length, dma_dir);
+ /* TODO: sg_addr should be in CPU endianness */
+ sg_addr = qm_sg_addr(&sgt[0]);
+ dma_unmap_single(dpa_bp->dev, sg_addr,
+ be32_to_cpu(sgt[0].length), dma_dir);
/* remaining pages were mapped with dma_map_page() */
for (i = 1; i < nr_frags; i++) {
DPA_BUG_ON(sgt[i].extension);
-
- dma_unmap_page(dpa_bp->dev, (dma_addr_t)sgt[i].addr,
- sgt[i].length, dma_dir);
+ /* TODO: sg_addr should be in CPU endianness */
+ sg_addr = qm_sg_addr(&sgt[i]);
+ dma_unmap_page(dpa_bp->dev, sg_addr,
+ be32_to_cpu(sgt[i].length), dma_dir);
}
/* Free the page frag that we allocated on Tx */
@@ -437,6 +441,7 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
/* We use a single global Rx pool */
DPA_BUG_ON(dpa_bp != dpa_bpid2pool(sgt[i].bpid));
+ /* TODO: sg_addr should be in CPU endianness */
sg_addr = qm_sg_addr(&sgt[i]);
sg_vaddr = phys_to_virt(sg_addr);
DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
@@ -471,7 +476,7 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
*/
DPA_BUG_ON(fd_off != priv->rx_headroom);
skb_reserve(skb, fd_off);
- skb_put(skb, sgt[i].length);
+ skb_put(skb, be32_to_cpu(sgt[i].length));
} else {
/* Not the first S/G entry; all data from buffer will
* be added in an skb fragment; fragment index is offset
@@ -497,8 +502,8 @@ static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
/* page_offset only refers to the beginning of sgt[i];
* but the buffer itself may have an internal offset.
*/
- frag_offset = sgt[i].offset + page_offset;
- frag_len = sgt[i].length;
+ frag_offset = be16_to_cpu(sgt[i].offset) + page_offset;
+ frag_len = be32_to_cpu(sgt[i].length);
/* skb_add_rx_frag() does no checking on the page; if
* we pass it a tail page, we'll end up with
* bad page accounting and eventually with segfaults.
@@ -766,7 +771,7 @@ static int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
sgt[0].bpid = 0xff;
sgt[0].offset = 0;
- sgt[0].length = skb_headlen(skb);
+ sgt[0].length = cpu_to_be32(skb_headlen(skb));
sgt[0].extension = 0;
sgt[0].final = 0;
addr = dma_map_single(dpa_bp->dev, skb->data, sgt[0].length, dma_dir);
@@ -777,14 +782,14 @@ static int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
}
sgt[0].addr_hi = (uint8_t)upper_32_bits(addr);
- sgt[0].addr_lo = lower_32_bits(addr);
+ sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));
/* populate the rest of SGT entries */
for (i = 1; i <= nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i - 1];
sgt[i].bpid = 0xff;
sgt[i].offset = 0;
- sgt[i].length = frag->size;
+ sgt[i].length = cpu_to_be32(frag->size);
sgt[i].extension = 0;
sgt[i].final = 0;
@@ -799,7 +804,7 @@ static int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
/* keep the offset in the address */
sgt[i].addr_hi = (uint8_t)upper_32_bits(addr);
- sgt[i].addr_lo = lower_32_bits(addr);
+ sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
}
sgt[i - 1].final = 1;
@@ -832,7 +837,7 @@ sgt_map_failed:
sg_map_failed:
for (j = 0; j < i; j++)
dma_unmap_page(dpa_bp->dev, qm_sg_addr(&sgt[j]),
- sgt[j].length, dma_dir);
+ be32_to_cpu(sgt[j].length), dma_dir);
sg0_map_failed:
csum_failed:
put_page(virt_to_head_page(sgt_buf));