author	Bogdan Hamciuc <bogdan.hamciuc@freescale.com>	2013-03-12 08:56:55 (GMT)
committer	Fleming Andrew-AFLEMING <AFLEMING@freescale.com>	2013-04-08 23:27:21 (GMT)
commit	8383e87537aad09e934c70e835954898b7f6bd88 (patch)
tree	61b7226dae5a273df0219de6b5f808e81c8895f3 /drivers
parent	b82fa0cdf48483b3a7182fa2c1f0ef569d442163 (diff)
dpaa_eth: Linearize skbs with at least 16 frags
Egress skbuffs can have up to (at least) 18 frags, a value which FMan
does not support. So far we hadn't hit such a test case and we had
overlooked the problem. But netperf with TCP_SENDFILE and a - rather
artificial, yet legal - "-m 18" option generates fragmented traffic
with more than 16 fragments, causing problems on the egress (Tx,
Tx Confirm) leg.

We must therefore linearize outgoing skbuffs that are more fragmented
than we can handle. Code rearrangements attempt to compensate for the
slight performance impact of the newly added conditionals.

So far, TCP tests with the "-m" flag are the only known case of heavy
fragmentation given our supported MTU range (<=9578), so this patch is
unlikely to cause performance issues in our benchmarks.

Change-Id: I28ef9f037f7f183c5bd74a466a516b96cb45ed60
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@freescale.com>
Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc@freescale.com>
(cherry picked from commit eeb6f4ca9037752e834c3eb0f6b0521c13dcfa89)
Reviewed-on: http://git.am.freescale.net:8181/1060
Reviewed-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
Tested-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
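The heart of the change is the fragment-count guard on the Tx path. The
following is a minimal standalone sketch of that idea, not the driver
code itself: DPA_SGT_MAX_ENTRIES and the one-entry reservation for the
skb's linear part come from the diff below, while the helper name, its
placement, and the fallback #define are illustrative assumptions.

#include <linux/skbuff.h>

#ifndef DPA_SGT_MAX_ENTRIES
#define DPA_SGT_MAX_ENTRIES 16	/* value implied by the subject line; an assumption here */
#endif

/*
 * Sketch only: flatten an egress skb that carries more fragments than
 * the FMan S/G table can describe. The first sgt entry is used for the
 * skb's linear part, so the S/G path can take at most
 * DPA_SGT_MAX_ENTRIES - 1 fragments.
 */
static int dpa_tx_flatten_if_needed(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb) &&
	    unlikely(skb_shinfo(skb)->nr_frags >= DPA_SGT_MAX_ENTRIES))
		return __skb_linearize(skb);	/* 0 on success, -ENOMEM on failure */

	return 0;
}

In the patch the equivalent check is inlined in dpa_tx(), with the
-ENOMEM case funneled to a common error label.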
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c | 49 +++++++++++++++++++++++++++++++++----------------
1 file changed, 33 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
index d183503..95c1996 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_sg.c
@@ -725,8 +725,9 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
 	struct qm_fd fd;
 	struct dpa_percpu_priv_s *percpu_priv;
 	struct net_device_stats *percpu_stats;
-	int queue_mapping;
-	int err;
+	int err = 0;
+	const int queue_mapping = dpa_get_queue_mapping(skb);
+	const bool nonlinear = skb_is_nonlinear(skb);
 	priv = netdev_priv(net_dev);
 	/* Non-migratable context, safe to use __this_cpu_ptr */
@@ -735,15 +736,19 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
 	clear_fd(&fd);
-	queue_mapping = dpa_get_queue_mapping(skb);
-
-
 #ifdef CONFIG_FSL_DPA_1588
 	if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
 		fd.cmd |= FM_FD_CMD_UPD;
 #endif
-	if (skb_is_nonlinear(skb)) {
+	/*
+	 * MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
+	 * we don't feed FMan with more fragments than it supports.
+	 * Btw, we're using the first sgt entry to store the linear part of
+	 * the skb, so we're one extra frag short.
+	 */
+	if (nonlinear &&
+	    likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
 		/* Just create a S/G fd based on the skb */
 		err = skb_to_sg_fd(priv, skb, &fd);
 		percpu_priv->tx_frag_skbuffs++;
@@ -770,20 +775,30 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
 		 * We're going to store the skb backpointer at the beginning
 		 * of the data buffer, so we need a privately owned skb
 		 */
-		skb = skb_unshare(skb, GFP_ATOMIC);
-		if (unlikely(!skb)) {
-			percpu_stats->tx_errors++;
-			return NETDEV_TX_OK;
+
+		/* Code borrowed from skb_unshare(). */
+		if (skb_cloned(skb)) {
+			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+			kfree_skb(skb);
+			skb = nskb;
+			/* skb_copy() has now linearized the skbuff. */
+		} else if (unlikely(nonlinear)) {
+			/*
+			 * We are here because the egress skb contains
+			 * more fragments than we support. In this case,
+			 * we have no choice but to linearize it ourselves.
+			 */
+			err = __skb_linearize(skb);
 		}
+		if (unlikely(!skb || err < 0))
+			/* Common out-of-memory error path */
+			goto enomem;
 		/* Finally, create a contig FD from this skb */
 		err = skb_to_contig_fd(priv, skb, &fd);
 	}
-	if (unlikely(err < 0)) {
-		percpu_stats->tx_errors++;
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
+	if (unlikely(err < 0))
+		goto skb_to_fd_failed;
 	if (unlikely(dpa_xmit(priv, percpu_stats, queue_mapping, &fd) < 0))
 		goto xmit_failed;
@@ -794,8 +809,10 @@ int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
 xmit_failed:
 	_dpa_cleanup_tx_fd(priv, &fd);
+skb_to_fd_failed:
+enomem:
+	percpu_stats->tx_errors++;
 	dev_kfree_skb(skb);
-
 	return NETDEV_TX_OK;
 }
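The clone handling added in dpa_tx() above is borrowed from
skb_unshare(), as its comment notes, with an extra branch that
linearizes over-fragmented skbs. A sketch of that ownership step in
isolation, assuming only core skbuff APIs (the helper name is
hypothetical):

#include <linux/skbuff.h>

/*
 * Sketch only: before the driver stores its backpointer at the start
 * of the data buffer, a cloned skb must be replaced with a private
 * copy. skb_copy() returns a linear skb as a side effect, which is
 * why only the non-cloned branch in the patch needs the explicit
 * __skb_linearize() fallback.
 */
static struct sk_buff *dpa_skb_make_private(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb_cloned(skb))
		return skb;

	nskb = skb_copy(skb, GFP_ATOMIC);
	kfree_skb(skb);		/* drop our reference to the shared skb */
	return nskb;		/* NULL if the copy failed */
}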