author		Alexander Duyck <alexander.h.duyck@redhat.com>	2015-06-16 18:47:12 (GMT)
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2015-09-16 00:05:37 (GMT)
commit		aae072e363bed4e91c00d57f753c799276ddb161 (patch)
tree		0691a27f2b6b1ce7bbb9cc63183af110cac3b51f /drivers
parent		bdc7f5902d22e7297b8f0d7a8d6ed8429cdba636 (diff)
fm10k: Don't assume page fragments are page size
This change pulls out the optimization that assumed all fragments would be limited to page size. That assumption has not held for some time now, since the TCP allocator can provide page fragments of up to 32K.

Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Acked-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/intel/fm10k/fm10k_main.c | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index b5b2925..6ad03e0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1079,9 +1079,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 	struct fm10k_tx_buffer *first;
 	int tso;
 	u32 tx_flags = 0;
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
 	unsigned short f;
-#endif
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 
 	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
@@ -1089,12 +1087,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 	 *       + 2 desc gap to keep tail from touching head
 	 * otherwise try next time
 	 */
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
-	count += skb_shinfo(skb)->nr_frags;
-#endif
+
 	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
 		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;
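
For context on the arithmetic behind the change above, here is a minimal standalone C sketch (not part of the commit) contrasting the old one-descriptor-per-fragment count with the size-based count the patch switches to. The 16K per-descriptor limit, the frame layout, and the macro definitions used here are illustrative assumptions, not values copied from the driver headers.

/*
 * Standalone sketch of the Tx descriptor-count arithmetic.
 * MAX_DATA_PER_TXD and the frame layout below are assumptions
 * for illustration only; they mirror the idea that one Tx
 * descriptor can carry at most a fixed number of bytes.
 */
#include <stdio.h>

#define MAX_DATA_PER_TXD   (1u << 14)   /* assumed 16K per descriptor */
#define TXD_USE_COUNT(S)   (((S) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

int main(void)
{
	/* Hypothetical frame: small linear head plus one 32K page
	 * fragment, as the TCP allocator can now hand out.
	 */
	unsigned int headlen = 256;
	unsigned int frag_sizes[] = { 32768 };
	unsigned int nr_frags = sizeof(frag_sizes) / sizeof(frag_sizes[0]);
	unsigned int f;

	/* Old assumption: every fragment fits in one descriptor. */
	unsigned int old_count = TXD_USE_COUNT(headlen) + nr_frags;

	/* New behaviour: count descriptors based on each fragment's size. */
	unsigned int new_count = TXD_USE_COUNT(headlen);

	for (f = 0; f < nr_frags; f++)
		new_count += TXD_USE_COUNT(frag_sizes[f]);

	printf("old count (1 desc per frag): %u\n", old_count);
	printf("new count (size-based):      %u\n", new_count);
	return 0;
}

With a single 32K fragment and an assumed 16K limit, the per-fragment shortcut reports 2 descriptors while the size-based loop reports 3; that undercount is what the removed #if/#else branch could introduce whenever PAGE_SIZE did not exceed FM10K_MAX_DATA_PER_TXD but a fragment did.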