author     Eric Dumazet <edumazet@google.com>     2015-02-06 20:59:01 (GMT)
committer  David S. Miller <davem@davemloft.net>  2015-02-09 00:53:57 (GMT)
commit     567e4b79731c352a17d73c483959f795d3593e03 (patch)
tree       4af65c205a8b65cfc5fd7b42e7b8750728230616 /drivers/net
parent     096a4cfa5807aa89c78ce12309c0b1c10cf88184 (diff)
download   linux-567e4b79731c352a17d73c483959f795d3593e03.tar.xz
net: rfs: add hash collision detection
Receive Flow Steering is a nice solution but suffers from hash collisions when a mix of connected and unconnected traffic is received on the host and the flow hash table is populated.

Also, clearing the flow in inet_release() makes RFS not very good for short-lived flows, as many packets can follow close() (FIN, ACK packets, ...).

This patch extends the information stored in the global hash table to include not only the cpu number but also the upper part of the hash value. I use a 32-bit value and dynamically split it in two parts: for a host with fewer than 64 possible cpus, this gives 6 bits for the cpu number and 26 (32 - 6) bits for the upper part of the hash. Since hash bucket selection uses the low-order bits of the hash, we get a full hash match if /proc/sys/net/core/rps_sock_flow_entries is big enough.

If the hash found in the flow table does not match, we fall back to RPS (if it is enabled for the rx queue). This means that a packet for a non-connected flow can avoid the IPI through an unrelated/victim CPU. It also means we no longer have to clear the table at socket close time, which helps short-lived flow performance.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
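As an illustration of the split described above, here is a minimal userspace C sketch. It is not the kernel's actual rps_sock_flow_table code; the names flow_table, flow_record() and flow_lookup(), and the table size, are made up for the example. It stores the cpu number in the low bits of each 32-bit entry and the upper hash bits in the rest, and reports a mismatch so the caller can fall back to RPS.

/*
 * Sketch only: models the cpu/hash encoding described in the commit
 * message, not the kernel's rps_sock_flow_table implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS       64        /* assumed number of possible cpus          */
#define TABLE_ENTRIES 4096      /* stand-in for rps_sock_flow_entries, pow2 */

/* Low bits hold the cpu number, the remaining high bits hold the hash. */
static const uint32_t cpu_mask = NR_CPUS - 1;   /* 6 bits -> 0x3f */

struct flow_table {
	uint32_t mask;                  /* TABLE_ENTRIES - 1 */
	uint32_t ents[TABLE_ENTRIES];
};

/* Recording: keep the upper hash bits alongside the cpu number. */
static void flow_record(struct flow_table *t, uint32_t hash, unsigned int cpu)
{
	uint32_t index = hash & t->mask;
	uint32_t val = (hash & ~cpu_mask) | cpu;

	if (t->ents[index] != val)
		t->ents[index] = val;
}

/* Lookup: only trust the cpu if the upper hash bits also match. */
static int flow_lookup(const struct flow_table *t, uint32_t hash)
{
	uint32_t ent = t->ents[hash & t->mask];

	if ((ent ^ hash) & ~cpu_mask)
		return -1;              /* collision or empty slot: fall back to RPS */
	return (int)(ent & cpu_mask);   /* full match: steer to this cpu */
}

int main(void)
{
	struct flow_table *t = calloc(1, sizeof(*t));

	t->mask = TABLE_ENTRIES - 1;
	flow_record(t, 0xdeadbeef, 3);

	/* Same bucket, same upper bits -> cpu 3. */
	printf("match:    cpu %d\n", flow_lookup(t, 0xdeadbeef));
	/* Same bucket (low bits), different upper bits -> -1. */
	printf("mismatch: cpu %d\n", flow_lookup(t, 0x1234beef));

	free(t);
	return 0;
}

With NR_CPUS = 64 the cpu mask is 0x3f, matching the 6/26 split described in the message; any difference in the upper 26 bits is treated as a collision (or an empty slot), so steering falls back to plain RPS instead of sending an IPI to an unrelated CPU.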
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/tun.c  5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ad7d3d5..857dca4 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -256,7 +256,6 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 {
 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
 		  e->rxhash, e->queue_index);
-	sock_rps_reset_flow_hash(e->rps_rxhash);
 	hlist_del_rcu(&e->hash_link);
 	kfree_rcu(e, rcu);
 	--tun->flow_count;
@@ -373,10 +372,8 @@ unlock:
  */
 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
 {
-	if (unlikely(e->rps_rxhash != hash)) {
-		sock_rps_reset_flow_hash(e->rps_rxhash);
+	if (unlikely(e->rps_rxhash != hash))
 		e->rps_rxhash = hash;
-	}
 }

 /* We try to identify a flow through its rxhash first. The reason that