author     Florian Westphal <fw@strlen.de>          2016-05-02 16:39:55 (GMT)
committer  Pablo Neira Ayuso <pablo@netfilter.org>  2016-05-05 14:39:47 (GMT)
commit     56d52d4892d0e478a005b99ed10d0a7f488ea8c1
tree       89f2c12e4f197ac3876f5ebf01f61b7a3f49dd3e /net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
parent     1b8c8a9f648c809c01a44114d7535ac8ca4c5ba3
netfilter: conntrack: use a single hashtable for all namespaces
We already include the netns address in the hash and compare the netns
pointers during lookup, so even if namespaces have overlapping addresses,
entries will be spread across the table.

Assuming a 64k bucket size, this change saves 0.5 MByte per namespace on a
64-bit system.

The NAT bysrc and expectation hashes are still per namespace; those will be
changed too, soon. A future patch will also make the conntrack object slab
cache global again.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
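As a rough illustration of the idea, here is a minimal userspace C sketch, not
the kernel's implementation: toy_net, toy_tuple, toy_entry, hash_tuple_net and
TOY_HTABLE_SIZE are hypothetical stand-ins for struct net, the conntrack tuple,
the conntrack hash function and nf_conntrack_htable_size. The namespace pointer
is mixed into the bucket hash and compared again during lookup, so identical
tuples from different namespaces can share one table without matching each
other.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_HTABLE_SIZE 64			/* stand-in for nf_conntrack_htable_size */

struct toy_net { int id; };			/* stand-in for struct net */

struct toy_tuple {				/* stand-in for a conntrack tuple */
	uint32_t src, dst;
	uint16_t sport, dport;
};

struct toy_entry {
	struct toy_entry *next;
	const struct toy_net *net;		/* owning namespace, stored in the entry */
	struct toy_tuple tuple;
};

static struct toy_entry *table[TOY_HTABLE_SIZE];	/* one table for all namespaces */

/* Mix the namespace pointer into the hash so entries from different
 * namespaces spread across the shared table (hypothetical hash function). */
static unsigned int hash_tuple_net(const struct toy_tuple *t,
				   const struct toy_net *net)
{
	uintptr_t h = (uintptr_t)net;

	h ^= t->src * 2654435761u;
	h ^= t->dst * 2246822519u;
	h ^= ((uint32_t)t->sport << 16) | t->dport;
	return (unsigned int)(h % TOY_HTABLE_SIZE);
}

static void insert(struct toy_entry *e)
{
	unsigned int b = hash_tuple_net(&e->tuple, e->net);

	e->next = table[b];
	table[b] = e;
}

static struct toy_entry *lookup(const struct toy_tuple *t,
				const struct toy_net *net)
{
	struct toy_entry *e = table[hash_tuple_net(t, net)];

	for (; e; e = e->next) {
		/* Compare the namespace pointer as well as the tuple, so
		 * identical tuples from different namespaces never match. */
		if (e->net == net && memcmp(&e->tuple, t, sizeof(*t)) == 0)
			return e;
	}
	return NULL;
}

int main(void)
{
	struct toy_net ns1 = { 1 }, ns2 = { 2 };
	struct toy_tuple t = { 0x0a000001, 0x0a000002, 1234, 80 };
	struct toy_entry e1 = { NULL, &ns1, t };
	struct toy_entry e2 = { NULL, &ns2, t };

	insert(&e1);
	insert(&e2);

	/* Same tuple, different namespaces: each lookup finds its own entry. */
	printf("ns1 entry: %p, ns2 entry: %p\n",
	       (void *)lookup(&t, &ns1), (void *)lookup(&t, &ns2));
	return 0;
}

Run as-is, both lookups return distinct entries even though the tuples are
byte-for-byte identical; that property is what allows the per-namespace tables
to be merged.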
Diffstat (limited to 'net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c')
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 171aba1..f8fc7ab 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -31,15 +31,14 @@ struct ct_iter_state {
 
 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_iter_state *st = seq->private;
 	struct hlist_nulls_node *n;
 
 	for (st->bucket = 0;
-	     st->bucket < net->ct.htable_size;
+	     st->bucket < nf_conntrack_htable_size;
 	     st->bucket++) {
 		n = rcu_dereference(
-			hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+			hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
 		if (!is_a_nulls(n))
 			return n;
 	}
@@ -49,17 +48,16 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
 					    struct hlist_nulls_node *head)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_iter_state *st = seq->private;
 
 	head = rcu_dereference(hlist_nulls_next_rcu(head));
 	while (is_a_nulls(head)) {
 		if (likely(get_nulls_value(head) == st->bucket)) {
-			if (++st->bucket >= net->ct.htable_size)
+			if (++st->bucket >= nf_conntrack_htable_size)
 				return NULL;
 		}
 		head = rcu_dereference(
-			hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+			hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
 	}
 	return head;
 }