Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/arp.c                    55
-rw-r--r--   net/ipv4/devinet.c                 1
-rw-r--r--   net/ipv4/fib_frontend.c            4
-rw-r--r--   net/ipv4/fib_semantics.c          76
-rw-r--r--   net/ipv4/icmp.c                    2
-rw-r--r--   net/ipv4/igmp.c                   87
-rw-r--r--   net/ipv4/inet_connection_sock.c    2
-rw-r--r--   net/ipv4/ip_fragment.c            39
-rw-r--r--   net/ipv4/ip_gre.c                  4
-rw-r--r--   net/ipv4/ip_sockglue.c            14
-rw-r--r--   net/ipv4/ipcomp.c                  9
-rw-r--r--   net/ipv4/ipip.c                    7
-rw-r--r--   net/ipv4/proc.c                    4
-rw-r--r--   net/ipv4/route.c                   7
-rw-r--r--   net/ipv4/syncookies.c              3
-rw-r--r--   net/ipv4/tcp.c                    30
-rw-r--r--   net/ipv4/tcp_ipv4.c               25
-rw-r--r--   net/ipv4/tcp_output.c             22
-rw-r--r--   net/ipv4/tcp_timer.c               6
-rw-r--r--   net/ipv4/udp.c                     4
-rw-r--r--   net/ipv4/udplite.c                 4
21 files changed, 279 insertions(+), 126 deletions(-)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c95cd93..1940b4d 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -70,6 +70,7 @@
* bonding can change the skb before
* sending (e.g. insert 8021q tag).
* Harald Welte : convert to make use of jenkins hash
+ * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
*/
#include <linux/module.h>
@@ -524,12 +525,15 @@ int arp_bind_neighbour(struct dst_entry *dst)
/*
* Check if we can use proxy ARP for this path
*/
-
-static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
+static inline int arp_fwd_proxy(struct in_device *in_dev,
+ struct net_device *dev, struct rtable *rt)
{
struct in_device *out_dev;
int imi, omi = -1;
+ if (rt->u.dst.dev == dev)
+ return 0;
+
if (!IN_DEV_PROXY_ARP(in_dev))
return 0;
@@ -548,6 +552,43 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
}
/*
+ * Check for RFC 3069 proxy ARP private VLAN (allow replies back to the same dev)
+ *
+ * RFC 3069 supports proxy ARP replies back to the same interface. This
+ * is done to support (Ethernet) switch features where the individual
+ * ports are not allowed to communicate with each other, but they are
+ * allowed to talk to the upstream router. As described in RFC 3069,
+ * it is possible to allow these hosts to communicate through the
+ * upstream router by proxy_arp'ing.
+ *
+ * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
+ *
+ * This technology is known by different names:
+ * In RFC 3069 it is called VLAN Aggregation.
+ * Cisco and Allied Telesyn call it Private VLAN.
+ * Hewlett-Packard calls it Source-Port filtering or port-isolation.
+ * Ericsson calls it MAC-Forced Forwarding (RFC draft).
+ */
+static inline int arp_fwd_pvlan(struct in_device *in_dev,
+ struct net_device *dev, struct rtable *rt,
+ __be32 sip, __be32 tip)
+{
+ /* Private VLAN is only concerned about the same ethernet segment */
+ if (rt->u.dst.dev != dev)
+ return 0;
+
+ /* Don't reply to self probes (often sent by Windows boxes) */
+ if (sip == tip)
+ return 0;
+
+ if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
+ return 1;
+ else
+ return 0;
+}
+
+/*
* Interface to link layer: send routine and receive handler.
*/
@@ -833,8 +874,11 @@ static int arp_process(struct sk_buff *skb)
}
goto out;
} else if (IN_DEV_FORWARD(in_dev)) {
- if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
- (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+ if (addr_type == RTN_UNICAST &&
+ (arp_fwd_proxy(in_dev, dev, rt) ||
+ arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
+ pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
+ {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n)
neigh_release(n);
@@ -863,7 +907,8 @@ static int arp_process(struct sk_buff *skb)
devices (strip is candidate)
*/
if (n == NULL &&
- arp->ar_op == htons(ARPOP_REPLY) &&
+ (arp->ar_op == htons(ARPOP_REPLY) ||
+ (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
inet_addr_type(net, sip) == RTN_UNICAST)
n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
}
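
The two helpers above partition the proxy-ARP cases by whether the route would hairpin back out the ingress port: arp_fwd_proxy() now refuses the same-device case, which only arp_fwd_pvlan() may allow. A minimal userspace sketch of that split, using made-up stand-in types (fake_cfg, plain ifindex ints) rather than the kernel's in_device/rtable:

/* Sketch (hypothetical types, not kernel code): classic proxy ARP only
 * fires when the route leaves via a *different* device, while the
 * RFC 3069 PVLAN variant only fires for the hairpin case, and never
 * for self-probes (sip == tip).
 */
#include <stdint.h>
#include <stdio.h>

struct fake_cfg {
	int proxy_arp;       /* conf/<dev>/proxy_arp       */
	int proxy_arp_pvlan; /* conf/<dev>/proxy_arp_pvlan */
};

static int fwd_proxy(const struct fake_cfg *cfg, int in_if, int out_if)
{
	if (out_if == in_if)	/* would hairpin: not classic proxy ARP */
		return 0;
	return cfg->proxy_arp;
}

static int fwd_pvlan(const struct fake_cfg *cfg, int in_if, int out_if,
		     uint32_t sip, uint32_t tip)
{
	if (out_if != in_if)	/* PVLAN only covers the hairpin case */
		return 0;
	if (sip == tip)		/* ignore self probes */
		return 0;
	return cfg->proxy_arp_pvlan;
}

int main(void)
{
	struct fake_cfg cfg = { .proxy_arp = 1, .proxy_arp_pvlan = 1 };

	/* Hairpin request from 10.0.0.2 for 10.0.0.3 on the same port: */
	printf("proxy=%d pvlan=%d\n",
	       fwd_proxy(&cfg, 1, 1),
	       fwd_pvlan(&cfg, 1, 1, 0x0a000002, 0x0a000003));
	return 0;
}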
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 040c4f0..cd71a39 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1408,6 +1408,7 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
+ DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
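
The new DEVINET entry surfaces the knob per device. A sketch of flipping it from userspace, assuming the usual /proc/sys/net/ipv4/conf/<dev>/ layout and using "eth0" purely as an example device:

/* Sketch: enable the new per-device option by writing the procfs file
 * that the DEVINET_SYSCTL_RW_ENTRY above registers.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/conf/eth0/proxy_arp_pvlan", "w");

	if (!f) {
		perror("proxy_arp_pvlan");
		return 1;
	}
	fputs("1\n", f);	/* 1 = allow hairpin proxy-ARP replies */
	fclose(f);
	return 0;
}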
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 82dbf71..9b3e28e 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -883,7 +883,7 @@ static void nl_fib_input(struct sk_buff *skb)
netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
}
-static int nl_fib_lookup_init(struct net *net)
+static int __net_init nl_fib_lookup_init(struct net *net)
{
struct sock *sk;
sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
@@ -1004,7 +1004,7 @@ fail:
return err;
}
-static void __net_exit ip_fib_net_exit(struct net *net)
+static void ip_fib_net_exit(struct net *net)
{
unsigned int i;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ed19aa6..96b2101 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -62,8 +62,8 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
-#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \
-for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)
+#define change_nexthops(fi) { int nhsel; struct fib_nh *nexthop_nh; \
+for (nhsel=0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)
#else /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -72,7 +72,7 @@ for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++,
#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
for (nhsel=0; nhsel < 1; nhsel++)
-#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \
+#define change_nexthops(fi) { int nhsel = 0; struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
for (nhsel=0; nhsel < 1; nhsel++)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -145,9 +145,9 @@ void free_fib_info(struct fib_info *fi)
return;
}
change_nexthops(fi) {
- if (nh->nh_dev)
- dev_put(nh->nh_dev);
- nh->nh_dev = NULL;
+ if (nexthop_nh->nh_dev)
+ dev_put(nexthop_nh->nh_dev);
+ nexthop_nh->nh_dev = NULL;
} endfor_nexthops(fi);
fib_info_cnt--;
release_net(fi->fib_net);
@@ -162,9 +162,9 @@ void fib_release_info(struct fib_info *fi)
if (fi->fib_prefsrc)
hlist_del(&fi->fib_lhash);
change_nexthops(fi) {
- if (!nh->nh_dev)
+ if (!nexthop_nh->nh_dev)
continue;
- hlist_del(&nh->nh_hash);
+ hlist_del(&nexthop_nh->nh_hash);
} endfor_nexthops(fi)
fi->fib_dead = 1;
fib_info_put(fi);
@@ -395,19 +395,20 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;
- nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
- nh->nh_oif = rtnh->rtnh_ifindex;
- nh->nh_weight = rtnh->rtnh_hops + 1;
+ nexthop_nh->nh_flags =
+ (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
+ nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
+ nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
- nh->nh_gw = nla ? nla_get_be32(nla) : 0;
+ nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_NET_CLS_ROUTE
nla = nla_find(attrs, attrlen, RTA_FLOW);
- nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
+ nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
}
@@ -738,7 +739,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi->fib_nhs = nhs;
change_nexthops(fi) {
- nh->nh_parent = fi;
+ nexthop_nh->nh_parent = fi;
} endfor_nexthops(fi)
if (cfg->fc_mx) {
@@ -808,7 +809,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto failure;
} else {
change_nexthops(fi) {
- if ((err = fib_check_nh(cfg, fi, nh)) != 0)
+ if ((err = fib_check_nh(cfg, fi, nexthop_nh)) != 0)
goto failure;
} endfor_nexthops(fi)
}
@@ -843,11 +844,11 @@ link_it:
struct hlist_head *head;
unsigned int hash;
- if (!nh->nh_dev)
+ if (!nexthop_nh->nh_dev)
continue;
- hash = fib_devindex_hashfn(nh->nh_dev->ifindex);
+ hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
head = &fib_info_devhash[hash];
- hlist_add_head(&nh->nh_hash, head);
+ hlist_add_head(&nexthop_nh->nh_hash, head);
} endfor_nexthops(fi)
spin_unlock_bh(&fib_info_lock);
return fi;
@@ -1080,21 +1081,21 @@ int fib_sync_down_dev(struct net_device *dev, int force)
prev_fi = fi;
dead = 0;
change_nexthops(fi) {
- if (nh->nh_flags&RTNH_F_DEAD)
+ if (nexthop_nh->nh_flags&RTNH_F_DEAD)
dead++;
- else if (nh->nh_dev == dev &&
- nh->nh_scope != scope) {
- nh->nh_flags |= RTNH_F_DEAD;
+ else if (nexthop_nh->nh_dev == dev &&
+ nexthop_nh->nh_scope != scope) {
+ nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
spin_lock_bh(&fib_multipath_lock);
- fi->fib_power -= nh->nh_power;
- nh->nh_power = 0;
+ fi->fib_power -= nexthop_nh->nh_power;
+ nexthop_nh->nh_power = 0;
spin_unlock_bh(&fib_multipath_lock);
#endif
dead++;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (force > 1 && nh->nh_dev == dev) {
+ if (force > 1 && nexthop_nh->nh_dev == dev) {
dead = fi->fib_nhs;
break;
}
@@ -1144,18 +1145,20 @@ int fib_sync_up(struct net_device *dev)
prev_fi = fi;
alive = 0;
change_nexthops(fi) {
- if (!(nh->nh_flags&RTNH_F_DEAD)) {
+ if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
alive++;
continue;
}
- if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
+ if (nexthop_nh->nh_dev == NULL ||
+ !(nexthop_nh->nh_dev->flags&IFF_UP))
continue;
- if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev))
+ if (nexthop_nh->nh_dev != dev ||
+ !__in_dev_get_rtnl(dev))
continue;
alive++;
spin_lock_bh(&fib_multipath_lock);
- nh->nh_power = 0;
- nh->nh_flags &= ~RTNH_F_DEAD;
+ nexthop_nh->nh_power = 0;
+ nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
spin_unlock_bh(&fib_multipath_lock);
} endfor_nexthops(fi)
@@ -1182,9 +1185,9 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
if (fi->fib_power <= 0) {
int power = 0;
change_nexthops(fi) {
- if (!(nh->nh_flags&RTNH_F_DEAD)) {
- power += nh->nh_weight;
- nh->nh_power = nh->nh_weight;
+ if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
+ power += nexthop_nh->nh_weight;
+ nexthop_nh->nh_power = nexthop_nh->nh_weight;
}
} endfor_nexthops(fi);
fi->fib_power = power;
@@ -1204,9 +1207,10 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
w = jiffies % fi->fib_power;
change_nexthops(fi) {
- if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) {
- if ((w -= nh->nh_power) <= 0) {
- nh->nh_power--;
+ if (!(nexthop_nh->nh_flags&RTNH_F_DEAD) &&
+ nexthop_nh->nh_power) {
+ if ((w -= nexthop_nh->nh_power) <= 0) {
+ nexthop_nh->nh_power--;
fi->fib_power--;
res->nh_sel = nhsel;
spin_unlock_bh(&fib_multipath_lock);
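
All of the churn above is a mechanical rename inside the change_nexthops() iterator macro: the loop variable becomes nexthop_nh so it can no longer shadow, or collide with, an nh variable in the surrounding function. A self-contained sketch of the macro pattern with simplified stand-in structs:

/* Sketch of the iterator-macro pattern (simplified types, not the
 * kernel structures): change_nexthops() opens a scope carrying its own
 * loop variable, and endfor_nexthops() closes it.
 */
#include <stdio.h>

struct fake_nh { int ifindex; int dead; };
struct fake_fi { int fib_nhs; struct fake_nh fib_nh[4]; };

#define change_nexthops(fi) { int nhsel; struct fake_nh *nexthop_nh; \
	for (nhsel = 0, nexthop_nh = (fi)->fib_nh; \
	     nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)
#define endfor_nexthops(fi) }

int main(void)
{
	struct fake_fi fi = { .fib_nhs = 2, .fib_nh = { {2, 0}, {3, 1} } };
	struct fake_nh *nh = NULL;	/* outer "nh": no longer shadowed */

	change_nexthops(&fi) {
		printf("nh %d: ifindex=%d dead=%d\n",
		       nhsel, nexthop_nh->ifindex, nexthop_nh->dead);
	} endfor_nexthops(&fi)
	(void)nh;
	return 0;
}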
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fe11f60..4b4c2bc 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -114,7 +114,7 @@ struct icmp_bxm {
/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
-struct icmp_err icmp_err_convert[] = {
+const struct icmp_err icmp_err_convert[] = {
{
.errno = ENETUNREACH, /* ICMP_NET_UNREACH */
.fatal = 0,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c0840..d2836399 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1799,7 +1799,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
iml->next = inet->mc_list;
iml->sflist = NULL;
iml->sfmode = MCAST_EXCLUDE;
- inet->mc_list = iml;
+ rcu_assign_pointer(inet->mc_list, iml);
ip_mc_inc_group(in_dev, addr);
err = 0;
done:
@@ -1807,24 +1807,46 @@ done:
return err;
}
+static void ip_sf_socklist_reclaim(struct rcu_head *rp)
+{
+ struct ip_sf_socklist *psf;
+
+ psf = container_of(rp, struct ip_sf_socklist, rcu);
+ /* sk_omem_alloc should have been decreased by the caller */
+ kfree(psf);
+}
+
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
struct in_device *in_dev)
{
+ struct ip_sf_socklist *psf = iml->sflist;
int err;
- if (iml->sflist == NULL) {
+ if (psf == NULL) {
/* any-source empty exclude case */
return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
iml->sfmode, 0, NULL, 0);
}
err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
- iml->sfmode, iml->sflist->sl_count,
- iml->sflist->sl_addr, 0);
- sock_kfree_s(sk, iml->sflist, IP_SFLSIZE(iml->sflist->sl_max));
- iml->sflist = NULL;
+ iml->sfmode, psf->sl_count, psf->sl_addr, 0);
+ rcu_assign_pointer(iml->sflist, NULL);
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
+ call_rcu(&psf->rcu, ip_sf_socklist_reclaim);
return err;
}
+
+static void ip_mc_socklist_reclaim(struct rcu_head *rp)
+{
+ struct ip_mc_socklist *iml;
+
+ iml = container_of(rp, struct ip_mc_socklist, rcu);
+ /* sk_omem_alloc should have been decreased by the caller */
+ kfree(iml);
+}
+
/*
* Ask a socket to leave a group.
*/
@@ -1854,12 +1876,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
(void) ip_mc_leave_src(sk, iml, in_dev);
- *imlp = iml->next;
+ rcu_assign_pointer(*imlp, iml->next);
if (in_dev)
ip_mc_dec_group(in_dev, group);
rtnl_unlock();
- sock_kfree_s(sk, iml, sizeof(*iml));
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+ call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
return 0;
}
if (!in_dev)
@@ -1974,9 +1998,12 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
if (psl) {
for (i=0; i<psl->sl_count; i++)
newpsl->sl_addr[i] = psl->sl_addr[i];
- sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max));
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
+ call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
}
- pmc->sflist = psl = newpsl;
+ rcu_assign_pointer(pmc->sflist, newpsl);
+ psl = newpsl;
}
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
for (i=0; i<psl->sl_count; i++) {
@@ -2072,11 +2099,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
if (psl) {
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
psl->sl_count, psl->sl_addr, 0);
- sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max));
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
+ call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
} else
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
0, NULL, 0);
- pmc->sflist = newpsl;
+ rcu_assign_pointer(pmc->sflist, newpsl);
pmc->sfmode = msf->imsf_fmode;
err = 0;
done:
@@ -2209,30 +2238,40 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
struct ip_mc_socklist *pmc;
struct ip_sf_socklist *psl;
int i;
+ int ret;
+ ret = 1;
if (!ipv4_is_multicast(loc_addr))
- return 1;
+ goto out;
- for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+ rcu_read_lock();
+ for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
pmc->multi.imr_ifindex == dif)
break;
}
+ ret = inet->mc_all;
if (!pmc)
- return inet->mc_all;
+ goto unlock;
psl = pmc->sflist;
+ ret = (pmc->sfmode == MCAST_EXCLUDE);
if (!psl)
- return pmc->sfmode == MCAST_EXCLUDE;
+ goto unlock;
for (i=0; i<psl->sl_count; i++) {
if (psl->sl_addr[i] == rmt_addr)
break;
}
+ ret = 0;
if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
- return 0;
+ goto unlock;
if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
- return 0;
- return 1;
+ goto unlock;
+ ret = 1;
+unlock:
+ rcu_read_unlock();
+out:
+ return ret;
}
/*
@@ -2251,7 +2290,7 @@ void ip_mc_drop_socket(struct sock *sk)
rtnl_lock();
while ((iml = inet->mc_list) != NULL) {
struct in_device *in_dev;
- inet->mc_list = iml->next;
+ rcu_assign_pointer(inet->mc_list, iml->next);
in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
@@ -2259,7 +2298,9 @@ void ip_mc_drop_socket(struct sock *sk)
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
in_dev_put(in_dev);
}
- sock_kfree_s(sk, iml, sizeof(*iml));
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+ call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
}
rtnl_unlock();
}
@@ -2603,7 +2644,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
.release = seq_release_net,
};
-static int igmp_net_init(struct net *net)
+static int __net_init igmp_net_init(struct net *net)
{
struct proc_dir_entry *pde;
@@ -2621,7 +2662,7 @@ out_igmp:
return -ENOMEM;
}
-static void igmp_net_exit(struct net *net)
+static void __net_exit igmp_net_exit(struct net *net)
{
proc_net_remove(net, "mcfilter");
proc_net_remove(net, "igmp");
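
The igmp.c changes convert mc_list and sflist teardown to the RCU publish/reclaim idiom: pointers are updated with rcu_assign_pointer(), readers walk under rcu_read_lock()/rcu_dereference(), and frees are deferred via an embedded rcu_head and call_rcu() (sk_omem_alloc is decremented eagerly because sock_kfree_s() can no longer run synchronously). A userspace analogue, assuming liburcu's legacy urcu.h API, which mirrors these kernel primitives:

/* Userspace RCU sketch (liburcu, build with: cc demo.c -lurcu):
 * writers unlink with rcu_assign_pointer() and defer the free with
 * call_rcu(); lockless readers dereference under rcu_read_lock().
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct item {
	int value;
	struct rcu_head rcu;	/* embedded, like ip_mc_socklist.rcu */
};

static struct item *head;

static void item_reclaim(struct rcu_head *rp)
{
	free(caa_container_of(rp, struct item, rcu));
}

int main(void)
{
	struct item *it = calloc(1, sizeof(*it));

	rcu_register_thread();

	it->value = 42;
	rcu_assign_pointer(head, it);	/* publish */

	rcu_read_lock();		/* reader side */
	struct item *p = rcu_dereference(head);
	if (p)
		printf("value=%d\n", p->value);
	rcu_read_unlock();

	rcu_assign_pointer(head, NULL);	  /* unpublish ... */
	call_rcu(&it->rcu, item_reclaim); /* ... and defer the free */

	rcu_barrier();			/* flush pending callbacks */
	rcu_unregister_thread();
	return 0;
}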
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ee16475..8da6429 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -529,6 +529,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
syn_ack_recalc(req, thresh, max_retries,
queue->rskq_defer_accept,
&expire, &resend);
+ if (req->rsk_ops->syn_ack_timeout)
+ req->rsk_ops->syn_ack_timeout(parent, req);
if (!expire &&
(!resend ||
!req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
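
syn_ack_timeout is an optional slot in request_sock_ops, so the prune path NULL-checks it before calling. A generic sketch of that optional-hook dispatch with stand-in types (struct req and struct req_ops here are illustrative, not the kernel definitions):

/* Optional-hook pattern: a slot in an ops table may be NULL, so callers
 * test it before invoking; only interested protocols fill it in.
 */
#include <stdio.h>

struct req;

struct req_ops {
	int  (*rtx_syn_ack)(struct req *req);		/* mandatory */
	void (*syn_ack_timeout)(struct req *req);	/* optional  */
};

struct req { const struct req_ops *ops; };

static int dummy_rtx(struct req *req) { (void)req; return 0; }

static void count_timeout(struct req *req)
{
	(void)req;
	puts("TCPTimeouts++");	/* stands in for NET_INC_STATS_BH() */
}

static void prune(struct req *req)
{
	if (req->ops->syn_ack_timeout)	/* NULL-safe dispatch */
		req->ops->syn_ack_timeout(req);
	req->ops->rtx_syn_ack(req);
}

int main(void)
{
	static const struct req_ops tcp_ops = {
		.rtx_syn_ack     = dummy_rtx,
		.syn_ack_timeout = count_timeout,
	};
	struct req r = { .ops = &tcp_ops };

	prune(&r);
	return 0;
}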
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 86964b3..b59430b 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -32,6 +32,8 @@
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
+#include <net/route.h>
+#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -205,11 +207,34 @@ static void ip_expire(unsigned long arg)
if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
struct sk_buff *head = qp->q.fragments;
- /* Send an ICMP "Fragment Reassembly Timeout" message. */
rcu_read_lock();
head->dev = dev_get_by_index_rcu(net, qp->iif);
- if (head->dev)
- icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+ if (!head->dev)
+ goto out_rcu_unlock;
+
+ /*
+ * Only search the routing table for the head fragment,
+ * when the defrag timeout fires at the PRE_ROUTING hook.
+ */
+ if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
+ const struct iphdr *iph = ip_hdr(head);
+ int err = ip_route_input(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+ if (unlikely(err))
+ goto out_rcu_unlock;
+
+ /*
+ * Only an end host needs to send an ICMP
+ * "Fragment Reassembly Timeout" message, per RFC 792.
+ */
+ if (skb_rtable(head)->rt_type != RTN_LOCAL)
+ goto out_rcu_unlock;
+
+ }
+
+ /* Send an ICMP "Fragment Reassembly Timeout" message. */
+ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+out_rcu_unlock:
rcu_read_unlock();
}
out:
@@ -646,7 +671,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
{ }
};
-static int ip4_frags_ns_ctl_register(struct net *net)
+static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
@@ -676,7 +701,7 @@ err_alloc:
return -ENOMEM;
}
-static void ip4_frags_ns_ctl_unregister(struct net *net)
+static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
struct ctl_table *table;
@@ -704,7 +729,7 @@ static inline void ip4_frags_ctl_register(void)
}
#endif
-static int ipv4_frags_init_net(struct net *net)
+static int __net_init ipv4_frags_init_net(struct net *net)
{
/*
* Fragment cache limits. We will commit 256K at one time. Should we
@@ -726,7 +751,7 @@ static int ipv4_frags_init_net(struct net *net)
return ip4_frags_ns_ctl_register(net);
}
-static void ipv4_frags_exit_net(struct net *net)
+static void __net_exit ipv4_frags_exit_net(struct net *net)
{
ip4_frags_ns_ctl_unregister(net);
inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
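
Condensed as a decision table, the new guard only emits the RFC 792 reassembly-timeout ICMP when this host is the destination; for conntrack defrag at PRE_ROUTING the head fragment carries no dst yet, so a route lookup supplies the answer. A sketch with simplified stand-in types:

/* Decision-table sketch of the guard above (stand-in types/values). */
#include <stdio.h>

enum rt_type { RTN_UNICAST, RTN_LOCAL };	/* simplified */

static int should_send_frag_timeout(int conntrack_in, int have_dst,
				    enum rt_type looked_up_type)
{
	if (conntrack_in && !have_dst)		/* PRE_ROUTING defrag */
		return looked_up_type == RTN_LOCAL; /* end host only */
	return 1;	/* other defrag users: dst already validated */
}

int main(void)
{
	printf("forwarded frag at PRE_ROUTING: %d\n",
	       should_send_frag_timeout(1, 0, RTN_UNICAST));	/* 0 */
	printf("local frag at PRE_ROUTING:     %d\n",
	       should_send_frag_timeout(1, 0, RTN_LOCAL));	/* 1 */
	return 0;
}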
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f36ce15..7631b20 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1307,7 +1307,7 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
}
}
-static int ipgre_init_net(struct net *net)
+static int __net_init ipgre_init_net(struct net *net)
{
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
int err;
@@ -1334,7 +1334,7 @@ err_alloc_dev:
return err;
}
-static void ipgre_exit_net(struct net *net)
+static void __net_exit ipgre_exit_net(struct net *net)
{
struct ipgre_net *ign;
LIST_HEAD(list);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cafad9b..644dc43 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -451,7 +451,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
(1<<IP_TTL) | (1<<IP_HDRINCL) |
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
- (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
+ (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
+ (1<<IP_MINTTL))) ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
@@ -936,6 +937,14 @@ mc_msf_out:
inet->transparent = !!val;
break;
+ case IP_MINTTL:
+ if (optlen < 1)
+ goto e_inval;
+ if (val < 0 || val > 255)
+ goto e_inval;
+ inet->min_ttl = val;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -1198,6 +1207,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_TRANSPARENT:
val = inet->transparent;
break;
+ case IP_MINTTL:
+ val = inet->min_ttl;
+ break;
default:
release_sock(sk);
return -ENOPROTOOPT;
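
A usage sketch for the new socket option: a GTSM-style guard (RFC 5082) that makes the stack drop segments arriving with TTL below 255, so only directly connected peers get through. The fallback define covers userspace headers that predate IP_MINTTL (value 21 on Linux):

/* Minimal IP_MINTTL usage sketch. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_MINTTL
#define IP_MINTTL 21
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int minttl = 255;	/* peer must be directly connected */

	if (fd < 0 || setsockopt(fd, IPPROTO_IP, IP_MINTTL,
				 &minttl, sizeof(minttl)) < 0) {
		perror("IP_MINTTL");
		return 1;
	}
	puts("segments with ttl < 255 are dropped before processing");
	return 0;
}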
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38fbf04..b55a0c3 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -25,6 +25,7 @@
static void ipcomp4_err(struct sk_buff *skb, u32 info)
{
+ struct net *net = dev_net(skb->dev);
__be32 spi;
struct iphdr *iph = (struct iphdr *)skb->data;
struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
@@ -35,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
return;
spi = htonl(ntohs(ipch->cpi));
- x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr,
+ x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr,
spi, IPPROTO_COMP, AF_INET);
if (!x)
return;
@@ -47,9 +48,10 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
/* We always hold one tunnel user reference to indicate a tunnel */
static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
{
+ struct net *net = xs_net(x);
struct xfrm_state *t;
- t = xfrm_state_alloc(&init_net);
+ t = xfrm_state_alloc(net);
if (t == NULL)
goto out;
@@ -82,10 +84,11 @@ error:
*/
static int ipcomp_tunnel_attach(struct xfrm_state *x)
{
+ struct net *net = xs_net(x);
int err = 0;
struct xfrm_state *t;
- t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4,
+ t = xfrm_state_lookup(net, (xfrm_address_t *)&x->id.daddr.a4,
x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
if (!t) {
t = ipcomp_tunnel_create(x);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index eda04fe..95db732 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -130,7 +130,6 @@ struct ipip_net {
struct net_device *fb_tunnel_dev;
};
-static void ipip_fb_tunnel_init(struct net_device *dev);
static void ipip_tunnel_init(struct net_device *dev);
static void ipip_tunnel_setup(struct net_device *dev);
@@ -730,7 +729,7 @@ static void ipip_tunnel_init(struct net_device *dev)
ipip_tunnel_bind_dev(dev);
}
-static void ipip_fb_tunnel_init(struct net_device *dev)
+static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -773,7 +772,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
}
}
-static int ipip_init_net(struct net *net)
+static int __net_init ipip_init_net(struct net *net)
{
struct ipip_net *ipn = net_generic(net, ipip_net_id);
int err;
@@ -806,7 +805,7 @@ err_alloc_dev:
return err;
}
-static void ipip_exit_net(struct net *net)
+static void __net_exit ipip_exit_net(struct net *net)
{
struct ipip_net *ipn = net_generic(net, ipip_net_id);
LIST_HEAD(list);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f25542c..1b09a6d 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -127,8 +127,8 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_SENTINEL
};
-static struct {
- char *name;
+static const struct {
+ const char *name;
int index;
} icmpmibmap[] = {
{ "DestUnreachs", ICMP_DEST_UNREACH },
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d62b05d..b16dfad 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1990,8 +1990,13 @@ static int __mkroute_input(struct sk_buff *skb,
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
* invalid for proxy arp. DNAT routes are always valid.
+ *
+ * The proxy ARP feature has been extended to allow ARP
+ * replies back to the same interface, to support
+ * Private VLAN switch technologies. See arp.c.
*/
- if (out_dev == in_dev) {
+ if (out_dev == in_dev &&
+ IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
err = -EINVAL;
goto cleanup;
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 66fd80e..5c24db4 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -358,7 +358,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
- ireq->wscale_ok, &rcv_wscale);
+ ireq->wscale_ok, &rcv_wscale,
+ dst_metric(&rt->u.dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0a26bb..d5d69ea 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -536,8 +536,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
-static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
- struct sk_buff *skb)
+static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
if (flags & MSG_OOB)
tp->snd_up = tp->write_seq;
@@ -546,13 +545,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
static inline void tcp_push(struct sock *sk, int flags, int mss_now,
int nonagle)
{
- struct tcp_sock *tp = tcp_sk(sk);
-
if (tcp_send_head(sk)) {
- struct sk_buff *skb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
if (!(flags & MSG_MORE) || forced_push(tp))
- tcp_mark_push(tp, skb);
- tcp_mark_urg(tp, flags, skb);
+ tcp_mark_push(tp, tcp_write_queue_tail(sk));
+
+ tcp_mark_urg(tp, flags);
__tcp_push_pending_frames(sk, mss_now,
(flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
}
@@ -877,12 +876,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
#define TCP_OFF(sk) (sk->sk_sndmsg_off)
-static inline int select_size(struct sock *sk)
+static inline int select_size(struct sock *sk, int sg)
{
struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
- if (sk->sk_route_caps & NETIF_F_SG) {
+ if (sg) {
if (sk_can_gso(sk))
tmp = 0;
else {
@@ -906,7 +905,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
int iovlen, flags;
int mss_now, size_goal;
- int err, copied;
+ int sg, err, copied;
long timeo;
lock_sock(sk);
@@ -934,6 +933,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
+ sg = sk->sk_route_caps & NETIF_F_SG;
+
while (--iovlen >= 0) {
int seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
@@ -959,8 +960,9 @@ new_segment:
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
- skb = sk_stream_alloc_skb(sk, select_size(sk),
- sk->sk_allocation);
+ skb = sk_stream_alloc_skb(sk,
+ select_size(sk, sg),
+ sk->sk_allocation);
if (!skb)
goto wait_for_memory;
@@ -997,9 +999,7 @@ new_segment:
/* We can extend the last page
* fragment. */
merge = 1;
- } else if (i == MAX_SKB_FRAGS ||
- (!i &&
- !(sk->sk_route_caps & NETIF_F_SG))) {
+ } else if (i == MAX_SKB_FRAGS || !sg) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
* or because all the page slots are
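
The tcp.c changes hoist the scatter-gather capability test out of the per-segment loop: sk->sk_route_caps & NETIF_F_SG is invariant for one sendmsg() call, so it is sampled once into sg and passed down to select_size(). A sketch of the same hoisting with stand-in types and a simplified sizing rule:

/* Sketch (stand-in types): sample a loop-invariant capability bit once
 * before the loop instead of re-testing it per iteration.
 */
#include <stdio.h>

#define NETIF_F_SG 0x1	/* stand-in for the kernel capability bit */

struct fake_sock { unsigned int sk_route_caps; };

static int select_size(const struct fake_sock *sk, int sg)
{
	(void)sk;
	/* With SG the linear head can stay small; otherwise it must hold
	 * a full segment (simplified stand-in for the real sizing). */
	return sg ? 0 : 2048;
}

int main(void)
{
	struct fake_sock sk = { .sk_route_caps = NETIF_F_SG };
	int sg = sk.sk_route_caps & NETIF_F_SG; /* sampled once, pre-loop */

	for (int seg = 0; seg < 3; seg++)	/* per-iovec loop */
		printf("seg %d: head=%d\n", seg, select_size(&sk, sg));
	return 0;
}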
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65b8ebf..c3588b4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -742,9 +742,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
* This still operates on a request_sock only, not on a big
* socket.
*/
-static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
- struct request_sock *req,
- struct request_values *rvp)
+static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+ struct request_sock *req,
+ struct request_values *rvp)
{
const struct inet_request_sock *ireq = inet_rsk(req);
int err = -1;
@@ -775,10 +775,11 @@ static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
return err;
}
-static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
+static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
struct request_values *rvp)
{
- return __tcp_v4_send_synack(sk, NULL, req, rvp);
+ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+ return tcp_v4_send_synack(sk, NULL, req, rvp);
}
/*
@@ -1192,10 +1193,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct tcp_request_sock),
- .rtx_syn_ack = tcp_v4_send_synack,
+ .rtx_syn_ack = tcp_v4_rtx_synack,
.send_ack = tcp_v4_reqsk_send_ack,
.destructor = tcp_v4_reqsk_destructor,
.send_reset = tcp_v4_send_reset,
+ .syn_ack_timeout = tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
@@ -1373,8 +1375,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
}
tcp_rsk(req)->snt_isn = isn;
- if (__tcp_v4_send_synack(sk, dst, req,
- (struct request_values *)&tmp_ext) ||
+ if (tcp_v4_send_synack(sk, dst, req,
+ (struct request_values *)&tmp_ext) ||
want_cookie)
goto drop_and_free;
@@ -1649,6 +1651,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (!sk)
goto no_tcp_socket;
+ if (iph->ttl < inet_sk(sk)->min_ttl)
+ goto discard_and_relse;
+
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
@@ -2425,12 +2430,12 @@ static struct tcp_seq_afinfo tcp4_seq_afinfo = {
},
};
-static int tcp4_proc_init_net(struct net *net)
+static int __net_init tcp4_proc_init_net(struct net *net)
{
return tcp_proc_register(net, &tcp4_seq_afinfo);
}
-static void tcp4_proc_exit_net(struct net *net)
+static void __net_exit tcp4_proc_exit_net(struct net *net)
{
tcp_proc_unregister(net, &tcp4_seq_afinfo);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 383ce23..4a1605d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -183,7 +183,8 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
*/
void tcp_select_initial_window(int __space, __u32 mss,
__u32 *rcv_wnd, __u32 *window_clamp,
- int wscale_ok, __u8 *rcv_wscale)
+ int wscale_ok, __u8 *rcv_wscale,
+ __u32 init_rcv_wnd)
{
unsigned int space = (__space < 0 ? 0 : __space);
@@ -232,7 +233,13 @@ void tcp_select_initial_window(int __space, __u32 mss,
init_cwnd = 2;
else if (mss > 1460)
init_cwnd = 3;
- if (*rcv_wnd > init_cwnd * mss)
+ /* When initializing, use the value from init_rcv_wnd
+ * rather than the default from above.
+ */
+ if (init_rcv_wnd &&
+ (*rcv_wnd > init_rcv_wnd * mss))
+ *rcv_wnd = init_rcv_wnd * mss;
+ else if (*rcv_wnd > init_cwnd * mss)
*rcv_wnd = init_cwnd * mss;
}
@@ -1794,11 +1801,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle)
{
- struct sk_buff *skb = tcp_send_head(sk);
-
- if (!skb)
- return;
-
/* If we are closed, the bytes will have to remain here.
* In time closedown will finish, we empty the write queue and
* all will be happy.
@@ -2422,7 +2424,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
&req->rcv_wnd,
&req->window_clamp,
ireq->wscale_ok,
- &rcv_wscale);
+ &rcv_wscale,
+ dst_metric(dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
}
@@ -2549,7 +2552,8 @@ static void tcp_connect_init(struct sock *sk)
&tp->rcv_wnd,
&tp->window_clamp,
sysctl_tcp_window_scaling,
- &rcv_wscale);
+ &rcv_wscale,
+ dst_metric(dst, RTAX_INITRWND));
tp->rx_opt.rcv_wscale = rcv_wscale;
tp->rcv_ssthresh = tp->rcv_wnd;
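
tcp_select_initial_window() now takes a per-route override: a non-zero RTAX_INITRWND metric (exposed by iproute2 as the initrwnd route attribute) clamps the initial advertised window in MSS units and takes precedence over the old init_cwnd heuristic. A simplified sketch of the selection order, with illustrative values:

/* Simplified sketch of the clamp order added above. */
#include <stdio.h>

static unsigned int initial_rcv_wnd(unsigned int rcv_wnd, unsigned int mss,
				    unsigned int init_rcv_wnd)
{
	unsigned int init_cwnd = 4;

	if (mss > 1460 * 3)
		init_cwnd = 2;
	else if (mss > 1460)
		init_cwnd = 3;

	if (init_rcv_wnd && rcv_wnd > init_rcv_wnd * mss)
		rcv_wnd = init_rcv_wnd * mss;	/* route metric wins */
	else if (rcv_wnd > init_cwnd * mss)
		rcv_wnd = init_cwnd * mss;	/* legacy default */
	return rcv_wnd;
}

int main(void)
{
	/* 64 KiB of space, MSS 1460, route says initrwnd 10 */
	printf("%u\n", initial_rcv_wnd(65535, 1460, 10)); /* 14600 */
	return 0;
}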
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8816a20..de7d1bf 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -474,6 +474,12 @@ static void tcp_synack_timer(struct sock *sk)
TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}
+void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+{
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
+}
+EXPORT_SYMBOL(tcp_syn_ack_timeout);
+
void tcp_set_keepalive(struct sock *sk, int val)
{
if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f0126fd..4f7d212 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2027,12 +2027,12 @@ static struct udp_seq_afinfo udp4_seq_afinfo = {
},
};
-static int udp4_proc_init_net(struct net *net)
+static int __net_init udp4_proc_init_net(struct net *net)
{
return udp_proc_register(net, &udp4_seq_afinfo);
}
-static void udp4_proc_exit_net(struct net *net)
+static void __net_exit udp4_proc_exit_net(struct net *net)
{
udp_proc_unregister(net, &udp4_seq_afinfo);
}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 66f7951..6610bf7 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -81,12 +81,12 @@ static struct udp_seq_afinfo udplite4_seq_afinfo = {
},
};
-static int udplite4_proc_init_net(struct net *net)
+static int __net_init udplite4_proc_init_net(struct net *net)
{
return udp_proc_register(net, &udplite4_seq_afinfo);
}
-static void udplite4_proc_exit_net(struct net *net)
+static void __net_exit udplite4_proc_exit_net(struct net *net)
{
udp_proc_unregister(net, &udplite4_seq_afinfo);
}