author     Linus Torvalds <torvalds@linux-foundation.org>  2015-02-18 01:41:19 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-18 01:41:19 (GMT)
commit     f5af19d10d151c5a2afae3306578f485c244db25 (patch)
tree       54e762e70afb664d14152e6bcf89a48be3fb9c13 /net
parent     0d695d6d8bc1ed39f20c9ce115abf0129b27cb6f (diff)
parent     19334920eaf7df3f69950b040ede6c7598425a5b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:

 1) Missing netlink attribute validation in nft_lookup, from Patrick McHardy.

 2) Restrict ipv6 partial checksum handling to UDP, since that's the only case it works for. From Vlad Yasevich.

 3) Clear out silly device table sentinel macros used by SSB and BCMA drivers. From Joe Perches.

 4) Make sure the remote checksum code never creates a situation where the remote checksum is applied yet the tunneling metadata describing the remote checksum transformation is still present. Otherwise an external entity might see this and apply the checksum again. From Tom Herbert.

 5) Use msecs_to_jiffies() where applicable, from Nicholas Mc Guire.

 6) Don't explicitly initialize timer struct fields, use setup_timer() and mod_timer() instead. From Vaishali Thakkar.

 7) Don't invoke tg3_halt() without the tp->lock held, from Jun'ichi Nomura.

 8) Missing __percpu annotation in ipvlan driver, from Eric Dumazet.

 9) Don't potentially perform skb_get() on shared skbs, also from Eric Dumazet.

10) Fix COW'ing of metrics for non-DST_HOST routes in ipv6, from Martin KaFai Lau.

11) Fix merge resolution error between the iov_iter changes in vhost and some bug fixes that occurred at the same time. From Jason Wang.

12) If rtnl_configure_link() fails we have to perform a call to ->dellink() before unregistering the device. From WANG Cong.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (39 commits)
  net: dsa: Set valid phy interface type
  rtnetlink: call ->dellink on failure when ->newlink exists
  com20020-pci: add support for eae single card
  vhost_net: fix wrong iter offset when setting number of buffers
  net: spelling fixes
  net/core: Fix warning while make xmldocs caused by dev.c
  net: phy: micrel: disable NAND-tree for KSZ8021, KSZ8031, KSZ8051, KSZ8081
  ipv6: fix ipv6_cow_metrics for non DST_HOST case
  openvswitch: Fix key serialization.
  r8152: restore hw settings
  hso: fix rx parsing logic when skb allocation fails
  tcp: make sure skb is not shared before using skb_get()
  bridge: netfilter: Move sysctl-specific error code inside #ifdef
  ipv6: fix possible deadlock in ip6_fl_purge / ip6_fl_gc
  ipvlan: add a missing __percpu pcpu_stats
  tg3: Hold tp->lock before calling tg3_halt() from tg3_init_one()
  bgmac: fix device initialization on Northstar SoCs (condition typo)
  qlcnic: Delete existing multicast MAC list before adding new
  net/mlx5_core: Fix configuration of log_uar_page_sz
  sunvnet: don't change gso data on clones
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netfilter.c        7
-rw-r--r--  net/core/dev.c                   3
-rw-r--r--  net/core/filter.c                2
-rw-r--r--  net/core/pktgen.c                2
-rw-r--r--  net/core/rtnetlink.c             9
-rw-r--r--  net/dsa/slave.c                  9
-rw-r--r--  net/ipv4/devinet.c               2
-rw-r--r--  net/ipv4/fou.c                  42
-rw-r--r--  net/ipv4/tcp_fastopen.c         32
-rw-r--r--  net/ipv4/udp_offload.c          13
-rw-r--r--  net/ipv6/ip6_flowlabel.c         4
-rw-r--r--  net/ipv6/ip6_output.c            2
-rw-r--r--  net/ipv6/route.c                 2
-rw-r--r--  net/ipv6/udp_offload.c           6
-rw-r--r--  net/netfilter/nft_compat.c      63
-rw-r--r--  net/netfilter/nft_lookup.c       1
-rw-r--r--  net/openvswitch/flow.c           2
-rw-r--r--  net/openvswitch/flow_netlink.c   4
-rw-r--r--  net/rds/cong.c                  16
19 files changed, 173 insertions, 48 deletions
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 65728e0..0ee453f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -987,15 +987,12 @@ static int __init br_netfilter_init(void)
if (brnf_sysctl_header == NULL) {
printk(KERN_WARNING
"br_netfilter: can't register to sysctl.\n");
- ret = -ENOMEM;
- goto err1;
+ nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
+ return -ENOMEM;
}
#endif
printk(KERN_NOTICE "Bridge firewalling registered\n");
return 0;
-err1:
- nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
- return ret;
}
static void __exit br_netfilter_fini(void)
diff --git a/net/core/dev.c b/net/core/dev.c
index d030575..8f9710c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4024,6 +4024,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
NAPI_GRO_CB(skb)->udp_mark = 0;
+ NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
/* Setup for GRO checksum validation */
switch (skb->ip_summed) {
@@ -5335,7 +5336,7 @@ EXPORT_SYMBOL(netdev_upper_dev_unlink);
/**
* netdev_bonding_info_change - Dispatch event about slave change
* @dev: device
- * @netdev_bonding_info: info to dispatch
+ * @bonding_info: info to dispatch
*
* Send NETDEV_BONDING_INFO to netdev notifiers with info.
* The caller must hold the RTNL lock.
diff --git a/net/core/filter.c b/net/core/filter.c
index ec9baea..f6bdc2b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -531,7 +531,7 @@ do_pass:
*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
break;
- /* Unkown instruction. */
+ /* Unknown instruction. */
default:
goto err;
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9fa25b0..b4899f5b 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -97,7 +97,7 @@
* New xmit() return, do_div and misc clean up by Stephen Hemminger
* <shemminger@osdl.org> 040923
*
- * Randy Dunlap fixed u64 printk compiler waring
+ * Randy Dunlap fixed u64 printk compiler warning
*
* Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org>
* New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5be499b..ab293a3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2162,7 +2162,14 @@ replay:
}
err = rtnl_configure_link(dev, ifm);
if (err < 0) {
- unregister_netdevice(dev);
+ if (ops->newlink) {
+ LIST_HEAD(list_kill);
+
+ ops->dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ } else {
+ unregister_netdevice(dev);
+ }
goto out;
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index d104ae1..f23dead 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -521,10 +521,13 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
struct device_node *phy_dn, *port_dn;
bool phy_is_fixed = false;
u32 phy_flags = 0;
- int ret;
+ int mode, ret;
port_dn = cd->port_dn[p->port];
- p->phy_interface = of_get_phy_mode(port_dn);
+ mode = of_get_phy_mode(port_dn);
+ if (mode < 0)
+ mode = PHY_INTERFACE_MODE_NA;
+ p->phy_interface = mode;
phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
if (of_phy_is_fixed_link(port_dn)) {
@@ -559,6 +562,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
if (!p->phy)
return -ENODEV;
+ /* Use already configured phy mode */
+ p->phy_interface = p->phy->interface;
phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
p->phy_interface);
} else {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index f0b4a31d..3a8985c 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1186,7 +1186,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
no_in_dev:
/* Not loopback addresses on loopback should be preferred
- in this case. It is importnat that lo is the first interface
+ in this case. It is important that lo is the first interface
in dev_base list.
*/
for_each_netdev_rcu(net, dev) {
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 92ddea1..ff069f6 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -22,14 +22,18 @@ static LIST_HEAD(fou_list);
struct fou {
struct socket *sock;
u8 protocol;
+ u8 flags;
u16 port;
struct udp_offload udp_offloads;
struct list_head list;
};
+#define FOU_F_REMCSUM_NOPARTIAL BIT(0)
+
struct fou_cfg {
u16 type;
u8 protocol;
+ u8 flags;
struct udp_port_cfg udp_config;
};
@@ -64,24 +68,20 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
}
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
- void *data, size_t hdrlen, u8 ipproto)
+ void *data, size_t hdrlen, u8 ipproto,
+ bool nopartial)
{
__be16 *pd = data;
size_t start = ntohs(pd[0]);
size_t offset = ntohs(pd[1]);
size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
- if (skb->remcsum_offload) {
- /* Already processed in GRO path */
- skb->remcsum_offload = 0;
- return guehdr;
- }
-
if (!pskb_may_pull(skb, plen))
return NULL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
- skb_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
+ skb_remcsum_process(skb, (void *)guehdr + hdrlen,
+ start, offset, nopartial);
return guehdr;
}
@@ -142,7 +142,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
if (flags & GUE_PFLAG_REMCSUM) {
guehdr = gue_remcsum(skb, guehdr, data + doffset,
- hdrlen, guehdr->proto_ctype);
+ hdrlen, guehdr->proto_ctype,
+ !!(fou->flags &
+ FOU_F_REMCSUM_NOPARTIAL));
if (!guehdr)
goto drop;
@@ -214,7 +216,8 @@ out_unlock:
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
struct guehdr *guehdr, void *data,
- size_t hdrlen, u8 ipproto)
+ size_t hdrlen, u8 ipproto,
+ struct gro_remcsum *grc, bool nopartial)
{
__be16 *pd = data;
size_t start = ntohs(pd[0]);
@@ -222,7 +225,7 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
if (skb->remcsum_offload)
- return guehdr;
+ return NULL;
if (!NAPI_GRO_CB(skb)->csum_valid)
return NULL;
@@ -234,7 +237,8 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
return NULL;
}
- skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
+ skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
+ start, offset, grc, nopartial);
skb->remcsum_offload = 1;
@@ -254,6 +258,10 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
void *data;
u16 doffset = 0;
int flush = 1;
+ struct fou *fou = container_of(uoff, struct fou, udp_offloads);
+ struct gro_remcsum grc;
+
+ skb_gro_remcsum_init(&grc);
off = skb_gro_offset(skb);
len = off + sizeof(*guehdr);
@@ -295,7 +303,9 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
if (flags & GUE_PFLAG_REMCSUM) {
guehdr = gue_gro_remcsum(skb, off, guehdr,
data + doffset, hdrlen,
- guehdr->proto_ctype);
+ guehdr->proto_ctype, &grc,
+ !!(fou->flags &
+ FOU_F_REMCSUM_NOPARTIAL));
if (!guehdr)
goto out;
@@ -345,6 +355,7 @@ out_unlock:
rcu_read_unlock();
out:
NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, &grc);
return pp;
}
@@ -455,6 +466,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
sk = sock->sk;
+ fou->flags = cfg->flags;
fou->port = cfg->udp_config.local_udp_port;
/* Initial for fou type */
@@ -541,6 +553,7 @@ static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
[FOU_ATTR_AF] = { .type = NLA_U8, },
[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
[FOU_ATTR_TYPE] = { .type = NLA_U8, },
+ [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
};
static int parse_nl_config(struct genl_info *info,
@@ -571,6 +584,9 @@ static int parse_nl_config(struct genl_info *info,
if (info->attrs[FOU_ATTR_TYPE])
cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);
+ if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
+ cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;
+
return 0;
}
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 53db2c3..ea82fd4 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -134,6 +134,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
struct tcp_sock *tp;
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
struct sock *child;
+ u32 end_seq;
req->num_retrans = 0;
req->num_timeout = 0;
@@ -185,20 +186,35 @@ static bool tcp_fastopen_create_child(struct sock *sk,
/* Queue the data carried in the SYN packet. We need to first
* bump skb's refcnt because the caller will attempt to free it.
+ * Note that IPv6 might also have used the skb_get() trick
+ * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts),
+ * so we eventually need a clone of the packet
+ * before inserting it in sk_receive_queue.
*
* XXX (TFO) - we honor a zero-payload TFO request for now,
* (any reason not to?) but no need to queue the skb since
* there is no data. How about SYN+FIN?
*/
- if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
- skb = skb_get(skb);
- skb_dst_drop(skb);
- __skb_pull(skb, tcp_hdr(skb)->doff * 4);
- skb_set_owner_r(skb, child);
- __skb_queue_tail(&child->sk_receive_queue, skb);
- tp->syn_data_acked = 1;
+ end_seq = TCP_SKB_CB(skb)->end_seq;
+ if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
+ struct sk_buff *skb2;
+
+ if (unlikely(skb_shared(skb)))
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ else
+ skb2 = skb_get(skb);
+
+ if (likely(skb2)) {
+ skb_dst_drop(skb2);
+ __skb_pull(skb2, tcp_hdrlen(skb));
+ skb_set_owner_r(skb2, child);
+ __skb_queue_tail(&child->sk_receive_queue, skb2);
+ tp->syn_data_acked = 1;
+ } else {
+ end_seq = TCP_SKB_CB(skb)->seq + 1;
+ }
}
- tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
sk->sk_data_ready(sk);
bh_unlock_sock(child);
sock_put(child);
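
The hunk above is fix 9 from the pull message: skb_get() only bumps the skb's reference count, so if the SYN skb is already shared (IPv6 keeps a reference in treq->pktopts), two holders would end up mutating the same buffer and queue linkage. The safe pattern is to clone when shared and take a plain reference otherwise. A minimal sketch, assuming only the standard <linux/skbuff.h> helpers; get_for_queue() is an illustrative name, not something the patch adds:

	/* Illustrative helper, not a kernel API: obtain a private
	 * reference to skb before queueing it. Clone when the skb is
	 * shared with another holder, otherwise just take a reference.
	 * May return NULL under memory pressure (GFP_ATOMIC clone).
	 */
	static struct sk_buff *get_for_queue(struct sk_buff *skb)
	{
		if (unlikely(skb_shared(skb)))
			return skb_clone(skb, GFP_ATOMIC);
		return skb_get(skb);
	}

The patch open-codes this in tcp_fastopen_create_child() and, when the clone fails, falls back to acking only the SYN (end_seq = seq + 1).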
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index d10f6f4..4915d82 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -402,6 +402,13 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
}
rcu_read_unlock();
+
+ if (skb->remcsum_offload)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
+
+ skb->encapsulation = 1;
+ skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));
+
return err;
}
@@ -410,9 +417,13 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
const struct iphdr *iph = ip_hdr(skb);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
- if (uh->check)
+ if (uh->check) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
iph->daddr, 0);
+ } else {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
return udp_gro_complete(skb, nhoff);
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 2f780cb..f45d6db 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -172,7 +172,7 @@ static void __net_exit ip6_fl_purge(struct net *net)
{
int i;
- spin_lock(&ip6_fl_lock);
+ spin_lock_bh(&ip6_fl_lock);
for (i = 0; i <= FL_HASH_MASK; i++) {
struct ip6_flowlabel *fl;
struct ip6_flowlabel __rcu **flp;
@@ -190,7 +190,7 @@ static void __net_exit ip6_fl_purge(struct net *net)
flp = &fl->next;
}
}
- spin_unlock(&ip6_fl_lock);
+ spin_unlock_bh(&ip6_fl_lock);
}
static struct ip6_flowlabel *fl_intern(struct net *net,
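
The _bh conversion above pairs ip6_fl_purge() with ip6_fl_gc(), the flow-label GC timer, which takes ip6_fl_lock from softirq context. Taking that lock from process context with plain spin_lock() can deadlock: the timer may fire on the same CPU and spin forever on the held lock. A sketch of the pattern, with example_lock and the example_* functions standing in for the real ip6_flowlabel names (2015-era timer callback signature):

	static DEFINE_SPINLOCK(example_lock);	/* stand-in for ip6_fl_lock */

	/* Softirq side: timer callbacks run with BHs already disabled,
	 * so the plain lock variant is fine here.
	 */
	static void example_gc(unsigned long data)
	{
		spin_lock(&example_lock);
		/* ... reap expired entries ... */
		spin_unlock(&example_lock);
	}

	/* Process-context side: must disable bottom halves around the
	 * lock so the GC timer cannot preempt this CPU while the lock
	 * is held.
	 */
	static void example_purge(void)
	{
		spin_lock_bh(&example_lock);
		/* ... walk and free all entries ... */
		spin_unlock_bh(&example_lock);
	}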
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d33df4c..7deebf1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1273,7 +1273,7 @@ emsgsize:
/* If this is the first and only packet and device
* supports checksum offloading, let's use it.
*/
- if (!skb &&
+ if (!skb && sk->sk_protocol == IPPROTO_UDP &&
length + fragheaderlen < mtu &&
rt->dst.dev->features & NETIF_F_V6_CSUM &&
!exthdrlen)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 98565ce..4688bd4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -141,7 +141,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
u32 *p = NULL;
if (!(rt->dst.flags & DST_HOST))
- return NULL;
+ return dst_cow_metrics_generic(dst, old);
peer = rt6_get_peer_create(rt);
if (peer) {
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a562769..ab889bb 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -161,9 +161,13 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
- if (uh->check)
+ if (uh->check) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
&ipv6h->daddr, 0);
+ } else {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
return udp_gro_complete(skb, nhoff);
}
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 265e190..c598f74 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -19,6 +19,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_bridge/ebtables.h>
#include <net/netfilter/nf_tables.h>
static int nft_compat_chain_validate_dependency(const char *tablename,
@@ -40,6 +41,7 @@ static int nft_compat_chain_validate_dependency(const char *tablename,
union nft_entry {
struct ipt_entry e4;
struct ip6t_entry e6;
+ struct ebt_entry ebt;
};
static inline void
@@ -50,9 +52,9 @@ nft_compat_set_par(struct xt_action_param *par, void *xt, const void *xt_info)
par->hotdrop = false;
}
-static void nft_target_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
+static void nft_target_eval_xt(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
{
void *info = nft_expr_priv(expr);
struct xt_target *target = expr->ops->data;
@@ -66,7 +68,7 @@ static void nft_target_eval(const struct nft_expr *expr,
if (pkt->xt.hotdrop)
ret = NF_DROP;
- switch(ret) {
+ switch (ret) {
case XT_CONTINUE:
data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
break;
@@ -74,7 +76,41 @@ static void nft_target_eval(const struct nft_expr *expr,
data[NFT_REG_VERDICT].verdict = ret;
break;
}
- return;
+}
+
+static void nft_target_eval_bridge(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ void *info = nft_expr_priv(expr);
+ struct xt_target *target = expr->ops->data;
+ struct sk_buff *skb = pkt->skb;
+ int ret;
+
+ nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
+
+ ret = target->target(skb, &pkt->xt);
+
+ if (pkt->xt.hotdrop)
+ ret = NF_DROP;
+
+ switch (ret) {
+ case EBT_ACCEPT:
+ data[NFT_REG_VERDICT].verdict = NF_ACCEPT;
+ break;
+ case EBT_DROP:
+ data[NFT_REG_VERDICT].verdict = NF_DROP;
+ break;
+ case EBT_CONTINUE:
+ data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+ break;
+ case EBT_RETURN:
+ data[NFT_REG_VERDICT].verdict = NFT_RETURN;
+ break;
+ default:
+ data[NFT_REG_VERDICT].verdict = ret;
+ break;
+ }
}
static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
@@ -100,6 +136,10 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
entry->e6.ipv6.proto = proto;
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
break;
+ case NFPROTO_BRIDGE:
+ entry->ebt.ethproto = proto;
+ entry->ebt.invflags = inv ? EBT_IPROTO : 0;
+ break;
}
par->entryinfo = entry;
par->target = target;
@@ -307,6 +347,10 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
entry->e6.ipv6.proto = proto;
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
break;
+ case NFPROTO_BRIDGE:
+ entry->ebt.ethproto = proto;
+ entry->ebt.invflags = inv ? EBT_IPROTO : 0;
+ break;
}
par->entryinfo = entry;
par->match = match;
@@ -490,6 +534,9 @@ nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb,
case AF_INET6:
fmt = "ip6t_%s";
break;
+ case NFPROTO_BRIDGE:
+ fmt = "ebt_%s";
+ break;
default:
pr_err("nft_compat: unsupported protocol %d\n",
nfmsg->nfgen_family);
@@ -663,13 +710,17 @@ nft_target_select_ops(const struct nft_ctx *ctx,
nft_target->ops.type = &nft_target_type;
nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
- nft_target->ops.eval = nft_target_eval;
nft_target->ops.init = nft_target_init;
nft_target->ops.destroy = nft_target_destroy;
nft_target->ops.dump = nft_target_dump;
nft_target->ops.validate = nft_target_validate;
nft_target->ops.data = target;
+ if (family == NFPROTO_BRIDGE)
+ nft_target->ops.eval = nft_target_eval_bridge;
+ else
+ nft_target->ops.eval = nft_target_eval_xt;
+
list_add(&nft_target->head, &nft_target_list);
return &nft_target->ops;
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 6404a72..9615b8b 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -39,6 +39,7 @@ static void nft_lookup_eval(const struct nft_expr *expr,
static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
[NFTA_LOOKUP_SET] = { .type = NLA_STRING },
+ [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 },
[NFTA_LOOKUP_SREG] = { .type = NLA_U32 },
[NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
};
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index e2c348b..50ec42f 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -717,6 +717,8 @@ int ovs_flow_key_extract_userspace(const struct nlattr *attr,
{
int err;
+ memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);
+
/* Extract metadata from netlink attributes. */
err = ovs_nla_get_flow_metadata(attr, key, log);
if (err)
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 993281e..216f20b 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1516,7 +1516,7 @@ int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
{
- return ovs_nla_put_key(&flow->mask->key, &flow->key,
+ return ovs_nla_put_key(&flow->key, &flow->key,
OVS_FLOW_ATTR_KEY, false, skb);
}
@@ -1746,7 +1746,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
struct sw_flow_key key;
struct ovs_tunnel_info *tun_info;
struct nlattr *a;
- int err, start, opts_type;
+ int err = 0, start, opts_type;
ovs_match_init(&match, &key, NULL);
opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e5b65ac..e6144b8 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -221,7 +221,21 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
if (!test_and_set_bit(0, &conn->c_map_queued)) {
rds_stats_inc(s_cong_update_queued);
- rds_send_xmit(conn);
+ /* We cannot inline the call to rds_send_xmit() here
+ * for two reasons (both pertaining to a TCP transport):
+ * 1. When we get here from the receive path, we
+ * are already holding the sock_lock (held by
+ * tcp_v4_rcv()). So inlining calls to
+ * tcp_setsockopt and/or tcp_sendmsg will deadlock
+ * when it tries to take the sock_lock().
+ * 2. Interrupts are masked so that we can mark the
+ * port congested from both send and recv paths.
+ * (See comment around declaration of rds_cong_lock).
+ * An attempt to get the sock_lock() here will
+ * therefore trigger warnings.
+ * Defer the xmit to rds_send_worker() instead.
+ */
+ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
}
}
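
The comment added in the hunk above explains why the transmit cannot run inline; the fix defers it to the existing RDS send worker by queueing with zero delay, so rds_send_xmit() runs later in process context where taking the sock lock is safe. A hedged sketch of that shape, with example_wq and example_work standing in for rds_wq and conn->c_send_w:

	static struct workqueue_struct *example_wq;	/* stand-in for rds_wq */
	static struct delayed_work example_work;	/* stand-in for conn->c_send_w */

	static void example_send_worker(struct work_struct *work)
	{
		/* Runs in process context: safe to take sock_lock here. */
	}

	static int example_init(void)
	{
		example_wq = create_singlethread_workqueue("example_wq");
		if (!example_wq)
			return -ENOMEM;
		INIT_DELAYED_WORK(&example_work, example_send_worker);
		return 0;
	}

	static void example_congestion_update(void)
	{
		/* Called from atomic context: kick the worker instead of
		 * sending inline; zero delay means "as soon as possible".
		 */
		queue_delayed_work(example_wq, &example_work, 0);
	}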