Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c  13
-rw-r--r--  net/8021q/vlan_dev.c  6
-rw-r--r--  net/9p/client.c  9
-rw-r--r--  net/9p/trans_rdma.c  11
-rw-r--r--  net/Kconfig  2
-rw-r--r--  net/appletalk/atalk_proc.c  2
-rw-r--r--  net/bridge/br_device.c  12
-rw-r--r--  net/bridge/br_if.c  3
-rw-r--r--  net/bridge/br_mdb.c  3
-rw-r--r--  net/bridge/br_notify.c  5
-rw-r--r--  net/bridge/br_private.h  12
-rw-r--r--  net/ceph/messenger.c  2
-rw-r--r--  net/core/datagram.c  72
-rw-r--r--  net/core/dev.c  26
-rw-r--r--  net/core/fib_rules.c  25
-rw-r--r--  net/core/flow_dissector.c  6
-rw-r--r--  net/core/iovec.c  24
-rw-r--r--  net/core/net-sysfs.c  22
-rw-r--r--  net/core/pktgen.c  61
-rw-r--r--  net/core/rtnetlink.c  29
-rw-r--r--  net/core/sock.c  166
-rw-r--r--  net/core/stream.c  2
-rw-r--r--  net/dccp/proto.c  4
-rw-r--r--  net/ieee802154/6lowpan.c  286
-rw-r--r--  net/ieee802154/6lowpan.h  20
-rw-r--r--  net/ipv4/devinet.c  17
-rw-r--r--  net/ipv4/fib_rules.c  25
-rw-r--r--  net/ipv4/igmp.c  80
-rw-r--r--  net/ipv4/ip_gre.c  4
-rw-r--r--  net/ipv4/ip_input.c  8
-rw-r--r--  net/ipv4/ip_tunnel.c  67
-rw-r--r--  net/ipv4/ip_vti.c  528
-rw-r--r--  net/ipv4/ipip.c  3
-rw-r--r--  net/ipv4/ipmr.c  15
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c  2
-rw-r--r--  net/ipv4/ping.c  2
-rw-r--r--  net/ipv4/proc.c  7
-rw-r--r--  net/ipv4/raw.c  2
-rw-r--r--  net/ipv4/route.c  24
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c  7
-rw-r--r--  net/ipv4/tcp.c  15
-rw-r--r--  net/ipv4/tcp_fastopen.c  13
-rw-r--r--  net/ipv4/tcp_input.c  156
-rw-r--r--  net/ipv4/tcp_ipv4.c  27
-rw-r--r--  net/ipv4/tcp_minisocks.c  8
-rw-r--r--  net/ipv4/tcp_output.c  3
-rw-r--r--  net/ipv4/tcp_probe.c  67
-rw-r--r--  net/ipv4/udp.c  5
-rw-r--r--  net/ipv6/addrconf.c  104
-rw-r--r--  net/ipv6/af_inet6.c  1
-rw-r--r--  net/ipv6/datagram.c  2
-rw-r--r--  net/ipv6/fib6_rules.c  37
-rw-r--r--  net/ipv6/ip6_fib.c  16
-rw-r--r--  net/ipv6/ip6_gre.c  5
-rw-r--r--  net/ipv6/ip6_input.c  6
-rw-r--r--  net/ipv6/ip6_tunnel.c  45
-rw-r--r--  net/ipv6/ip6mr.c  11
-rw-r--r--  net/ipv6/mcast.c  51
-rw-r--r--  net/ipv6/netfilter/ip6t_MASQUERADE.c  2
-rw-r--r--  net/ipv6/proc.c  4
-rw-r--r--  net/ipv6/raw.c  9
-rw-r--r--  net/ipv6/route.c  5
-rw-r--r--  net/ipv6/sit.c  6
-rw-r--r--  net/ipv6/tcp_ipv6.c  10
-rw-r--r--  net/ipx/ipx_proc.c  2
-rw-r--r--  net/irda/irttp.c  50
-rw-r--r--  net/key/af_key.c  14
-rw-r--r--  net/llc/llc_proc.c  2
-rw-r--r--  net/mac80211/cfg.c  232
-rw-r--r--  net/mac80211/chan.c  58
-rw-r--r--  net/mac80211/debugfs_sta.c  9
-rw-r--r--  net/mac80211/driver-ops.h  13
-rw-r--r--  net/mac80211/ht.c  53
-rw-r--r--  net/mac80211/ibss.c  184
-rw-r--r--  net/mac80211/ieee80211_i.h  67
-rw-r--r--  net/mac80211/iface.c  11
-rw-r--r--  net/mac80211/led.c  19
-rw-r--r--  net/mac80211/led.h  4
-rw-r--r--  net/mac80211/main.c  15
-rw-r--r--  net/mac80211/mesh.c  7
-rw-r--r--  net/mac80211/mesh_plink.c  2
-rw-r--r--  net/mac80211/mlme.c  98
-rw-r--r--  net/mac80211/rate.c  46
-rw-r--r--  net/mac80211/rate.h  22
-rw-r--r--  net/mac80211/rc80211_minstrel.c  33
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c  15
-rw-r--r--  net/mac80211/rc80211_pid_algo.c  1
-rw-r--r--  net/mac80211/rx.c  97
-rw-r--r--  net/mac80211/scan.c  72
-rw-r--r--  net/mac80211/status.c  90
-rw-r--r--  net/mac80211/trace.h  26
-rw-r--r--  net/mac80211/tx.c  108
-rw-r--r--  net/mac80211/util.c  216
-rw-r--r--  net/netfilter/Kconfig  22
-rw-r--r--  net/netfilter/Makefile  3
-rw-r--r--  net/netfilter/core.c  7
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c  8
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c  23
-rw-r--r--  net/netfilter/ipvs/ip_vs_sh.c  6
-rw-r--r--  net/netfilter/nf_conntrack_core.c  69
-rw-r--r--  net/netfilter/nf_conntrack_labels.c  4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  269
-rw-r--r--  net/netfilter/nf_conntrack_proto.c  4
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c  4
-rw-r--r--  net/netfilter/nf_nat_core.c  6
-rw-r--r--  net/netfilter/nf_nat_helper.c  28
-rw-r--r--  net/netfilter/nf_nat_proto_sctp.c  8
-rw-r--r--  net/netfilter/nf_tproxy_core.c  62
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c  11
-rw-r--r--  net/netfilter/nfnetlink_queue_ct.c  15
-rw-r--r--  net/netfilter/xt_TPROXY.c  169
-rw-r--r--  net/netfilter/xt_addrtype.c  2
-rw-r--r--  net/netfilter/xt_socket.c  66
-rw-r--r--  net/netlink/af_netlink.c  101
-rw-r--r--  net/netlink/af_netlink.h  3
-rw-r--r--  net/openvswitch/Kconfig  13
-rw-r--r--  net/openvswitch/Makefile  4
-rw-r--r--  net/openvswitch/vport-vxlan.c  204
-rw-r--r--  net/openvswitch/vport.c  3
-rw-r--r--  net/openvswitch/vport.h  1
-rw-r--r--  net/packet/af_packet.c  50
-rw-r--r--  net/phonet/socket.c  2
-rw-r--r--  net/sched/sch_netem.c  5
-rw-r--r--  net/sctp/associola.c  8
-rw-r--r--  net/sctp/auth.c  8
-rw-r--r--  net/sctp/bind_addr.c  8
-rw-r--r--  net/sctp/chunk.c  8
-rw-r--r--  net/sctp/command.c  8
-rw-r--r--  net/sctp/debug.c  8
-rw-r--r--  net/sctp/endpointola.c  8
-rw-r--r--  net/sctp/input.c  18
-rw-r--r--  net/sctp/inqueue.c  8
-rw-r--r--  net/sctp/ipv6.c  10
-rw-r--r--  net/sctp/objcnt.c  8
-rw-r--r--  net/sctp/output.c  8
-rw-r--r--  net/sctp/outqueue.c  8
-rw-r--r--  net/sctp/primitive.c  8
-rw-r--r--  net/sctp/probe.c  9
-rw-r--r--  net/sctp/proc.c  12
-rw-r--r--  net/sctp/protocol.c  10
-rw-r--r--  net/sctp/sm_make_chunk.c  107
-rw-r--r--  net/sctp/sm_sideeffect.c  8
-rw-r--r--  net/sctp/sm_statefuns.c  8
-rw-r--r--  net/sctp/sm_statetable.c  8
-rw-r--r--  net/sctp/socket.c  8
-rw-r--r--  net/sctp/ssnmap.c  8
-rw-r--r--  net/sctp/sysctl.c  8
-rw-r--r--  net/sctp/transport.c  8
-rw-r--r--  net/sctp/tsnmap.c  8
-rw-r--r--  net/sctp/ulpevent.c  8
-rw-r--r--  net/sctp/ulpqueue.c  8
-rw-r--r--  net/sunrpc/svcsock.c  2
-rw-r--r--  net/sunrpc/xprtsock.c  2
-rw-r--r--  net/unix/af_unix.c  70
-rw-r--r--  net/vmw_vsock/af_vsock.c  3
-rw-r--r--  net/vmw_vsock/af_vsock.h  175
-rw-r--r--  net/vmw_vsock/vmci_transport.c  2
-rw-r--r--  net/vmw_vsock/vmci_transport.h  4
-rw-r--r--  net/vmw_vsock/vsock_addr.c  3
-rw-r--r--  net/vmw_vsock/vsock_addr.h  30
-rw-r--r--  net/wireless/core.c  9
-rw-r--r--  net/wireless/core.h  2
-rw-r--r--  net/wireless/mesh.c  5
-rw-r--r--  net/wireless/nl80211.c  480
-rw-r--r--  net/wireless/nl80211.h  2
-rw-r--r--  net/wireless/rdev-ops.h  12
-rw-r--r--  net/wireless/scan.c  31
-rw-r--r--  net/wireless/trace.h  45
-rw-r--r--  net/wireless/util.c  14
-rw-r--r--  net/xfrm/xfrm_policy.c  12
-rw-r--r--  net/xfrm/xfrm_state.c  15
171 files changed, 3915 insertions, 2305 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 2fb2d88..61fc573 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -210,6 +210,7 @@ out_vid_del:
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
struct net_device *new_dev;
+ struct vlan_dev_priv *vlan;
struct net *net = dev_net(real_dev);
struct vlan_net *vn = net_generic(net, vlan_net_id);
char name[IFNAMSIZ];
@@ -260,11 +261,12 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
new_dev->mtu = real_dev->mtu;
new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
- vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q);
- vlan_dev_priv(new_dev)->vlan_id = vlan_id;
- vlan_dev_priv(new_dev)->real_dev = real_dev;
- vlan_dev_priv(new_dev)->dent = NULL;
- vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+ vlan = vlan_dev_priv(new_dev);
+ vlan->vlan_proto = htons(ETH_P_8021Q);
+ vlan->vlan_id = vlan_id;
+ vlan->real_dev = real_dev;
+ vlan->dent = NULL;
+ vlan->flags = VLAN_FLAG_REORDER_HDR;
new_dev->rtnl_link_ops = &vlan_link_ops;
err = register_vlan_dev(new_dev);
@@ -459,6 +461,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
/* Propagate to vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
call_netdevice_notifiers(event, vlandev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 1cd3d2a..9ab8a7e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -107,10 +107,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 vlan_tci = 0;
int rc;
- if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+ if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
- vlan_tci = vlan_dev_priv(dev)->vlan_id;
+ vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
vhdr->h_vlan_TCI = htons(vlan_tci);
@@ -133,7 +133,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
saddr = dev->dev_addr;
/* Now make the underlying real hard header */
- dev = vlan_dev_priv(dev)->real_dev;
+ dev = vlan->real_dev;
rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
if (rc > 0)
rc += vhdrlen;
diff --git a/net/9p/client.c b/net/9p/client.c
index 8b93cae..ba93bda 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -658,17 +658,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
/*
* if we haven't received a response for oldreq,
- * remove it from the list, and notify the transport
- * layer that the reply will never arrive.
+ * remove it from the list
*/
- spin_lock(&c->lock);
if (oldreq->status == REQ_STATUS_FLSH) {
+ spin_lock(&c->lock);
list_del(&oldreq->req_list);
spin_unlock(&c->lock);
- if (c->trans_mod->cancelled)
- c->trans_mod->cancelled(c, req);
- } else {
- spin_unlock(&c->lock);
}
p9_free_req(c, req);
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 928f2bb..8f68df5 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -588,17 +588,6 @@ static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
return 1;
}
-/* A request has been fully flushed without a reply.
- * That means we have posted one buffer in excess.
- */
-static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
-{
- struct p9_trans_rdma *rdma = client->trans;
-
- atomic_inc(&rdma->excess_rc);
- return 0;
-}
-
/**
* trans_create_rdma - Transport method for creating atransport instance
* @client: client instance
diff --git a/net/Kconfig b/net/Kconfig
index 2b40660..ee02136 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -281,7 +281,7 @@ menu "Network testing"
config NET_PKTGEN
tristate "Packet Generator (USE WITH CAUTION)"
- depends on PROC_FS
+ depends on INET && PROC_FS
---help---
This module will inject preconfigured packets, at a configurable
rate, out of a given interface. It is used for network interface
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index c30f3a0..af46bc4 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -178,7 +178,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
at = at_sk(s);
seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
- "%02X %d\n",
+ "%02X %u\n",
s->sk_type, ntohs(at->src_net), at->src_node, at->src_port,
ntohs(at->dest_net), at->dest_node, at->dest_port,
sk_wmem_alloc_get(s),
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 69363bd..0feaaa0 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -245,22 +245,22 @@ fail:
int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
{
struct netpoll *np;
- int err = 0;
+ int err;
+
+ if (!p->br->dev->npinfo)
+ return 0;
np = kzalloc(sizeof(*p->np), gfp);
- err = -ENOMEM;
if (!np)
- goto out;
+ return -ENOMEM;
err = __netpoll_setup(np, p->dev, gfp);
if (err) {
kfree(np);
- goto out;
+ return err;
}
p->np = np;
-
-out:
return err;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 5623be6..aa6c9a8 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -363,7 +363,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (err)
goto err2;
- if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))
+ err = br_netpoll_enable(p, GFP_KERNEL);
+ if (err)
goto err3;
err = netdev_master_upper_dev_link(dev, br->dev);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 0daae3e..e4d5cd4 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -61,7 +61,8 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
for (i = 0; i < mdb->max; i++) {
struct net_bridge_mdb_entry *mp;
- struct net_bridge_port_group *p, **pp;
+ struct net_bridge_port_group *p;
+ struct net_bridge_port_group __rcu **pp;
struct net_bridge_port *port;
hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 3a3f371..2998dd1 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -102,6 +102,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlaying device to change its type. */
return NOTIFY_BAD;
+
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+ call_netdevice_notifiers(event, br->dev);
+ break;
}
/* Events that may cause spanning tree to refresh */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2f7da41..d41283c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -334,11 +334,6 @@ extern void br_dev_delete(struct net_device *dev, struct list_head *list);
extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
-static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
-{
- return br->dev->npinfo;
-}
-
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
{
@@ -351,11 +346,6 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
extern void br_netpoll_disable(struct net_bridge_port *p);
#else
-static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
-{
- return NULL;
-}
-
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
{
@@ -476,7 +466,7 @@ extern void br_multicast_free_pg(struct rcu_head *head);
extern struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port *port,
struct br_ip *group,
- struct net_bridge_port_group *next,
+ struct net_bridge_port_group __rcu *next,
unsigned char state);
extern void br_mdb_init(void);
extern void br_mdb_uninit(void);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index eb0a46a..3be308e 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -409,7 +409,7 @@ static void ceph_sock_write_space(struct sock *sk)
* and net/core/stream.c:sk_stream_write_space().
*/
if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ if (sk_stream_is_writeable(sk)) {
dout("%s %p queueing write work\n", __func__, con);
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
queue_con(con);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 8ab48cd..af814e7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -48,6 +48,7 @@
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/pagemap.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
@@ -573,6 +574,77 @@ fault:
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+/**
+ * zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
+ * @skb: buffer to copy
+ * @from: io vector to copy to
+ * @offset: offset in the io vector to start copying from
+ * @count: amount of vectors to copy to buffer from
+ *
+ * The function will first copy up to headlen, and then pin the userspace
+ * pages and build frags through them.
+ *
+ * Returns 0, -EFAULT or -EMSGSIZE.
+ * Note: the iovec is not modified during the copy
+ */
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ int offset, size_t count)
+{
+ int len = iov_length(from, count) - offset;
+ int copy = min_t(int, skb_headlen(skb), len);
+ int size;
+ int i = 0;
+
+ /* copy up to skb headlen */
+ if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy))
+ return -EFAULT;
+
+ if (len == copy)
+ return 0;
+
+ offset += copy;
+ while (count--) {
+ struct page *page[MAX_SKB_FRAGS];
+ int num_pages;
+ unsigned long base;
+ unsigned long truesize;
+
+ /* Skip over from offset and copied */
+ if (offset >= from->iov_len) {
+ offset -= from->iov_len;
+ ++from;
+ continue;
+ }
+ len = from->iov_len - offset;
+ base = (unsigned long)from->iov_base + offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ if (i + size > MAX_SKB_FRAGS)
+ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+ if (num_pages != size) {
+ release_pages(&page[i], num_pages, 0);
+ return -EFAULT;
+ }
+ truesize = size * PAGE_SIZE;
+ skb->data_len += len;
+ skb->len += len;
+ skb->truesize += truesize;
+ atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+ while (len) {
+ int off = base & ~PAGE_MASK;
+ int size = min_t(int, len, PAGE_SIZE - off);
+ skb_fill_page_desc(skb, i, page[i], off, size);
+ base += size;
+ len -= size;
+ i++;
+ }
+ offset = 0;
+ ++from;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(zerocopy_sg_from_iovec);
+
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
u8 __user *to, int len,
__wsum *csump)
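The zerocopy_sg_from_iovec() helper added above and iov_pages() in net/core/iovec.c further down both rely on the same page-count expression. A minimal user-space sketch of that arithmetic, assuming 4 KiB pages (the PAGE_* macros are redefined locally purely for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Number of pages touched by a buffer of length len starting at base:
 * the offset into the first page plus the length, rounded up to whole pages.
 */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	/* 100 bytes starting 4000 bytes into a page spill into a second page */
	printf("%lu\n", pages_spanned(0x1000 + 4000, 100));	/* prints 2 */
	/* a page-aligned, page-sized buffer touches exactly one page */
	printf("%lu\n", pages_spanned(0x2000, 4096));		/* prints 1 */
	return 0;
}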
diff --git a/net/core/dev.c b/net/core/dev.c
index 26755dd..1ed2b66 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -174,7 +174,7 @@ static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);
-seqcount_t devnet_rename_seq;
+static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
@@ -1691,13 +1691,13 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
kfree_skb(skb);
return NET_RX_DROP;
}
- skb_scrub_packet(skb);
skb->protocol = eth_type_trans(skb, dev);
/* eth_type_trans() can set pkt_type.
- * clear pkt_type _after_ calling eth_type_trans()
+ * call skb_scrub_packet() after it to clear pkt_type _after_ calling
+ * eth_type_trans().
*/
- skb->pkt_type = PACKET_HOST;
+ skb_scrub_packet(skb);
return netif_rx(skb);
}
@@ -4989,6 +4989,24 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier)
EXPORT_SYMBOL(dev_change_carrier);
/**
+ * dev_get_phys_port_id - Get device physical port ID
+ * @dev: device
+ * @ppid: port ID
+ *
+ * Get device physical port ID
+ */
+int dev_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!ops->ndo_get_phys_port_id)
+ return -EOPNOTSUPP;
+ return ops->ndo_get_phys_port_id(dev, ppid);
+}
+EXPORT_SYMBOL(dev_get_phys_port_id);
+
+/**
* dev_new_index - allocate an ifindex
* @net: the applicable net namespace
*
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 2173544..2e65413 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -33,6 +33,9 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
r->flags = flags;
r->fr_net = hold_net(ops->fro_net);
+ r->suppress_prefixlen = -1;
+ r->suppress_ifgroup = -1;
+
/* The lock is not required here, the list in unreacheable
* at the moment this function is called */
list_add_tail(&r->list, &ops->rules_list);
@@ -226,6 +229,9 @@ jumped:
else
err = ops->action(rule, fl, flags, arg);
+ if (!err && ops->suppress && ops->suppress(rule, arg))
+ continue;
+
if (err != -EAGAIN) {
if ((arg->flags & FIB_LOOKUP_NOREF) ||
likely(atomic_inc_not_zero(&rule->refcnt))) {
@@ -337,6 +343,15 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
rule->action = frh->action;
rule->flags = frh->flags;
rule->table = frh_get_table(frh, tb);
+ if (tb[FRA_SUPPRESS_PREFIXLEN])
+ rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
+ else
+ rule->suppress_prefixlen = -1;
+
+ if (tb[FRA_SUPPRESS_IFGROUP])
+ rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
+ else
+ rule->suppress_ifgroup = -1;
if (!tb[FRA_PRIORITY] && ops->default_pref)
rule->pref = ops->default_pref(ops);
@@ -523,6 +538,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
+ nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
+ nla_total_size(4) /* FRA_PRIORITY */
+ nla_total_size(4) /* FRA_TABLE */
+ + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
+ + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
+ nla_total_size(4) /* FRA_FWMARK */
+ nla_total_size(4); /* FRA_FWMASK */
@@ -548,6 +565,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh->table = rule->table;
if (nla_put_u32(skb, FRA_TABLE, rule->table))
goto nla_put_failure;
+ if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
+ goto nla_put_failure;
frh->res1 = 0;
frh->res2 = 0;
frh->action = rule->action;
@@ -580,6 +599,12 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
(rule->target &&
nla_put_u32(skb, FRA_GOTO, rule->target)))
goto nla_put_failure;
+
+ if (rule->suppress_ifgroup != -1) {
+ if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
+ goto nla_put_failure;
+ }
+
if (ops->fill(rule, skb, frh) < 0)
goto nla_put_failure;
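The ops->suppress hook added to the lookup loop above lets a rule whose action succeeded still be skipped, with lookup continuing at the next rule; fib4_rule_suppress() later in this diff implements it for IPv4 via suppress_prefixlen/suppress_ifgroup. A toy user-space model of that control flow (all names and data below are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct result { int prefixlen; };

struct rule {
	const char *name;
	int suppress_prefixlen;			/* -1 means "never suppress" */
	bool (*action)(struct result *res);	/* true when a route was found */
};

static bool lookup_main(struct result *res)  { res->prefixlen = 0;  return true; }
static bool lookup_other(struct result *res) { res->prefixlen = 24; return true; }

static bool suppress(const struct rule *r, const struct result *res)
{
	return res->prefixlen <= r->suppress_prefixlen;
}

int main(void)
{
	struct rule rules[] = {
		{ "main table, suppress_prefixlen 8", 8,  lookup_main  },
		{ "fallback table",                   -1, lookup_other },
	};
	struct result res;

	for (unsigned int i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
		if (rules[i].action(&res) && suppress(&rules[i], &res)) {
			/* mirrors the new "continue" in fib_rules_lookup() */
			printf("%s: /%d result suppressed, trying next rule\n",
			       rules[i].name, res.prefixlen);
			continue;
		}
		printf("%s: accepted /%d route\n", rules[i].name, res.prefixlen);
		break;
	}
	return 0;
}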
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index b84a1b1..159737c 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -140,7 +140,11 @@ ipv6:
break;
}
case IPPROTO_IPIP:
- goto again;
+ proto = htons(ETH_P_IP);
+ goto ip;
+ case IPPROTO_IPV6:
+ proto = htons(ETH_P_IPV6);
+ goto ipv6;
default:
break;
}
diff --git a/net/core/iovec.c b/net/core/iovec.c
index de178e4..b77eeec 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -212,3 +212,27 @@ out_fault:
goto out;
}
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
+
+unsigned long iov_pages(const struct iovec *iov, int offset,
+ unsigned long nr_segs)
+{
+ unsigned long seg, base;
+ int pages = 0, len, size;
+
+ while (nr_segs && (offset >= iov->iov_len)) {
+ offset -= iov->iov_len;
+ ++iov;
+ --nr_segs;
+ }
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ base = (unsigned long)iov[seg].iov_base + offset;
+ len = iov[seg].iov_len - offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ pages += size;
+ offset = 0;
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL(iov_pages);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 981fed3..8826b0d 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -334,6 +334,27 @@ static ssize_t store_group(struct device *dev, struct device_attribute *attr,
return netdev_store(dev, attr, buf, len, change_group);
}
+static ssize_t show_phys_port_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ ssize_t ret = -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (dev_isalive(netdev)) {
+ struct netdev_phys_port_id ppid;
+
+ ret = dev_get_phys_port_id(netdev, &ppid);
+ if (!ret)
+ ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
+ }
+ rtnl_unlock();
+
+ return ret;
+}
+
static struct device_attribute net_class_attributes[] = {
__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
@@ -355,6 +376,7 @@ static struct device_attribute net_class_attributes[] = {
__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
store_tx_queue_len),
__ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
+ __ATTR(phys_port_id, S_IRUGO, show_phys_port_id, NULL),
{}
};
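Because phys_port_id is exposed as an ordinary read-only sysfs attribute, it can be read without any netlink plumbing. A hypothetical user-space reader ("eth0" is a placeholder interface name; the read simply fails on drivers that do not implement ndo_get_phys_port_id):

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/class/net/eth0/phys_port_id", "r");

	if (!f) {
		perror("open phys_port_id");
		return 1;
	}
	/* the attribute is formatted with "%*phN", i.e. a plain hex string */
	if (fgets(buf, sizeof(buf), f))
		printf("phys_port_id: %s", buf);
	fclose(f);
	return 0;
}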
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9640972..261357a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -160,6 +160,8 @@
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#ifdef CONFIG_XFRM
#include <net/xfrm.h>
@@ -198,6 +200,7 @@
#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
#define F_NODE (1<<15) /* Node memory alloc*/
+#define F_UDPCSUM (1<<16) /* Include UDP checksum */
/* Thread control flag bits */
#define T_STOP (1<<0) /* Stop run */
@@ -631,6 +634,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->flags & F_UDPDST_RND)
seq_printf(seq, "UDPDST_RND ");
+ if (pkt_dev->flags & F_UDPCSUM)
+ seq_printf(seq, "UDPCSUM ");
+
if (pkt_dev->flags & F_MPLS_RND)
seq_printf(seq, "MPLS_RND ");
@@ -1228,6 +1234,12 @@ static ssize_t pktgen_if_write(struct file *file,
else if (strcmp(f, "!NODE_ALLOC") == 0)
pkt_dev->flags &= ~F_NODE;
+ else if (strcmp(f, "UDPCSUM") == 0)
+ pkt_dev->flags |= F_UDPCSUM;
+
+ else if (strcmp(f, "!UDPCSUM") == 0)
+ pkt_dev->flags &= ~F_UDPCSUM;
+
else {
sprintf(pg_result,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
@@ -2733,7 +2745,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
udph->source = htons(pkt_dev->cur_udp_src);
udph->dest = htons(pkt_dev->cur_udp_dst);
udph->len = htons(datalen + 8); /* DATA + udphdr */
- udph->check = 0; /* No checksum */
+ udph->check = 0;
iph->ihl = 5;
iph->version = 4;
@@ -2747,11 +2759,28 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
iph->frag_off = 0;
iplen = 20 + 8 + datalen;
iph->tot_len = htons(iplen);
- iph->check = 0;
- iph->check = ip_fast_csum((void *)iph, iph->ihl);
+ ip_send_check(iph);
skb->protocol = protocol;
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V4_CSUM) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum = 0;
+ udp4_hwcsum(skb, udph->source, udph->dest);
+ } else {
+ __wsum csum = udp_csum(skb);
+
+ /* add protocol-dependent pseudo-header */
+ udph->check = csum_tcpudp_magic(udph->source, udph->dest,
+ datalen + 8, IPPROTO_UDP, csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
+
pktgen_finalize_skb(pkt_dev, skb, datalen);
#ifdef CONFIG_XFRM
@@ -2768,7 +2797,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
struct sk_buff *skb = NULL;
__u8 *eth;
struct udphdr *udph;
- int datalen;
+ int datalen, udplen;
struct ipv6hdr *iph;
__be16 protocol = htons(ETH_P_IPV6);
__be32 *mpls;
@@ -2844,10 +2873,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
net_info_ratelimited("increased datalen to %d\n", datalen);
}
+ udplen = datalen + sizeof(struct udphdr);
udph->source = htons(pkt_dev->cur_udp_src);
udph->dest = htons(pkt_dev->cur_udp_dst);
- udph->len = htons(datalen + sizeof(struct udphdr));
- udph->check = 0; /* No checksum */
+ udph->len = htons(udplen);
+ udph->check = 0;
*(__be32 *) iph = htonl(0x60000000); /* Version + flow */
@@ -2858,7 +2888,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
iph->hop_limit = 32;
- iph->payload_len = htons(sizeof(struct udphdr) + datalen);
+ iph->payload_len = htons(udplen);
iph->nexthdr = IPPROTO_UDP;
iph->daddr = pkt_dev->cur_in6_daddr;
@@ -2868,6 +2898,23 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V6_CSUM) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
+ } else {
+ __wsum csum = udp_csum(skb);
+
+ /* add protocol-dependent pseudo-header */
+ udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
+
pktgen_finalize_skb(pkt_dev, skb, datalen);
return skb;
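The IPv4 and IPv6 software-checksum branches above end with the same convention: a UDP checksum that folds to zero is transmitted as 0xFFFF (CSUM_MANGLED_0), because a zero on the wire would mean "no checksum" for UDP/IPv4 and is not permitted for UDP/IPv6. A small stand-alone sketch of that folding step, using standard RFC 1071 one's-complement arithmetic (the example sum is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement sum to 16 bits and invert it. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries once... */
	sum = (sum & 0xffff) + (sum >> 16);	/* ...and any carry that created */
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t check = csum_fold(0x0001fffe);	/* this sum folds to zero */

	if (check == 0)
		check = 0xffff;			/* the CSUM_MANGLED_0 substitution */
	printf("transmitted checksum: 0x%04x\n", check);
	return 0;
}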
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ca198c1..2a0e21d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -767,7 +767,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
- + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
+ + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -846,6 +847,24 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ struct netdev_phys_port_id ppid;
+
+ err = dev_get_phys_port_id(dev, &ppid);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask)
@@ -913,6 +932,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
goto nla_put_failure;
}
+ if (rtnl_phys_port_id_fill(skb, dev))
+ goto nla_put_failure;
+
attr = nla_reserve(skb, IFLA_STATS,
sizeof(struct rtnl_link_stats));
if (attr == NULL)
@@ -1113,6 +1135,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
[IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
+ [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
};
EXPORT_SYMBOL(ifla_policy);
@@ -1844,10 +1867,10 @@ replay:
else
err = register_netdevice(dev);
- if (err < 0 && !IS_ERR(dev))
+ if (err < 0) {
free_netdev(dev);
- if (err < 0)
goto out;
+ }
err = rtnl_configure_link(dev, ifm);
if (err < 0)
diff --git a/net/core/sock.c b/net/core/sock.c
index 2c097c5..5b6beba 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -93,6 +93,7 @@
#include <linux/capability.h>
#include <linux/errno.h>
+#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
@@ -1575,6 +1576,25 @@ void sock_wfree(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_wfree);
+void skb_orphan_partial(struct sk_buff *skb)
+{
+ /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
+ * so we do not completely orphan skb, but transfert all
+ * accounted bytes but one, to avoid unexpected reorders.
+ */
+ if (skb->destructor == sock_wfree
+#ifdef CONFIG_INET
+ || skb->destructor == tcp_wfree
+#endif
+ ) {
+ atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
+ skb->truesize = 1;
+ } else {
+ skb_orphan(skb);
+ }
+}
+EXPORT_SYMBOL(skb_orphan_partial);
+
/*
* Read buffer destructor automatically called from kfree_skb.
*/
@@ -1721,24 +1741,23 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
unsigned long data_len, int noblock,
- int *errcode)
+ int *errcode, int max_page_order)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ unsigned long chunk;
gfp_t gfp_mask;
long timeo;
int err;
int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ struct page *page;
+ int i;
err = -EMSGSIZE;
if (npages > MAX_SKB_FRAGS)
goto failure;
- gfp_mask = sk->sk_allocation;
- if (gfp_mask & __GFP_WAIT)
- gfp_mask |= __GFP_REPEAT;
-
timeo = sock_sndtimeo(sk, noblock);
- while (1) {
+ while (!skb) {
err = sock_error(sk);
if (err != 0)
goto failure;
@@ -1747,50 +1766,52 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto failure;
- if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
- skb = alloc_skb(header_len, gfp_mask);
- if (skb) {
- int i;
-
- /* No pages, we're done... */
- if (!data_len)
- break;
-
- skb->truesize += data_len;
- skb_shinfo(skb)->nr_frags = npages;
- for (i = 0; i < npages; i++) {
- struct page *page;
-
- page = alloc_pages(sk->sk_allocation, 0);
- if (!page) {
- err = -ENOBUFS;
- skb_shinfo(skb)->nr_frags = i;
- kfree_skb(skb);
- goto failure;
- }
-
- __skb_fill_page_desc(skb, i,
- page, 0,
- (data_len >= PAGE_SIZE ?
- PAGE_SIZE :
- data_len));
- data_len -= PAGE_SIZE;
- }
+ if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ err = -EAGAIN;
+ if (!timeo)
+ goto failure;
+ if (signal_pending(current))
+ goto interrupted;
+ timeo = sock_wait_for_wmem(sk, timeo);
+ continue;
+ }
- /* Full success... */
- break;
- }
- err = -ENOBUFS;
+ err = -ENOBUFS;
+ gfp_mask = sk->sk_allocation;
+ if (gfp_mask & __GFP_WAIT)
+ gfp_mask |= __GFP_REPEAT;
+
+ skb = alloc_skb(header_len, gfp_mask);
+ if (!skb)
goto failure;
+
+ skb->truesize += data_len;
+
+ for (i = 0; npages > 0; i++) {
+ int order = max_page_order;
+
+ while (order) {
+ if (npages >= 1 << order) {
+ page = alloc_pages(sk->sk_allocation |
+ __GFP_COMP | __GFP_NOWARN,
+ order);
+ if (page)
+ goto fill_page;
+ }
+ order--;
+ }
+ page = alloc_page(sk->sk_allocation);
+ if (!page)
+ goto failure;
+fill_page:
+ chunk = min_t(unsigned long, data_len,
+ PAGE_SIZE << order);
+ skb_fill_page_desc(skb, i, page, 0, chunk);
+ data_len -= chunk;
+ npages -= 1 << order;
}
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- err = -EAGAIN;
- if (!timeo)
- goto failure;
- if (signal_pending(current))
- goto interrupted;
- timeo = sock_wait_for_wmem(sk, timeo);
}
skb_set_owner_w(skb, sk);
@@ -1799,6 +1820,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
interrupted:
err = sock_intr_errno(timeo);
failure:
+ kfree_skb(skb);
*errcode = err;
return NULL;
}
@@ -1807,7 +1829,7 @@ EXPORT_SYMBOL(sock_alloc_send_pskb);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
int noblock, int *errcode)
{
- return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
+ return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
@@ -2425,6 +2447,52 @@ void sock_enable_timestamp(struct sock *sk, int flag)
}
}
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+ int level, int type)
+{
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+ int copied, err;
+
+ err = -EAGAIN;
+ skb = skb_dequeue(&sk->sk_error_queue);
+ if (skb == NULL)
+ goto out;
+
+ copied = skb->len;
+ if (copied > len) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto out_free_skb;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ serr = SKB_EXT_ERR(skb);
+ put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
+
+ msg->msg_flags |= MSG_ERRQUEUE;
+ err = copied;
+
+ /* Reset and regenerate socket error */
+ spin_lock_bh(&sk->sk_error_queue.lock);
+ sk->sk_err = 0;
+ if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+ sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+ sk->sk_error_report(sk);
+ } else
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+
+out_free_skb:
+ kfree_skb(skb);
+out:
+ return err;
+}
+EXPORT_SYMBOL(sock_recv_errqueue);
+
/*
* Get a socket option on an socket.
*
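The reworked sock_alloc_send_pskb() above fills the frags with the highest page order that still fits the remaining pages, steps the order down on allocation failure, and only then falls back to single pages. A compact user-space sketch of that fallback strategy (try_alloc() is a stand-in for alloc_pages(), and the "orders above 2 fail" rule is invented just to exercise the fallback path):

#include <stdio.h>
#include <stdlib.h>

static void *try_alloc(int order)
{
	/* pretend high-order allocations fail, as they may under fragmentation */
	return order > 2 ? NULL : malloc((size_t)4096 << order);
}

int main(void)
{
	int npages = 13, max_page_order = 3;

	while (npages > 0) {
		int order = max_page_order;
		void *chunk = NULL;

		while (order) {
			if (npages >= (1 << order) && (chunk = try_alloc(order)))
				break;
			order--;
		}
		if (!chunk) {
			order = 0;
			chunk = try_alloc(0);	/* final order-0 fallback */
			if (!chunk)
				return 1;
		}
		npages -= 1 << order;
		printf("filled a %2d-page chunk, %2d pages left\n",
		       1 << order, npages);
		free(chunk);
	}
	return 0;
}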
diff --git a/net/core/stream.c b/net/core/stream.c
index f5df85d..512f0a2 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -30,7 +30,7 @@ void sk_stream_write_space(struct sock *sk)
struct socket *sock = sk->sk_socket;
struct socket_wq *wq;
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
+ if (sk_stream_is_writeable(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
rcu_read_lock();
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 6c7c78b..ba64750 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -336,7 +336,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
@@ -347,7 +347,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
* wspace test but before the flags are set,
* IO signal will be lost.
*/
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ if (sk_stream_is_writeable(sk))
mask |= POLLOUT | POLLWRNORM;
}
}
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 3b9d5f2..c85e71e 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -67,39 +67,6 @@ static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
static LIST_HEAD(lowpan_devices);
-/*
- * Uncompression of linklocal:
- * 0 -> 16 bytes from packet
- * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
- * 2 -> 2 bytes from prefix - zeroes + 2 from packet
- * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr
- *
- * NOTE: => the uncompress function does change 0xf to 0x10
- * NOTE: 0x00 => no-autoconfig => unspecified
- */
-static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};
-
-/*
- * Uncompression of ctx-based:
- * 0 -> 0 bits from packet [unspecified / reserved]
- * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
- * 2 -> 8 bytes from prefix - zeroes + 2 from packet
- * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr
- */
-static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
-
-/*
- * Uncompression of ctx-base
- * 0 -> 0 bits from packet
- * 1 -> 2 bytes from prefix - bunch of zeroes 5 from packet
- * 2 -> 2 bytes from prefix - zeroes + 3 from packet
- * 3 -> 2 bytes from prefix - infer 1 bytes from lladdr
- */
-static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
-
-/* Link local prefix */
-static const u8 lowpan_llprefix[] = {0xfe, 0x80};
-
/* private device info */
struct lowpan_dev_info {
struct net_device *real_dev; /* real WPAN device ptr */
@@ -191,55 +158,177 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
return rol8(val, shift);
}
-static void
-lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
+/*
+ * Uncompress address function for source and
+ * destination address(non-multicast).
+ *
+ * address_mode is sam value or dam value.
+ */
+static int
+lowpan_uncompress_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 address_mode,
+ const struct ieee802154_addr *lladdr)
{
- memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
- /* second bit-flip (Universe/Local) is done according RFC2464 */
- ipaddr->s6_addr[8] ^= 0x02;
+ bool fail;
+
+ switch (address_mode) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* for global link addresses */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
+ break;
+ case LOWPAN_IPHC_ADDR_02:
+ /* fe:80::ff:fe00:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
+ break;
+ case LOWPAN_IPHC_ADDR_03:
+ fail = false;
+ switch (lladdr->addr_type) {
+ case IEEE802154_ADDR_LONG:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr->hwaddr,
+ IEEE802154_ADDR_LEN);
+ /* second bit-flip (Universe/Local)
+ * is done according RFC2464
+ */
+ ipaddr->s6_addr[8] ^= 0x02;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ /* fe:80::ff:fe00:XXXX
+ * \__/
+ * short_addr
+ *
+ * Universe/Local bit is zero.
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ ipaddr->s6_addr16[7] = htons(lladdr->short_addr);
+ break;
+ default:
+ pr_debug("Invalid addr_type set\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_debug("Invalid address mode value: 0x%x\n", address_mode);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
+ }
+
+ lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 addr is:\n",
+ ipaddr->s6_addr, 16);
+
+ return 0;
}
-/*
- * Uncompress addresses based on a prefix and a postfix with zeroes in
- * between. If the postfix is zero in length it will use the link address
- * to configure the IP address (autoconf style).
- * pref_post_count takes a byte where the first nibble specify prefix count
- * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
+/* Uncompress address function for source context
+ * based address(non-multicast).
*/
static int
-lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
- u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
+lowpan_uncompress_context_based_src_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 sam)
{
- u8 prefcount = pref_post_count >> 4;
- u8 postcount = pref_post_count & 0x0f;
-
- /* full nibble 15 => 16 */
- prefcount = (prefcount == 15 ? 16 : prefcount);
- postcount = (postcount == 15 ? 16 : postcount);
-
- if (lladdr)
- lowpan_raw_dump_inline(__func__, "linklocal address",
- lladdr, IEEE802154_ADDR_LEN);
- if (prefcount > 0)
- memcpy(ipaddr, prefix, prefcount);
-
- if (prefcount + postcount < 16)
- memset(&ipaddr->s6_addr[prefcount], 0,
- 16 - (prefcount + postcount));
-
- if (postcount > 0) {
- memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
- skb_pull(skb, postcount);
- } else if (prefcount > 0) {
- if (lladdr == NULL)
- return -EINVAL;
+ switch (sam) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* unspec address ::
+ * Do nothing, address is already ::
+ */
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_02:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_03:
+ /* TODO */
+ netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
+ return -EINVAL;
+ default:
+ pr_debug("Invalid sam value: 0x%x\n", sam);
+ return -EINVAL;
+ }
+
+ lowpan_raw_dump_inline(NULL,
+ "Reconstructed context based ipv6 src addr is:\n",
+ ipaddr->s6_addr, 16);
+
+ return 0;
+}
- /* no IID based configuration if no prefix and no data */
- lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
+/* Uncompress function for multicast destination address,
+ * when M bit is set.
+ */
+static int
+lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 dam)
+{
+ bool fail;
+
+ switch (dam) {
+ case LOWPAN_IPHC_DAM_00:
+ /* 00: 128 bits. The full address
+ * is carried in-line.
+ */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_DAM_01:
+ /* 01: 48 bits. The address takes
+ * the form ffXX::00XX:XXXX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
+ break;
+ case LOWPAN_IPHC_DAM_10:
+ /* 10: 32 bits. The address takes
+ * the form ffXX::00XX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
+ break;
+ case LOWPAN_IPHC_DAM_11:
+ /* 11: 8 bits. The address takes
+ * the form ff02::00XX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ ipaddr->s6_addr[1] = 0x02;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
+ break;
+ default:
+ pr_debug("DAM value has a wrong value: 0x%x\n", dam);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
}
- pr_debug("uncompressing %d + %d => ", prefcount, postcount);
- lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
+ lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is:\n",
+ ipaddr->s6_addr, 16);
return 0;
}
@@ -702,6 +791,12 @@ lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
skb_reserve(frame->skb, sizeof(struct ipv6hdr));
skb_put(frame->skb, frame->length);
+ /* copy the first control block to keep a
+ * trace of the link-layer addresses in case
+ * of a link-local compressed address
+ */
+ memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));
+
init_timer(&frame->timer);
/* time out is the same as for ipv6 - 60 sec */
frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
@@ -723,9 +818,9 @@ frame_err:
static int
lowpan_process_data(struct sk_buff *skb)
{
- struct ipv6hdr hdr;
+ struct ipv6hdr hdr = {};
u8 tmp, iphc0, iphc1, num_context = 0;
- u8 *_saddr, *_daddr;
+ const struct ieee802154_addr *_saddr, *_daddr;
int err;
lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
@@ -828,8 +923,8 @@ lowpan_process_data(struct sk_buff *skb)
if (lowpan_fetch_skb_u8(skb, &iphc1))
goto drop;
- _saddr = mac_cb(skb)->sa.hwaddr;
- _daddr = mac_cb(skb)->da.hwaddr;
+ _saddr = &mac_cb(skb)->sa;
+ _daddr = &mac_cb(skb)->da;
pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
@@ -868,8 +963,6 @@ lowpan_process_data(struct sk_buff *skb)
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
- hdr.flow_lbl[1] = 0;
- hdr.flow_lbl[2] = 0;
break;
/*
* Flow Label carried in-line
@@ -885,10 +978,6 @@ lowpan_process_data(struct sk_buff *skb)
break;
/* Traffic Class and Flow Label are elided */
case 3: /* 11b */
- hdr.priority = 0;
- hdr.flow_lbl[0] = 0;
- hdr.flow_lbl[1] = 0;
- hdr.flow_lbl[2] = 0;
break;
default:
break;
@@ -915,10 +1004,18 @@ lowpan_process_data(struct sk_buff *skb)
/* Extract SAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
- /* Source address uncompression */
- pr_debug("source address stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
- lowpan_unc_llconf[tmp], skb->data);
+ if (iphc1 & LOWPAN_IPHC_SAC) {
+ /* Source address context based uncompression */
+ pr_debug("SAC bit is set. Handle context based source address.\n");
+ err = lowpan_uncompress_context_based_src_addr(
+ skb, &hdr.saddr, tmp);
+ } else {
+ /* Source address uncompression */
+ pr_debug("source address stateless compression\n");
+ err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr);
+ }
+
+ /* Check on error of previous branch */
if (err)
goto drop;
@@ -931,23 +1028,14 @@ lowpan_process_data(struct sk_buff *skb)
pr_debug("dest: context-based mcast compression\n");
/* TODO: implement this */
} else {
- u8 prefix[] = {0xff, 0x02};
-
- pr_debug("dest: non context-based mcast compression\n");
- if (0 < tmp && tmp < 3) {
- if (lowpan_fetch_skb_u8(skb, &prefix[1]))
- goto drop;
- }
-
- err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
- lowpan_unc_mxconf[tmp], NULL);
+ err = lowpan_uncompress_multicast_daddr(
+ skb, &hdr.daddr, tmp);
if (err)
goto drop;
}
} else {
pr_debug("dest: stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
- lowpan_unc_llconf[tmp], skb->data);
+ err = lowpan_uncompress_addr(skb, &hdr.daddr, tmp, _daddr);
if (err)
goto drop;
}
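As a concrete instance of the new lowpan_uncompress_multicast_daddr() above: with DAM=11 only one byte is carried in-line and the address is rebuilt as ff02::00XX. A tiny user-space sketch (the in-line byte 0xfb is an arbitrary example value):

#include <stdio.h>

int main(void)
{
	unsigned char addr[16] = { 0 };
	unsigned char inline_byte = 0xfb;	/* the single byte fetched from the skb */

	addr[0] = 0xff;				/* ff02::00XX pattern */
	addr[1] = 0x02;
	addr[15] = inline_byte;

	for (int i = 0; i < 16; i += 2)
		printf("%02x%02x%c", addr[i], addr[i + 1], i == 14 ? '\n' : ':');
	return 0;
}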
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 4b8f917..2869c05 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -193,10 +193,12 @@
/* Values of fields within the IPHC encoding second byte */
#define LOWPAN_IPHC_CID 0x80
+#define LOWPAN_IPHC_ADDR_00 0x00
+#define LOWPAN_IPHC_ADDR_01 0x01
+#define LOWPAN_IPHC_ADDR_02 0x02
+#define LOWPAN_IPHC_ADDR_03 0x03
+
#define LOWPAN_IPHC_SAC 0x40
-#define LOWPAN_IPHC_SAM_00 0x00
-#define LOWPAN_IPHC_SAM_01 0x10
-#define LOWPAN_IPHC_SAM_10 0x20
#define LOWPAN_IPHC_SAM 0x30
#define LOWPAN_IPHC_SAM_BIT 4
@@ -230,4 +232,16 @@
dest = 16 bit inline */
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
+static inline bool lowpan_fetch_skb(struct sk_buff *skb,
+ void *data, const unsigned int len)
+{
+ if (unlikely(!pskb_may_pull(skb, len)))
+ return true;
+
+ skb_copy_from_linear_data(skb, data, len);
+ skb_pull(skb, len);
+
+ return false;
+}
+
#endif /* __6LOWPAN_H__ */
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 34ca6d5..a1b5bcb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -73,6 +73,8 @@ static struct ipv4_devconf ipv4_devconf = {
[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
+ [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
+ [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
},
};
@@ -83,6 +85,8 @@ static struct ipv4_devconf ipv4_devconf_dflt = {
[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
+ [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
+ [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
},
};
@@ -1126,10 +1130,7 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
if (len < (int) sizeof(ifr))
break;
memset(&ifr, 0, sizeof(struct ifreq));
- if (ifa->ifa_label)
- strcpy(ifr.ifr_name, ifa->ifa_label);
- else
- strcpy(ifr.ifr_name, dev->name);
+ strcpy(ifr.ifr_name, ifa->ifa_label);
(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
@@ -2097,11 +2098,15 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
+ DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
+ "force_igmp_version"),
+ DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
+ "igmpv2_unsolicited_report_interval"),
+ DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
+ "igmpv3_unsolicited_report_interval"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
- DEVINET_SYSCTL_FLUSHING_ENTRY(FORCE_IGMP_VERSION,
- "force_igmp_version"),
DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
"promote_secondaries"),
DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 26aa65d..523be38 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -101,6 +101,30 @@ errout:
return err;
}
+static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+{
+ struct fib_result *result = (struct fib_result *) arg->result;
+ struct net_device *dev = result->fi->fib_dev;
+
+ /* do not accept result if the route does
+ * not meet the required prefix length
+ */
+ if (result->prefixlen <= rule->suppress_prefixlen)
+ goto suppress_route;
+
+ /* do not accept result if the route uses a device
+ * belonging to a forbidden interface group
+ */
+ if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup)
+ goto suppress_route;
+
+ return false;
+
+suppress_route:
+ if (!(arg->flags & FIB_LOOKUP_NOREF))
+ fib_info_put(result->fi);
+ return true;
+}
static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
@@ -267,6 +291,7 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
.rule_size = sizeof(struct fib4_rule),
.addr_size = sizeof(u32),
.action = fib4_rule_action,
+ .suppress = fib4_rule_suppress,
.match = fib4_rule_match,
.configure = fib4_rule_configure,
.delete = fib4_rule_delete,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index cd71190..d6c0e64 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -88,6 +88,7 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>
+#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/arp.h>
@@ -113,7 +114,8 @@
#define IGMP_V1_Router_Present_Timeout (400*HZ)
#define IGMP_V2_Router_Present_Timeout (400*HZ)
-#define IGMP_Unsolicited_Report_Interval (10*HZ)
+#define IGMP_V2_Unsolicited_Report_Interval (10*HZ)
+#define IGMP_V3_Unsolicited_Report_Interval (1*HZ)
#define IGMP_Query_Response_Interval (10*HZ)
#define IGMP_Unsolicited_Report_Count 2
@@ -138,6 +140,29 @@
((in_dev)->mr_v2_seen && \
time_before(jiffies, (in_dev)->mr_v2_seen)))
+static int unsolicited_report_interval(struct in_device *in_dev)
+{
+ int interval_ms, interval_jiffies;
+
+ if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
+ interval_ms = IN_DEV_CONF_GET(
+ in_dev,
+ IGMPV2_UNSOLICITED_REPORT_INTERVAL);
+ else /* v3 */
+ interval_ms = IN_DEV_CONF_GET(
+ in_dev,
+ IGMPV3_UNSOLICITED_REPORT_INTERVAL);
+
+ interval_jiffies = msecs_to_jiffies(interval_ms);
+
+ /* _timer functions can't handle a delay of 0 jiffies so ensure
+ * we always return a positive value.
+ */
+ if (interval_jiffies <= 0)
+ interval_jiffies = 1;
+ return interval_jiffies;
+}
+
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
static void igmpv3_clear_delrec(struct in_device *in_dev);
@@ -315,6 +340,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
if (size < 256)
return NULL;
}
+ skb->priority = TC_PRIO_CONTROL;
igmp_skb_size(skb) = size;
rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
@@ -670,6 +696,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
ip_rt_put(rt);
return -1;
}
+ skb->priority = TC_PRIO_CONTROL;
skb_dst_set(skb, &rt->dst);
@@ -719,7 +746,8 @@ static void igmp_ifc_timer_expire(unsigned long data)
igmpv3_send_cr(in_dev);
if (in_dev->mr_ifc_count) {
in_dev->mr_ifc_count--;
- igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
+ igmp_ifc_start_timer(in_dev,
+ unsolicited_report_interval(in_dev));
}
__in_dev_put(in_dev);
}
@@ -744,7 +772,7 @@ static void igmp_timer_expire(unsigned long data)
if (im->unsolicit_count) {
im->unsolicit_count--;
- igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
+ igmp_start_timer(im, unsolicited_report_interval(in_dev));
}
im->reporter = 1;
spin_unlock(&im->lock);
@@ -1323,16 +1351,17 @@ out:
EXPORT_SYMBOL(ip_mc_inc_group);
/*
- * Resend IGMP JOIN report; used for bonding.
- * Called with rcu_read_lock()
+ * Resend IGMP JOIN report; used by netdev notifier.
*/
-void ip_mc_rejoin_groups(struct in_device *in_dev)
+static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
struct ip_mc_list *im;
int type;
- for_each_pmc_rcu(in_dev, im) {
+ ASSERT_RTNL();
+
+ for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
@@ -1349,7 +1378,6 @@ void ip_mc_rejoin_groups(struct in_device *in_dev)
}
#endif
}
-EXPORT_SYMBOL(ip_mc_rejoin_groups);
/*
* A socket has left a multicast group on device dev
@@ -2735,8 +2763,42 @@ static struct pernet_operations igmp_net_ops = {
.exit = igmp_net_exit,
};
+static int igmp_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct in_device *in_dev;
+
+ switch (event) {
+ case NETDEV_RESEND_IGMP:
+ in_dev = __in_dev_get_rtnl(dev);
+ if (in_dev)
+ ip_mc_rejoin_groups(in_dev);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block igmp_notifier = {
+ .notifier_call = igmp_netdev_event,
+};
+
int __init igmp_mc_proc_init(void)
{
- return register_pernet_subsys(&igmp_net_ops);
+ int err;
+
+ err = register_pernet_subsys(&igmp_net_ops);
+ if (err)
+ return err;
+ err = register_netdevice_notifier(&igmp_notifier);
+ if (err)
+ goto reg_notif_fail;
+ return 0;
+
+reg_notif_fail:
+ unregister_pernet_subsys(&igmp_net_ops);
+ return err;
}
#endif
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8d6939e..d7aea4c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -534,7 +534,7 @@ static int __net_init ipgre_init_net(struct net *net)
static void __net_exit ipgre_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipgre_link_ops);
}
static struct pernet_operations ipgre_net_ops = {
@@ -767,7 +767,7 @@ static int __net_init ipgre_tap_init_net(struct net *net)
static void __net_exit ipgre_tap_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}
static struct pernet_operations ipgre_tap_net_ops = {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 15e3e68..054a3e9 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -141,6 +141,7 @@
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
+#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
@@ -410,6 +411,13 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
+ BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
+ BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
+ BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
+ IP_ADD_STATS_BH(dev_net(dev),
+ IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
+ max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
+
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index ca1cb2d..830de3f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -350,7 +350,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
struct flowi4 fl4;
struct rtable *rt;
- rt = ip_route_output_tunnel(dev_net(dev), &fl4,
+ rt = ip_route_output_tunnel(tunnel->net, &fl4,
tunnel->parms.iph.protocol,
iph->daddr, iph->saddr,
tunnel->parms.o_key,
@@ -365,7 +365,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
}
if (!tdev && tunnel->parms.link)
- tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
+ tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev) {
hlen = tdev->hard_header_len + tdev->needed_headroom;
@@ -454,15 +454,16 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
- if (tunnel->net != dev_net(tunnel->dev))
- skb_scrub_packet(skb);
-
if (tunnel->dev->type == ARPHRD_ETHER) {
skb->protocol = eth_type_trans(skb, tunnel->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
} else {
skb->dev = tunnel->dev;
}
+
+ if (!net_eq(tunnel->net, dev_net(tunnel->dev)))
+ skb_scrub_packet(skb);
+
gro_cells_receive(&tunnel->gro_cells, skb);
return 0;
@@ -613,7 +614,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (tunnel->net != dev_net(dev))
+ if (!net_eq(tunnel->net, dev_net(dev)))
skb_scrub_packet(skb);
if (tunnel->err_count > 0) {
@@ -653,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
}
- err = iptunnel_xmit(dev_net(dev), rt, skb,
+ err = iptunnel_xmit(tunnel->net, rt, skb,
fl4.saddr, fl4.daddr, protocol,
ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df);
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
@@ -820,11 +821,10 @@ static void ip_tunnel_dev_free(struct net_device *dev)
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
- struct net *net = dev_net(dev);
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_net *itn;
- itn = net_generic(net, tunnel->ip_tnl_net_id);
+ itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
if (itn->fb_tunnel_dev != dev) {
ip_tunnel_del(netdev_priv(dev));
@@ -838,56 +838,68 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
{
struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
struct ip_tunnel_parm parms;
+ unsigned int i;
- itn->tunnels = kzalloc(IP_TNL_HASH_SIZE * sizeof(struct hlist_head), GFP_KERNEL);
- if (!itn->tunnels)
- return -ENOMEM;
+ for (i = 0; i < IP_TNL_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&itn->tunnels[i]);
if (!ops) {
itn->fb_tunnel_dev = NULL;
return 0;
}
+
memset(&parms, 0, sizeof(parms));
if (devname)
strlcpy(parms.name, devname, IFNAMSIZ);
rtnl_lock();
itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
+ /* FB netdevice is special: we have one, and only one per netns.
+ * Allowing it to be moved to another netns is clearly unsafe.
+ */
+ if (!IS_ERR(itn->fb_tunnel_dev))
+ itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
rtnl_unlock();
- if (IS_ERR(itn->fb_tunnel_dev)) {
- kfree(itn->tunnels);
- return PTR_ERR(itn->fb_tunnel_dev);
- }
- return 0;
+ return PTR_RET(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
-static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
+static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
+ struct rtnl_link_ops *ops)
{
+ struct net *net = dev_net(itn->fb_tunnel_dev);
+ struct net_device *dev, *aux;
int h;
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == ops)
+ unregister_netdevice_queue(dev, head);
+
for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
struct ip_tunnel *t;
struct hlist_node *n;
struct hlist_head *thead = &itn->tunnels[h];
hlist_for_each_entry_safe(t, n, thead, hash_node)
- unregister_netdevice_queue(t->dev, head);
+ /* If dev is in the same netns, it has already
+ * been added to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(t->dev), net))
+ unregister_netdevice_queue(t->dev, head);
}
if (itn->fb_tunnel_dev)
unregister_netdevice_queue(itn->fb_tunnel_dev, head);
}
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
LIST_HEAD(list);
rtnl_lock();
- ip_tunnel_destroy(itn, &list);
+ ip_tunnel_destroy(itn, &list, ops);
unregister_netdevice_many(&list);
rtnl_unlock();
- kfree(itn->tunnels);
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
@@ -929,23 +941,21 @@ EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p)
{
- struct ip_tunnel *t, *nt;
- struct net *net = dev_net(dev);
+ struct ip_tunnel *t;
struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct net *net = tunnel->net;
struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
if (dev == itn->fb_tunnel_dev)
return -EINVAL;
- nt = netdev_priv(dev);
-
t = ip_tunnel_find(itn, p, dev->type);
if (t) {
if (t->dev != dev)
return -EEXIST;
} else {
- t = nt;
+ t = tunnel;
if (dev->type != ARPHRD_ETHER) {
unsigned int nflags = 0;
@@ -984,6 +994,7 @@ int ip_tunnel_init(struct net_device *dev)
}
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
iph->version = 4;
iph->ihl = 5;
@@ -994,8 +1005,8 @@ EXPORT_SYMBOL_GPL(ip_tunnel_init);
void ip_tunnel_uninit(struct net_device *dev)
{
- struct net *net = dev_net(dev);
struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct net *net = tunnel->net;
struct ip_tunnel_net *itn;
itn = net_generic(net, tunnel->ip_tnl_net_id);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 17cc0ff..e805e7b 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -44,176 +44,10 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#define HASH_SIZE 16
-#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
-
static struct rtnl_link_ops vti_link_ops __read_mostly;
static int vti_net_id __read_mostly;
-struct vti_net {
- struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_wc[1];
- struct ip_tunnel __rcu **tunnels[4];
-
- struct net_device *fb_tunnel_dev;
-};
-
-static int vti_fb_tunnel_init(struct net_device *dev);
static int vti_tunnel_init(struct net_device *dev);
-static void vti_tunnel_setup(struct net_device *dev);
-static void vti_dev_free(struct net_device *dev);
-static int vti_tunnel_bind_dev(struct net_device *dev);
-
-#define VTI_XMIT(stats1, stats2) do { \
- int err; \
- int pkt_len = skb->len; \
- err = dst_output(skb); \
- if (net_xmit_eval(err) == 0) { \
- u64_stats_update_begin(&(stats1)->syncp); \
- (stats1)->tx_bytes += pkt_len; \
- (stats1)->tx_packets++; \
- u64_stats_update_end(&(stats1)->syncp); \
- } else { \
- (stats2)->tx_errors++; \
- (stats2)->tx_aborted_errors++; \
- } \
-} while (0)
-
-
-static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
- __be32 remote, __be32 local)
-{
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(local);
- struct ip_tunnel *t;
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
- if (local == t->parms.iph.saddr &&
- remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
- return t;
- for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
- if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
- return t;
-
- for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
- if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
- return t;
-
- for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
- if (t && (t->dev->flags&IFF_UP))
- return t;
- return NULL;
-}
-
-static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
- struct ip_tunnel_parm *parms)
-{
- __be32 remote = parms->iph.daddr;
- __be32 local = parms->iph.saddr;
- unsigned h = 0;
- int prio = 0;
-
- if (remote) {
- prio |= 2;
- h ^= HASH(remote);
- }
- if (local) {
- prio |= 1;
- h ^= HASH(local);
- }
- return &ipn->tunnels[prio][h];
-}
-
-static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
- struct ip_tunnel *t)
-{
- return __vti_bucket(ipn, &t->parms);
-}
-
-static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
-{
- struct ip_tunnel __rcu **tp;
- struct ip_tunnel *iter;
-
- for (tp = vti_bucket(ipn, t);
- (iter = rtnl_dereference(*tp)) != NULL;
- tp = &iter->next) {
- if (t == iter) {
- rcu_assign_pointer(*tp, t->next);
- break;
- }
- }
-}
-
-static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
-{
- struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);
-
- rcu_assign_pointer(t->next, rtnl_dereference(*tp));
- rcu_assign_pointer(*tp, t);
-}
-
-static struct ip_tunnel *vti_tunnel_locate(struct net *net,
- struct ip_tunnel_parm *parms,
- int create)
-{
- __be32 remote = parms->iph.daddr;
- __be32 local = parms->iph.saddr;
- struct ip_tunnel *t, *nt;
- struct ip_tunnel __rcu **tp;
- struct net_device *dev;
- char name[IFNAMSIZ];
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- for (tp = __vti_bucket(ipn, parms);
- (t = rtnl_dereference(*tp)) != NULL;
- tp = &t->next) {
- if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
- return t;
- }
- if (!create)
- return NULL;
-
- if (parms->name[0])
- strlcpy(name, parms->name, IFNAMSIZ);
- else
- strcpy(name, "vti%d");
-
- dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
- if (dev == NULL)
- return NULL;
-
- dev_net_set(dev, net);
-
- nt = netdev_priv(dev);
- nt->parms = *parms;
- dev->rtnl_link_ops = &vti_link_ops;
-
- vti_tunnel_bind_dev(dev);
-
- if (register_netdevice(dev) < 0)
- goto failed_free;
-
- dev_hold(dev);
- vti_tunnel_link(ipn, nt);
- return nt;
-
-failed_free:
- free_netdev(dev);
- return NULL;
-}
-
-static void vti_tunnel_uninit(struct net_device *dev)
-{
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- vti_tunnel_unlink(ipn, netdev_priv(dev));
- dev_put(dev);
-}
static int vti_err(struct sk_buff *skb, u32 info)
{
@@ -222,6 +56,8 @@ static int vti_err(struct sk_buff *skb, u32 info)
* 8 bytes of packet payload. It means, that precise relaying of
* ICMP in the real Internet is absolutely infeasible.
*/
+ struct net *net = dev_net(skb->dev);
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
@@ -252,7 +88,8 @@ static int vti_err(struct sk_buff *skb, u32 info)
err = -ENOENT;
- t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
+ t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->daddr, iph->saddr, 0);
if (t == NULL)
goto out;
@@ -281,8 +118,11 @@ static int vti_rcv(struct sk_buff *skb)
{
struct ip_tunnel *tunnel;
const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = dev_net(skb->dev);
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
- tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->saddr, iph->daddr, 0);
if (tunnel != NULL) {
struct pcpu_tstats *tstats;
@@ -311,7 +151,6 @@ static int vti_rcv(struct sk_buff *skb)
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct pcpu_tstats *tstats;
struct iphdr *tiph = &tunnel->parms.iph;
u8 tos;
struct rtable *rt; /* Route to the other host */
@@ -319,6 +158,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct iphdr *old_iph = ip_hdr(skb);
__be32 dst = tiph->daddr;
struct flowi4 fl4;
+ int err;
if (skb->protocol != htons(ETH_P_IP))
goto tx_error;
@@ -367,8 +207,10 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset(skb);
skb->dev = skb_dst(skb)->dev;
- tstats = this_cpu_ptr(dev->tstats);
- VTI_XMIT(tstats, &dev->stats);
+ err = dst_output(skb);
+ if (net_xmit_eval(err) == 0)
+ err = skb->len;
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
tx_error_icmp:
@@ -379,198 +221,57 @@ tx_error:
return NETDEV_TX_OK;
}
-static int vti_tunnel_bind_dev(struct net_device *dev)
-{
- struct net_device *tdev = NULL;
- struct ip_tunnel *tunnel;
- struct iphdr *iph;
-
- tunnel = netdev_priv(dev);
- iph = &tunnel->parms.iph;
-
- if (iph->daddr) {
- struct rtable *rt;
- struct flowi4 fl4;
- memset(&fl4, 0, sizeof(fl4));
- flowi4_init_output(&fl4, tunnel->parms.link,
- be32_to_cpu(tunnel->parms.i_key),
- RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
- IPPROTO_IPIP, 0,
- iph->daddr, iph->saddr, 0, 0);
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (!IS_ERR(rt)) {
- tdev = rt->dst.dev;
- ip_rt_put(rt);
- }
- dev->flags |= IFF_POINTOPOINT;
- }
-
- if (!tdev && tunnel->parms.link)
- tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
-
- if (tdev) {
- dev->hard_header_len = tdev->hard_header_len +
- sizeof(struct iphdr);
- dev->mtu = tdev->mtu;
- }
- dev->iflink = tunnel->parms.link;
- return dev->mtu;
-}
-
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int err = 0;
struct ip_tunnel_parm p;
- struct ip_tunnel *t;
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- switch (cmd) {
- case SIOCGETTUNNEL:
- t = NULL;
- if (dev == ipn->fb_tunnel_dev) {
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
- sizeof(p))) {
- err = -EFAULT;
- break;
- }
- t = vti_tunnel_locate(net, &p, 0);
- }
- if (t == NULL)
- t = netdev_priv(dev);
- memcpy(&p, &t->parms, sizeof(p));
- p.i_flags |= GRE_KEY | VTI_ISVTI;
- p.o_flags |= GRE_KEY;
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
- err = -EFAULT;
- break;
-
- case SIOCADDTUNNEL:
- case SIOCCHGTUNNEL:
- err = -EPERM;
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
- goto done;
- err = -EFAULT;
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
- goto done;
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+ return -EFAULT;
- err = -EINVAL;
+ if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
p.iph.ihl != 5)
- goto done;
-
- t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
-
- if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
- if (t != NULL) {
- if (t->dev != dev) {
- err = -EEXIST;
- break;
- }
- } else {
- if (((dev->flags&IFF_POINTOPOINT) &&
- !p.iph.daddr) ||
- (!(dev->flags&IFF_POINTOPOINT) &&
- p.iph.daddr)) {
- err = -EINVAL;
- break;
- }
- t = netdev_priv(dev);
- vti_tunnel_unlink(ipn, t);
- synchronize_net();
- t->parms.iph.saddr = p.iph.saddr;
- t->parms.iph.daddr = p.iph.daddr;
- t->parms.i_key = p.i_key;
- t->parms.o_key = p.o_key;
- t->parms.iph.protocol = IPPROTO_IPIP;
- memcpy(dev->dev_addr, &p.iph.saddr, 4);
- memcpy(dev->broadcast, &p.iph.daddr, 4);
- vti_tunnel_link(ipn, t);
- netdev_state_change(dev);
- }
- }
-
- if (t) {
- err = 0;
- if (cmd == SIOCCHGTUNNEL) {
- t->parms.i_key = p.i_key;
- t->parms.o_key = p.o_key;
- if (t->parms.link != p.link) {
- t->parms.link = p.link;
- vti_tunnel_bind_dev(dev);
- netdev_state_change(dev);
- }
- }
- p.i_flags |= GRE_KEY | VTI_ISVTI;
- p.o_flags |= GRE_KEY;
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
- sizeof(p)))
- err = -EFAULT;
- } else
- err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
- break;
+ return -EINVAL;
+ }
- case SIOCDELTUNNEL:
- err = -EPERM;
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
- goto done;
-
- if (dev == ipn->fb_tunnel_dev) {
- err = -EFAULT;
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
- sizeof(p)))
- goto done;
- err = -ENOENT;
-
- t = vti_tunnel_locate(net, &p, 0);
- if (t == NULL)
- goto done;
- err = -EPERM;
- if (t->dev == ipn->fb_tunnel_dev)
- goto done;
- dev = t->dev;
- }
- unregister_netdevice(dev);
- err = 0;
- break;
+ err = ip_tunnel_ioctl(dev, &p, cmd);
+ if (err)
+ return err;
- default:
- err = -EINVAL;
+ if (cmd != SIOCDELTUNNEL) {
+ p.i_flags |= GRE_KEY | VTI_ISVTI;
+ p.o_flags |= GRE_KEY;
}
-done:
- return err;
-}
-
-static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < 68 || new_mtu > 0xFFF8)
- return -EINVAL;
- dev->mtu = new_mtu;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+ return -EFAULT;
return 0;
}
static const struct net_device_ops vti_netdev_ops = {
.ndo_init = vti_tunnel_init,
- .ndo_uninit = vti_tunnel_uninit,
+ .ndo_uninit = ip_tunnel_uninit,
.ndo_start_xmit = vti_tunnel_xmit,
.ndo_do_ioctl = vti_tunnel_ioctl,
- .ndo_change_mtu = vti_tunnel_change_mtu,
+ .ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
};
-static void vti_dev_free(struct net_device *dev)
+static void vti_tunnel_setup(struct net_device *dev)
{
- free_percpu(dev->tstats);
- free_netdev(dev);
+ dev->netdev_ops = &vti_netdev_ops;
+ ip_tunnel_setup(dev, vti_net_id);
}
-static void vti_tunnel_setup(struct net_device *dev)
+static int vti_tunnel_init(struct net_device *dev)
{
- dev->netdev_ops = &vti_netdev_ops;
- dev->destructor = vti_dev_free;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct iphdr *iph = &tunnel->parms.iph;
+
+ memcpy(dev->dev_addr, &iph->saddr, 4);
+ memcpy(dev->broadcast, &iph->daddr, 4);
dev->type = ARPHRD_TUNNEL;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
@@ -581,38 +282,18 @@ static void vti_tunnel_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_LLTX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
-}
-static int vti_tunnel_init(struct net_device *dev)
-{
- struct ip_tunnel *tunnel = netdev_priv(dev);
-
- tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
-
- memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
- memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
-
- dev->tstats = alloc_percpu(struct pcpu_tstats);
- if (!dev->tstats)
- return -ENOMEM;
-
- return 0;
+ return ip_tunnel_init(dev);
}
-static int __net_init vti_fb_tunnel_init(struct net_device *dev)
+static void __net_init vti_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
- struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
iph->version = 4;
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
-
- dev_hold(dev);
- rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
- return 0;
}
static struct xfrm_tunnel vti_handler __read_mostly = {
@@ -621,76 +302,30 @@ static struct xfrm_tunnel vti_handler __read_mostly = {
.priority = 1,
};
-static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
-{
- int prio;
-
- for (prio = 1; prio < 4; prio++) {
- int h;
- for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t;
-
- t = rtnl_dereference(ipn->tunnels[prio][h]);
- while (t != NULL) {
- unregister_netdevice_queue(t->dev, head);
- t = rtnl_dereference(t->next);
- }
- }
- }
-}
-
static int __net_init vti_init_net(struct net *net)
{
int err;
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- ipn->tunnels[0] = ipn->tunnels_wc;
- ipn->tunnels[1] = ipn->tunnels_l;
- ipn->tunnels[2] = ipn->tunnels_r;
- ipn->tunnels[3] = ipn->tunnels_r_l;
-
- ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
- "ip_vti0",
- vti_tunnel_setup);
- if (!ipn->fb_tunnel_dev) {
- err = -ENOMEM;
- goto err_alloc_dev;
- }
- dev_net_set(ipn->fb_tunnel_dev, net);
-
- err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
- if (err)
- goto err_reg_dev;
- ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;
+ struct ip_tunnel_net *itn;
- err = register_netdev(ipn->fb_tunnel_dev);
+ err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
if (err)
- goto err_reg_dev;
+ return err;
+ itn = net_generic(net, vti_net_id);
+ vti_fb_tunnel_init(itn->fb_tunnel_dev);
return 0;
-
-err_reg_dev:
- vti_dev_free(ipn->fb_tunnel_dev);
-err_alloc_dev:
- /* nothing */
- return err;
}
static void __net_exit vti_exit_net(struct net *net)
{
- struct vti_net *ipn = net_generic(net, vti_net_id);
- LIST_HEAD(list);
-
- rtnl_lock();
- vti_destroy_tunnels(ipn, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+ ip_tunnel_delete_net(itn, &vti_link_ops);
}
static struct pernet_operations vti_net_ops = {
.init = vti_init_net,
.exit = vti_exit_net,
.id = &vti_net_id,
- .size = sizeof(struct vti_net),
+ .size = sizeof(struct ip_tunnel_net),
};
static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -728,78 +363,19 @@ static void vti_netlink_parms(struct nlattr *data[],
static int vti_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct ip_tunnel *nt;
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
- int mtu;
- int err;
-
- nt = netdev_priv(dev);
- vti_netlink_parms(data, &nt->parms);
-
- if (vti_tunnel_locate(net, &nt->parms, 0))
- return -EEXIST;
+ struct ip_tunnel_parm parms;
- mtu = vti_tunnel_bind_dev(dev);
- if (!tb[IFLA_MTU])
- dev->mtu = mtu;
-
- err = register_netdevice(dev);
- if (err)
- goto out;
-
- dev_hold(dev);
- vti_tunnel_link(ipn, nt);
-
-out:
- return err;
+ vti_netlink_parms(data, &parms);
+ return ip_tunnel_newlink(dev, tb, &parms);
}
static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
- struct ip_tunnel *t, *nt;
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
struct ip_tunnel_parm p;
- int mtu;
-
- if (dev == ipn->fb_tunnel_dev)
- return -EINVAL;
- nt = netdev_priv(dev);
vti_netlink_parms(data, &p);
-
- t = vti_tunnel_locate(net, &p, 0);
-
- if (t) {
- if (t->dev != dev)
- return -EEXIST;
- } else {
- t = nt;
-
- vti_tunnel_unlink(ipn, t);
- t->parms.iph.saddr = p.iph.saddr;
- t->parms.iph.daddr = p.iph.daddr;
- t->parms.i_key = p.i_key;
- t->parms.o_key = p.o_key;
- if (dev->type != ARPHRD_ETHER) {
- memcpy(dev->dev_addr, &p.iph.saddr, 4);
- memcpy(dev->broadcast, &p.iph.daddr, 4);
- }
- vti_tunnel_link(ipn, t);
- netdev_state_change(dev);
- }
-
- if (t->parms.link != p.link) {
- t->parms.link = p.link;
- mtu = vti_tunnel_bind_dev(dev);
- if (!tb[IFLA_MTU])
- dev->mtu = mtu;
- netdev_state_change(dev);
- }
-
- return 0;
+ return ip_tunnel_changelink(dev, tb, &p);
}
static size_t vti_get_size(const struct net_device *dev)
@@ -865,7 +441,7 @@ static int __init vti_init(void)
err = xfrm4_mode_tunnel_input_register(&vti_handler);
if (err < 0) {
unregister_pernet_device(&vti_net_ops);
- pr_info(KERN_INFO "vti init: can't register tunnel\n");
+ pr_info("vti init: can't register tunnel\n");
}
err = rtnl_link_register(&vti_link_ops);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 51fc2a1..87bd295 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -286,7 +286,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->iflink = 0;
dev->addr_len = 4;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_LLTX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -437,7 +436,7 @@ static int __net_init ipip_init_net(struct net *net)
static void __net_exit ipip_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipip_link_ops);
}
static struct pernet_operations ipip_net_ops = {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 132a096..bacc0bc 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -127,9 +127,9 @@ static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);
-static int ip_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc_cache *cache,
- int local);
+static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local);
static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -1795,9 +1795,9 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
/* "local" means that we should preserve one skb (for local delivery) */
-static int ip_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc_cache *cache,
- int local)
+static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local)
{
int psend = -1;
int vif, ct;
@@ -1903,14 +1903,13 @@ last_forward:
ipmr_queue_xmit(net, mrt, skb2, cache, psend);
} else {
ipmr_queue_xmit(net, mrt, skb, cache, psend);
- return 0;
+ return;
}
}
dont_forward:
if (!local)
kfree_skb(skb);
- return 0;
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 30e4de9..00352ce 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -118,7 +118,7 @@ static int masq_device_event(struct notifier_block *this,
NF_CT_ASSERT(dev->ifindex != 0);
nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex);
+ (void *)(long)dev->ifindex, 0, 0);
}
return NOTIFY_DONE;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 746427c..d7d9882 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -1082,7 +1082,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 463bd12..4a03358 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -111,7 +111,7 @@ static const struct snmp_mib snmp4_ipstats_list[] = {
SNMP_MIB_SENTINEL
};
-/* Following RFC4293 items are displayed in /proc/net/netstat */
+/* Following items are displayed in /proc/net/netstat */
static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES),
SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
@@ -125,7 +125,12 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
+ /* Non RFC4293 fields */
SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
+ SNMP_MIB_ITEM("InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
+ SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dd44e0a..41d8450 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -987,7 +987,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
srcp = inet->inet_num;
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
i, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a9a54a2..727f436 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -112,7 +112,8 @@
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
-#define IP_MAX_MTU 0xFFF0
+/* IPv4 datagram length is stored in a 16-bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFF
#define RT_GC_TIMEOUT (300*HZ)
@@ -435,12 +436,12 @@ static inline int ip_rt_proc_init(void)
static inline bool rt_is_expired(const struct rtable *rth)
{
- return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
+ return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}
void rt_cache_flush(struct net *net)
{
- rt_genid_bump(net);
+ rt_genid_bump_ipv4(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
@@ -1227,10 +1228,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = 576;
}
- if (mtu > IP_MAX_MTU)
- mtu = IP_MAX_MTU;
-
- return mtu;
+ return min_t(unsigned int, mtu, IP_MAX_MTU);
}
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
@@ -1458,7 +1456,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#endif
rth->dst.output = ip_rt_bug;
- rth->rt_genid = rt_genid(dev_net(dev));
+ rth->rt_genid = rt_genid_ipv4(dev_net(dev));
rth->rt_flags = RTCF_MULTICAST;
rth->rt_type = RTN_MULTICAST;
rth->rt_is_input= 1;
@@ -1589,7 +1587,7 @@ static int __mkroute_input(struct sk_buff *skb,
goto cleanup;
}
- rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
+ rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
rth->rt_flags = flags;
rth->rt_type = res->type;
rth->rt_is_input = 1;
@@ -1760,7 +1758,7 @@ local_input:
rth->dst.tclassid = itag;
#endif
- rth->rt_genid = rt_genid(net);
+ rth->rt_genid = rt_genid_ipv4(net);
rth->rt_flags = flags|RTCF_LOCAL;
rth->rt_type = res.type;
rth->rt_is_input = 1;
@@ -1945,7 +1943,7 @@ add:
rth->dst.output = ip_output;
- rth->rt_genid = rt_genid(dev_net(dev_out));
+ rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
rth->rt_flags = flags;
rth->rt_type = type;
rth->rt_is_input = 0;
@@ -2227,7 +2225,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_iif = ort->rt_iif;
rt->rt_pmtu = ort->rt_pmtu;
- rt->rt_genid = rt_genid(net);
+ rt->rt_genid = rt_genid_ipv4(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
rt->rt_gateway = ort->rt_gateway;
@@ -2665,7 +2663,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
static __net_init int rt_genid_init(struct net *net)
{
- atomic_set(&net->rt_genid, 0);
+ atomic_set(&net->ipv4.rt_genid, 0);
atomic_set(&net->fnhe_genid, 0);
get_random_bytes(&net->ipv4.dev_addr_genid,
sizeof(net->ipv4.dev_addr_genid));
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 610e324..8ed7c32 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -559,6 +559,13 @@ static struct ctl_table ipv4_table[] = {
.extra1 = &one,
},
{
+ .procname = "tcp_notsent_lowat",
+ .data = &sysctl_tcp_notsent_lowat,
+ .maxlen = sizeof(sysctl_tcp_notsent_lowat),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "tcp_rmem",
.data = &sysctl_tcp_rmem,
.maxlen = sizeof(sysctl_tcp_rmem),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b2f6c74..4e42c03 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -410,10 +410,6 @@ void tcp_init_sock(struct sock *sk)
icsk->icsk_sync_mss = tcp_sync_mss;
- /* Presumed zeroed, in order of appearance:
- * cookie_in_always, cookie_out_never,
- * s_data_constant, s_data_in, s_data_out
- */
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -499,7 +495,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
@@ -510,7 +506,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
* wspace test but before the flags are set,
* IO signal will be lost.
*/
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ if (sk_stream_is_writeable(sk))
mask |= POLLOUT | POLLWRNORM;
}
} else
@@ -2638,6 +2634,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
else
tp->tsoffset = val - tcp_time_stamp;
break;
+ case TCP_NOTSENT_LOWAT:
+ tp->notsent_lowat = val;
+ sk->sk_write_space(sk);
+ break;
default:
err = -ENOPROTOOPT;
break;
@@ -2854,6 +2854,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
case TCP_TIMESTAMP:
val = tcp_time_stamp + tp->tsoffset;
break;
+ case TCP_NOTSENT_LOWAT:
+ val = tp->notsent_lowat;
+ break;
default:
return -ENOPROTOOPT;
}
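TCP_NOTSENT_LOWAT (with the tcp_notsent_lowat sysctl above as the default) bounds how much not-yet-sent data may sit in the write queue before the socket stops being reported writable, which is what the sk_stream_is_writeable()/tcp_stream_memory_free() plumbing keys off. A user-space sketch of enabling it per socket follows; the values are illustrative and the #define is only needed with uapi headers that predate this series:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_NOTSENT_LOWAT
#define TCP_NOTSENT_LOWAT 25	/* socket option value introduced by this series */
#endif

/* Keep at most ~128 KB of unsent data queued before poll() stops
 * reporting POLLOUT for this socket.
 */
static int example_set_notsent_lowat(int fd)
{
	int lowat = 128 * 1024;

	return setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
			  &lowat, sizeof(lowat));
}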
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 8f7ef0a..ab7bd35 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -58,23 +58,22 @@ error: kfree(ctx);
return err;
}
-/* Computes the fastopen cookie for the peer.
- * The peer address is a 128 bits long (pad with zeros for IPv4).
+/* Computes the fastopen cookie for the IP path.
+ * The path is 128 bits long (padded with zeros for IPv4).
*
* The caller must check foc->len to determine if a valid cookie
* has been generated successfully.
*/
-void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
+void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+ struct tcp_fastopen_cookie *foc)
{
- __be32 peer_addr[4] = { addr, 0, 0, 0 };
+ __be32 path[4] = { src, dst, 0, 0 };
struct tcp_fastopen_context *ctx;
rcu_read_lock();
ctx = rcu_dereference(tcp_fastopen_ctx);
if (ctx) {
- crypto_cipher_encrypt_one(ctx->tfm,
- foc->val,
- (__u8 *)peer_addr);
+ crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
foc->len = TCP_FASTOPEN_COOKIE_SIZE;
}
rcu_read_unlock();
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28af45a..ec492ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1048,6 +1048,7 @@ struct tcp_sacktag_state {
int reord;
int fack_count;
int flag;
+ s32 rtt; /* RTT measured by SACKing never-retransmitted data */
};
/* Check if skb is fully within the SACK block. In presence of GSO skbs,
@@ -1108,7 +1109,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
static u8 tcp_sacktag_one(struct sock *sk,
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
- bool dup_sack, int pcount)
+ int dup_sack, int pcount, u32 xmit_time)
{
struct tcp_sock *tp = tcp_sk(sk);
int fack_count = state->fack_count;
@@ -1148,6 +1149,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
state->reord);
if (!after(end_seq, tp->high_seq))
state->flag |= FLAG_ORIG_SACK_ACKED;
+ /* Pick the earliest sequence sacked for RTT */
+ if (state->rtt < 0)
+ state->rtt = tcp_time_stamp - xmit_time;
}
if (sacked & TCPCB_LOST) {
@@ -1205,7 +1209,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
* tcp_highest_sack_seq() when skb is highest_sack.
*/
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
- start_seq, end_seq, dup_sack, pcount);
+ start_seq, end_seq, dup_sack, pcount,
+ TCP_SKB_CB(skb)->when);
if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
@@ -1479,7 +1484,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(skb)->end_seq,
dup_sack,
- tcp_skb_pcount(skb));
+ tcp_skb_pcount(skb),
+ TCP_SKB_CB(skb)->when);
if (!before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
@@ -1536,7 +1542,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
static int
tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
- u32 prior_snd_una)
+ u32 prior_snd_una, s32 *sack_rtt)
{
struct tcp_sock *tp = tcp_sk(sk);
const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1554,6 +1560,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
state.flag = 0;
state.reord = tp->packets_out;
+ state.rtt = -1;
if (!tp->sacked_out) {
if (WARN_ON(tp->fackets_out))
@@ -1737,6 +1744,7 @@ out:
WARN_ON((int)tp->retrans_out < 0);
WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
+ *sack_rtt = state.rtt;
return state.flag;
}
@@ -1869,8 +1877,13 @@ void tcp_enter_loss(struct sock *sk, int how)
}
tcp_verify_left_out(tp);
- tp->reordering = min_t(unsigned int, tp->reordering,
- sysctl_tcp_reordering);
+ /* Timeout in disordered state after receiving substantial DUPACKs
+ * suggests that the degree of reordering is over-estimated.
+ */
+ if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
+ tp->sacked_out >= sysctl_tcp_reordering)
+ tp->reordering = min_t(unsigned int, tp->reordering,
+ sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
@@ -2472,8 +2485,6 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
tcp_try_keep_open(sk);
- if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
- tcp_moderate_cwnd(tp);
} else {
tcp_cwnd_reduction(sk, prior_unsacked, 0);
}
@@ -2792,65 +2803,51 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
tcp_xmit_retransmit_queue(sk);
}
-void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+ s32 seq_rtt, s32 sack_rtt)
{
- tcp_rtt_estimator(sk, seq_rtt);
- tcp_set_rto(sk);
- inet_csk(sk)->icsk_backoff = 0;
-}
-EXPORT_SYMBOL(tcp_valid_rtt_meas);
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Prefer RTT measured from ACK's timing to TS-ECR. This is because
+ * broken middle-boxes or peers may corrupt TS-ECR fields. But
+ * Karn's algorithm forbids taking RTT if some retransmitted data
+ * is acked (RFC6298).
+ */
+ if (flag & FLAG_RETRANS_DATA_ACKED)
+ seq_rtt = -1;
+
+ if (seq_rtt < 0)
+ seq_rtt = sack_rtt;
-/* Read draft-ietf-tcplw-high-performance before mucking
- * with this code. (Supersedes RFC1323)
- */
-static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
-{
/* RTTM Rule: A TSecr value received in a segment is used to
* update the averaged RTT measurement only if the segment
* acknowledges some new data, i.e., only if it advances the
* left edge of the send window.
- *
* See draft-ietf-tcplw-high-performance-00, section 3.3.
- * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
- *
- * Changed: reset backoff as soon as we see the first valid sample.
- * If we do not, we get strongly overestimated rto. With timestamps
- * samples are accepted even from very old segments: f.e., when rtt=1
- * increases to 8, we retransmit 5 times and after 8 seconds delayed
- * answer arrives rto becomes 120 seconds! If at least one of segments
- * in window is lost... Voila. --ANK (010210)
*/
- struct tcp_sock *tp = tcp_sk(sk);
-
- tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
-}
+ if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+ seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
-static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
-{
- /* We don't have a timestamp. Can only use
- * packets that are not retransmitted to determine
- * rtt estimates. Also, we must not reset the
- * backoff for rto until we get a non-retransmitted
- * packet. This allows us to deal with a situation
- * where the network delay has increased suddenly.
- * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
- */
+ if (seq_rtt < 0)
+ return false;
- if (flag & FLAG_RETRANS_DATA_ACKED)
- return;
+ tcp_rtt_estimator(sk, seq_rtt);
+ tcp_set_rto(sk);
- tcp_valid_rtt_meas(sk, seq_rtt);
+ /* RFC6298: only reset backoff on valid RTT measurement. */
+ inet_csk(sk)->icsk_backoff = 0;
+ return true;
}
-static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
- const s32 seq_rtt)
+/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
+static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
- if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
- tcp_ack_saw_tstamp(sk, flag);
- else if (seq_rtt >= 0)
- tcp_ack_no_tstamp(sk, seq_rtt, flag);
+ struct tcp_sock *tp = tcp_sk(sk);
+ s32 seq_rtt = -1;
+
+ if (tp->lsndtime && !tp->total_retrans)
+ seq_rtt = tcp_time_stamp - tp->lsndtime;
+ tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
}
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -2939,7 +2936,7 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
* arrived at the other end.
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
- u32 prior_snd_una)
+ u32 prior_snd_una, s32 sack_rtt)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2978,8 +2975,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= acked_pcount;
flag |= FLAG_RETRANS_DATA_ACKED;
- ca_seq_rtt = -1;
- seq_rtt = -1;
} else {
ca_seq_rtt = now - scb->when;
last_ackt = skb->tstamp;
@@ -3031,6 +3026,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
+ if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
+ (flag & FLAG_ACKED))
+ tcp_rearm_rto(sk);
+
if (flag & FLAG_ACKED) {
const struct tcp_congestion_ops *ca_ops
= inet_csk(sk)->icsk_ca_ops;
@@ -3040,9 +3039,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tcp_mtup_probe_success(sk);
}
- tcp_ack_update_rtt(sk, flag, seq_rtt);
- tcp_rearm_rto(sk);
-
if (tcp_is_reno(tp)) {
tcp_remove_reno_sacks(sk, pkts_acked);
} else {
@@ -3130,11 +3126,24 @@ static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}
+/* Decide whether to run the increase function of congestion control. */
static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
- !tcp_in_cwnd_reduction(sk);
+ if (tcp_in_cwnd_reduction(sk))
+ return false;
+
+ /* If reordering is high then always grow cwnd whenever data is
+ * delivered regardless of its ordering. Otherwise stay conservative
+ * and only grow cwnd on in-order delivery in Open state, and retain
+ * cwnd in Disordered state (RFC5681). A stretched ACK with
+ * new SACK or ECE mark may first advance cwnd here and later reduce
+ * cwnd in tcp_fastretrans_alert() based on more states.
+ */
+ if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
+ return flag & FLAG_FORWARD_PROGRESS;
+
+ return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
+ flag & FLAG_DATA_ACKED;
}
/* Check that window update is acceptable.
@@ -3274,6 +3283,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int prior_packets = tp->packets_out;
const int prior_unsacked = tp->packets_out - tp->sacked_out;
int acked = 0; /* Number of packets newly acked */
+ s32 sack_rtt = -1;
/* If the ack is older than previous acks
* then we can probably ignore it.
@@ -3330,7 +3340,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
if (TCP_SKB_CB(skb)->sacked)
- flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+ flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+ &sack_rtt);
if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
flag |= FLAG_ECE;
@@ -3349,21 +3360,18 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
acked = tp->packets_out;
- flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
+ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
acked -= tp->packets_out;
+ /* Advance cwnd if state allows */
+ if (tcp_may_raise_cwnd(sk, flag))
+ tcp_cong_avoid(sk, ack, prior_in_flight);
+
if (tcp_ack_is_dubious(sk, flag)) {
- /* Advance CWND, if state allows this. */
- if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
- tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, acked, prior_unsacked,
is_dupack, flag);
- } else {
- if (flag & FLAG_DATA_ACKED)
- tcp_cong_avoid(sk, ack, prior_in_flight);
}
-
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
@@ -3402,7 +3410,8 @@ old_ack:
* If data was DSACKed, see if we can undo a cwnd reduction.
*/
if (TCP_SKB_CB(skb)->sacked) {
- flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+ flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+ &sack_rtt);
tcp_fastretrans_alert(sk, acked, prior_unsacked,
is_dupack, flag);
}
@@ -5624,9 +5633,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* so release it.
*/
if (req) {
- tcp_synack_rtt_meas(sk, req);
tp->total_retrans = req->num_retrans;
-
reqsk_fastopen_remove(sk, req, false);
} else {
/* Make sure socket is routed, for correct metrics. */
@@ -5651,6 +5658,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+ tcp_synack_rtt_meas(sk, req);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b299da5..09d45d7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -821,8 +821,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
*/
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
- u16 queue_mapping,
- bool nocache)
+ u16 queue_mapping)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
@@ -852,7 +851,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
- int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
+ int res = tcp_v4_send_synack(sk, NULL, req, 0);
if (!res)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -890,7 +889,7 @@ bool tcp_syn_flood_action(struct sock *sk,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
- if (!lopt->synflood_warned) {
+ if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
lopt->synflood_warned = 1;
pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
proto, ntohs(tcp_hdr(skb)->dest), msg);
@@ -1316,9 +1315,11 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
return true;
}
+
if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
- tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, valid_foc);
if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
memcmp(&foc->val[0], &valid_foc->val[0],
TCP_FASTOPEN_COOKIE_SIZE) != 0)
@@ -1329,14 +1330,16 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
return true;
} else if (foc->len == 0) { /* Client requesting a cookie */
- tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, valid_foc);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENCOOKIEREQD);
} else {
/* Client sent a cookie with wrong size. Treat it
* the same as invalid and return a valid one.
*/
- tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, valid_foc);
}
return false;
}
@@ -1462,7 +1465,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* limitations, they conserve resources and peer is
* evidently real one.
*/
- if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+ if ((sysctl_tcp_syncookies == 2 ||
+ inet_csk_reqsk_queue_is_full(sk)) && !isn) {
want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
if (!want_cookie)
goto drop;
@@ -1671,8 +1675,6 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
- tcp_synack_rtt_meas(newsk, req);
- newtp->total_retrans = req->num_retrans;
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
@@ -2605,7 +2607,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
long delta = req->expires - jiffies;
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
i,
ireq->loc_addr,
ntohs(inet_sk(sk)->inet_sport),
@@ -2663,7 +2665,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
- "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
+ "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d%n",
i, src, srcp, dest, destp, sk->sk_state,
tp->write_seq - tp->snd_una,
rx_queue,
@@ -2802,6 +2804,7 @@ struct proto tcp_prot = {
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
+ .stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.orphan_count = &tcp_orphan_count,
.memory_allocated = &tcp_memory_allocated,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ab1c086..58a3e69 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -411,6 +411,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tcp_enable_early_retrans(newtp);
newtp->tlp_high_seq = 0;
+ newtp->lsndtime = treq->snt_synack;
+ newtp->total_retrans = req->num_retrans;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -666,12 +668,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!(flg & TCP_FLAG_ACK))
return NULL;
- /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
- if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
- tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
- else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
- tcp_rsk(req)->snt_synack = 0;
-
/* For Fast Open no more processing is needed (sk is the
* child socket).
*/
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 92fde8d..884efff 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,6 +65,9 @@ int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
+unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
+EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);
+
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
int push_one, gfp_t gfp);
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index d4943f6..301a3ef 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -54,12 +54,16 @@ static const char procname[] = "tcpprobe";
struct tcp_log {
ktime_t tstamp;
- __be32 saddr, daddr;
- __be16 sport, dport;
+ union {
+ struct sockaddr raw;
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } src, dst;
u16 length;
u32 snd_nxt;
u32 snd_una;
u32 snd_wnd;
+ u32 rcv_wnd;
u32 snd_cwnd;
u32 ssthresh;
u32 srtt;
@@ -86,12 +90,36 @@ static inline int tcp_probe_avail(void)
return bufsize - tcp_probe_used() - 1;
}
+#define tcp_probe_copy_fl_to_si4(inet, si4, mem) \
+ do { \
+ si4.sin_family = AF_INET; \
+ si4.sin_port = inet->inet_##mem##port; \
+ si4.sin_addr.s_addr = inet->inet_##mem##addr; \
+ } while (0) \
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define tcp_probe_copy_fl_to_si6(inet, si6, mem) \
+ do { \
+ struct ipv6_pinfo *pi6 = inet->pinet6; \
+ si6.sin6_family = AF_INET6; \
+ si6.sin6_port = inet->inet_##mem##port; \
+ si6.sin6_addr = pi6->mem##addr; \
+ si6.sin6_flowinfo = 0; /* No need here. */ \
+ si6.sin6_scope_id = 0; /* No need here. */ \
+ } while (0)
+#else
+#define tcp_probe_copy_fl_to_si6(fl, si6, mem) \
+ do { \
+ memset(&si6, 0, sizeof(si6)); \
+ } while (0)
+#endif
+
/*
* Hook inserted to be called before each receive packet.
* Note: arguments must match tcp_rcv_established()!
*/
static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned int len)
+ const struct tcphdr *th, unsigned int len)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
@@ -107,15 +135,25 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
struct tcp_log *p = tcp_probe.log + tcp_probe.head;
p->tstamp = ktime_get();
- p->saddr = inet->inet_saddr;
- p->sport = inet->inet_sport;
- p->daddr = inet->inet_daddr;
- p->dport = inet->inet_dport;
+ switch (sk->sk_family) {
+ case AF_INET:
+ tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
+ tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
+ break;
+ case AF_INET6:
+ tcp_probe_copy_fl_to_si6(inet, p->src.v6, s);
+ tcp_probe_copy_fl_to_si6(inet, p->dst.v6, d);
+ break;
+ default:
+ BUG();
+ }
+
p->length = skb->len;
p->snd_nxt = tp->snd_nxt;
p->snd_una = tp->snd_una;
p->snd_cwnd = tp->snd_cwnd;
p->snd_wnd = tp->snd_wnd;
+ p->rcv_wnd = tp->rcv_wnd;
p->ssthresh = tcp_current_ssthresh(sk);
p->srtt = tp->srtt >> 3;
@@ -157,13 +195,11 @@ static int tcpprobe_sprint(char *tbuf, int n)
= ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
return scnprintf(tbuf, n,
- "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
+ "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
(unsigned long) tv.tv_sec,
(unsigned long) tv.tv_nsec,
- &p->saddr, ntohs(p->sport),
- &p->daddr, ntohs(p->dport),
- p->length, p->snd_nxt, p->snd_una,
- p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt);
+ &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
+ p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
}
static ssize_t tcpprobe_read(struct file *file, char __user *buf,
@@ -223,6 +259,13 @@ static __init int tcpprobe_init(void)
{
int ret = -ENOMEM;
+ /* Warning: if the function signature of tcp_rcv_established
+ * has been changed, you also have to change the signature of
+ * jtcp_rcv_established, otherwise you end up right here!
+ */
+ BUILD_BUG_ON(__same_type(tcp_rcv_established,
+ jtcp_rcv_established) == 0);
+
init_waitqueue_head(&tcp_probe.wait);
spin_lock_init(&tcp_probe.lock);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 766e6ba..0b24508 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -704,7 +704,7 @@ EXPORT_SYMBOL(udp_flush_pending_frames);
* @src: source IP address
* @dst: destination IP address
*/
-static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
struct udphdr *uh = udp_hdr(skb);
struct sk_buff *frags = skb_shinfo(skb)->frag_list;
@@ -740,6 +740,7 @@ static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
uh->check = CSUM_MANGLED_0;
}
}
+EXPORT_SYMBOL_GPL(udp4_hwcsum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
@@ -2158,7 +2159,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
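udp4_hwcsum() is exported so that encapsulation code outside udp.c can finish the outer UDP checksum, either by preparing CHECKSUM_PARTIAL for the NIC or by computing it in software for fragmented skbs. A hedged sketch of a caller is shown below; it is illustrative only, with 'src' and 'dst' standing for the outer IPv4 addresses and the outer UDP header assumed to be already built:

	static void example_finish_outer_udp(struct sk_buff *skb,
					     __be32 src, __be32 dst)
	{
		struct udphdr *uh = udp_hdr(skb);

		uh->check = 0;		/* udp4_hwcsum() fills this in */
		udp4_hwcsum(skb, src, dst);
	}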
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 498ea99..2d6d179 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -99,9 +99,9 @@
#define ACONF_DEBUG 2
#if ACONF_DEBUG >= 3
-#define ADBG(x) printk x
+#define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
#else
-#define ADBG(x)
+#define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
#endif
#define INFINITY_LIFE_TIME 0xFFFFFFFF
@@ -177,6 +177,8 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_redirects = 1,
.autoconf = 1,
.force_mld_version = 0,
+ .mldv1_unsolicited_report_interval = 10 * HZ,
+ .mldv2_unsolicited_report_interval = HZ,
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
@@ -211,6 +213,9 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.accept_ra = 1,
.accept_redirects = 1,
.autoconf = 1,
+ .force_mld_version = 0,
+ .mldv1_unsolicited_report_interval = 10 * HZ,
+ .mldv2_unsolicited_report_interval = HZ,
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
@@ -369,9 +374,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
dev_hold(dev);
if (snmp6_alloc_dev(ndev) < 0) {
- ADBG((KERN_WARNING
+ ADBG(KERN_WARNING
"%s: cannot allocate memory for statistics; dev=%s.\n",
- __func__, dev->name));
+ __func__, dev->name);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
dev_put(dev);
kfree(ndev);
@@ -379,9 +384,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
}
if (snmp6_register_dev(ndev) < 0) {
- ADBG((KERN_WARNING
+ ADBG(KERN_WARNING
"%s: cannot create /proc/net/dev_snmp6/%s\n",
- __func__, dev->name));
+ __func__, dev->name);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
ndev->dead = 1;
in6_dev_finish_destroy(ndev);
@@ -844,7 +849,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
/* Ignore adding duplicate addresses on an interface */
if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
- ADBG(("ipv6_add_addr: already assigned\n"));
+ ADBG("ipv6_add_addr: already assigned\n");
err = -EEXIST;
goto out;
}
@@ -852,7 +857,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
if (ifa == NULL) {
- ADBG(("ipv6_add_addr: malloc failed\n"));
+ ADBG("ipv6_add_addr: malloc failed\n");
err = -ENOBUFS;
goto out;
}
@@ -1807,6 +1812,16 @@ static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
}
+static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
+{
+ memcpy(eui, dev->perm_addr, 3);
+ memcpy(eui + 5, dev->perm_addr + 3, 3);
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ eui[0] ^= 2;
+ return 0;
+}
+
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
{
switch (dev->type) {
@@ -1825,6 +1840,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
return addrconf_ifid_eui64(eui, dev);
case ARPHRD_IEEE1394:
return addrconf_ifid_ieee1394(eui, dev);
+ case ARPHRD_TUNNEL6:
+ return addrconf_ifid_ip6tnl(eui, dev);
}
return -1;
}
@@ -2050,7 +2067,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
pinfo = (struct prefix_info *) opt;
if (len < sizeof(struct prefix_info)) {
- ADBG(("addrconf: prefix option too short\n"));
+ ADBG("addrconf: prefix option too short\n");
return;
}
@@ -2702,7 +2719,8 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_ARCNET) &&
(dev->type != ARPHRD_INFINIBAND) &&
(dev->type != ARPHRD_IEEE802154) &&
- (dev->type != ARPHRD_IEEE1394)) {
+ (dev->type != ARPHRD_IEEE1394) &&
+ (dev->type != ARPHRD_TUNNEL6)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
}
@@ -2788,44 +2806,6 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
return -1;
}
-static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
-{
- struct net_device *link_dev;
- struct net *net = dev_net(idev->dev);
-
- /* first try to inherit the link-local address from the link device */
- if (idev->dev->iflink &&
- (link_dev = __dev_get_by_index(net, idev->dev->iflink))) {
- if (!ipv6_inherit_linklocal(idev, link_dev))
- return;
- }
- /* then try to inherit it from any device */
- for_each_netdev(net, link_dev) {
- if (!ipv6_inherit_linklocal(idev, link_dev))
- return;
- }
- pr_debug("init ip6-ip6: add_linklocal failed\n");
-}
-
-/*
- * Autoconfigure tunnel with a link-local address so routing protocols,
- * DHCPv6, MLD etc. can be run over the virtual link
- */
-
-static void addrconf_ip6_tnl_config(struct net_device *dev)
-{
- struct inet6_dev *idev;
-
- ASSERT_RTNL();
-
- idev = addrconf_add_dev(dev);
- if (IS_ERR(idev)) {
- pr_debug("init ip6-ip6: add_dev failed\n");
- return;
- }
- ip6_tnl_add_linklocal(idev);
-}
-
static int addrconf_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -2893,9 +2873,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
addrconf_gre_config(dev);
break;
#endif
- case ARPHRD_TUNNEL6:
- addrconf_ip6_tnl_config(dev);
- break;
case ARPHRD_LOOPBACK:
init_loopback(dev);
break;
@@ -3630,8 +3607,8 @@ restart:
if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
- ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
- now, next, next_sec, next_sched));
+ ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
+ now, next, next_sec, next_sched);
addr_chk_timer.expires = next_sched;
add_timer(&addr_chk_timer);
@@ -4177,6 +4154,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_RTR_SOLICIT_DELAY] =
jiffies_to_msecs(cnf->rtr_solicit_delay);
array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
+ array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
+ jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
+ array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
+ jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
#ifdef CONFIG_IPV6_PRIVACY
array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
@@ -4652,6 +4633,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
break;
}
atomic_inc(&net->ipv6.dev_addr_genid);
+ rt_genid_bump_ipv6(net);
}
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -4859,6 +4841,22 @@ static struct addrconf_sysctl_table
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "mldv1_unsolicited_report_interval",
+ .data =
+ &ipv6_devconf.mldv1_unsolicited_report_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
+ .procname = "mldv2_unsolicited_report_interval",
+ .data =
+ &ipv6_devconf.mldv2_unsolicited_report_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
#ifdef CONFIG_IPV6_PRIVACY
{
.procname = "use_tempaddr",
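The ADBG() conversion above replaces the old double-parenthesis call form with a variadic macro; the disabled branch still expands to an if (0) printk(...) so the format string and arguments stay type-checked even when debugging is compiled out. A small standalone illustration of the same pattern, using printf() as a stand-in for printk():

#include <stdio.h>

#define MY_DEBUG 0      /* flip to 1 to enable output */

#if MY_DEBUG
#define DBG(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
/* arguments stay type-checked, but the call is optimized away */
#define DBG(fmt, ...) do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)
#endif

int main(void)
{
    int users = 3;

    DBG("active users: %d\n", users);   /* no output when MY_DEBUG == 0 */
    printf("done\n");
    return 0;
}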
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index a5ac969..0d1a9b1 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -766,6 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
net->ipv6.sysctl.bindv6only = 0;
net->ipv6.sysctl.icmpv6_time = 1*HZ;
+ atomic_set(&net->ipv6.rt_genid, 0);
err = ipv6_init_mibs(net);
if (err)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 197e6f4..48b6bd2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -890,7 +890,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
src = &np->rcv_saddr;
seq_printf(seq,
"%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
bucket,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 2e1a432..a6c58ce 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -55,26 +55,33 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
struct fib6_table *table;
struct net *net = rule->fr_net;
pol_lookup_t lookup = arg->lookup_ptr;
+ int err = 0;
switch (rule->action) {
case FR_ACT_TO_TBL:
break;
case FR_ACT_UNREACHABLE:
+ err = -ENETUNREACH;
rt = net->ipv6.ip6_null_entry;
goto discard_pkt;
default:
case FR_ACT_BLACKHOLE:
+ err = -EINVAL;
rt = net->ipv6.ip6_blk_hole_entry;
goto discard_pkt;
case FR_ACT_PROHIBIT:
+ err = -EACCES;
rt = net->ipv6.ip6_prohibit_entry;
goto discard_pkt;
}
table = fib6_get_table(net, rule->table);
- if (table)
- rt = lookup(net, table, flp6, flags);
+ if (!table) {
+ err = -EAGAIN;
+ goto out;
+ }
+ rt = lookup(net, table, flp6, flags);
if (rt != net->ipv6.ip6_null_entry) {
struct fib6_rule *r = (struct fib6_rule *)rule;
@@ -101,6 +108,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
}
again:
ip6_rt_put(rt);
+ err = -EAGAIN;
rt = NULL;
goto out;
@@ -108,9 +116,31 @@ discard_pkt:
dst_hold(&rt->dst);
out:
arg->result = rt;
- return rt == NULL ? -EAGAIN : 0;
+ return err;
}
+static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+{
+ struct rt6_info *rt = (struct rt6_info *) arg->result;
+ struct net_device *dev = rt->rt6i_idev->dev;
+ /* do not accept result if the route does
+ * not meet the required prefix length
+ */
+ if (rt->rt6i_dst.plen <= rule->suppress_prefixlen)
+ goto suppress_route;
+
+ /* do not accept result if the route uses a device
+ * belonging to a forbidden interface group
+ */
+ if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup)
+ goto suppress_route;
+
+ return false;
+
+suppress_route:
+ ip6_rt_put(rt);
+ return true;
+}
static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
@@ -244,6 +274,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.addr_size = sizeof(struct in6_addr),
.action = fib6_rule_action,
.match = fib6_rule_match,
+ .suppress = fib6_rule_suppress,
.configure = fib6_rule_configure,
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
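The new fib6_rule_suppress() hook lets a rule discard an otherwise matching lookup result when its prefix is too short or its output device sits in a forbidden interface group, so rule processing continues with the next rule. A compilable sketch of just that decision, using illustrative stand-in structs rather than the kernel's fib_rule/rt6_info:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the rule and the looked-up route. */
struct demo_rule {
    int suppress_prefixlen; /* minimum acceptable prefix length */
    int suppress_ifgroup;   /* -1 means "no group restriction" */
};

struct demo_route {
    int plen;               /* prefix length of the matched route */
    int dev_group;          /* interface group of the output device */
};

/* Return true when the result must be suppressed. */
static bool rule_suppress(const struct demo_rule *rule,
                          const struct demo_route *rt)
{
    if (rt->plen <= rule->suppress_prefixlen)
        return true;
    if (rule->suppress_ifgroup != -1 &&
        rt->dev_group == rule->suppress_ifgroup)
        return true;
    return false;
}

int main(void)
{
    struct demo_rule rule = { .suppress_prefixlen = 16, .suppress_ifgroup = -1 };
    struct demo_route def = { .plen = 0, .dev_group = 0 };
    struct demo_route host = { .plen = 128, .dev_group = 0 };

    printf("default route suppressed: %d\n", rule_suppress(&rule, &def));
    printf("host route suppressed:    %d\n", rule_suppress(&rule, &host));
    return 0;
}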
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index c4ff5bb..73db48e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -425,8 +425,8 @@ out:
* node.
*/
-static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
- int addrlen, int plen,
+static struct fib6_node *fib6_add_1(struct fib6_node *root,
+ struct in6_addr *addr, int plen,
int offset, int allow_create,
int replace_required)
{
@@ -543,7 +543,7 @@ insert_above:
but if it is >= plen, the value is ignored in any case.
*/
- bit = __ipv6_addr_diff(addr, &key->addr, addrlen);
+ bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));
/*
* (intermediate)[in]
@@ -822,9 +822,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
if (!allow_create && !replace_required)
pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
- fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
- rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
- allow_create, replace_required);
+ fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
+ offsetof(struct rt6_info, rt6i_dst), allow_create,
+ replace_required);
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
@@ -863,7 +863,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
/* Now add the first leaf node to new subtree */
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
- sizeof(struct in6_addr), rt->rt6i_src.plen,
+ rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
@@ -882,7 +882,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
fn->subtree = sfn;
} else {
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
- sizeof(struct in6_addr), rt->rt6i_src.plen,
+ rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ecd6073..f2d0a42 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -335,6 +335,7 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
dev->rtnl_link_ops = &ip6gre_link_ops;
nt->dev = dev;
+ nt->net = dev_net(dev);
ip6gre_tnl_link_config(nt, 1);
if (register_netdevice(dev) < 0)
@@ -1255,6 +1256,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
tunnel = netdev_priv(dev);
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
@@ -1275,6 +1277,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
struct ip6_tnl *tunnel = netdev_priv(dev);
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
tunnel->hlen = sizeof(struct ipv6hdr) + 4;
@@ -1450,6 +1453,7 @@ static int ip6gre_tap_init(struct net_device *dev)
tunnel = netdev_priv(dev);
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
ip6gre_tnl_link_config(tunnel, 1);
@@ -1501,6 +1505,7 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
eth_hw_addr_random(dev);
nt->dev = dev;
+ nt->net = dev_net(dev);
ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
/* Can use a lockless transmit, unless we generate output sequences */
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 2bab2aa..302d6fb 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -44,7 +44,7 @@
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
-
+#include <net/inet_ecn.h>
int ip6_rcv_finish(struct sk_buff *skb)
@@ -109,6 +109,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (hdr->version != 6)
goto err;
+ IP6_ADD_STATS_BH(dev_net(dev), idev,
+ IPSTATS_MIB_NOECTPKTS +
+ (ipv6_get_dsfield(hdr) & INET_ECN_MASK),
+ max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
/*
* RFC4291 2.5.3
* A packet received on an interface with a destination address
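The new IP6_ADD_STATS_BH() call buckets every received packet by its ECN bits: the NOECT/ECT1/ECT0/CE MIB entries are consecutive, so IPSTATS_MIB_NOECTPKTS + (dsfield & INET_ECN_MASK) indexes the right counter, and GSO aggregates count as max(gso_segs, 1) packets. A standalone sketch of the same indexing trick:

#include <stdio.h>

/* The two ECN codepoints from the traffic class (RFC 3168). */
#define ECN_MASK 0x3

/* Counters laid out consecutively, NoECT first, so the ECN bits can be
 * used directly as an offset -- mirroring IPSTATS_MIB_NOECTPKTS +
 * (dsfield & INET_ECN_MASK) in the hunk above.
 */
static unsigned long ecn_stats[4];
static const char *const ecn_names[4] = { "NoECT", "ECT(1)", "ECT(0)", "CE" };

static void count_packet(unsigned char traffic_class, unsigned int segs)
{
    ecn_stats[traffic_class & ECN_MASK] += segs ? segs : 1;
}

int main(void)
{
    count_packet(0x00, 1);      /* Not-ECT */
    count_packet(0x02, 4);      /* ECT(0), GSO packet worth 4 segments */
    count_packet(0x03, 1);      /* CE */

    for (int i = 0; i < 4; i++)
        printf("%-7s %lu\n", ecn_names[i], ecn_stats[i]);
    return 0;
}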
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1e55866..d6e00a3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -41,6 +41,7 @@
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
+#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
@@ -315,6 +316,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
t = netdev_priv(dev);
t->parms = *p;
+ t->net = dev_net(dev);
err = ip6_tnl_create2(dev);
if (err < 0)
goto failed_free;
@@ -374,7 +376,7 @@ static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct net *net = dev_net(dev);
+ struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev)
@@ -741,7 +743,7 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
{
struct __ip6_tnl_parm *p = &t->parms;
int ret = 0;
- struct net *net = dev_net(t->dev);
+ struct net *net = t->net;
if ((p->flags & IP6_TNL_F_CAP_RCV) ||
((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
@@ -827,6 +829,9 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ if (!net_eq(t->net, dev_net(t->dev)))
+ skb_scrub_packet(skb);
+
netif_rx(skb);
rcu_read_unlock();
@@ -895,7 +900,7 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
{
struct __ip6_tnl_parm *p = &t->parms;
int ret = 0;
- struct net *net = dev_net(t->dev);
+ struct net *net = t->net;
if (p->flags & IP6_TNL_F_CAP_XMIT) {
struct net_device *ldev = NULL;
@@ -945,8 +950,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
int encap_limit,
__u32 *pmtu)
{
- struct net *net = dev_net(dev);
struct ip6_tnl *t = netdev_priv(dev);
+ struct net *net = t->net;
struct net_device_stats *stats = &t->dev->stats;
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct ipv6_tel_txoption opt;
@@ -996,6 +1001,9 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
goto tx_err_dst_release;
}
+ if (!net_eq(t->net, dev_net(dev)))
+ skb_scrub_packet(skb);
+
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
@@ -1202,7 +1210,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
int strict = (ipv6_addr_type(&p->raddr) &
(IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
- struct rt6_info *rt = rt6_lookup(dev_net(dev),
+ struct rt6_info *rt = rt6_lookup(t->net,
&p->raddr, &p->laddr,
p->link, strict);
@@ -1251,7 +1259,7 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
- struct net *net = dev_net(t->dev);
+ struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
int err;
@@ -1463,8 +1471,10 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->mtu-=8;
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ /* This perm addr will be used as interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
}
@@ -1479,6 +1489,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
struct ip6_tnl *t = netdev_priv(dev);
t->dev = dev;
+ t->net = dev_net(dev);
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
@@ -1596,9 +1607,9 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
- struct ip6_tnl *t;
+ struct ip6_tnl *t = netdev_priv(dev);
struct __ip6_tnl_parm p;
- struct net *net = dev_net(dev);
+ struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev)
@@ -1699,14 +1710,24 @@ static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
{
+ struct net *net = dev_net(ip6n->fb_tnl_dev);
+ struct net_device *dev, *aux;
int h;
struct ip6_tnl *t;
LIST_HEAD(list);
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &ip6_link_ops)
+ unregister_netdevice_queue(dev, &list);
+
for (h = 0; h < HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
while (t != NULL) {
- unregister_netdevice_queue(t->dev, &list);
+ /* If dev is in the same netns, it has already
+ * been added to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(t->dev), net))
+ unregister_netdevice_queue(t->dev, &list);
t = rtnl_dereference(t->next);
}
}
@@ -1732,6 +1753,10 @@ static int __net_init ip6_tnl_init_net(struct net *net)
if (!ip6n->fb_tnl_dev)
goto err_alloc_dev;
dev_net_set(ip6n->fb_tnl_dev, net);
+ /* FB netdevice is special: we have one, and only one per netns.
+ * Allowing it to be moved to another netns is clearly unsafe.
+ */
+ ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
if (err < 0)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 03986d3..a60a84e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -110,8 +110,8 @@ static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);
-static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
- struct sk_buff *skb, struct mfc6_cache *cache);
+static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+ struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
@@ -2074,8 +2074,8 @@ static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
return ct;
}
-static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
- struct sk_buff *skb, struct mfc6_cache *cache)
+static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+ struct sk_buff *skb, struct mfc6_cache *cache)
{
int psend = -1;
int vif, ct;
@@ -2156,12 +2156,11 @@ forward:
last_forward:
if (psend != -1) {
ip6mr_forward2(net, mrt, skb, cache, psend);
- return 0;
+ return;
}
dont_forward:
kfree_skb(skb);
- return 0;
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 99cd65c..98ead2b 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -44,6 +44,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/pkt_sched.h>
#include <net/mld.h>
#include <linux/netfilter.h>
@@ -106,10 +107,12 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
struct inet6_dev *idev);
-
-#define IGMP6_UNSOLICITED_IVAL (10*HZ)
#define MLD_QRV_DEFAULT 2
+/* RFC3810, 8.1 Query Version Distinctions */
+#define MLD_V1_QUERY_LEN 24
+#define MLD_V2_QUERY_LEN_MIN 28
+
#define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \
(idev)->cnf.force_mld_version == 1 || \
((idev)->mc_v1_seen && \
@@ -128,6 +131,18 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
pmc != NULL; \
pmc = rcu_dereference(pmc->next))
+static int unsolicited_report_interval(struct inet6_dev *idev)
+{
+ int iv;
+
+ if (MLD_V1_SEEN(idev))
+ iv = idev->cnf.mldv1_unsolicited_report_interval;
+ else
+ iv = idev->cnf.mldv2_unsolicited_report_interval;
+
+ return iv > 0 ? iv : 1;
+}
+
int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
struct net_device *dev = NULL;
@@ -984,24 +999,24 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
static void mld_gq_start_timer(struct inet6_dev *idev)
{
- int tv = net_random() % idev->mc_maxdelay;
+ unsigned long tv = net_random() % idev->mc_maxdelay;
idev->mc_gq_running = 1;
if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
-static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
+static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
{
- int tv = net_random() % delay;
+ unsigned long tv = net_random() % delay;
if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
-static void mld_dad_start_timer(struct inet6_dev *idev, int delay)
+static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
{
- int tv = net_random() % delay;
+ unsigned long tv = net_random() % delay;
if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
in6_dev_hold(idev);
@@ -1134,13 +1149,11 @@ int igmp6_event_query(struct sk_buff *skb)
!(group_type&IPV6_ADDR_MULTICAST))
return -EINVAL;
- if (len == 24) {
+ if (len == MLD_V1_QUERY_LEN) {
int switchback;
/* MLDv1 router present */
- /* Translate milliseconds to jiffies */
- max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
-
+ max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
switchback = (idev->mc_qrv + 1) * max_delay;
idev->mc_v1_seen = jiffies + switchback;
@@ -1150,17 +1163,18 @@ int igmp6_event_query(struct sk_buff *skb)
__in6_dev_put(idev);
/* clear deleted report items */
mld_clear_delrec(idev);
- } else if (len >= 28) {
+ } else if (len >= MLD_V2_QUERY_LEN_MIN) {
int srcs_offset = sizeof(struct mld2_query) -
sizeof(struct icmp6hdr);
if (!pskb_may_pull(skb, srcs_offset))
return -EINVAL;
mlh2 = (struct mld2_query *)skb_transport_header(skb);
- max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
- if (!max_delay)
- max_delay = 1;
+
+ max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mlh2->mld2q_mrc))), 1UL);
+
idev->mc_maxdelay = max_delay;
+
if (mlh2->mld2q_qrv)
idev->mc_qrv = mlh2->mld2q_qrv;
if (group_type == IPV6_ADDR_ANY) { /* general query */
@@ -1376,6 +1390,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
if (!skb)
return NULL;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, hlen);
if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1769,7 +1784,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
rcu_read_unlock();
return;
}
-
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, hlen);
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -2156,7 +2171,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
- delay = net_random() % IGMP6_UNSOLICITED_IVAL;
+ delay = net_random() % unsolicited_report_interval(ma->idev);
spin_lock_bh(&ma->mca_lock);
if (del_timer(&ma->mca_timer)) {
@@ -2323,7 +2338,7 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
(unsigned long)idev);
idev->mc_qrv = MLD_QRV_DEFAULT;
- idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
+ idev->mc_maxdelay = unsolicited_report_interval(idev);
idev->mc_v1_seen = 0;
write_unlock_bh(&idev->lock);
}
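The mcast.c changes replace the fixed IGMP6_UNSOLICITED_IVAL with per-device, per-version intervals and switch the delay math to msecs_to_jiffies() with a floor of one jiffy. A userspace sketch of the interval selection and clamping, with HZ and the conversion helper as simple stand-ins:

#include <stdio.h>

#define HZ 100  /* ticks per second in this demo */

/* Userspace stand-in for jiffies_to_msecs(). */
static unsigned long ticks_to_ms(unsigned long t)
{
    return t * 1000 / HZ;
}

/* Pick the unsolicited report interval by MLD version, clamping to at
 * least one tick so a zero sysctl value cannot arm a zero-length timer
 * (the same "iv > 0 ? iv : 1" guard as in the hunk above).
 */
static unsigned long unsolicited_interval(int mldv1_seen,
                                          unsigned long v1_ticks,
                                          unsigned long v2_ticks)
{
    unsigned long iv = mldv1_seen ? v1_ticks : v2_ticks;

    return iv > 0 ? iv : 1;
}

int main(void)
{
    unsigned long v1 = 10 * HZ;     /* 10 s default */
    unsigned long v2 = 1 * HZ;      /* 1 s default */

    printf("MLDv1 interval: %lu ms\n",
           ticks_to_ms(unsolicited_interval(1, v1, v2)));
    printf("MLDv2 interval: %lu ms\n",
           ticks_to_ms(unsolicited_interval(0, v1, v2)));
    printf("zero sysctl clamps to %lu tick(s)\n",
           unsolicited_interval(0, v1, 0));
    return 0;
}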
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 47bff61..3e4e92d 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -76,7 +76,7 @@ static int masq_device_event(struct notifier_block *this,
if (event == NETDEV_DOWN)
nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex);
+ (void *)(long)dev->ifindex, 0, 0);
return NOTIFY_DONE;
}
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 51c3285..091d066 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -91,6 +91,10 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
/* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */
+ SNMP_MIB_ITEM("Ip6InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
+ SNMP_MIB_ITEM("Ip6InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("Ip6InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("Ip6InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c45f7a5..c1e5334 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -63,6 +63,8 @@
#include <linux/seq_file.h>
#include <linux/export.h>
+#define ICMPV6_HDRLEN 4 /* ICMPv6 header, RFC 4443 Section 2.1 */
+
static struct raw_hashinfo raw_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
@@ -108,11 +110,14 @@ found:
*/
static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
- struct icmp6hdr *_hdr;
+ struct icmp6hdr _hdr;
const struct icmp6hdr *hdr;
+ /* We require only the four bytes of the ICMPv6 header, not any
+ * additional bytes of message body in "struct icmp6hdr".
+ */
hdr = skb_header_pointer(skb, skb_transport_offset(skb),
- sizeof(_hdr), &_hdr);
+ ICMPV6_HDRLEN, &_hdr);
if (hdr) {
const __u32 *data = &raw6_sk(sk)->filter.data[0];
unsigned int type = hdr->icmp6_type;
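icmpv6_filter() now pulls only the four fixed header bytes (ICMPV6_HDRLEN) instead of a full struct icmp6hdr, so minimal ICMPv6 messages are no longer rejected, and it copies into a stack buffer rather than aliasing the packet. A standalone sketch of that idea plus the per-type bitmap test; the semantics here (a set bit means "drop") are demo assumptions, not a claim about the kernel's filter API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ICMPV6_HDRLEN 4 /* type, code, checksum -- RFC 4443, section 2.1 */

/* 256-bit per-type filter; a set bit means "drop this type" (demo). */
static uint32_t filter[8];

static void filter_block(unsigned int type)
{
    filter[type >> 5] |= 1u << (type & 31);
}

/* Return 1 to drop, 0 to deliver.  Only the first four bytes of the
 * message are needed, so a minimal ICMPv6 packet is still handled.
 */
static int icmpv6_demo_filter(const uint8_t *pkt, size_t len)
{
    uint8_t hdr[ICMPV6_HDRLEN];

    if (len < ICMPV6_HDRLEN)
        return 1;                       /* runt packet: drop */
    memcpy(hdr, pkt, ICMPV6_HDRLEN);    /* copy, don't alias the packet */
    return (filter[hdr[0] >> 5] >> (hdr[0] & 31)) & 1;
}

int main(void)
{
    const uint8_t echo_req[4] = { 128, 0, 0, 0 };   /* type 128, minimal */
    const uint8_t ra[4]       = { 134, 0, 0, 0 };   /* router advertisement */

    filter_block(128);                              /* drop echo requests */
    printf("echo request dropped: %d\n",
           icmpv6_demo_filter(echo_req, sizeof(echo_req)));
    printf("router adv dropped:   %d\n",
           icmpv6_demo_filter(ra, sizeof(ra)));
    return 0;
}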
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8d9a93e..55236a8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -283,9 +283,8 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
- rt->rt6i_genid = rt_genid(net);
+ rt->rt6i_genid = rt_genid_ipv6(net);
INIT_LIST_HEAD(&rt->rt6i_siblings);
- rt->rt6i_nsiblings = 0;
}
return rt;
}
@@ -1062,7 +1061,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
* DST_OBSOLETE_FORCE_CHK which forces validation calls down
* into this function always.
*/
- if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
+ if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
return NULL;
if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a3437a4..f18f842 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -621,7 +621,7 @@ static int ipip6_rcv(struct sk_buff *skb)
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
- if (tunnel->net != dev_net(tunnel->dev))
+ if (!net_eq(tunnel->net, dev_net(tunnel->dev)))
skb_scrub_packet(skb);
netif_rx(skb);
@@ -860,7 +860,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
tunnel->err_count = 0;
}
- if (tunnel->net != dev_net(dev))
+ if (!net_eq(tunnel->net, dev_net(dev)))
skb_scrub_packet(skb);
/*
@@ -1589,7 +1589,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
- if (dev_net(t->dev) != net)
+ if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev,
head);
t = rtnl_dereference(t->next);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6e1649d..5bcfadf 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -963,7 +963,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
goto drop;
- if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+ if ((sysctl_tcp_syncookies == 2 ||
+ inet_csk_reqsk_queue_is_full(sk)) && !isn) {
want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
if (!want_cookie)
goto drop;
@@ -1237,8 +1238,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
- tcp_synack_rtt_meas(newsk, req);
- newtp->total_retrans = req->num_retrans;
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
@@ -1732,7 +1731,7 @@ static void get_openreq6(struct seq_file *seq,
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3],
@@ -1783,7 +1782,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -1926,6 +1925,7 @@ struct proto tcpv6_prot = {
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
+ .stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 65e8833..e15c16a 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -213,7 +213,7 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
ntohs(ipxs->dest_addr.sock));
}
- seq_printf(seq, "%08X %08X %02X %03d\n",
+ seq_printf(seq, "%08X %08X %02X %03u\n",
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_state,
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index ae43c62..85372cf 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -75,7 +75,7 @@ static pi_minor_info_t pi_minor_call_table[] = {
{ NULL, 0 }, /* 0x00 */
{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
};
-static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }};
+static pi_major_info_t pi_major_call_table[] = { { pi_minor_call_table, 2 } };
static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
/************************ GLOBAL PROCEDURES ************************/
@@ -205,7 +205,7 @@ static void irttp_todo_expired(unsigned long data)
*/
static void irttp_flush_queues(struct tsap_cb *self)
{
- struct sk_buff* skb;
+ struct sk_buff *skb;
IRDA_DEBUG(4, "%s()\n", __func__);
@@ -400,7 +400,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
/* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
* use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
* JeanII */
- if((stsap_sel != LSAP_ANY) &&
+ if ((stsap_sel != LSAP_ANY) &&
((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
return NULL;
@@ -427,7 +427,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
ttp_notify.data_indication = irttp_data_indication;
ttp_notify.udata_indication = irttp_udata_indication;
ttp_notify.flow_indication = irttp_flow_indication;
- if(notify->status_indication != NULL)
+ if (notify->status_indication != NULL)
ttp_notify.status_indication = irttp_status_indication;
ttp_notify.instance = self;
strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);
@@ -639,8 +639,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
*/
if ((self->tx_max_sdu_size != 0) &&
(self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
- (skb->len > self->tx_max_sdu_size))
- {
+ (skb->len > self->tx_max_sdu_size)) {
IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
__func__);
ret = -EMSGSIZE;
@@ -733,8 +732,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
* poll us through irttp_flow_indication() - Jean II */
while ((self->send_credit > 0) &&
(!irlmp_lap_tx_queue_full(self->lsap)) &&
- (skb = skb_dequeue(&self->tx_queue)))
- {
+ (skb = skb_dequeue(&self->tx_queue))) {
/*
* Since we can transmit and receive frames concurrently,
* the code below is a critical region and we must assure that
@@ -798,8 +796,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
* where we can spend a bit of time doing stuff. - Jean II */
if ((self->tx_sdu_busy) &&
(skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
- (!self->close_pend))
- {
+ (!self->close_pend)) {
if (self->notify.flow_indication)
self->notify.flow_indication(self->notify.instance,
self, FLOW_START);
@@ -892,7 +889,7 @@ static int irttp_udata_indication(void *instance, void *sap,
/* Just pass data to layer above */
if (self->notify.udata_indication) {
err = self->notify.udata_indication(self->notify.instance,
- self,skb);
+ self, skb);
/* Same comment as in irttp_do_data_indication() */
if (!err)
return 0;
@@ -1057,7 +1054,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
* to do that. Jean II */
/* If we need to send disconnect. try to do it now */
- if(self->disconnect_pend)
+ if (self->disconnect_pend)
irttp_start_todo_timer(self, 0);
}
@@ -1116,7 +1113,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
if (self->connected) {
- if(userdata)
+ if (userdata)
dev_kfree_skb(userdata);
return -EISCONN;
}
@@ -1137,7 +1134,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
* headers
*/
IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
- { dev_kfree_skb(userdata); return -1; } );
+ { dev_kfree_skb(userdata); return -1; });
}
/* Initialize connection parameters */
@@ -1157,7 +1154,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
* Give away max 127 credits for now
*/
if (n > 127) {
- self->avail_credit=n-127;
+ self->avail_credit = n - 127;
n = 127;
}
@@ -1166,10 +1163,10 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
/* SAR enabled? */
if (max_sdu_size > 0) {
IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
- { dev_kfree_skb(tx_skb); return -1; } );
+ { dev_kfree_skb(tx_skb); return -1; });
/* Insert SAR parameters */
- frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
+ frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
@@ -1386,7 +1383,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
* headers
*/
IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
- { dev_kfree_skb(userdata); return -1; } );
+ { dev_kfree_skb(userdata); return -1; });
}
self->avail_credit = 0;
@@ -1409,10 +1406,10 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
/* SAR enabled? */
if (max_sdu_size > 0) {
IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
- { dev_kfree_skb(tx_skb); return -1; } );
+ { dev_kfree_skb(tx_skb); return -1; });
/* Insert TTP header with SAR parameters */
- frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
+ frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
@@ -1522,7 +1519,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
* function may be called from various context, like user, timer
* for following a disconnect_indication() (i.e. net_bh).
* Jean II */
- if(test_and_set_bit(0, &self->disconnect_pend)) {
+ if (test_and_set_bit(0, &self->disconnect_pend)) {
IRDA_DEBUG(0, "%s(), disconnect already pending\n",
__func__);
if (userdata)
@@ -1627,7 +1624,7 @@ static void irttp_disconnect_indication(void *instance, void *sap,
* Jean II */
/* No need to notify the client if has already tried to disconnect */
- if(self->notify.disconnect_indication)
+ if (self->notify.disconnect_indication)
self->notify.disconnect_indication(self->notify.instance, self,
reason, skb);
else
@@ -1738,8 +1735,7 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
* This is the last fragment, so time to reassemble!
*/
if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
- (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
- {
+ (self->rx_max_sdu_size == TTP_SAR_UNBOUND)) {
/*
* A little optimizing. Only queue the fragment if
* there are other fragments. Since if this is the
@@ -1860,7 +1856,7 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "dtsap_sel: %02x\n",
self->dtsap_sel);
seq_printf(seq, " connected: %s, ",
- self->connected? "TRUE":"FALSE");
+ self->connected ? "TRUE" : "FALSE");
seq_printf(seq, "avail credit: %d, ",
self->avail_credit);
seq_printf(seq, "remote credit: %d, ",
@@ -1876,9 +1872,9 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "rx_queue len: %u\n",
skb_queue_len(&self->rx_queue));
seq_printf(seq, " tx_sdu_busy: %s, ",
- self->tx_sdu_busy? "TRUE":"FALSE");
+ self->tx_sdu_busy ? "TRUE" : "FALSE");
seq_printf(seq, "rx_sdu_busy: %s\n",
- self->rx_sdu_busy? "TRUE":"FALSE");
+ self->rx_sdu_busy ? "TRUE" : "FALSE");
seq_printf(seq, " max_seg_size: %u, ",
self->max_seg_size);
seq_printf(seq, "tx_max_sdu_size: %u, ",
diff --git a/net/key/af_key.c b/net/key/af_key.c
index ab8bd2c..9d58537 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -45,7 +45,7 @@ struct netns_pfkey {
static DEFINE_MUTEX(pfkey_mutex);
#define DUMMY_MARK 0
-static struct xfrm_mark dummy_mark = {0, 0};
+static const struct xfrm_mark dummy_mark = {0, 0};
struct pfkey_sock {
/* struct sock must be the first member of struct pfkey_sock */
struct sock sk;
@@ -338,7 +338,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
return 0;
}
-static u8 sadb_ext_min_len[] = {
+static const u8 sadb_ext_min_len[] = {
[SADB_EXT_RESERVED] = (u8) 0,
[SADB_EXT_SA] = (u8) sizeof(struct sadb_sa),
[SADB_EXT_LIFETIME_CURRENT] = (u8) sizeof(struct sadb_lifetime),
@@ -1196,10 +1196,6 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
&x->props.saddr);
- if (!x->props.family) {
- err = -EAFNOSUPPORT;
- goto out;
- }
pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1],
&x->id.daddr);
@@ -2205,10 +2201,6 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
- if (!xp->family) {
- err = -EINVAL;
- goto out;
- }
xp->selector.family = xp->family;
xp->selector.prefixlen_s = sa->sadb_address_prefixlen;
xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2737,7 +2729,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
const struct sadb_msg *hdr, void * const *ext_hdrs);
-static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
+static const pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
[SADB_RESERVED] = pfkey_reserved,
[SADB_GETSPI] = pfkey_getspi,
[SADB_UPDATE] = pfkey_add,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 7b4799c..1a3c7e0 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -147,7 +147,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
}
seq_printf(seq, "@%02X ", llc->sap->laddr.lsap);
llc_ui_format_mac(seq, llc->daddr.mac);
- seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap,
+ seq_printf(seq, "@%02X %8d %8d %2d %3u %4d\n", llc->daddr.lsap,
sk_wmem_alloc_get(sk),
sk_rmem_alloc_get(sk) - llc->copied_seq,
sk->sk_state,
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 43dd752..31fc224 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -395,9 +395,13 @@ void sta_set_rate_info_tx(struct sta_info *sta,
rinfo->nss = ieee80211_rate_get_vht_nss(rate);
} else {
struct ieee80211_supported_band *sband;
+ int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+ u16 brate;
+
sband = sta->local->hw.wiphy->bands[
ieee80211_get_sdata_band(sta->sdata)];
- rinfo->legacy = sband->bitrates[rate->idx].bitrate;
+ brate = sband->bitrates[rate->idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
@@ -422,11 +426,13 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
rinfo->mcs = sta->last_rx_rate_idx;
} else {
struct ieee80211_supported_band *sband;
+ int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+ u16 brate;
sband = sta->local->hw.wiphy->bands[
ieee80211_get_sdata_band(sta->sdata)];
- rinfo->legacy =
- sband->bitrates[sta->last_rx_rate_idx].bitrate;
+ brate = sband->bitrates[sta->last_rx_rate_idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
@@ -856,8 +862,8 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_beacon_data *params)
+int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_beacon_data *params)
{
struct beacon_data *new, *old;
int new_head_len, new_tail_len;
@@ -1020,6 +1026,12 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ /* don't allow changing the beacon while CSA is in place - offset
+ * of channel switch counter may change
+ */
+ if (sdata->vif.csa_active)
+ return -EBUSY;
+
old = rtnl_dereference(sdata->u.ap.beacon);
if (!old)
return -ENOENT;
@@ -1044,6 +1056,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
return -ENOENT;
old_probe_resp = rtnl_dereference(sdata->u.ap.probe_resp);
+ /* abort any running channel switch */
+ sdata->vif.csa_active = false;
+ cancel_work_sync(&sdata->csa_finalize_work);
+
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_off(vlan->dev);
@@ -1192,8 +1208,6 @@ static int sta_apply_parameters(struct ieee80211_local *local,
struct station_parameters *params)
{
int ret = 0;
- u32 rates;
- int i, j;
struct ieee80211_supported_band *sband;
struct ieee80211_sub_if_data *sdata = sta->sdata;
enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
@@ -1286,16 +1300,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
sta->listen_interval = params->listen_interval;
if (params->supported_rates) {
- rates = 0;
-
- for (i = 0; i < params->supported_rates_len; i++) {
- int rate = (params->supported_rates[i] & 0x7f) * 5;
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate)
- rates |= BIT(j);
- }
- }
- sta->sta.supp_rates[band] = rates;
+ ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
+ sband, params->supported_rates,
+ params->supported_rates_len,
+ &sta->sta.supp_rates[band]);
}
if (params->ht_capa)
@@ -1958,18 +1966,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
}
if (params->basic_rates) {
- int i, j;
- u32 rates = 0;
- struct ieee80211_supported_band *sband = wiphy->bands[band];
-
- for (i = 0; i < params->basic_rates_len; i++) {
- int rate = (params->basic_rates[i] & 0x7f) * 5;
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate)
- rates |= BIT(j);
- }
- }
- sdata->vif.bss_conf.basic_rates = rates;
+ ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
+ wiphy->bands[band],
+ params->basic_rates,
+ params->basic_rates_len,
+ &sdata->vif.bss_conf.basic_rates);
changed |= BSS_CHANGED_BASIC_RATES;
}
@@ -2786,6 +2787,178 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
return 0;
}
+static struct cfg80211_beacon_data *
+cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
+{
+ struct cfg80211_beacon_data *new_beacon;
+ u8 *pos;
+ int len;
+
+ len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len +
+ beacon->proberesp_ies_len + beacon->assocresp_ies_len +
+ beacon->probe_resp_len;
+
+ new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL);
+ if (!new_beacon)
+ return NULL;
+
+ pos = (u8 *)(new_beacon + 1);
+ if (beacon->head_len) {
+ new_beacon->head_len = beacon->head_len;
+ new_beacon->head = pos;
+ memcpy(pos, beacon->head, beacon->head_len);
+ pos += beacon->head_len;
+ }
+ if (beacon->tail_len) {
+ new_beacon->tail_len = beacon->tail_len;
+ new_beacon->tail = pos;
+ memcpy(pos, beacon->tail, beacon->tail_len);
+ pos += beacon->tail_len;
+ }
+ if (beacon->beacon_ies_len) {
+ new_beacon->beacon_ies_len = beacon->beacon_ies_len;
+ new_beacon->beacon_ies = pos;
+ memcpy(pos, beacon->beacon_ies, beacon->beacon_ies_len);
+ pos += beacon->beacon_ies_len;
+ }
+ if (beacon->proberesp_ies_len) {
+ new_beacon->proberesp_ies_len = beacon->proberesp_ies_len;
+ new_beacon->proberesp_ies = pos;
+ memcpy(pos, beacon->proberesp_ies, beacon->proberesp_ies_len);
+ pos += beacon->proberesp_ies_len;
+ }
+ if (beacon->assocresp_ies_len) {
+ new_beacon->assocresp_ies_len = beacon->assocresp_ies_len;
+ new_beacon->assocresp_ies = pos;
+ memcpy(pos, beacon->assocresp_ies, beacon->assocresp_ies_len);
+ pos += beacon->assocresp_ies_len;
+ }
+ if (beacon->probe_resp_len) {
+ new_beacon->probe_resp_len = beacon->probe_resp_len;

+ new_beacon->probe_resp = pos;
+ memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
+ pos += beacon->probe_resp_len;
+ }
+
+ return new_beacon;
+}
+
+void ieee80211_csa_finalize_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ csa_finalize_work);
+ struct ieee80211_local *local = sdata->local;
+ int err, changed;
+
+ if (!ieee80211_sdata_running(sdata))
+ return;
+
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
+ return;
+
+ sdata->radar_required = sdata->csa_radar_required;
+ err = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
+ &changed);
+ if (WARN_ON(err < 0))
+ return;
+
+ err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
+ if (err < 0)
+ return;
+
+ changed |= err;
+ kfree(sdata->u.ap.next_beacon);
+ sdata->u.ap.next_beacon = NULL;
+ sdata->vif.csa_active = false;
+
+ ieee80211_wake_queues_by_reason(&sdata->local->hw,
+ IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
+ ieee80211_bss_info_change_notify(sdata, changed);
+
+ cfg80211_ch_switch_notify(sdata->dev, &local->csa_chandef);
+}
+
+static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_chanctx *chanctx;
+ int err, num_chanctx;
+
+ if (!list_empty(&local->roc_list) || local->scanning)
+ return -EBUSY;
+
+ if (sdata->wdev.cac_started)
+ return -EBUSY;
+
+ if (cfg80211_chandef_identical(&params->chandef,
+ &sdata->vif.bss_conf.chandef))
+ return -EINVAL;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+
+ /* don't handle for multi-VIF cases */
+ chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+ if (chanctx->refcount > 1) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+ num_chanctx = 0;
+ list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
+ num_chanctx++;
+ rcu_read_unlock();
+
+ if (num_chanctx > 1)
+ return -EBUSY;
+
+ /* don't allow another channel switch if one is already active. */
+ if (sdata->vif.csa_active)
+ return -EBUSY;
+
+ /* only handle AP for now. */
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ sdata->u.ap.next_beacon = cfg80211_beacon_dup(&params->beacon_after);
+ if (!sdata->u.ap.next_beacon)
+ return -ENOMEM;
+
+ sdata->csa_counter_offset_beacon = params->counter_offset_beacon;
+ sdata->csa_counter_offset_presp = params->counter_offset_presp;
+ sdata->csa_radar_required = params->radar_required;
+
+ if (params->block_tx)
+ ieee80211_stop_queues_by_reason(&local->hw,
+ IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
+ err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
+ if (err < 0) {
+ kfree(sdata->u.ap.next_beacon);
+ sdata->u.ap.next_beacon = NULL;
+ return err;
+ }
+
+ local->csa_chandef = params->chandef;
+ sdata->vif.csa_active = true;
+
+ ieee80211_bss_info_change_notify(sdata, err);
+ drv_channel_switch_beacon(sdata, &params->chandef);
+
+ return 0;
+}
+
static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
unsigned int wait, const u8 *buf, size_t len,
@@ -3503,4 +3676,5 @@ struct cfg80211_ops mac80211_config_ops = {
.get_et_strings = ieee80211_get_et_strings,
.get_channel = ieee80211_cfg_get_channel,
.start_radar_detection = ieee80211_start_radar_detection,
+ .channel_switch = ieee80211_channel_switch,
};
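cfg80211_beacon_dup() above copies a beacon template and all of its optional blobs into a single allocation: the struct comes first, the raw bytes follow, and every pointer in the copy must point into that trailing storage of the copy, not back into the source. A compilable sketch of the same single-allocation duplication pattern, with a simplified two-field template standing in for the full beacon data:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for a beacon template with two optional blobs. */
struct demo_beacon {
    const unsigned char *head;
    size_t head_len;
    const unsigned char *tail;
    size_t tail_len;
};

/* Duplicate the descriptor and both blobs in one allocation, fixing up
 * every pointer in the copy to its own trailing storage.
 */
static struct demo_beacon *beacon_dup(const struct demo_beacon *src)
{
    struct demo_beacon *dup;
    unsigned char *pos;

    dup = calloc(1, sizeof(*dup) + src->head_len + src->tail_len);
    if (!dup)
        return NULL;

    pos = (unsigned char *)(dup + 1);
    if (src->head_len) {
        dup->head_len = src->head_len;
        dup->head = pos;                /* must point into the copy */
        memcpy(pos, src->head, src->head_len);
        pos += src->head_len;
    }
    if (src->tail_len) {
        dup->tail_len = src->tail_len;
        dup->tail = pos;
        memcpy(pos, src->tail, src->tail_len);
    }
    return dup;
}

int main(void)
{
    const unsigned char head[] = { 0x80, 0x00 }, tail[] = { 0xdd, 0x05 };
    struct demo_beacon src = { head, sizeof(head), tail, sizeof(tail) };
    struct demo_beacon *dup = beacon_dup(&src);

    if (dup) {
        printf("copy head_len=%zu tail_len=%zu\n", dup->head_len, dup->tail_len);
        free(dup);  /* one free releases the struct and both blobs */
    }
    return 0;
}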
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 03e8d2e..3a4764b 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -410,6 +410,64 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
return ret;
}
+int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 *changed)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *conf;
+ struct ieee80211_chanctx *ctx;
+ int ret;
+ u32 chanctx_changed = 0;
+
+ /* should never be called if not performing a channel switch. */
+ if (WARN_ON(!sdata->vif.csa_active))
+ return -EINVAL;
+
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+ IEEE80211_CHAN_DISABLED))
+ return -EINVAL;
+
+ mutex_lock(&local->chanctx_mtx);
+ conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ lockdep_is_held(&local->chanctx_mtx));
+ if (!conf) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx = container_of(conf, struct ieee80211_chanctx, conf);
+ if (ctx->refcount != 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sdata->vif.bss_conf.chandef.width != chandef->width) {
+ chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
+ *changed |= BSS_CHANGED_BANDWIDTH;
+ }
+
+ sdata->vif.bss_conf.chandef = *chandef;
+ ctx->conf.def = *chandef;
+
+ chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
+ drv_change_chanctx(local, ctx, chanctx_changed);
+
+ if (!local->use_chanctx) {
+ local->_oper_chandef = *chandef;
+ ieee80211_hw_config(local, 0);
+ }
+
+ ieee80211_recalc_chanctx_chantype(local, ctx);
+ ieee80211_recalc_smps_chanctx(local, ctx);
+ ieee80211_recalc_radar_chanctx(local, ctx);
+
+ ret = 0;
+ out:
+ mutex_unlock(&local->chanctx_mtx);
+ return ret;
+}
+
int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
u32 *changed)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 44e201d..19c54a4 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -455,6 +455,15 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
+ if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
+ debugfs_create_x32("driver_buffered_tids", 0400,
+ sta->debugfs.dir,
+ (u32 *)&sta->driver_buffered_tids);
+ else
+ debugfs_create_x64("driver_buffered_tids", 0400,
+ sta->debugfs.dir,
+ (u64 *)&sta->driver_buffered_tids);
+
drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index b931c96..b3ea11f 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1072,4 +1072,17 @@ static inline void drv_ipv6_addr_change(struct ieee80211_local *local,
}
#endif
+static inline void
+drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (local->ops->channel_switch_beacon) {
+ trace_drv_channel_switch_beacon(local, sdata, chandef);
+ local->ops->channel_switch_beacon(&local->hw, &sdata->vif,
+ chandef);
+ }
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f83534f..529bf58 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,13 +19,14 @@
#include "ieee80211_i.h"
#include "rate.h"
-static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
+static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa,
+ struct ieee80211_ht_cap *ht_capa_mask,
struct ieee80211_sta_ht_cap *ht_cap,
u16 flag)
{
__le16 le_flag = cpu_to_le16(flag);
- if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {
- if (!(sdata->u.mgd.ht_capa.cap_info & le_flag))
+ if (ht_capa_mask->cap_info & le_flag) {
+ if (!(ht_capa->cap_info & le_flag))
ht_cap->cap &= ~flag;
}
}
@@ -33,13 +34,30 @@ static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_ht_cap *ht_cap)
{
- u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask);
- u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
+ struct ieee80211_ht_cap *ht_capa, *ht_capa_mask;
+ u8 *scaps, *smask;
int i;
if (!ht_cap->ht_supported)
return;
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ ht_capa = &sdata->u.mgd.ht_capa;
+ ht_capa_mask = &sdata->u.mgd.ht_capa_mask;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ht_capa = &sdata->u.ibss.ht_capa;
+ ht_capa_mask = &sdata->u.ibss.ht_capa_mask;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ scaps = (u8 *)(&ht_capa->mcs.rx_mask);
+ smask = (u8 *)(&ht_capa_mask->mcs.rx_mask);
+
/* NOTE: If you add more over-rides here, update register_hw
* ht_capa_mod_msk logic in main.c as well.
* And, if this method can ever change ht_cap.ht_supported, fix
@@ -55,28 +73,32 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
}
/* Force removal of HT-40 capabilities? */
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40);
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_SGI_40);
/* Allow user to disable SGI-20 (SGI-40 is handled above) */
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_20);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_SGI_20);
/* Allow user to disable the max-AMSDU bit. */
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_MAX_AMSDU);
/* Allow user to decrease AMPDU factor */
- if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+ if (ht_capa_mask->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_FACTOR) {
- u8 n = sdata->u.mgd.ht_capa.ampdu_params_info
- & IEEE80211_HT_AMPDU_PARM_FACTOR;
+ u8 n = ht_capa->ampdu_params_info &
+ IEEE80211_HT_AMPDU_PARM_FACTOR;
if (n < ht_cap->ampdu_factor)
ht_cap->ampdu_factor = n;
}
/* Allow the user to increase AMPDU density. */
- if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+ if (ht_capa_mask->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_DENSITY) {
- u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info &
+ u8 n = (ht_capa->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_DENSITY)
>> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
if (n > ht_cap->ampdu_density)
@@ -112,7 +134,8 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
* we advertised a restricted capability set to. Override
* our own capabilities and then use those below.
*/
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ if ((sdata->vif.type == NL80211_IFTYPE_STATION ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
ieee80211_apply_htcap_overrides(sdata, &own_cap);
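The reworked __check_htcap_disable() applies user HT-capability overrides for both station and IBSS interfaces: the mask says which bits the user set explicitly, the value says to what, and only the disable direction is honoured. A small standalone example of that masking rule, with the flag values chosen for illustration:

#include <stdio.h>
#include <stdint.h>

#define HT_CAP_SUP_WIDTH_20_40  0x0002
#define HT_CAP_SGI_20           0x0020
#define HT_CAP_SGI_40           0x0040

/* Clear a capability bit only when the user explicitly overrode it to
 * off; a user cannot force-enable something never advertised.
 */
static void check_cap_disable(uint16_t user_val, uint16_t user_mask,
                              uint16_t *cap, uint16_t flag)
{
    if ((user_mask & flag) && !(user_val & flag))
        *cap &= ~flag;
}

int main(void)
{
    uint16_t cap = HT_CAP_SGI_20 | HT_CAP_SGI_40 | HT_CAP_SUP_WIDTH_20_40;

    /* User override: explicitly turn off 40 MHz, leave SGI-20 alone. */
    uint16_t user_mask = HT_CAP_SUP_WIDTH_20_40 | HT_CAP_SGI_40;
    uint16_t user_val = 0;

    check_cap_disable(user_val, user_mask, &cap, HT_CAP_SUP_WIDTH_20_40);
    check_cap_disable(user_val, user_mask, &cap, HT_CAP_SGI_40);
    check_cap_disable(user_val, user_mask, &cap, HT_CAP_SGI_20);

    printf("resulting cap: 0x%04x\n", cap);  /* only SGI-20 remains */
    return 0;
}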
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ea7b9c2..e08387c 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -30,6 +30,7 @@
#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
+#define IEEE80211_IBSS_RSN_INACTIVITY_LIMIT (10 * HZ)
#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
@@ -43,16 +44,18 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
- int rates, i;
+ int rates_n = 0, i, ri;
struct ieee80211_mgmt *mgmt;
u8 *pos;
struct ieee80211_supported_band *sband;
struct cfg80211_bss *bss;
- u32 bss_change;
- u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
+ u32 bss_change, rate_flags, rates = 0, rates_added = 0;
struct cfg80211_chan_def chandef;
+ enum nl80211_bss_scan_width scan_width;
+ bool have_higher_than_11mbit = false;
struct beacon_data *presp;
int frame_len;
+ int shift;
sdata_assert_lock(sdata);
@@ -83,6 +86,14 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
chandef = ifibss->chandef;
if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+ if (chandef.width == NL80211_CHAN_WIDTH_5 ||
+ chandef.width == NL80211_CHAN_WIDTH_10 ||
+ chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ chandef.width == NL80211_CHAN_WIDTH_20) {
+ sdata_info(sdata,
+ "Failed to join IBSS, beacons forbidden\n");
+ return;
+ }
chandef.width = NL80211_CHAN_WIDTH_20;
chandef.center_freq1 = chan->center_freq;
}
@@ -99,6 +110,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memcpy(ifibss->bssid, bssid, ETH_ALEN);
sband = local->hw.wiphy->bands[chan->band];
+ shift = ieee80211_vif_get_shift(&sdata->vif);
/* Build IBSS probe response */
frame_len = sizeof(struct ieee80211_hdr_3addr) +
@@ -134,15 +146,33 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memcpy(pos, ifibss->ssid, ifibss->ssid_len);
pos += ifibss->ssid_len;
- rates = min_t(int, 8, sband->n_bitrates);
+ rate_flags = ieee80211_chandef_rate_flags(&chandef);
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ if (sband->bitrates[i].bitrate > 110)
+ have_higher_than_11mbit = true;
+
+ rates |= BIT(i);
+ rates_n++;
+ }
+
*pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = rates;
- for (i = 0; i < rates; i++) {
- int rate = sband->bitrates[i].bitrate;
+ *pos++ = min_t(int, 8, rates_n);
+ for (ri = 0; ri < sband->n_bitrates; ri++) {
+ int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
+ 5 * (1 << shift));
u8 basic = 0;
- if (basic_rates & BIT(i))
+ if (!(rates & BIT(ri)))
+ continue;
+
+ if (basic_rates & BIT(ri))
basic = 0x80;
- *pos++ = basic | (u8) (rate / 5);
+ *pos++ = basic | (u8) rate;
+ if (++rates_added == 8) {
+ ri++; /* continue at next rate for EXT_SUPP_RATES */
+ break;
+ }
}
if (sband->band == IEEE80211_BAND_2GHZ) {
@@ -157,15 +187,20 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
*pos++ = 0;
*pos++ = 0;
- if (sband->n_bitrates > 8) {
+ /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
+ if (rates_n > 8) {
*pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = sband->n_bitrates - 8;
- for (i = 8; i < sband->n_bitrates; i++) {
- int rate = sband->bitrates[i].bitrate;
+ *pos++ = rates_n - 8;
+ for (; ri < sband->n_bitrates; ri++) {
+ int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
+ 5 * (1 << shift));
u8 basic = 0;
- if (basic_rates & BIT(i))
+ if (!(rates & BIT(ri)))
+ continue;
+
+ if (basic_rates & BIT(ri))
basic = 0x80;
- *pos++ = basic | (u8) (rate / 5);
+ *pos++ = basic | (u8) rate;
}
}
@@ -179,8 +214,12 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
chandef.width != NL80211_CHAN_WIDTH_5 &&
chandef.width != NL80211_CHAN_WIDTH_10 &&
sband->ht_cap.ht_supported) {
- pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
- sband->ht_cap.cap);
+ struct ieee80211_sta_ht_cap ht_cap;
+
+ memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
+ ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+ pos = ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
/*
* Note: According to 802.11n-2009 9.13.3.1, HT Protection
* field and RIFS Mode are reserved in IBSS mode, therefore
@@ -236,18 +275,26 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ;
bss_change |= BSS_CHANGED_ERP_SLOT;
+ /* cf. IEEE 802.11 9.2.12 */
+ if (chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit)
+ sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
+ else
+ sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
+
sdata->vif.bss_conf.ibss_joined = true;
sdata->vif.bss_conf.ibss_creator = creator;
ieee80211_bss_info_change_notify(sdata, bss_change);
- ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
+ ieee80211_set_wmm_default(sdata, true);
ifibss->state = IEEE80211_IBSS_MLME_JOINED;
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
- mgmt, presp->head_len, 0, GFP_KERNEL);
+ scan_width = cfg80211_chandef_to_scan_width(&chandef);
+ bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan,
+ scan_width, mgmt,
+ presp->head_len, 0, GFP_KERNEL);
cfg80211_put_bss(local->hw.wiphy, bss);
netif_carrier_on(sdata->dev);
cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
@@ -264,6 +311,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
u16 beacon_int = cbss->beacon_interval;
const struct cfg80211_bss_ies *ies;
u64 tsf;
+ u32 rate_flags;
+ int shift;
sdata_assert_lock(sdata);
@@ -271,15 +320,24 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
beacon_int = 10;
sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->u.ibss.chandef);
+ shift = ieee80211_vif_get_shift(&sdata->vif);
basic_rates = 0;
for (i = 0; i < bss->supp_rates_len; i++) {
- int rate = (bss->supp_rates[i] & 0x7f) * 5;
+ int rate = bss->supp_rates[i] & 0x7f;
bool is_basic = !!(bss->supp_rates[i] & 0x80);
for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
+ int brate;
+ if ((rate_flags & sband->bitrates[j].flags)
+ != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
+ 5 * (1 << shift));
+ if (brate == rate) {
if (is_basic)
basic_rates |= BIT(j);
break;
@@ -335,6 +393,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
struct sta_info *sta;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
+ enum nl80211_bss_scan_width scan_width;
int band;
/*
@@ -363,6 +422,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
if (WARN_ON_ONCE(!chanctx_conf))
return NULL;
band = chanctx_conf->def.chan->band;
+ scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
@@ -376,7 +436,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband);
+ ieee80211_mandatory_rates(sband, scan_width);
return ieee80211_ibss_finish_sta(sta);
}
@@ -440,6 +500,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
u64 beacon_timestamp, rx_timestamp;
u32 supp_rates = 0;
enum ieee80211_band band = rx_status->band;
+ enum nl80211_bss_scan_width scan_width;
struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
bool rates_updated = false;
@@ -461,16 +522,22 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sta = sta_info_get(sdata, mgmt->sa);
if (elems->supp_rates) {
- supp_rates = ieee80211_sta_get_rates(local, elems,
+ supp_rates = ieee80211_sta_get_rates(sdata, elems,
band, NULL);
if (sta) {
u32 prev_rates;
prev_rates = sta->sta.supp_rates[band];
/* make sure mandatory rates are always added */
- sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband);
+ scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ if (rx_status->flag & RX_FLAG_5MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ if (rx_status->flag & RX_FLAG_10MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_10;
+ sta->sta.supp_rates[band] = supp_rates |
+ ieee80211_mandatory_rates(sband,
+ scan_width);
if (sta->sta.supp_rates[band] != prev_rates) {
ibss_dbg(sdata,
"updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
@@ -585,7 +652,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
"beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n",
mgmt->bssid);
ieee80211_sta_join_ibss(sdata, bss);
- supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
+ supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
supp_rates);
rcu_read_unlock();
@@ -604,6 +671,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
+ enum nl80211_bss_scan_width scan_width;
int band;
/*
@@ -629,6 +697,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
return;
}
band = chanctx_conf->def.chan->band;
+ scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -640,7 +709,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband);
+ ieee80211_mandatory_rates(sband, scan_width);
spin_lock(&ifibss->incomplete_lock);
list_add(&sta->list, &ifibss->incomplete_stations);
@@ -672,6 +741,33 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
return active;
}
+static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta, *tmp;
+ unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT;
+ unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
+
+ mutex_lock(&local->sta_mtx);
+
+ list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+ if (sdata != sta->sdata)
+ continue;
+
+ if (time_after(jiffies, sta->last_rx + exp_time) ||
+ (time_after(jiffies, sta->last_rx + exp_rsn_time) &&
+ sta->sta_state != IEEE80211_STA_AUTHORIZED)) {
+ sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n",
+ sta->sta_state != IEEE80211_STA_AUTHORIZED ?
+ "not authorized " : "", sta->sta.addr);
+
+ WARN_ON(__sta_info_destroy(sta));
+ }
+ }
+
+ mutex_unlock(&local->sta_mtx);
+}
+
/*
* This function is called with state == IEEE80211_IBSS_MLME_JOINED
*/
@@ -679,13 +775,14 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ enum nl80211_bss_scan_width scan_width;
sdata_assert_lock(sdata);
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
+ ieee80211_ibss_sta_expire(sdata);
if (time_before(jiffies, ifibss->last_scan_completed +
IEEE80211_IBSS_MERGE_INTERVAL))
@@ -700,8 +797,9 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
sdata_info(sdata,
"No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
+ scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
- NULL);
+ NULL, scan_width);
}
static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -751,6 +849,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
struct cfg80211_bss *cbss;
struct ieee80211_channel *chan = NULL;
const u8 *bssid = NULL;
+ enum nl80211_bss_scan_width scan_width;
int active_ibss;
u16 capability;
@@ -799,8 +898,10 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
IEEE80211_SCAN_INTERVAL)) {
sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
+ scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
ieee80211_request_ibss_scan(sdata, ifibss->ssid,
- ifibss->ssid_len, chan);
+ ifibss->ssid_len, chan,
+ scan_width);
} else {
int interval = IEEE80211_SCAN_INTERVAL;
@@ -1020,6 +1121,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
struct cfg80211_ibss_params *params)
{
u32 changed = 0;
+ u32 rate_flags;
+ struct ieee80211_supported_band *sband;
+ int i;
if (params->bssid) {
memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
@@ -1030,6 +1134,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.privacy = params->privacy;
sdata->u.ibss.control_port = params->control_port;
sdata->u.ibss.basic_rates = params->basic_rates;
+
+ /* fix basic_rates if channel does not support these rates */
+ rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
+ sband = sdata->local->hw.wiphy->bands[params->chandef.chan->band];
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ sdata->u.ibss.basic_rates &= ~BIT(i);
+ }
memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
sizeof(params->mcast_rate));
@@ -1051,6 +1163,11 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
sdata->u.ibss.ssid_len = params->ssid_len;
+ memcpy(&sdata->u.ibss.ht_capa, &params->ht_capa,
+ sizeof(sdata->u.ibss.ht_capa));
+ memcpy(&sdata->u.ibss.ht_capa_mask, &params->ht_capa_mask,
+ sizeof(sdata->u.ibss.ht_capa_mask));
+
/*
* 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
* reserved, but an HT STA shall protect HT transmissions as though
@@ -1131,6 +1248,11 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
presp = rcu_dereference_protected(ifibss->presp,
lockdep_is_held(&sdata->wdev.mtx));
RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
+
+ /* on the next join, re-program HT parameters */
+ memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
+ memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask));
+
sdata->vif.bss_conf.ibss_joined = false;
sdata->vif.bss_conf.ibss_creator = false;
sdata->vif.bss_conf.enable_beacon = false;
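
The ibss.c changes above encode supported rates as DIV_ROUND_UP(bitrate, 5 * (1 << shift)), so 5/10 MHz channels advertise the scaled on-air rate. A minimal user-space sketch of that encoding (the bitrate table is hypothetical and the split into SUPP_RATES/EXT_SUPP_RATES is only noted in a comment):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bitrate table in 100 kb/s units, as mac80211 stores rates. */
static const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540 };
#define N_BITRATES (int)(sizeof(bitrates) / sizeof(bitrates[0]))

/* Rates are carried in 500 kb/s units; on 10/5 MHz channels (shift 1/2)
 * the on-air rate is halved/quartered, hence the extra 1 << shift. */
static int build_rate_bytes(uint32_t basic_rates, int shift, uint8_t *out)
{
	int i, n = 0;

	for (i = 0; i < N_BITRATES; i++) {
		int rate = (bitrates[i] + 5 * (1 << shift) - 1) /
			   (5 * (1 << shift));          /* DIV_ROUND_UP */
		uint8_t basic = (basic_rates & (1u << i)) ? 0x80 : 0;

		out[n++] = basic | (uint8_t)rate;
	}
	return n;  /* first 8 go into SUPP_RATES, the rest into EXT_SUPP_RATES */
}

int main(void)
{
	uint8_t buf[N_BITRATES];
	int n = build_rate_bytes(0x0f /* CCK rates basic */, 0, buf);

	for (int i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}
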
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8412a30..e94c840 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -259,6 +259,8 @@ struct ieee80211_if_ap {
struct beacon_data __rcu *beacon;
struct probe_resp __rcu *probe_resp;
+ /* to be used after channel switch. */
+ struct cfg80211_beacon_data *next_beacon;
struct list_head vlans;
struct ps_data ps;
@@ -509,6 +511,9 @@ struct ieee80211_if_ibss {
/* probe response/beacon for IBSS */
struct beacon_data __rcu *presp;
+ struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
+ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
+
spinlock_t incomplete_lock;
struct list_head incomplete_stations;
@@ -713,6 +718,11 @@ struct ieee80211_sub_if_data {
struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
+ struct work_struct csa_finalize_work;
+ int csa_counter_offset_beacon;
+ int csa_counter_offset_presp;
+ bool csa_radar_required;
+
/* used to reconfigure hardware SM PS */
struct work_struct recalc_smps;
@@ -809,6 +819,34 @@ ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
return band;
}
+static inline int
+ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return 2;
+ case NL80211_CHAN_WIDTH_10:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline int
+ieee80211_vif_get_shift(struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ int shift = 0;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (chanctx_conf)
+ shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
+ rcu_read_unlock();
+
+ return shift;
+}
+
enum sdata_queue_type {
IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
IEEE80211_SDATA_QUEUE_AGG_START = 1,
@@ -1026,7 +1064,7 @@ struct ieee80211_local {
struct cfg80211_ssid scan_ssid;
struct cfg80211_scan_request *int_scan_req;
struct cfg80211_scan_request *scan_req, *hw_scan_req;
- struct ieee80211_channel *scan_channel;
+ struct cfg80211_chan_def scan_chandef;
enum ieee80211_band hw_scan_band;
int scan_channel_idx;
int scan_ies_len;
@@ -1063,7 +1101,6 @@ struct ieee80211_local {
u32 dot11TransmittedFrameCount;
#ifdef CONFIG_MAC80211_LEDS
- int tx_led_counter, rx_led_counter;
struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led;
struct tpt_led_trigger *tpt_led_trigger;
char tx_led_name[32], rx_led_name[32],
@@ -1306,7 +1343,8 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
void ieee80211_scan_work(struct work_struct *work);
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan);
+ struct ieee80211_channel *chan,
+ enum nl80211_bss_scan_width scan_width);
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1341,6 +1379,9 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
void ieee80211_sw_roc_work(struct work_struct *work);
void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
+/* channel switch handling */
+void ieee80211_csa_finalize_work(struct work_struct *work);
+
/* interface handling */
int ieee80211_iface_init(void);
void ieee80211_iface_exit(void);
@@ -1362,6 +1403,8 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
+int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_beacon_data *params);
static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
{
@@ -1465,7 +1508,8 @@ extern void *mac80211_wiphy_privid; /* for wiphy privid */
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type);
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
- int rate, int erp, int short_preamble);
+ int rate, int erp, int short_preamble,
+ int shift);
void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
struct ieee80211_hdr *hdr, const u8 *tsc,
gfp_t gfp);
@@ -1569,7 +1613,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
size_t buffer_len, const u8 *ie, size_t ie_len,
enum ieee80211_band band, u32 rate_mask,
- u8 channel);
+ struct cfg80211_chan_def *chandef);
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
u8 *dst, u32 ratemask,
struct ieee80211_channel *chan,
@@ -1582,10 +1626,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
u32 ratemask, bool directed, u32 tx_flags,
struct ieee80211_channel *channel, bool scan);
-void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
- const size_t supp_rates_len,
- const u8 *supp_rates);
-u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
+u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum ieee80211_band band, u32 *basic_rates);
int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
@@ -1602,6 +1643,9 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 prot_mode);
u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
+int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates);
int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic,
enum ieee80211_band band);
@@ -1622,6 +1666,11 @@ int __must_check
ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
u32 *changed);
+/* NOTE: only use ieee80211_vif_change_channel() for channel switch */
+int __must_check
+ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 *changed);
void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
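
The shift helpers added to ieee80211_i.h above map channel width to a divisor exponent (0 for 20 MHz, 1 for 10 MHz, 2 for 5 MHz). A trivial worked example of what the shift means for bitrates (values in 100 kb/s units, standalone code):

#include <stdio.h>

/* Nominal OFDM bitrate in 100 kb/s units -> on-air bitrate for a given
 * channel-width shift (0: 20 MHz, 1: 10 MHz, 2: 5 MHz). */
static int effective_bitrate(int bitrate, int shift)
{
	return bitrate >> shift;
}

int main(void)
{
	/* 54 Mb/s OFDM: 540 on 20 MHz, 270 on 10 MHz, 135 on 5 MHz. */
	printf("%d %d %d\n",
	       effective_bitrate(540, 0),
	       effective_bitrate(540, 1),
	       effective_bitrate(540, 2));
	return 0;
}
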
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index cc11759..7ca534b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -54,7 +54,7 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
return false;
}
- power = chanctx_conf->def.chan->max_power;
+ power = ieee80211_chandef_max_power(&chanctx_conf->def);
rcu_read_unlock();
if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL)
@@ -274,6 +274,12 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
if (iftype == NL80211_IFTYPE_ADHOC &&
nsdata->vif.type == NL80211_IFTYPE_ADHOC)
return -EBUSY;
+ /*
+ * will not add another interface while any channel
+ * switch is active.
+ */
+ if (nsdata->vif.csa_active)
+ return -EBUSY;
/*
* The remaining checks are only performed for interfaces
@@ -804,6 +810,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
cancel_work_sync(&local->dynamic_ps_enable_work);
cancel_work_sync(&sdata->recalc_smps);
+ sdata->vif.csa_active = false;
+ cancel_work_sync(&sdata->csa_finalize_work);
cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
@@ -1267,6 +1275,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
skb_queue_head_init(&sdata->skb_queue);
INIT_WORK(&sdata->work, ieee80211_iface_work);
INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
+ INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
switch (type) {
case NL80211_IFTYPE_P2P_GO:
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index bcffa69..e2b8364 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -12,27 +12,22 @@
#include <linux/export.h>
#include "led.h"
+#define MAC80211_BLINK_DELAY 50 /* ms */
+
void ieee80211_led_rx(struct ieee80211_local *local)
{
+ unsigned long led_delay = MAC80211_BLINK_DELAY;
if (unlikely(!local->rx_led))
return;
- if (local->rx_led_counter++ % 2 == 0)
- led_trigger_event(local->rx_led, LED_OFF);
- else
- led_trigger_event(local->rx_led, LED_FULL);
+ led_trigger_blink_oneshot(local->rx_led, &led_delay, &led_delay, 0);
}
-/* q is 1 if a packet was enqueued, 0 if it has been transmitted */
-void ieee80211_led_tx(struct ieee80211_local *local, int q)
+void ieee80211_led_tx(struct ieee80211_local *local)
{
+ unsigned long led_delay = MAC80211_BLINK_DELAY;
if (unlikely(!local->tx_led))
return;
- /* not sure how this is supposed to work ... */
- local->tx_led_counter += 2*q-1;
- if (local->tx_led_counter % 2 == 0)
- led_trigger_event(local->tx_led, LED_OFF);
- else
- led_trigger_event(local->tx_led, LED_FULL);
+ led_trigger_blink_oneshot(local->tx_led, &led_delay, &led_delay, 0);
}
void ieee80211_led_assoc(struct ieee80211_local *local, bool associated)
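
The led.c hunk replaces the old toggle counter with a one-shot blink per RX/TX event. A standalone sketch of that pattern, with led_trigger_blink_oneshot() stubbed out locally (the stub only mimics the kernel call's shape):

#include <stdio.h>

struct led_trigger { const char *name; };

/* Stand-in for the kernel's led_trigger_blink_oneshot(); it just logs
 * the requested on/off periods here. */
static void led_trigger_blink_oneshot(struct led_trigger *trig,
				      unsigned long *delay_on,
				      unsigned long *delay_off, int invert)
{
	printf("%s: blink %lums on / %lums off (invert=%d)\n",
	       trig->name, *delay_on, *delay_off, invert);
}

#define BLINK_DELAY 50 /* ms */

/* One-shot blink per event: overlapping events extend the blink instead
 * of racing an on/off toggle state. */
static void led_event(struct led_trigger *trig)
{
	unsigned long delay = BLINK_DELAY;

	led_trigger_blink_oneshot(trig, &delay, &delay, 0);
}

int main(void)
{
	struct led_trigger rx = { "rx_led" };

	led_event(&rx);
	return 0;
}
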
diff --git a/net/mac80211/led.h b/net/mac80211/led.h
index e0275d9..89f4344 100644
--- a/net/mac80211/led.h
+++ b/net/mac80211/led.h
@@ -13,7 +13,7 @@
#ifdef CONFIG_MAC80211_LEDS
void ieee80211_led_rx(struct ieee80211_local *local);
-void ieee80211_led_tx(struct ieee80211_local *local, int q);
+void ieee80211_led_tx(struct ieee80211_local *local);
void ieee80211_led_assoc(struct ieee80211_local *local,
bool associated);
void ieee80211_led_radio(struct ieee80211_local *local,
@@ -27,7 +27,7 @@ void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
static inline void ieee80211_led_rx(struct ieee80211_local *local)
{
}
-static inline void ieee80211_led_tx(struct ieee80211_local *local, int q)
+static inline void ieee80211_led_tx(struct ieee80211_local *local)
{
}
static inline void ieee80211_led_assoc(struct ieee80211_local *local,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 091088a..25eb35b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -102,17 +102,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (local->scan_channel) {
- chandef.chan = local->scan_channel;
- /* If scanning on oper channel, use whatever channel-type
- * is currently in use.
- */
- if (chandef.chan == local->_oper_chandef.chan) {
- chandef = local->_oper_chandef;
- } else {
- chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef.center_freq1 = chandef.chan->center_freq;
- }
+ if (local->scan_chandef.chan) {
+ chandef = local->scan_chandef;
} else if (local->tmp_channel) {
chandef.chan = local->tmp_channel;
chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -151,7 +142,7 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
- power = chandef.chan->max_power;
+ power = ieee80211_chandef_max_power(&chandef);
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 447f41b..885a5f6 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -62,7 +62,6 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *ie)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee80211_local *local = sdata->local;
u32 basic_rates = 0;
struct cfg80211_chan_def sta_chan_def;
@@ -85,7 +84,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
(ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
return false;
- ieee80211_sta_get_rates(local, ie, ieee80211_get_sdata_band(sdata),
+ ieee80211_sta_get_rates(sdata, ie, ieee80211_get_sdata_band(sdata),
&basic_rates);
if (sdata->vif.bss_conf.basic_rates != basic_rates)
@@ -274,7 +273,9 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS);
*pos++ = neighbors << 1;
/* Mesh capability */
- *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
+ *pos = 0x00;
+ *pos |= ifmsh->mshcfg.dot11MeshForwarding ?
+ IEEE80211_MESHCONF_CAPAB_FORWARDING : 0x00;
*pos |= ifmsh->accepting_plinks ?
IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 02c05fa..6b65d50 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -379,7 +379,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
u32 rates, basic_rates = 0, changed = 0;
sband = local->hw.wiphy->bands[band];
- rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
+ rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
spin_lock_bh(&sta->lock);
sta->last_rx = jiffies;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cc9e02d..21bccd8 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -489,27 +489,6 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
/* frame sending functions */
-static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
- struct ieee80211_supported_band *sband,
- u32 *rates)
-{
- int i, j, count;
- *rates = 0;
- count = 0;
- for (i = 0; i < supp_rates_len; i++) {
- int rate = (supp_rates[i] & 0x7F) * 5;
-
- for (j = 0; j < sband->n_bitrates; j++)
- if (sband->bitrates[j].bitrate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
-
- return count;
-}
-
static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ap_ht_param,
struct ieee80211_supported_band *sband,
@@ -628,12 +607,12 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
struct ieee80211_mgmt *mgmt;
u8 *pos, qos_info;
size_t offset = 0, noffset;
- int i, count, rates_len, supp_rates_len;
+ int i, count, rates_len, supp_rates_len, shift;
u16 capab;
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *chan;
- u32 rates = 0;
+ u32 rate_flags, rates = 0;
sdata_assert_lock(sdata);
@@ -644,8 +623,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
return;
}
chan = chanctx_conf->def.chan;
+ rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
rcu_read_unlock();
sband = local->hw.wiphy->bands[chan->band];
+ shift = ieee80211_vif_get_shift(&sdata->vif);
if (assoc_data->supp_rates_len) {
/*
@@ -654,17 +635,24 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
* in the association request (e.g. D-Link DAP 1353 in
* b-only mode)...
*/
- rates_len = ieee80211_compatible_rates(assoc_data->supp_rates,
- assoc_data->supp_rates_len,
- sband, &rates);
+ rates_len = ieee80211_parse_bitrates(&chanctx_conf->def, sband,
+ assoc_data->supp_rates,
+ assoc_data->supp_rates_len,
+ &rates);
} else {
/*
* In case AP not provide any supported rates information
* before association, we send information element(s) with
* all rates that we support.
*/
- rates = ~0;
- rates_len = sband->n_bitrates;
+ rates_len = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags)
+ != rate_flags)
+ continue;
+ rates |= BIT(i);
+ rates_len++;
+ }
}
skb = alloc_skb(local->hw.extra_tx_headroom +
@@ -741,8 +729,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
count = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
+ int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = (u8) rate;
if (++count == 8)
break;
}
@@ -755,8 +744,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
for (i++; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
+ int rate;
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = (u8) rate;
}
}
}
@@ -767,7 +758,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
*pos++ = WLAN_EID_PWR_CAPABILITY;
*pos++ = 2;
*pos++ = 0; /* min tx power */
- *pos++ = chan->max_power; /* max tx power */
+ /* max tx power */
+ *pos++ = ieee80211_chandef_max_power(&chanctx_conf->def);
/* 2. supported channels */
/* TODO: get this in reg domain format */
@@ -2443,15 +2435,16 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
u8 *supp_rates, unsigned int supp_rates_len,
u32 *rates, u32 *basic_rates,
bool *have_higher_than_11mbit,
- int *min_rate, int *min_rate_index)
+ int *min_rate, int *min_rate_index,
+ int shift, u32 rate_flags)
{
int i, j;
for (i = 0; i < supp_rates_len; i++) {
- int rate = (supp_rates[i] & 0x7f) * 5;
+ int rate = supp_rates[i] & 0x7f;
bool is_basic = !!(supp_rates[i] & 0x80);
- if (rate > 110)
+ if ((rate * 5 * (1 << shift)) > 110)
*have_higher_than_11mbit = true;
/*
@@ -2467,12 +2460,20 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
continue;
for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
+ struct ieee80211_rate *br;
+ int brate;
+
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+ if (brate == rate) {
*rates |= BIT(j);
if (is_basic)
*basic_rates |= BIT(j);
- if (rate < *min_rate) {
- *min_rate = rate;
+ if ((rate * 5) < *min_rate) {
+ *min_rate = rate * 5;
*min_rate_index = j;
}
break;
@@ -3902,27 +3903,40 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (!new_sta)
return -ENOMEM;
}
-
if (new_sta) {
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
int min_rate = INT_MAX, min_rate_index = -1;
+ struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
const struct cfg80211_bss_ies *ies;
+ int shift;
+ u32 rate_flags;
sband = local->hw.wiphy->bands[cbss->channel->band];
err = ieee80211_prep_channel(sdata, cbss);
if (err) {
sta_info_free(local, new_sta);
- return err;
+ return -EINVAL;
}
+ shift = ieee80211_vif_get_shift(&sdata->vif);
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
+ rcu_read_unlock();
ieee80211_get_rates(sband, bss->supp_rates,
bss->supp_rates_len,
&rates, &basic_rates,
&have_higher_than_11mbit,
- &min_rate, &min_rate_index);
+ &min_rate, &min_rate_index,
+ shift, rate_flags);
/*
* This used to be a workaround for basic rates missing
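
The mlme.c changes above match advertised rate bytes (0.5 Mb/s units, already scaled on 5/10 MHz channels) against the local bitrate table after applying the shift. A standalone sketch of that matching step (the bitrate table is hypothetical; rate-flag filtering is omitted):

#include <stdint.h>
#include <stdio.h>

static const int bitrates[] = { 10, 20, 55, 110, 60, 120, 240, 540 }; /* 100 kb/s */
#define N_BITRATES (int)(sizeof(bitrates) / sizeof(bitrates[0]))

/* Match the rate bytes of a (Ext) Supported Rates IE against the local
 * bitrate table, honouring the channel-width shift; returns a bitmap of
 * matched indices and fills *basic with the rates flagged 0x80. */
static uint32_t parse_rates(const uint8_t *srates, int len, int shift,
			    uint32_t *basic)
{
	uint32_t rates = 0;
	*basic = 0;

	for (int i = 0; i < len; i++) {
		int rate = srates[i] & 0x7f;
		int is_basic = srates[i] & 0x80;

		for (int j = 0; j < N_BITRATES; j++) {
			int brate = (bitrates[j] + 5 * (1 << shift) - 1) /
				    (5 * (1 << shift));
			if (brate == rate) {
				rates |= 1u << j;
				if (is_basic)
					*basic |= 1u << j;
				break;
			}
		}
	}
	return rates;
}

int main(void)
{
	const uint8_t ie[] = { 0x82, 0x84, 0x0b, 0x16, 0x0c, 0x18, 0x30, 0x6c };
	uint32_t basic;
	uint32_t rates = parse_rates(ie, sizeof(ie), 0, &basic);

	printf("rates=0x%02x basic=0x%02x\n", rates, basic);
	return 0;
}
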
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 30d58d2..ba63ac8 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -232,37 +232,28 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
/* could not find a basic rate; use original selection */
}
-static inline s8
-rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta)
+static void __rate_control_send_low(struct ieee80211_hw *hw,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta,
+ struct ieee80211_tx_info *info)
{
int i;
+ u32 rate_flags =
+ ieee80211_chandef_rate_flags(&hw->conf.chandef);
+ if ((sband->band == IEEE80211_BAND_2GHZ) &&
+ (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
+ rate_flags |= IEEE80211_RATE_ERP_G;
+
+ info->control.rates[0].idx = 0;
for (i = 0; i < sband->n_bitrates; i++) {
- struct ieee80211_rate *srate = &sband->bitrates[i];
- if ((srate->bitrate == 10) || (srate->bitrate == 20) ||
- (srate->bitrate == 55) || (srate->bitrate == 110))
+ if (!rate_supported(sta, sband->band, i))
continue;
- if (rate_supported(sta, sband->band, i))
- return i;
+ info->control.rates[0].idx = i;
+ break;
}
-
- /* No matching rate found */
- return 0;
-}
-
-static void __rate_control_send_low(struct ieee80211_hw *hw,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta,
- struct ieee80211_tx_info *info)
-{
- if ((sband->band != IEEE80211_BAND_2GHZ) ||
- !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
- info->control.rates[0].idx = rate_lowest_index(sband, sta);
- else
- info->control.rates[0].idx =
- rate_lowest_non_cck_index(sband, sta);
+ WARN_ON_ONCE(i == sband->n_bitrates);
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
@@ -585,6 +576,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
bool has_mcs_mask;
u32 mask;
+ u32 rate_flags;
int i;
/*
@@ -594,6 +586,12 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
*/
mask = sdata->rc_rateidx_mask[info->band];
has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ for (i = 0; i < sband->n_bitrates; i++)
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ mask &= ~BIT(i);
+
if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
return;
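
The rate.c hunks drop every bitrate whose flags do not include the channel's required rate flags before rate-control masking and the send-low fallback. A standalone sketch of that filter (flag values and the table are made up for the example):

#include <stdint.h>
#include <stdio.h>

/* Simplified rate flag; IEEE80211_RATE_SUPPORTS_5MHZ/_10MHZ play this
 * role in mac80211. */
#define RATE_SUPPORTS_10MHZ 0x1

struct rate { int bitrate; uint32_t flags; };

static const struct rate table[] = {
	{ 10, 0 }, { 20, 0 },                       /* CCK: no 10 MHz support */
	{ 60, RATE_SUPPORTS_10MHZ }, { 120, RATE_SUPPORTS_10MHZ },
};
#define N_RATES (int)(sizeof(table) / sizeof(table[0]))

/* Clear every rate whose flags do not include all of the channel's
 * required flags -- the same test the patch adds. */
static uint32_t apply_chan_flags(uint32_t mask, uint32_t required)
{
	for (int i = 0; i < N_RATES; i++)
		if ((required & table[i].flags) != required)
			mask &= ~(1u << i);
	return mask;
}

int main(void)
{
	uint32_t mask = apply_chan_flags(0xf, RATE_SUPPORTS_10MHZ);

	printf("usable rate mask on a 10 MHz channel: 0x%x\n", mask); /* 0xc */
	return 0;
}
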
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index d35a5dd..5dedc56 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -66,11 +66,12 @@ static inline void rate_control_rate_init(struct sta_info *sta)
}
sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
- rcu_read_unlock();
ieee80211_sta_set_rx_nss(sta);
- ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
+ ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+ priv_sta);
+ rcu_read_unlock();
set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
}
@@ -81,10 +82,21 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ if (ref && ref->ops->rate_update) {
+ rcu_read_lock();
- if (ref && ref->ops->rate_update)
- ref->ops->rate_update(ref->priv, sband, ista,
- priv_sta, changed);
+ chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+ ista, priv_sta, changed);
+ rcu_read_unlock();
+ }
drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index e6512e2..8b5f7ef 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -383,14 +383,18 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
static void
calc_rate_durations(enum ieee80211_band band,
struct minstrel_rate *d,
- struct ieee80211_rate *rate)
+ struct ieee80211_rate *rate,
+ struct cfg80211_chan_def *chandef)
{
int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
+ int shift = ieee80211_chandef_get_shift(chandef);
d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
- rate->bitrate, erp, 1);
+ DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
+ shift);
d->ack_time = ieee80211_frame_duration(band, 10,
- rate->bitrate, erp, 1);
+ DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
+ shift);
}
static void
@@ -418,21 +422,25 @@ init_sample_table(struct minstrel_sta_info *mi)
static void
minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_sta_info *mi = priv_sta;
struct minstrel_priv *mp = priv;
struct ieee80211_rate *ctl_rate;
unsigned int i, n = 0;
unsigned int t_slot = 9; /* FIXME: get real slot time */
+ u32 rate_flags;
mi->sta = sta;
mi->lowest_rix = rate_lowest_index(sband, sta);
ctl_rate = &sband->bitrates[mi->lowest_rix];
mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
ctl_rate->bitrate,
- !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
+ !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1,
+ ieee80211_chandef_get_shift(chandef));
+ rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
memset(mi->max_tp_rate, 0, sizeof(mi->max_tp_rate));
mi->max_prob_rate = 0;
@@ -441,15 +449,22 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
unsigned int tx_time_single;
unsigned int cw = mp->cw_min;
+ int shift;
if (!rate_supported(sta, sband->band, i))
continue;
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
n++;
memset(mr, 0, sizeof(*mr));
mr->rix = i;
- mr->bitrate = sband->bitrates[i].bitrate / 5;
- calc_rate_durations(sband->band, mr, &sband->bitrates[i]);
+ shift = ieee80211_chandef_get_shift(chandef);
+ mr->bitrate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ (1 << shift) * 5);
+ calc_rate_durations(sband->band, mr, &sband->bitrates[i],
+ chandef);
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */
@@ -547,6 +562,7 @@ minstrel_init_cck_rates(struct minstrel_priv *mp)
{
static const int bitrates[4] = { 10, 20, 55, 110 };
struct ieee80211_supported_band *sband;
+ u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
int i, j;
sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
@@ -559,6 +575,9 @@ minstrel_init_cck_rates(struct minstrel_priv *mp)
if (rate->flags & IEEE80211_RATE_ERP_G)
continue;
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
if (rate->bitrate != bitrates[j])
continue;
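
Minstrel now feeds ieee80211_frame_duration() the pre-scaled bitrate plus the shift, since preamble and SIGNAL stretch on narrow channels while the data symbols are covered by the scaled rate. A rough standalone estimate of that OFDM airtime split (approximate numbers only, not a substitute for the kernel function):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Rough OFDM airtime (us) for a frame of len bytes at 'rate' in
 * 100 kb/s units, already scaled for the channel width; the fixed
 * preamble/SIGNAL part stretches by 1 << shift. */
static int ofdm_airtime(int len, int rate, int shift)
{
	int dur = (16 + 16 + 4) << shift;       /* SIFS + preamble + SIGNAL */

	dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10, 4 * rate);
	return dur;
}

int main(void)
{
	/* 1200-byte frame at 54 Mb/s (20 MHz) vs. 27 Mb/s (10 MHz). */
	printf("20 MHz: %d us, 10 MHz: %d us\n",
	       ofdm_airtime(1200, 540, 0), ofdm_airtime(1200, 270, 1));
	return 0;
}
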
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index f5aed96..6156942 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -844,6 +844,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_priv *mp = priv;
@@ -869,8 +870,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
mi->sta = sta;
mi->stats_update = jiffies;
- ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1);
- mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur;
+ ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
+ mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
+ mi->overhead += ack_dur;
mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
@@ -939,22 +941,25 @@ use_legacy:
memset(&msp->legacy, 0, sizeof(msp->legacy));
msp->legacy.r = msp->ratelist;
msp->legacy.sample_table = msp->sample_table;
- return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy);
+ return mac80211_minstrel.rate_init(priv, sband, chandef, sta,
+ &msp->legacy);
}
static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
- minstrel_ht_update_caps(priv, sband, sta, priv_sta);
+ minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta,
u32 changed)
{
- minstrel_ht_update_caps(priv, sband, sta, priv_sta);
+ minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
static void *
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 502d3ec..958fad0 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -293,6 +293,7 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
static void
rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct rc_pid_sta_info *spinfo = priv_sta;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2c5a79b..6b85f95 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -87,11 +87,13 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
int len;
/* always present fields */
- len = sizeof(struct ieee80211_radiotap_header) + 9;
+ len = sizeof(struct ieee80211_radiotap_header) + 8;
- /* allocate extra bitmap */
+ /* allocate extra bitmaps */
if (status->vendor_radiotap_len)
len += 4;
+ if (status->chains)
+ len += 4 * hweight8(status->chains);
if (ieee80211_have_rx_timestamp(status)) {
len = ALIGN(len, 8);
@@ -100,6 +102,10 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
len += 1;
+ /* antenna field, if we don't have per-chain info */
+ if (!status->chains)
+ len += 1;
+
/* padding for RX_FLAGS if necessary */
len = ALIGN(len, 2);
@@ -116,6 +122,11 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
len += 12;
}
+ if (status->chains) {
+ /* antenna and antenna signal fields */
+ len += 2 * hweight8(status->chains);
+ }
+
if (status->vendor_radiotap_len) {
if (WARN_ON_ONCE(status->vendor_radiotap_align == 0))
status->vendor_radiotap_align = 1;
@@ -145,8 +156,12 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_header *rthdr;
unsigned char *pos;
+ __le32 *it_present;
+ u32 it_present_val;
u16 rx_flags = 0;
- int mpdulen;
+ u16 channel_flags = 0;
+ int mpdulen, chain;
+ unsigned long chains = status->chains;
mpdulen = skb->len;
if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
@@ -154,25 +169,39 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
memset(rthdr, 0, rtap_len);
+ it_present = &rthdr->it_present;
/* radiotap header, set always present flags */
- rthdr->it_present =
- cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
- (1 << IEEE80211_RADIOTAP_CHANNEL) |
- (1 << IEEE80211_RADIOTAP_ANTENNA) |
- (1 << IEEE80211_RADIOTAP_RX_FLAGS));
rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len);
+ it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
+ BIT(IEEE80211_RADIOTAP_CHANNEL) |
+ BIT(IEEE80211_RADIOTAP_RX_FLAGS);
- pos = (unsigned char *)(rthdr + 1);
+ if (!status->chains)
+ it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
+
+ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
+ it_present_val |=
+ BIT(IEEE80211_RADIOTAP_EXT) |
+ BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
+ put_unaligned_le32(it_present_val, it_present);
+ it_present++;
+ it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
+ BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
+ }
if (status->vendor_radiotap_len) {
- rthdr->it_present |=
- cpu_to_le32(BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) |
- cpu_to_le32(BIT(IEEE80211_RADIOTAP_EXT));
- put_unaligned_le32(status->vendor_radiotap_bitmap, pos);
- pos += 4;
+ it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
+ BIT(IEEE80211_RADIOTAP_EXT);
+ put_unaligned_le32(it_present_val, it_present);
+ it_present++;
+ it_present_val = status->vendor_radiotap_bitmap;
}
+ put_unaligned_le32(it_present_val, it_present);
+
+ pos = (void *)(it_present + 1);
+
/* the order of the following fields is important */
/* IEEE80211_RADIOTAP_TSFT */
@@ -207,28 +236,35 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*/
*pos = 0;
} else {
+ int shift = 0;
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
- *pos = rate->bitrate / 5;
+ if (status->flag & RX_FLAG_10MHZ)
+ shift = 1;
+ else if (status->flag & RX_FLAG_5MHZ)
+ shift = 2;
+ *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
}
pos++;
/* IEEE80211_RADIOTAP_CHANNEL */
put_unaligned_le16(status->freq, pos);
pos += 2;
+ if (status->flag & RX_FLAG_10MHZ)
+ channel_flags |= IEEE80211_CHAN_HALF;
+ else if (status->flag & RX_FLAG_5MHZ)
+ channel_flags |= IEEE80211_CHAN_QUARTER;
+
if (status->band == IEEE80211_BAND_5GHZ)
- put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
- put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
- put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
else if (rate)
- put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
- pos);
+		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
else
- put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
+ channel_flags |= IEEE80211_CHAN_2GHZ;
+ put_unaligned_le16(channel_flags, pos);
pos += 2;
/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
@@ -242,9 +278,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
- /* IEEE80211_RADIOTAP_ANTENNA */
- *pos = status->antenna;
- pos++;
+ if (!status->chains) {
+ /* IEEE80211_RADIOTAP_ANTENNA */
+ *pos = status->antenna;
+ pos++;
+ }
/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
@@ -341,6 +379,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos += 2;
}
+ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
+ *pos++ = status->chain_signal[chain];
+ *pos++ = chain;
+ }
+
if (status->vendor_radiotap_len) {
/* ensure 2 byte alignment for the vendor field as required */
if ((pos - (u8 *)rthdr) & 1)
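
The rx.c radiotap changes emit one extra it_present word per RX chain, each previous word carrying the EXT and RADIOTAP_NAMESPACE bits so parsers keep reading bitmaps. A standalone sketch of how that chain of presence words is built (bit numbers are from the radiotap spec):

#include <stdint.h>
#include <stdio.h>

#define RT_DBM_ANTSIGNAL   5
#define RT_ANTENNA        11
#define RT_RX_FLAGS       14
#define RT_NAMESPACE      29
#define RT_EXT            31

/* Build the chain of it_present words: one per RX chain, each prior
 * word marked EXT | RADIOTAP_NAMESPACE. */
static int build_it_present(uint32_t first, unsigned int n_chains,
			    uint32_t *out)
{
	uint32_t val = first;
	int n = 0;

	for (unsigned int c = 0; c < n_chains; c++) {
		val |= (1u << RT_EXT) | (1u << RT_NAMESPACE);
		out[n++] = val;
		val = (1u << RT_ANTENNA) | (1u << RT_DBM_ANTSIGNAL);
	}
	out[n++] = val;
	return n;
}

int main(void)
{
	uint32_t words[4];
	int n = build_it_present(1u << RT_RX_FLAGS, 2, words);

	for (int i = 0; i < n; i++)
		printf("it_present[%d] = 0x%08x\n", i, words[i]);
	return 0;
}
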
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 1b122a7..08afe74 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -66,6 +66,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
int clen, srlen;
+ enum nl80211_bss_scan_width scan_width;
s32 signal = 0;
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
@@ -73,8 +74,15 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
signal = (rx_status->signal * 100) / local->hw.max_signal;
- cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
- mgmt, len, signal, GFP_ATOMIC);
+ scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ if (rx_status->flag & RX_FLAG_5MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ if (rx_status->flag & RX_FLAG_10MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_10;
+
+ cbss = cfg80211_inform_bss_width_frame(local->hw.wiphy, channel,
+ scan_width, mgmt, len, signal,
+ GFP_ATOMIC);
if (!cbss)
return NULL;
@@ -204,10 +212,29 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
ieee80211_rx_bss_put(local, bss);
}
+static void
+ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef,
+ enum nl80211_bss_scan_width scan_width)
+{
+ memset(chandef, 0, sizeof(*chandef));
+ switch (scan_width) {
+ case NL80211_BSS_CHAN_WIDTH_5:
+ chandef->width = NL80211_CHAN_WIDTH_5;
+ break;
+ case NL80211_BSS_CHAN_WIDTH_10:
+ chandef->width = NL80211_CHAN_WIDTH_10;
+ break;
+ default:
+ chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+ break;
+ }
+}
+
/* return false if no more work */
static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
{
struct cfg80211_scan_request *req = local->scan_req;
+ struct cfg80211_chan_def chandef;
enum ieee80211_band band;
int i, ielen, n_chans;
@@ -229,11 +256,12 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
} while (!n_chans);
local->hw_scan_req->n_channels = n_chans;
+ ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
local->hw_scan_ies_bufsize,
req->ie, req->ie_len, band,
- req->rates[band], 0);
+ req->rates[band], &chandef);
local->hw_scan_req->ie_len = ielen;
local->hw_scan_req->no_cck = req->no_cck;
@@ -280,7 +308,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
rcu_assign_pointer(local->scan_sdata, NULL);
local->scanning = 0;
- local->scan_channel = NULL;
+ local->scan_chandef.chan = NULL;
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
@@ -615,11 +643,34 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
{
int skip;
struct ieee80211_channel *chan;
+ enum nl80211_bss_scan_width oper_scan_width;
skip = 0;
chan = local->scan_req->channels[local->scan_channel_idx];
- local->scan_channel = chan;
+ local->scan_chandef.chan = chan;
+ local->scan_chandef.center_freq1 = chan->center_freq;
+ local->scan_chandef.center_freq2 = 0;
+ switch (local->scan_req->scan_width) {
+ case NL80211_BSS_CHAN_WIDTH_5:
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_5;
+ break;
+ case NL80211_BSS_CHAN_WIDTH_10:
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_10;
+ break;
+ case NL80211_BSS_CHAN_WIDTH_20:
+ /* If scanning on oper channel, use whatever channel-type
+ * is currently in use.
+ */
+ oper_scan_width = cfg80211_chandef_to_scan_width(
+ &local->_oper_chandef);
+ if (chan == local->_oper_chandef.chan &&
+ oper_scan_width == local->scan_req->scan_width)
+ local->scan_chandef = local->_oper_chandef;
+ else
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+ break;
+ }
if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
skip = 1;
@@ -659,7 +710,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
unsigned long *next_delay)
{
/* switch back to the operating channel */
- local->scan_channel = NULL;
+ local->scan_chandef.chan = NULL;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/* disable PS */
@@ -801,7 +852,8 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan)
+ struct ieee80211_channel *chan,
+ enum nl80211_bss_scan_width scan_width)
{
struct ieee80211_local *local = sdata->local;
int ret = -EBUSY;
@@ -851,6 +903,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
local->int_scan_req->ssids = &local->scan_ssid;
local->int_scan_req->n_ssids = 1;
+ local->int_scan_req->scan_width = scan_width;
memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
local->int_scan_req->ssids[0].ssid_len = ssid_len;
@@ -912,6 +965,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sched_scan_ies sched_scan_ies = {};
+ struct cfg80211_chan_def chandef;
int ret, i, iebufsz;
iebufsz = 2 + IEEE80211_MAX_SSID_LEN +
@@ -939,10 +993,12 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
goto out_free;
}
+ ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
+
sched_scan_ies.len[i] =
ieee80211_build_preq_ies(local, sched_scan_ies.ie[i],
iebufsz, req->ie, req->ie_len,
- i, (u32) -1, 0);
+ i, (u32) -1, &chandef);
}
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
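
The scan.c changes translate between the BSS scan width and a chandef width in both directions. A compact standalone sketch of that mapping (local enums with arbitrary values; only the mapping mirrors the helpers above):

#include <stdio.h>

enum chan_width { W_5, W_10, W_20_NOHT, W_20 };
enum scan_width { SCAN_20, SCAN_10, SCAN_5 };

/* chandef width -> scan width, as cfg80211_chandef_to_scan_width() does */
static enum scan_width to_scan_width(enum chan_width w)
{
	switch (w) {
	case W_5:  return SCAN_5;
	case W_10: return SCAN_10;
	default:   return SCAN_20;
	}
}

/* scan width -> chandef width, as ieee80211_prepare_scan_chandef() does */
static enum chan_width to_chan_width(enum scan_width s)
{
	switch (s) {
	case SCAN_5:  return W_5;
	case SCAN_10: return W_10;
	default:      return W_20_NOHT;
	}
}

int main(void)
{
	printf("%d %d\n", to_scan_width(W_10), to_chan_width(SCAN_5));
	return 0;
}
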
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 4343920..368837f 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -235,7 +235,8 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
/* IEEE80211_RADIOTAP_RATE rate */
if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
+ !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_VHT_MCS)))
len += 2;
/* IEEE80211_RADIOTAP_TX_FLAGS */
@@ -244,17 +245,23 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
/* IEEE80211_RADIOTAP_DATA_RETRIES */
len += 1;
- /* IEEE80211_TX_RC_MCS */
- if (info->status.rates[0].idx >= 0 &&
- info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
- len += 3;
+ /* IEEE80211_RADIOTAP_MCS
+ * IEEE80211_RADIOTAP_VHT */
+ if (info->status.rates[0].idx >= 0) {
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
+ len += 3;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS)
+ len = ALIGN(len, 2) + 12;
+ }
return len;
}
-static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
- *sband, struct sk_buff *skb,
- int retry_count, int rtap_len)
+static void
+ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
+ struct ieee80211_supported_band *sband,
+ struct sk_buff *skb, int retry_count,
+ int rtap_len, int shift)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -279,9 +286,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
/* IEEE80211_RADIOTAP_RATE */
if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_VHT_MCS))) {
+ u16 rate;
+
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
- *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
+ rate = sband->bitrates[info->status.rates[0].idx].bitrate;
+ *pos = DIV_ROUND_UP(rate, 5 * (1 << shift));
/* padding for tx flags */
pos += 2;
}
@@ -306,9 +317,12 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
*pos = retry_count;
pos++;
- /* IEEE80211_TX_RC_MCS */
- if (info->status.rates[0].idx >= 0 &&
- info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
+ if (info->status.rates[0].idx < 0)
+ return;
+
+ /* IEEE80211_RADIOTAP_MCS
+ * IEEE80211_RADIOTAP_VHT */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
IEEE80211_RADIOTAP_MCS_HAVE_GI |
@@ -321,8 +335,48 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
pos[2] = info->status.rates[0].idx;
pos += 3;
- }
+ } else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
+ u16 known = local->hw.radiotap_vht_details &
+ (IEEE80211_RADIOTAP_VHT_KNOWN_GI |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH);
+
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
+
+ /* required alignment from rthdr */
+ pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2);
+ /* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */
+ put_unaligned_le16(known, pos);
+ pos += 2;
+
+ /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
+ pos++;
+
+ /* u8 bandwidth */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *pos = 1;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ *pos = 4;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ *pos = 11;
+ else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */
+ *pos = 0;
+ pos++;
+
+ /* u8 mcs_nss[4] */
+ *pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) |
+ ieee80211_rate_get_vht_nss(&info->status.rates[0]);
+ pos += 4;
+
+ /* u8 coding */
+ pos++;
+ /* u8 group_id */
+ pos++;
+ /* u16 partial_aid */
+ pos += 2;
+ }
}
static void ieee80211_report_used_skb(struct ieee80211_local *local,
@@ -424,6 +478,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
bool acked;
struct ieee80211_bar *bar;
int rtap_len;
+ int shift = 0;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
@@ -458,6 +513,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
continue;
+ shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+
if (info->flags & IEEE80211_TX_STATUS_EOSP)
clear_sta_flag(sta, WLAN_STA_SP);
@@ -557,7 +614,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rcu_read_unlock();
- ieee80211_led_tx(local, 0);
+ ieee80211_led_tx(local);
/* SNMP counters
* Fragments are passed to low-level drivers as separate skbs, so these
@@ -624,7 +681,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb(skb);
return;
}
- ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);
+ ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
+ rtap_len, shift);
/* XXX: is this sufficient for BPF? */
skb_set_mac_header(skb, 0);
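
The status.c hunk writes the radiotap VHT field for TX status: the first mcs_nss byte packs MCS in the high nibble and NSS in the low nibble, and the bandwidth byte uses the codes 0/1/4/11 for 20/40/80/160 MHz. A standalone sketch of those two encodings:

#include <stdint.h>
#include <stdio.h>

/* Pack the first mcs_nss byte of the radiotap VHT field. */
static uint8_t vht_mcs_nss(unsigned int mcs, unsigned int nss)
{
	return (uint8_t)((mcs << 4) | (nss & 0x0f));
}

/* Radiotap VHT bandwidth codes used by the hunk above. */
static uint8_t vht_bw_code(int mhz)
{
	switch (mhz) {
	case 40:  return 1;
	case 80:  return 4;
	case 160: return 11;
	default:  return 0;   /* 20 MHz */
	}
}

int main(void)
{
	printf("mcs_nss=0x%02x bw=%u\n", vht_mcs_nss(7, 2), vht_bw_code(80));
	return 0;
}
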
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index c215fafd7..1aba645 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1906,6 +1906,32 @@ TRACE_EVENT(api_radar_detected,
)
);
+TRACE_EVENT(drv_channel_switch_beacon,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_chan_def *chandef),
+
+ TP_ARGS(local, sdata, chandef),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ CHANDEF_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ CHANDEF_ASSIGN(chandef);
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " channel switch to " CHANDEF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG
+ )
+);
+
+
#ifdef CONFIG_MAC80211_MESSAGE_TRACING
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211_msg
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4105d0c..0e42322 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -40,12 +40,22 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int group_addr,
int next_frag_len)
{
- int rate, mrate, erp, dur, i;
+ int rate, mrate, erp, dur, i, shift = 0;
struct ieee80211_rate *txrate;
struct ieee80211_local *local = tx->local;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ u32 rate_flags = 0;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
+ if (chanctx_conf) {
+ shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
+ rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
+ }
+ rcu_read_unlock();
/* assume HW handles this */
if (tx->rate.flags & IEEE80211_TX_RC_MCS)
@@ -122,8 +132,11 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (r->bitrate > txrate->bitrate)
break;
+ if ((rate_flags & r->flags) != rate_flags)
+ continue;
+
if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
- rate = r->bitrate;
+ rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
switch (sband->band) {
case IEEE80211_BAND_2GHZ: {
@@ -150,7 +163,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (rate == -1) {
/* No matching basic rate found; use highest suitable mandatory
* PHY rate */
- rate = mrate;
+ rate = DIV_ROUND_UP(mrate, 1 << shift);
}
/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
@@ -162,7 +175,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
* (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
* to closest integer */
dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
- tx->sdata->vif.bss_conf.use_short_preamble);
+ tx->sdata->vif.bss_conf.use_short_preamble,
+ shift);
if (next_frag_len) {
/* Frame is fragmented: duration increases with time needed to
@@ -171,7 +185,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
/* next fragment */
dur += ieee80211_frame_duration(sband->band, next_frag_len,
txrate->bitrate, erp,
- tx->sdata->vif.bss_conf.use_short_preamble);
+ tx->sdata->vif.bss_conf.use_short_preamble,
+ shift);
}
return cpu_to_le16(dur);
@@ -1257,6 +1272,10 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
+ if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+ vif = &sdata->vif;
+ break;
+ }
sdata = rcu_dereference(local->monitor_sdata);
if (sdata) {
vif = &sdata->vif;
@@ -1281,7 +1300,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
txpending);
ieee80211_tpt_led_trig_tx(local, fc, led_len);
- ieee80211_led_tx(local, 1);
WARN_ON_ONCE(!skb_queue_empty(skbs));
@@ -2320,6 +2338,81 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
return 0;
}
+void ieee80211_csa_finish(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->csa_finalize_work);
+}
+EXPORT_SYMBOL(ieee80211_csa_finish);
+
+static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
+ struct beacon_data *beacon)
+{
+ struct probe_resp *resp;
+ int counter_offset_beacon = sdata->csa_counter_offset_beacon;
+ int counter_offset_presp = sdata->csa_counter_offset_presp;
+
+ /* warn if the driver did not check for/react to csa completeness */
+ if (WARN_ON(((u8 *)beacon->tail)[counter_offset_beacon] == 0))
+ return;
+
+ ((u8 *)beacon->tail)[counter_offset_beacon]--;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP &&
+ counter_offset_presp) {
+ rcu_read_lock();
+ resp = rcu_dereference(sdata->u.ap.probe_resp);
+
+ /* if nl80211 accepted the offset, this should not happen. */
+ if (WARN_ON(!resp)) {
+ rcu_read_unlock();
+ return;
+ }
+ resp->data[counter_offset_presp]--;
+ rcu_read_unlock();
+ }
+}
+
+bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct beacon_data *beacon = NULL;
+ u8 *beacon_data;
+ size_t beacon_data_len;
+ int counter_beacon = sdata->csa_counter_offset_beacon;
+ int ret = false;
+
+ if (!ieee80211_sdata_running(sdata))
+ return false;
+
+ rcu_read_lock();
+ if (vif->type == NL80211_IFTYPE_AP) {
+ struct ieee80211_if_ap *ap = &sdata->u.ap;
+
+ beacon = rcu_dereference(ap->beacon);
+ if (WARN_ON(!beacon || !beacon->tail))
+ goto out;
+ beacon_data = beacon->tail;
+ beacon_data_len = beacon->tail_len;
+ } else {
+ WARN_ON(1);
+ goto out;
+ }
+
+ if (WARN_ON(counter_beacon > beacon_data_len))
+ goto out;
+
+ if (beacon_data[counter_beacon] == 0)
+ ret = true;
+ out:
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(ieee80211_csa_is_complete);
+
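/*
 * Illustrative sketch (not part of the patch): the intended driver-side
 * use of the two exports above.  "mydrv_beacon_sent" is a hypothetical
 * per-beacon completion hook; only the helpers added here are real API.
 */
static void mydrv_beacon_sent(struct ieee80211_vif *vif)
{
	/* once the countdown carried in the beacon reaches zero, let
	 * mac80211 finalize the switch from its csa_finalize_work */
	if (vif->csa_active && ieee80211_csa_is_complete(vif))
		ieee80211_csa_finish(vif);
}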
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 *tim_offset, u16 *tim_length)
@@ -2350,6 +2443,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct beacon_data *beacon = rcu_dereference(ap->beacon);
if (beacon) {
+ if (sdata->vif.csa_active)
+ ieee80211_update_csa(sdata, beacon);
+
/*
* headroom, head length,
* tail length and maximum TIM length
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 2265445..d23c5a7 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -107,7 +107,8 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
}
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
- int rate, int erp, int short_preamble)
+ int rate, int erp, int short_preamble,
+ int shift)
{
int dur;
@@ -118,6 +119,9 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
*
* rate is in 100 kbps, so the dividend is multiplied by 10 in the
* DIV_ROUND_UP() operations.
+ *
+ * shift may be 2 for 5 MHz channels or 1 for 10 MHz channels, and
+ * is assumed to be 0 otherwise.
*/
if (band == IEEE80211_BAND_5GHZ || erp) {
@@ -130,13 +134,23 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
* TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext
*
* T_SYM = 4 usec
- * 802.11a - 17.5.2: aSIFSTime = 16 usec
+ * 802.11a - 18.5.2: aSIFSTime = 16 usec
* 802.11g - 19.8.4: aSIFSTime = 10 usec +
* signal ext = 6 usec
*/
dur = 16; /* SIFS + signal ext */
- dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */
- dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */
+ dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */
+ dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */
+
+ /* IEEE 802.11-2012 18.3.2.4: all values above are:
+ * * times 4 for 5 MHz
+ * * times 2 for 10 MHz
+ */
+ dur *= 1 << shift;
+
+ /* rates should already consider the channel bandwidth,
+ * don't apply divisor again.
+ */
dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
4 * rate); /* T_SYM x N_SYM */
} else {
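/*
 * Illustrative sketch (not part of the patch): the OFDM branch above
 * reduced to plain C, so the 5/10 MHz scaling is easy to check.  With
 * rate in 100 kbps units, len = 10 (an ACK) and rate = 540 (54 Mbit/s,
 * shift 0) this yields 40 usec; for shift 2 (5 MHz) the fixed preamble
 * part is four times longer and the caller already passes a rate
 * divided by four.
 */
#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int example_ofdm_duration(int len, int rate, int shift)
{
	int dur;

	dur = 16;		/* SIFS + signal ext */
	dur += 16;		/* T_PREAMBLE */
	dur += 4;		/* T_SIGNAL */
	dur *= 1 << shift;	/* x4 for 5 MHz, x2 for 10 MHz */
	dur += 4 * EX_DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
				   4 * rate);	/* T_SYM x N_SYM */
	return dur;
}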
@@ -168,7 +182,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
{
struct ieee80211_sub_if_data *sdata;
u16 dur;
- int erp;
+ int erp, shift = 0;
bool short_preamble = false;
erp = 0;
@@ -177,10 +191,11 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
+ shift = ieee80211_vif_get_shift(vif);
}
dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
- short_preamble);
+ short_preamble, shift);
return cpu_to_le16(dur);
}
@@ -194,7 +209,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
struct ieee80211_rate *rate;
struct ieee80211_sub_if_data *sdata;
bool short_preamble;
- int erp;
+ int erp, shift = 0, bitrate;
u16 dur;
struct ieee80211_supported_band *sband;
@@ -210,17 +225,20 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
+ shift = ieee80211_vif_get_shift(vif);
}
+ bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+
/* CTS duration */
- dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate,
- erp, short_preamble);
+ dur = ieee80211_frame_duration(sband->band, 10, bitrate,
+ erp, short_preamble, shift);
/* Data frame duration */
- dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
- erp, short_preamble);
+ dur += ieee80211_frame_duration(sband->band, frame_len, bitrate,
+ erp, short_preamble, shift);
/* ACK duration */
- dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
- erp, short_preamble);
+ dur += ieee80211_frame_duration(sband->band, 10, bitrate,
+ erp, short_preamble, shift);
return cpu_to_le16(dur);
}
@@ -235,7 +253,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
struct ieee80211_rate *rate;
struct ieee80211_sub_if_data *sdata;
bool short_preamble;
- int erp;
+ int erp, shift = 0, bitrate;
u16 dur;
struct ieee80211_supported_band *sband;
@@ -250,15 +268,18 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
+ shift = ieee80211_vif_get_shift(vif);
}
+ bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+
/* Data frame duration */
- dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
- erp, short_preamble);
+ dur = ieee80211_frame_duration(sband->band, frame_len, bitrate,
+ erp, short_preamble, shift);
if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
/* ACK duration */
- dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
- erp, short_preamble);
+ dur += ieee80211_frame_duration(sband->band, 10, bitrate,
+ erp, short_preamble, shift);
}
return cpu_to_le16(dur);
@@ -1052,32 +1073,6 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
}
}
-void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
- const size_t supp_rates_len,
- const u8 *supp_rates)
-{
- struct ieee80211_chanctx_conf *chanctx_conf;
- int i, have_higher_than_11mbit = 0;
-
- /* cf. IEEE 802.11 9.2.12 */
- for (i = 0; i < supp_rates_len; i++)
- if ((supp_rates[i] & 0x7f) * 5 > 110)
- have_higher_than_11mbit = 1;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-
- if (chanctx_conf &&
- chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ &&
- have_higher_than_11mbit)
- sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
- else
- sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
- rcu_read_unlock();
-
- ieee80211_set_wmm_default(sdata, true);
-}
-
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg, u16 status,
const u8 *extra, size_t extra_len, const u8 *da,
@@ -1162,7 +1157,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
size_t buffer_len, const u8 *ie, size_t ie_len,
enum ieee80211_band band, u32 rate_mask,
- u8 channel)
+ struct cfg80211_chan_def *chandef)
{
struct ieee80211_supported_band *sband;
u8 *pos = buffer, *end = buffer + buffer_len;
@@ -1171,16 +1166,26 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
u8 rates[32];
int num_rates;
int ext_rates_len;
+ int shift;
+ u32 rate_flags;
sband = local->hw.wiphy->bands[band];
if (WARN_ON_ONCE(!sband))
return 0;
+ rate_flags = ieee80211_chandef_rate_flags(chandef);
+ shift = ieee80211_chandef_get_shift(chandef);
+
num_rates = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if ((BIT(i) & rate_mask) == 0)
continue; /* skip rate */
- rates[num_rates++] = (u8) (sband->bitrates[i].bitrate / 5);
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
+ rates[num_rates++] =
+ (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ (1 << shift) * 5);
}
supp_rates_len = min_t(int, num_rates, 8);
@@ -1220,12 +1225,13 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
pos += ext_rates_len;
}
- if (channel && sband->band == IEEE80211_BAND_2GHZ) {
+ if (chandef->chan && sband->band == IEEE80211_BAND_2GHZ) {
if (end - pos < 3)
goto out_err;
*pos++ = WLAN_EID_DS_PARAMS;
*pos++ = 1;
- *pos++ = channel;
+ *pos++ = ieee80211_frequency_to_channel(
+ chandef->chan->center_freq);
}
/* insert custom IEs that go before HT */
@@ -1290,9 +1296,9 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
bool directed)
{
struct ieee80211_local *local = sdata->local;
+ struct cfg80211_chan_def chandef;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 chan_no;
int ies_len;
/*
@@ -1300,10 +1306,11 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
* in order to maximize the chance that we get a response. Some
* badly-behaved APs don't respond when this parameter is included.
*/
+ chandef.width = sdata->vif.bss_conf.chandef.width;
if (directed)
- chan_no = 0;
+ chandef.chan = NULL;
else
- chan_no = ieee80211_frequency_to_channel(chan->center_freq);
+ chandef.chan = chan;
skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
ssid, ssid_len, 100 + ie_len);
@@ -1313,7 +1320,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb),
skb_tailroom(skb),
ie, ie_len, chan->band,
- ratemask, chan_no);
+ ratemask, &chandef);
skb_put(skb, ies_len);
if (dst) {
@@ -1347,16 +1354,19 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
}
}
-u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
+u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum ieee80211_band band, u32 *basic_rates)
{
struct ieee80211_supported_band *sband;
struct ieee80211_rate *bitrates;
size_t num_rates;
- u32 supp_rates;
- int i, j;
- sband = local->hw.wiphy->bands[band];
+ u32 supp_rates, rate_flags;
+ int i, j, shift;
+ sband = sdata->local->hw.wiphy->bands[band];
+
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ shift = ieee80211_vif_get_shift(&sdata->vif);
if (WARN_ON(!sband))
return 1;
@@ -1381,7 +1391,15 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
continue;
for (j = 0; j < num_rates; j++) {
- if (bitrates[j].bitrate == own_rate) {
+ int brate;
+ if ((rate_flags & sband->bitrates[j].flags)
+ != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
+ 1 << shift);
+
+ if (brate == own_rate) {
supp_rates |= BIT(j);
if (basic_rates && is_basic)
*basic_rates |= BIT(j);
@@ -2004,18 +2022,56 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
cfg80211_chandef_create(chandef, control_chan, channel_type);
}
+int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates)
+{
+ u32 rate_flags = ieee80211_chandef_rate_flags(chandef);
+ int shift = ieee80211_chandef_get_shift(chandef);
+ struct ieee80211_rate *br;
+ int brate, rate, i, j, count = 0;
+
+ *rates = 0;
+
+ for (i = 0; i < srates_len; i++) {
+ rate = srates[i] & 0x7f;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+ if (brate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
+
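/*
 * Illustrative sketch (not part of the patch): the Supported Rates IE
 * stores rates in 500 kbps units (top bit = "basic"), so the encoding
 * used in the IE builders and the matching in ieee80211_parse_bitrates()
 * reduce to this round trip.  Plain C, example-local names; bitrate is
 * in 100 kbps units and shift is 0/1/2 for 20/10/5 MHz.
 */
#include <stdint.h>

#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static uint8_t example_encode_srate(int bitrate, int shift, int basic)
{
	uint8_t r = (uint8_t)EX_DIV_ROUND_UP(bitrate, (1 << shift) * 5);

	return (uint8_t)(basic ? (r | 0x80) : r);
}

static int example_srate_matches(uint8_t ie_byte, int bitrate, int shift)
{
	return (ie_byte & 0x7f) ==
	       EX_DIV_ROUND_UP(bitrate, (1 << shift) * 5);
}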
int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic,
enum ieee80211_band band)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- int rate;
+ int rate, shift;
u8 i, rates, *pos;
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+ u32 rate_flags;
+ shift = ieee80211_vif_get_shift(&sdata->vif);
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
sband = local->hw.wiphy->bands[band];
- rates = sband->n_bitrates;
+ rates = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ rates++;
+ }
if (rates > 8)
rates = 8;
@@ -2027,10 +2083,15 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
*pos++ = rates;
for (i = 0; i < rates; i++) {
u8 basic = 0;
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
if (need_basic && basic_rates & BIT(i))
basic = 0x80;
rate = sband->bitrates[i].bitrate;
- *pos++ = basic | (u8) (rate / 5);
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = basic | (u8) rate;
}
return 0;
@@ -2042,12 +2103,22 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- int rate;
+ int rate, skip, shift;
u8 i, exrates, *pos;
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+ u32 rate_flags;
+
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ shift = ieee80211_vif_get_shift(&sdata->vif);
sband = local->hw.wiphy->bands[band];
- exrates = sband->n_bitrates;
+ exrates = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ exrates++;
+ }
+
if (exrates > 8)
exrates -= 8;
else
@@ -2060,12 +2131,19 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, exrates + 2);
*pos++ = WLAN_EID_EXT_SUPP_RATES;
*pos++ = exrates;
+ skip = 0;
for (i = 8; i < sband->n_bitrates; i++) {
u8 basic = 0;
+ if ((rate_flags & sband->bitrates[i].flags)
+ != rate_flags)
+ continue;
+ if (skip++ < 8)
+ continue;
if (need_basic && basic_rates & BIT(i))
basic = 0x80;
- rate = sband->bitrates[i].bitrate;
- *pos++ = basic | (u8) (rate / 5);
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = basic | (u8) rate;
}
}
return 0;
@@ -2149,9 +2227,17 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
} else {
struct ieee80211_supported_band *sband;
+ int shift = 0;
+ int bitrate;
+
+ if (status->flag & RX_FLAG_10MHZ)
+ shift = 1;
+ if (status->flag & RX_FLAG_5MHZ)
+ shift = 2;
sband = local->hw.wiphy->bands[status->band];
- ri.legacy = sband->bitrates[status->rate_idx].bitrate;
+ bitrate = sband->bitrates[status->rate_idx].bitrate;
+ ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));
}
rate = cfg80211_calculate_bitrate(&ri);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 56d22ca..c45fc1a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -410,20 +410,6 @@ config NF_NAT_TFTP
endif # NF_CONNTRACK
-# transparent proxy support
-config NETFILTER_TPROXY
- tristate "Transparent proxying support"
- depends on IP_NF_MANGLE
- depends on NETFILTER_ADVANCED
- help
- This option enables transparent proxying support, that is,
- support for handling non-locally bound IPv4 TCP and UDP sockets.
- For it to work you will have to configure certain iptables rules
- and use policy routing. For more information on how to set it up
- see Documentation/networking/tproxy.txt.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
default m if NETFILTER_ADVANCED=n
@@ -720,10 +706,10 @@ config NETFILTER_XT_TARGET_TEE
this clone be rerouted to another nexthop.
config NETFILTER_XT_TARGET_TPROXY
- tristate '"TPROXY" target support'
- depends on NETFILTER_TPROXY
+ tristate '"TPROXY" target transparent proxying support'
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
+ depends on IP_NF_MANGLE
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
@@ -731,6 +717,9 @@ config NETFILTER_XT_TARGET_TPROXY
REDIRECT. It can only be used in the mangle table and is useful
to redirect traffic to a transparent proxy. It does _not_ depend
on Netfilter connection tracking and NAT, unlike REDIRECT.
+ For it to work you will have to configure certain iptables rules
+ and use policy routing. For more information on how to set it up
+ see Documentation/networking/tproxy.txt.
To compile it as a module, choose M here. If unsure, say N.
@@ -1180,7 +1169,6 @@ config NETFILTER_XT_MATCH_SCTP
config NETFILTER_XT_MATCH_SOCKET
tristate '"socket" match support'
- depends on NETFILTER_TPROXY
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
depends on !NF_CONNTRACK || NF_CONNTRACK
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index a1abf87..ebfa7dc 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -61,9 +61,6 @@ obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
-# transparent proxy support
-obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
-
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 2217363..593b16e 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -234,12 +234,13 @@ EXPORT_SYMBOL(skb_make_writable);
/* This does not belong here, but locally generated errors need it if connection
tracking in use: without this, connection may not be in hash table, and hence
manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
+void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
+ __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
-void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
+void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
- void (*attach)(struct sk_buff *, struct sk_buff *);
+ void (*attach)(struct sk_buff *, const struct sk_buff *);
if (skb->nfct) {
rcu_read_lock();
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 3cd85b2..5199448 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -414,7 +414,7 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
spin_lock_bh(&svc->sched_lock);
tbl->dead = 1;
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
ip_vs_lblcr_free(en);
}
@@ -440,7 +440,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
struct ip_vs_lblcr_entry *en;
struct hlist_node *next;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
@@ -495,7 +495,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
if (goal > tbl->max_size/2)
goal = tbl->max_size/2;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
@@ -536,7 +536,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
/*
* Initialize the hash buckets
*/
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
INIT_HLIST_HEAD(&tbl->bucket[i]);
}
tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 3c0da87..23e596e 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -66,15 +66,7 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
unsigned int sctphoff)
{
- __u32 crc32;
- struct sk_buff *iter;
-
- crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
- skb_walk_frags(skb, iter)
- crc32 = sctp_update_cksum((u8 *) iter->data,
- skb_headlen(iter), crc32);
- sctph->checksum = sctp_end_cksum(crc32);
-
+ sctph->checksum = sctp_compute_cksum(skb, sctphoff);
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -151,10 +143,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
unsigned int sctphoff;
struct sctphdr *sh, _sctph;
- struct sk_buff *iter;
- __le32 cmp;
- __le32 val;
- __u32 tmp;
+ __le32 cmp, val;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
@@ -168,13 +157,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
return 0;
cmp = sh->checksum;
-
- tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb));
- skb_walk_frags(skb, iter)
- tmp = sctp_update_cksum((__u8 *) iter->data,
- skb_headlen(iter), tmp);
-
- val = sctp_end_cksum(tmp);
+ val = sctp_compute_cksum(skb, sctphoff);
if (val != cmp) {
/* CRC failure, dump it. */
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index f16c027..3588fae 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -269,14 +269,20 @@ ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
switch (iph->protocol) {
case IPPROTO_TCP:
th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+ if (unlikely(th == NULL))
+ return 0;
port = th->source;
break;
case IPPROTO_UDP:
uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
+ if (unlikely(uh == NULL))
+ return 0;
port = uh->source;
break;
case IPPROTO_SCTP:
sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
+ if (unlikely(sh == NULL))
+ return 0;
port = sh->source;
break;
default:
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0283bae..da6f178 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -238,7 +238,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
nf_conntrack_free(ct);
}
-void nf_ct_delete_from_lists(struct nf_conn *ct)
+static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
@@ -253,7 +253,6 @@ void nf_ct_delete_from_lists(struct nf_conn *ct)
&net->ct.dying);
spin_unlock_bh(&nf_conntrack_lock);
}
-EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
static void death_by_event(unsigned long ul_conntrack)
{
@@ -275,7 +274,7 @@ static void death_by_event(unsigned long ul_conntrack)
nf_ct_put(ct);
}
-void nf_ct_dying_timeout(struct nf_conn *ct)
+static void nf_ct_dying_timeout(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
@@ -288,27 +287,33 @@ void nf_ct_dying_timeout(struct nf_conn *ct)
(prandom_u32() % net->ct.sysctl_events_retry_timeout);
add_timer(&ecache->timeout);
}
-EXPORT_SYMBOL_GPL(nf_ct_dying_timeout);
-static void death_by_timeout(unsigned long ul_conntrack)
+bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
- struct nf_conn *ct = (void *)ul_conntrack;
struct nf_conn_tstamp *tstamp;
tstamp = nf_conn_tstamp_find(ct);
if (tstamp && tstamp->stop == 0)
tstamp->stop = ktime_to_ns(ktime_get_real());
- if (!test_bit(IPS_DYING_BIT, &ct->status) &&
- unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
+ if (!nf_ct_is_dying(ct) &&
+ unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct,
+ portid, report) < 0)) {
/* destroy event was not delivered */
nf_ct_delete_from_lists(ct);
nf_ct_dying_timeout(ct);
- return;
+ return false;
}
set_bit(IPS_DYING_BIT, &ct->status);
nf_ct_delete_from_lists(ct);
nf_ct_put(ct);
+ return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete);
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+ nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}
/*
@@ -643,10 +648,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
return dropped;
if (del_timer(&ct->timeout)) {
- death_by_timeout((unsigned long)ct);
- /* Check if we indeed killed this entry. Reliable event
- delivery may have inserted it into the dying list. */
- if (test_bit(IPS_DYING_BIT, &ct->status)) {
+ if (nf_ct_delete(ct, 0, 0)) {
dropped = 1;
NF_CT_STAT_INC_ATOMIC(net, early_drop);
}
@@ -1192,7 +1194,7 @@ EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
-static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
+static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -1244,7 +1246,7 @@ found:
void nf_ct_iterate_cleanup(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
- void *data)
+ void *data, u32 portid, int report)
{
struct nf_conn *ct;
unsigned int bucket = 0;
@@ -1252,7 +1254,8 @@ void nf_ct_iterate_cleanup(struct net *net,
while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
/* Time to push up daisies... */
if (del_timer(&ct->timeout))
- death_by_timeout((unsigned long)ct);
+ nf_ct_delete(ct, portid, report);
+
/* ... else the timer will get him soon. */
nf_ct_put(ct);
@@ -1260,30 +1263,6 @@ void nf_ct_iterate_cleanup(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
-struct __nf_ct_flush_report {
- u32 portid;
- int report;
-};
-
-static int kill_report(struct nf_conn *i, void *data)
-{
- struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
- struct nf_conn_tstamp *tstamp;
-
- tstamp = nf_conn_tstamp_find(i);
- if (tstamp && tstamp->stop == 0)
- tstamp->stop = ktime_to_ns(ktime_get_real());
-
- /* If we fail to deliver the event, death_by_timeout() will retry */
- if (nf_conntrack_event_report(IPCT_DESTROY, i,
- fr->portid, fr->report) < 0)
- return 1;
-
- /* Avoid the delivery of the destroy event in death_by_timeout(). */
- set_bit(IPS_DYING_BIT, &i->status);
- return 1;
-}
-
static int kill_all(struct nf_conn *i, void *data)
{
return 1;
@@ -1301,11 +1280,7 @@ EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
{
- struct __nf_ct_flush_report fr = {
- .portid = portid,
- .report = report,
- };
- nf_ct_iterate_cleanup(net, kill_report, &fr);
+ nf_ct_iterate_cleanup(net, kill_all, NULL, portid, report);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
@@ -1386,7 +1361,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
- nf_ct_iterate_cleanup(net, kill_all, NULL);
+ nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
nf_ct_release_dying_list(net);
if (atomic_read(&net->ct.count) != 0)
busy = 1;
@@ -1692,7 +1667,7 @@ err_stat:
return ret;
}
-s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index 355d2ef..bb53f12 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -8,12 +8,8 @@
* published by the Free Software Foundation.
*/
-#include <linux/ctype.h>
#include <linux/export.h>
-#include <linux/jhash.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_labels.h>
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index edc410e..fa61fea 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1038,21 +1038,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
}
}
- if (del_timer(&ct->timeout)) {
- if (nf_conntrack_event_report(IPCT_DESTROY, ct,
- NETLINK_CB(skb).portid,
- nlmsg_report(nlh)) < 0) {
- nf_ct_delete_from_lists(ct);
- /* we failed to report the event, try later */
- nf_ct_dying_timeout(ct);
- nf_ct_put(ct);
- return 0;
- }
- /* death_by_timeout would report the event again */
- set_bit(IPS_DYING_BIT, &ct->status);
- nf_ct_delete_from_lists(ct);
- nf_ct_put(ct);
- }
+ if (del_timer(&ct->timeout))
+ nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
+
nf_ct_put(ct);
return 0;
@@ -1999,6 +1987,27 @@ out:
return err == -EAGAIN ? -ENOBUFS : err;
}
+static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
+ [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
+ [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
+ [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
+ [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
+ [CTA_EXPECT_ID] = { .type = NLA_U32 },
+ [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
+ .len = NF_CT_HELPER_NAME_LEN - 1 },
+ [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
+ [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
+ [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
+ [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
+ [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
+};
+
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask);
+
#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
static size_t
ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
@@ -2139,10 +2148,69 @@ ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
return ret;
}
+static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
+ const struct nf_conn *ct,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask)
+{
+ int err;
+
+ err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
+ nf_ct_l3num(ct));
+ if (err < 0)
+ return err;
+
+ return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
+ nf_ct_l3num(ct));
+}
+
+static int
+ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
+ u32 portid, u32 report)
+{
+ struct nlattr *cda[CTA_EXPECT_MAX+1];
+ struct nf_conntrack_tuple tuple, mask;
+ struct nf_conntrack_helper *helper;
+ struct nf_conntrack_expect *exp;
+ int err;
+
+ err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy);
+ if (err < 0)
+ return err;
+
+ err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
+ ct, &tuple, &mask);
+ if (err < 0)
+ return err;
+
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
+ if (helper == NULL)
+ return -EOPNOTSUPP;
+ }
+
+ exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
+ helper, &tuple, &mask);
+ if (IS_ERR(exp))
+ return PTR_ERR(exp);
+
+ err = nf_ct_expect_related_report(exp, portid, report);
+ if (err < 0) {
+ nf_ct_expect_put(exp);
+ return err;
+ }
+
+ return 0;
+}
+
static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
.build_size = ctnetlink_nfqueue_build_size,
.build = ctnetlink_nfqueue_build,
.parse = ctnetlink_nfqueue_parse,
+ .attach_expect = ctnetlink_nfqueue_attach_expect,
};
#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
@@ -2510,21 +2578,6 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
return err;
}
-static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
- [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
- [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
- [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
- [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
- [CTA_EXPECT_ID] = { .type = NLA_U32 },
- [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
- .len = NF_CT_HELPER_NAME_LEN - 1 },
- [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
- [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
- [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
- [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
- [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
-};
-
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
@@ -2747,76 +2800,26 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
#endif
}
-static int
-ctnetlink_create_expect(struct net *net, u16 zone,
- const struct nlattr * const cda[],
- u_int8_t u3,
- u32 portid, int report)
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask)
{
- struct nf_conntrack_tuple tuple, mask, master_tuple;
- struct nf_conntrack_tuple_hash *h = NULL;
+ u_int32_t class = 0;
struct nf_conntrack_expect *exp;
- struct nf_conn *ct;
struct nf_conn_help *help;
- struct nf_conntrack_helper *helper = NULL;
- u_int32_t class = 0;
- int err = 0;
-
- /* caller guarantees that those three CTA_EXPECT_* exist */
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
- if (err < 0)
- return err;
- err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
- if (err < 0)
- return err;
- err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
- if (err < 0)
- return err;
-
- /* Look for master conntrack of this expectation */
- h = nf_conntrack_find_get(net, zone, &master_tuple);
- if (!h)
- return -ENOENT;
- ct = nf_ct_tuplehash_to_ctrack(h);
-
- /* Look for helper of this expectation */
- if (cda[CTA_EXPECT_HELP_NAME]) {
- const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
-
- helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
- nf_ct_protonum(ct));
- if (helper == NULL) {
-#ifdef CONFIG_MODULES
- if (request_module("nfct-helper-%s", helpname) < 0) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- helper = __nf_conntrack_helper_find(helpname,
- nf_ct_l3num(ct),
- nf_ct_protonum(ct));
- if (helper) {
- err = -EAGAIN;
- goto out;
- }
-#endif
- err = -EOPNOTSUPP;
- goto out;
- }
- }
+ int err;
if (cda[CTA_EXPECT_CLASS] && helper) {
class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
- if (class > helper->expect_class_max) {
- err = -EINVAL;
- goto out;
- }
+ if (class > helper->expect_class_max)
+ return ERR_PTR(-EINVAL);
}
exp = nf_ct_expect_alloc(ct);
- if (!exp) {
- err = -ENOMEM;
- goto out;
- }
+ if (!exp)
+ return ERR_PTR(-ENOMEM);
+
help = nfct_help(ct);
if (!help) {
if (!cda[CTA_EXPECT_TIMEOUT]) {
@@ -2854,21 +2857,89 @@ ctnetlink_create_expect(struct net *net, u16 zone,
exp->class = class;
exp->master = ct;
exp->helper = helper;
- memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
- memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
- exp->mask.src.u.all = mask.src.u.all;
+ exp->tuple = *tuple;
+ exp->mask.src.u3 = mask->src.u3;
+ exp->mask.src.u.all = mask->src.u.all;
if (cda[CTA_EXPECT_NAT]) {
err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
- exp, u3);
+ exp, nf_ct_l3num(ct));
if (err < 0)
goto err_out;
}
- err = nf_ct_expect_related_report(exp, portid, report);
+ return exp;
err_out:
nf_ct_expect_put(exp);
-out:
- nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
+ return ERR_PTR(err);
+}
+
+static int
+ctnetlink_create_expect(struct net *net, u16 zone,
+ const struct nlattr * const cda[],
+ u_int8_t u3, u32 portid, int report)
+{
+ struct nf_conntrack_tuple tuple, mask, master_tuple;
+ struct nf_conntrack_tuple_hash *h = NULL;
+ struct nf_conntrack_helper *helper = NULL;
+ struct nf_conntrack_expect *exp;
+ struct nf_conn *ct;
+ int err;
+
+ /* caller guarantees that those three CTA_EXPECT_* exist */
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ if (err < 0)
+ return err;
+ err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+ if (err < 0)
+ return err;
+ err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+ if (err < 0)
+ return err;
+
+ /* Look for master conntrack of this expectation */
+ h = nf_conntrack_find_get(net, zone, &master_tuple);
+ if (!h)
+ return -ENOENT;
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, u3,
+ nf_ct_protonum(ct));
+ if (helper == NULL) {
+#ifdef CONFIG_MODULES
+ if (request_module("nfct-helper-%s", helpname) < 0) {
+ err = -EOPNOTSUPP;
+ goto err_ct;
+ }
+ helper = __nf_conntrack_helper_find(helpname, u3,
+ nf_ct_protonum(ct));
+ if (helper) {
+ err = -EAGAIN;
+ goto err_ct;
+ }
+#endif
+ err = -EOPNOTSUPP;
+ goto err_ct;
+ }
+ }
+
+ exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
+ if (IS_ERR(exp)) {
+ err = PTR_ERR(exp);
+ goto err_ct;
+ }
+
+ err = nf_ct_expect_related_report(exp, portid, report);
+ if (err < 0)
+ goto err_exp;
+
+ return 0;
+err_exp:
+ nf_ct_expect_put(exp);
+err_ct:
+ nf_ct_put(ct);
return err;
}
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 0ab9636..ce30041 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -281,7 +281,7 @@ void nf_ct_l3proto_pernet_unregister(struct net *net,
nf_ct_l3proto_unregister_sysctl(net, proto);
/* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l3proto, proto);
+ nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
@@ -476,7 +476,7 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
/* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
+ nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 2f80107..d224e00 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -496,7 +496,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
}
#ifdef CONFIG_NF_NAT_NEEDED
-static inline s16 nat_offset(const struct nf_conn *ct,
+static inline s32 nat_offset(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq)
{
@@ -525,7 +525,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
- s16 receiver_offset;
+ s32 receiver_offset;
bool res, in_recv_win;
/*
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 038eee5..6ff8083 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -497,7 +497,7 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
@@ -511,7 +511,7 @@ static void nf_nat_l3proto_clean(u8 l3proto)
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
@@ -749,7 +749,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
{
struct nf_nat_proto_clean clean = {};
- nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
synchronize_rcu();
nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
}
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index 85e20a9..46b9baa 100644
--- a/net/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -30,8 +30,6 @@
pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
x->offset_before, x->offset_after, x->correction_pos);
-static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
-
/* Setup TCP sequence correction given this change at this sequence */
static inline void
adjust_tcp_sequence(u32 seq,
@@ -49,7 +47,7 @@ adjust_tcp_sequence(u32 seq,
pr_debug("adjust_tcp_sequence: Seq_offset before: ");
DUMP_OFFSET(this_way);
- spin_lock_bh(&nf_nat_seqofs_lock);
+ spin_lock_bh(&ct->lock);
/* SYN adjust. If it's uninitialized, or this is after last
* correction, record it: we don't handle more than one
@@ -61,31 +59,26 @@ adjust_tcp_sequence(u32 seq,
this_way->offset_before = this_way->offset_after;
this_way->offset_after += sizediff;
}
- spin_unlock_bh(&nf_nat_seqofs_lock);
+ spin_unlock_bh(&ct->lock);
pr_debug("adjust_tcp_sequence: Seq_offset after: ");
DUMP_OFFSET(this_way);
}
-/* Get the offset value, for conntrack */
-s16 nf_nat_get_offset(const struct nf_conn *ct,
+/* Get the offset value, for conntrack. Caller must have the conntrack locked */
+s32 nf_nat_get_offset(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq)
{
struct nf_conn_nat *nat = nfct_nat(ct);
struct nf_nat_seq *this_way;
- s16 offset;
if (!nat)
return 0;
this_way = &nat->seq[dir];
- spin_lock_bh(&nf_nat_seqofs_lock);
- offset = after(seq, this_way->correction_pos)
+ return after(seq, this_way->correction_pos)
? this_way->offset_after : this_way->offset_before;
- spin_unlock_bh(&nf_nat_seqofs_lock);
-
- return offset;
}
/* Frobs data inside this packet, which is linear. */
@@ -143,7 +136,7 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
}
void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- __be32 seq, s16 off)
+ __be32 seq, s32 off)
{
if (!off)
return;
@@ -370,9 +363,10 @@ nf_nat_seq_adjust(struct sk_buff *skb,
struct tcphdr *tcph;
int dir;
__be32 newseq, newack;
- s16 seqoff, ackoff;
+ s32 seqoff, ackoff;
struct nf_conn_nat *nat = nfct_nat(ct);
struct nf_nat_seq *this_way, *other_way;
+ int res;
dir = CTINFO2DIR(ctinfo);
@@ -383,6 +377,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
return 0;
tcph = (void *)skb->data + protoff;
+ spin_lock_bh(&ct->lock);
if (after(ntohl(tcph->seq), this_way->correction_pos))
seqoff = this_way->offset_after;
else
@@ -407,7 +402,10 @@ nf_nat_seq_adjust(struct sk_buff *skb,
tcph->seq = newseq;
tcph->ack_seq = newack;
- return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+ res = nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+ spin_unlock_bh(&ct->lock);
+
+ return res;
}
/* Setup NAT on this expected conntrack so it follows master. */
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
index 396e55d..754536f 100644
--- a/net/netfilter/nf_nat_proto_sctp.c
+++ b/net/netfilter/nf_nat_proto_sctp.c
@@ -34,9 +34,7 @@ sctp_manip_pkt(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
- struct sk_buff *frag;
sctp_sctphdr_t *hdr;
- __u32 crc32;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;
@@ -51,11 +49,7 @@ sctp_manip_pkt(struct sk_buff *skb,
hdr->dest = tuple->dst.u.sctp.port;
}
- crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
- skb_walk_frags(skb, frag)
- crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
- crc32);
- hdr->checksum = sctp_end_cksum(crc32);
+ hdr->checksum = sctp_compute_cksum(skb, hdroff);
return true;
}
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
deleted file mode 100644
index 474d621..0000000
--- a/net/netfilter/nf_tproxy_core.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Transparent proxy support for Linux/iptables
- *
- * Copyright (c) 2006-2007 BalaBit IT Ltd.
- * Author: Balazs Scheidler, Krisztian Kovacs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/net.h>
-#include <linux/if.h>
-#include <linux/netdevice.h>
-#include <net/udp.h>
-#include <net/netfilter/nf_tproxy_core.h>
-
-
-static void
-nf_tproxy_destructor(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
-
- skb->sk = NULL;
- skb->destructor = NULL;
-
- if (sk)
- sock_put(sk);
-}
-
-/* consumes sk */
-void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
-{
- /* assigning tw sockets complicates things; most
- * skb->sk->X checks would have to test sk->sk_state first */
- if (sk->sk_state == TCP_TIME_WAIT) {
- inet_twsk_put(inet_twsk(sk));
- return;
- }
-
- skb_orphan(skb);
- skb->sk = sk;
- skb->destructor = nf_tproxy_destructor;
-}
-EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
-
-static int __init nf_tproxy_init(void)
-{
- pr_info("NF_TPROXY: Transparent proxy support initialized, version 4.1.0\n");
- pr_info("NF_TPROXY: Copyright (c) 2006-2007 BalaBit IT Ltd.\n");
- return 0;
-}
-
-module_init(nf_tproxy_init);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Krisztian Kovacs");
-MODULE_DESCRIPTION("Transparent proxy support core routines");
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 8a703c3..95a98c8 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -862,6 +862,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
[NFQA_MARK] = { .type = NLA_U32 },
[NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
[NFQA_CT] = { .type = NLA_UNSPEC },
+ [NFQA_EXP] = { .type = NLA_UNSPEC },
};
static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -990,9 +991,14 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (entry == NULL)
return -ENOENT;
- rcu_read_lock();
- if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
+ if (nfqa[NFQA_CT]) {
ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
+ if (ct && nfqa[NFQA_EXP]) {
+ nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
+ NETLINK_CB(skb).portid,
+ nlmsg_report(nlh));
+ }
+ }
if (nfqa[NFQA_PAYLOAD]) {
u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
@@ -1005,7 +1011,6 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (ct)
nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
}
- rcu_read_unlock();
if (nfqa[NFQA_MARK])
entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
index ab61d66..be89303 100644
--- a/net/netfilter/nfnetlink_queue_ct.c
+++ b/net/netfilter/nfnetlink_queue_ct.c
@@ -96,3 +96,18 @@ void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
if ((ct->status & IPS_NAT_MASK) && diff)
nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
}
+
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report)
+{
+ struct nfq_ct_hook *nfq_ct;
+
+ if (nf_ct_is_untracked(ct))
+ return 0;
+
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return -EOPNOTSUPP;
+
+ return nfq_ct->attach_expect(attr, ct, portid, report);
+}
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index d7f1953..5d8a3a3 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -15,7 +15,9 @@
#include <linux/ip.h>
#include <net/checksum.h>
#include <net/udp.h>
+#include <net/tcp.h>
#include <net/inet_sock.h>
+#include <net/inet_hashtables.h>
#include <linux/inetdevice.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
@@ -26,13 +28,18 @@
#define XT_TPROXY_HAVE_IPV6 1
#include <net/if_inet6.h>
#include <net/addrconf.h>
+#include <net/inet6_hashtables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
-#include <net/netfilter/nf_tproxy_core.h>
#include <linux/netfilter/xt_TPROXY.h>
+enum nf_tproxy_lookup_t {
+ NFT_LOOKUP_LISTENER,
+ NFT_LOOKUP_ESTABLISHED,
+};
+
static bool tproxy_sk_is_transparent(struct sock *sk)
{
if (sk->sk_state != TCP_TIME_WAIT) {
@@ -68,6 +75,157 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
return laddr ? laddr : daddr;
}
+/*
+ * This is used when the user wants to intercept a connection matching
+ * an explicit iptables rule. In this case the sockets are assumed
+ * matching in preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ * _packet_ tuple, it is returned, assuming the redirection
+ * already took place and we process a packet belonging to an
+ * established connection
+ *
+ * - match: if there's a listening socket matching the redirection
+ * (e.g. on-port & on-ip of the connection), it is returned,
+ * regardless if it was bound to 0.0.0.0 or an explicit
+ * address. The reasoning is that if there's an explicit rule, it
+ * does not really matter if the listener is bound to an interface
+ * or to 0. The user already stated that he wants redirection
+ * (since he added the rule).
+ *
+ * Please note that there's an overlap between what a TPROXY target
+ * and a socket match will match. Normally if you have both rules the
+ * "socket" match will be the first one, effectively all packets
+ * belonging to established connections going through that one.
+ */
+static inline struct sock *
+nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type)
+{
+ struct sock *sk;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ switch (lookup_type) {
+ case NFT_LOOKUP_LISTENER:
+ sk = inet_lookup_listener(net, &tcp_hashinfo,
+ saddr, sport,
+ daddr, dport,
+ in->ifindex);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ break;
+ case NFT_LOOKUP_ESTABLISHED:
+ sk = inet_lookup_established(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case IPPROTO_UDP:
+ sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ if (sk) {
+ int connected = (sk->sk_state == TCP_ESTABLISHED);
+ int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+ (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+ sock_put(sk);
+ sk = NULL;
+ }
+ }
+ break;
+ default:
+ WARN_ON(1);
+ sk = NULL;
+ }
+
+ pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
+ protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
+
+ return sk;
+}
+
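/*
 * Illustrative sketch (not part of the patch): the preference order
 * described in the comment above, written out as a small wrapper
 * around the lookup helper added here.  laddr/lport stand for the
 * redirect address and port from the TPROXY rule; the time-wait and
 * socket-state handling done by the real target is omitted.
 */
static struct sock *
example_tproxy_lookup_v4(struct net *net, u8 protocol,
			 __be32 saddr, __be32 daddr,
			 __be16 sport, __be16 dport,
			 __be32 laddr, __be16 lport,
			 const struct net_device *in)
{
	struct sock *sk;

	/* 1) a fully established connection matching the packet tuple */
	sk = nf_tproxy_get_sock_v4(net, protocol, saddr, daddr,
				   sport, dport, in,
				   NFT_LOOKUP_ESTABLISHED);
	if (sk)
		return sk;

	/* 2) otherwise a listener on the redirect address and port */
	return nf_tproxy_get_sock_v4(net, protocol, saddr, laddr,
				     sport, lport, in,
				     NFT_LOOKUP_LISTENER);
}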
+#ifdef XT_TPROXY_HAVE_IPV6
+static inline struct sock *
+nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type)
+{
+ struct sock *sk;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ switch (lookup_type) {
+ case NFT_LOOKUP_LISTENER:
+ sk = inet6_lookup_listener(net, &tcp_hashinfo,
+ saddr, sport,
+ daddr, ntohs(dport),
+ in->ifindex);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ break;
+ case NFT_LOOKUP_ESTABLISHED:
+ sk = __inet6_lookup_established(net, &tcp_hashinfo,
+ saddr, sport, daddr, ntohs(dport),
+ in->ifindex);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case IPPROTO_UDP:
+ sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ if (sk) {
+ int connected = (sk->sk_state == TCP_ESTABLISHED);
+ int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+ (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+ sock_put(sk);
+ sk = NULL;
+ }
+ }
+ break;
+ default:
+ WARN_ON(1);
+ sk = NULL;
+ }
+
+ pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
+ protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
+
+ return sk;
+}
+#endif
+
/**
* tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
* @skb: The skb being processed.
@@ -117,6 +275,15 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
return sk;
}
+/* assign a socket to the skb -- consumes sk */
+static void
+nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+}
+
static unsigned int
tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
u_int32_t mark_mask, u_int32_t mark_value)
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 68ff29f..fab6eea 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -202,7 +202,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
return -EINVAL;
}
if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
- pr_err("ipv6 PROHIBT (THROW, NAT ..) matching not supported\n");
+ pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
return -EINVAL;
}
if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 20b1591..06df2b9 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -19,12 +19,12 @@
#include <net/icmp.h>
#include <net/sock.h>
#include <net/inet_sock.h>
-#include <net/netfilter/nf_tproxy_core.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#define XT_SOCKET_HAVE_IPV6 1
#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/inet6_hashtables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
@@ -101,6 +101,43 @@ extract_icmp4_fields(const struct sk_buff *skb,
return 0;
}
+/* "socket" match based redirection (no specific rule)
+ * ===================================================
+ *
+ * There are connections with dynamic endpoints (e.g. the FTP data
+ * connection) for which the user is unable to add explicit rules.
+ * These are taken care of by a generic "socket" rule. It is assumed
+ * that the proxy application is trusted to open such connections
+ * without an explicit iptables rule (except, of course, the generic
+ * 'socket' rule). In this case the following sockets are matched
+ * in preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ * _packet_ tuple
+ *
+ * - match: if there's a non-zero bound listener (possibly with a
+ * non-local address). We don't accept zero-bound listeners, since
+ * local services could then intercept traffic going through the
+ * box.
+ */
+static struct sock *
+xt_socket_get_sock_v4(struct net *net, const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in)
+{
+ switch (protocol) {
+ case IPPROTO_TCP:
+ return __inet_lookup(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ case IPPROTO_UDP:
+ return udp4_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ }
+ return NULL;
+}
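The preference order spelled out in the comment block above reduces to a simple per-socket test: a fully established connection matching the packet tuple is always acceptable, while a listener is acceptable only when bound to a specific local address. A minimal sketch of that test follows (a hypothetical helper for illustration only; the real socket_match() additionally checks the transparent flag and handles wildcard binding explicitly):

static bool xt_socket_sk_acceptable(const struct sock *sk)
{
	/* A fully established connection matching the packet tuple
	 * always matches.
	 */
	if (sk->sk_state == TCP_ESTABLISHED)
		return true;

	/* Otherwise accept only listeners bound to a specific
	 * (non-zero) local address, so purely local services cannot
	 * intercept traffic that is merely routed through the box.
	 */
	return inet_sk(sk)->inet_rcv_saddr != 0;
}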
+
static bool
socket_match(const struct sk_buff *skb, struct xt_action_param *par,
const struct xt_socket_mtinfo1 *info)
@@ -156,9 +193,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
#endif
if (!sk)
- sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
+ sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol,
saddr, daddr, sport, dport,
- par->in, NFT_LOOKUP_ANY);
+ par->in);
if (sk) {
bool wildcard;
bool transparent = true;
@@ -265,6 +302,25 @@ extract_icmp6_fields(const struct sk_buff *skb,
return 0;
}
+static struct sock *
+xt_socket_get_sock_v6(struct net *net, const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in)
+{
+ switch (protocol) {
+ case IPPROTO_TCP:
+ return inet6_lookup(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ case IPPROTO_UDP:
+ return udp6_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ }
+
+ return NULL;
+}
+
static bool
socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
@@ -302,9 +358,9 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
}
if (!sk)
- sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
+ sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto,
saddr, daddr, sport, dport,
- par->in, NFT_LOOKUP_ANY);
+ par->in);
if (sk) {
bool wildcard;
bool transparent = true;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0c61b59..a17dda1 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -294,14 +294,14 @@ static void **alloc_pg_vec(struct netlink_sock *nlk,
{
unsigned int block_nr = req->nm_block_nr;
unsigned int i;
- void **pg_vec, *ptr;
+ void **pg_vec;
pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
if (pg_vec == NULL)
return NULL;
for (i = 0; i < block_nr; i++) {
- pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
+ pg_vec[i] = alloc_one_pg_vec_page(order);
if (pg_vec[i] == NULL)
goto err1;
}
@@ -595,7 +595,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
* for dumps is performed here. A dump is allowed to continue
* if at least half the ring is unused.
*/
- while (nlk->cb != NULL && netlink_dump_space(nlk)) {
+ while (nlk->cb_running && netlink_dump_space(nlk)) {
err = netlink_dump(sk);
if (err < 0) {
sk->sk_err = err;
@@ -802,18 +802,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
#endif /* CONFIG_NETLINK_MMAP */
-static void netlink_destroy_callback(struct netlink_callback *cb)
-{
- kfree_skb(cb->skb);
- kfree(cb);
-}
-
-static void netlink_consume_callback(struct netlink_callback *cb)
-{
- consume_skb(cb->skb);
- kfree(cb);
-}
-
static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
@@ -872,12 +860,12 @@ static void netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
- if (nlk->cb) {
- if (nlk->cb->done)
- nlk->cb->done(nlk->cb);
+ if (nlk->cb_running) {
+ if (nlk->cb.done)
+ nlk->cb.done(&nlk->cb);
- module_put(nlk->cb->module);
- netlink_destroy_callback(nlk->cb);
+ module_put(nlk->cb.module);
+ kfree_skb(nlk->cb.skb);
}
skb_queue_purge(&sk->sk_receive_queue);
@@ -2350,7 +2338,8 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
skb_free_datagram(sk, skb);
- if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ if (nlk->cb_running &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
sk->sk_err = ret;
@@ -2566,13 +2555,12 @@ static int netlink_dump(struct sock *sk)
int alloc_size;
mutex_lock(nlk->cb_mutex);
-
- cb = nlk->cb;
- if (cb == NULL) {
+ if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
}
+ cb = &nlk->cb;
alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
if (!netlink_rx_is_mmaped(sk) &&
@@ -2610,11 +2598,11 @@ static int netlink_dump(struct sock *sk)
if (cb->done)
cb->done(cb);
- nlk->cb = NULL;
- mutex_unlock(nlk->cb_mutex);
+ nlk->cb_running = false;
+ mutex_unlock(nlk->cb_mutex);
module_put(cb->module);
- netlink_consume_callback(cb);
+ consume_skb(cb->skb);
return 0;
errout_skb:
@@ -2632,59 +2620,51 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct netlink_sock *nlk;
int ret;
- cb = kzalloc(sizeof(*cb), GFP_KERNEL);
- if (cb == NULL)
- return -ENOBUFS;
-
/* Memory mapped dump requests need to be copied to avoid looping
* on the pending state in netlink_mmap_sendmsg() while the CB holds
* a reference to the skb.
*/
if (netlink_skb_is_mmaped(skb)) {
skb = skb_copy(skb, GFP_KERNEL);
- if (skb == NULL) {
- kfree(cb);
+ if (skb == NULL)
return -ENOBUFS;
- }
} else
atomic_inc(&skb->users);
- cb->dump = control->dump;
- cb->done = control->done;
- cb->nlh = nlh;
- cb->data = control->data;
- cb->module = control->module;
- cb->min_dump_alloc = control->min_dump_alloc;
- cb->skb = skb;
-
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
if (sk == NULL) {
- netlink_destroy_callback(cb);
- return -ECONNREFUSED;
+ ret = -ECONNREFUSED;
+ goto error_free;
}
- nlk = nlk_sk(sk);
+ nlk = nlk_sk(sk);
mutex_lock(nlk->cb_mutex);
/* A dump is in progress... */
- if (nlk->cb) {
- mutex_unlock(nlk->cb_mutex);
- netlink_destroy_callback(cb);
+ if (nlk->cb_running) {
ret = -EBUSY;
- goto out;
+ goto error_unlock;
}
/* add reference of module which cb->dump belongs to */
- if (!try_module_get(cb->module)) {
- mutex_unlock(nlk->cb_mutex);
- netlink_destroy_callback(cb);
+ if (!try_module_get(control->module)) {
ret = -EPROTONOSUPPORT;
- goto out;
+ goto error_unlock;
}
- nlk->cb = cb;
+ cb = &nlk->cb;
+ memset(cb, 0, sizeof(*cb));
+ cb->dump = control->dump;
+ cb->done = control->done;
+ cb->nlh = nlh;
+ cb->data = control->data;
+ cb->module = control->module;
+ cb->min_dump_alloc = control->min_dump_alloc;
+ cb->skb = skb;
+
+ nlk->cb_running = true;
+
mutex_unlock(nlk->cb_mutex);
ret = netlink_dump(sk);
-out:
sock_put(sk);
if (ret)
@@ -2694,6 +2674,13 @@ out:
* signal not to send ACK even if it was requested.
*/
return -EINTR;
+
+error_unlock:
+ sock_put(sk);
+ mutex_unlock(nlk->cb_mutex);
+error_free:
+ kfree_skb(skb);
+ return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
@@ -2916,14 +2903,14 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
- seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
+ seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
s,
s->sk_protocol,
nlk->portid,
nlk->groups ? (u32)nlk->groups[0] : 0,
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
- nlk->cb,
+ nlk->cb_running,
atomic_read(&s->sk_refcnt),
atomic_read(&s->sk_drops),
sock_i_ino(s)
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index eaa88d1..acbd774 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -32,7 +32,8 @@ struct netlink_sock {
unsigned long *groups;
unsigned long state;
wait_queue_head_t wait;
- struct netlink_callback *cb;
+ bool cb_running;
+ struct netlink_callback cb;
struct mutex *cb_mutex;
struct mutex cb_def_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 27ee56b..bed30e6 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -40,3 +40,16 @@ config OPENVSWITCH_GRE
Say N to exclude this support and reduce the binary size.
If unsure, say Y.
+
+config OPENVSWITCH_VXLAN
+ bool "Open vSwitch VXLAN tunneling support"
+ depends on INET
+ depends on OPENVSWITCH
+ depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m)
+ default y
+ ---help---
+ If you say Y here, then Open vSwitch will be able to create vxlan vports.
+
+ Say N to exclude this support and reduce the binary size.
+
+ If unsure, say Y.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 01bddb2..82e4ee5 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -13,3 +13,7 @@ openvswitch-y := \
vport-gre.o \
vport-internal_dev.o \
vport-netdev.o
+
+ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
+openvswitch-y += vport-vxlan.o
+endif
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
new file mode 100644
index 0000000..36848bd
--- /dev/null
+++ b/net/openvswitch/vport-vxlan.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2013 Nicira, Inc.
+ * Copyright (c) 2013 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/net.h>
+#include <linux/rculist.h>
+#include <linux/udp.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/ip_tunnels.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/vxlan.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+/**
+ * struct vxlan_port - Keeps track of open UDP ports
+ * @vs: vxlan_sock created for the port.
+ * @name: vport name.
+ */
+struct vxlan_port {
+ struct vxlan_sock *vs;
+ char name[IFNAMSIZ];
+};
+
+static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
+{
+ return vport_priv(vport);
+}
+
+/* Called with rcu_read_lock and BH disabled. */
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
+{
+ struct ovs_key_ipv4_tunnel tun_key;
+ struct vport *vport = vs->data;
+ struct iphdr *iph;
+ __be64 key;
+
+ /* Save outer tunnel values */
+ iph = ip_hdr(skb);
+ key = cpu_to_be64(ntohl(vx_vni) >> 8);
+ ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);
+
+ ovs_vport_receive(vport, skb, &tun_key);
+}
+
+static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+
+ if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static void vxlan_tnl_destroy(struct vport *vport)
+{
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+
+ vxlan_sock_release(vxlan_port->vs);
+
+ ovs_vport_deferred_free(vport);
+}
+
+static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
+{
+ struct net *net = ovs_dp_get_net(parms->dp);
+ struct nlattr *options = parms->options;
+ struct vxlan_port *vxlan_port;
+ struct vxlan_sock *vs;
+ struct vport *vport;
+ struct nlattr *a;
+ u16 dst_port;
+ int err;
+
+ if (!options) {
+ err = -EINVAL;
+ goto error;
+ }
+ a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
+ if (a && nla_len(a) == sizeof(u16)) {
+ dst_port = nla_get_u16(a);
+ } else {
+ /* Require destination port from userspace. */
+ err = -EINVAL;
+ goto error;
+ }
+
+ vport = ovs_vport_alloc(sizeof(struct vxlan_port),
+ &ovs_vxlan_vport_ops, parms);
+ if (IS_ERR(vport))
+ return vport;
+
+ vxlan_port = vxlan_vport(vport);
+ strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
+
+ vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true);
+ if (IS_ERR(vs)) {
+ ovs_vport_free(vport);
+ return (void *)vs;
+ }
+ vxlan_port->vs = vs;
+
+ return vport;
+
+error:
+ return ERR_PTR(err);
+}
+
+static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
+{
+ struct net *net = ovs_dp_get_net(vport->dp);
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+ struct rtable *rt;
+ struct flowi4 fl;
+ __be16 src_port;
+ int port_min;
+ int port_max;
+ __be16 df;
+ int err;
+
+ if (unlikely(!OVS_CB(skb)->tun_key)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Route lookup */
+ memset(&fl, 0, sizeof(fl));
+ fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
+ fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
+ fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
+ fl.flowi4_mark = skb->mark;
+ fl.flowi4_proto = IPPROTO_UDP;
+
+ rt = ip_route_output_key(net, &fl);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto error;
+ }
+
+ df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
+ htons(IP_DF) : 0;
+
+ skb->local_df = 1;
+
+ inet_get_local_port_range(&port_min, &port_max);
+ src_port = vxlan_src_port(port_min, port_max, skb);
+
+ err = vxlan_xmit_skb(net, vxlan_port->vs, rt, skb,
+ fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst,
+ OVS_CB(skb)->tun_key->ipv4_tos,
+ OVS_CB(skb)->tun_key->ipv4_ttl, df,
+ src_port, dst_port,
+ htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
+ if (err < 0)
+ ip_rt_put(rt);
+error:
+ return err;
+}
+
+static const char *vxlan_get_name(const struct vport *vport)
+{
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ return vxlan_port->name;
+}
+
+const struct vport_ops ovs_vxlan_vport_ops = {
+ .type = OVS_VPORT_TYPE_VXLAN,
+ .create = vxlan_tnl_create,
+ .destroy = vxlan_tnl_destroy,
+ .get_name = vxlan_get_name,
+ .get_options = vxlan_get_options,
+ .send = vxlan_tnl_send,
+};
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index d4c7fa0..d69e0c0 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -42,6 +42,9 @@ static const struct vport_ops *vport_ops_list[] = {
#ifdef CONFIG_OPENVSWITCH_GRE
&ovs_gre_vport_ops,
#endif
+#ifdef CONFIG_OPENVSWITCH_VXLAN
+ &ovs_vxlan_vport_ops,
+#endif
};
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 376045c..1a9fbce 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -199,6 +199,7 @@ void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
extern const struct vport_ops ovs_netdev_vport_ops;
extern const struct vport_ops ovs_internal_vport_ops;
extern const struct vport_ops ovs_gre_vport_ops;
+extern const struct vport_ops ovs_vxlan_vport_ops;
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 75c8bbf..1fdf9ab 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2181,7 +2181,7 @@ static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- err);
+ err, 0);
if (!skb)
return NULL;
@@ -2638,51 +2638,6 @@ out:
return err;
}
-static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
-{
- struct sock_exterr_skb *serr;
- struct sk_buff *skb, *skb2;
- int copied, err;
-
- err = -EAGAIN;
- skb = skb_dequeue(&sk->sk_error_queue);
- if (skb == NULL)
- goto out;
-
- copied = skb->len;
- if (copied > len) {
- msg->msg_flags |= MSG_TRUNC;
- copied = len;
- }
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- if (err)
- goto out_free_skb;
-
- sock_recv_timestamp(msg, sk, skb);
-
- serr = SKB_EXT_ERR(skb);
- put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
- sizeof(serr->ee), &serr->ee);
-
- msg->msg_flags |= MSG_ERRQUEUE;
- err = copied;
-
- /* Reset and regenerate socket error */
- spin_lock_bh(&sk->sk_error_queue.lock);
- sk->sk_err = 0;
- if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
- sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_bh(&sk->sk_error_queue.lock);
- sk->sk_error_report(sk);
- } else
- spin_unlock_bh(&sk->sk_error_queue.lock);
-
-out_free_skb:
- kfree_skb(skb);
-out:
- return err;
-}
-
/*
* Pull a packet from our receive queue and hand it to the user.
* If necessary we block.
@@ -2708,7 +2663,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
#endif
if (flags & MSG_ERRQUEUE) {
- err = packet_recv_error(sk, msg, len);
+ err = sock_recv_errqueue(sk, msg, len,
+ SOL_PACKET, PACKET_TX_TIMESTAMP);
goto out;
}
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 1afd138..77e38f7 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -793,7 +793,7 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
struct sock **psk = v;
struct sock *sk = *psk;
- seq_printf(seq, "%02X %5d %lu%n",
+ seq_printf(seq, "%02X %5u %lu%n",
(int) (psk - pnres.sk),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk), &len);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 82f6016..a6d788d 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -412,12 +412,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* If a delay is expected, orphan the skb. (orphaning usually takes
* place at TX completion time, so _before_ the link transit delay)
- * Ideally, this orphaning should be done after the rate limiting
- * module, because this breaks TCP Small Queue, and other mechanisms
- * based on socket sk_wmem_alloc.
*/
if (q->latency || q->jitter)
- skb_orphan(skb);
+ skb_orphan_partial(skb);
/*
* If we need to duplicate packet, then re-insert at top of the
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index ab67efc..cef5099 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index ba1dfc3..8c4fa5d 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -22,16 +22,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 64977ea..077bb07 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -27,19 +27,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 5780565..bd0bdd0 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -24,17 +24,11 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/command.c b/net/sctp/command.c
index c004401..3d9a9ff 100644
--- a/net/sctp/command.c
+++ b/net/sctp/command.c
@@ -25,17 +25,11 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index f499878..e89015d 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <net/sctp/sctp.h>
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 9e3d257..09b8daa 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
* Jon Grimm <jgrimm@austin.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 3fa4d85..5f20686 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
@@ -87,15 +81,7 @@ static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
{
struct sctphdr *sh = sctp_hdr(skb);
__le32 cmp = sh->checksum;
- struct sk_buff *list;
- __le32 val;
- __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
-
- skb_walk_frags(skb, list)
- tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
- tmp);
-
- val = sctp_end_cksum(tmp);
+ __le32 val = sctp_compute_cksum(skb, 0);
if (val != cmp) {
/* CRC failure, dump it. */
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cb25f04..5856932 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -30,17 +30,11 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 09ffcc9..da613ce 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -27,10 +27,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Le Yanqun <yanqun.le@nokia.com>
@@ -42,9 +39,6 @@
*
* Based on:
* linux/net/ipv6/tcp_ipv6.c
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -351,7 +345,7 @@ out:
rt = (struct rt6_info *)dst;
t->dst = dst;
-
+ t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr,
&fl6->saddr);
} else {
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index fe012c4..5ea573b 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -26,16 +26,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a46d1eb..0ac3a65 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -26,19 +26,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@austin.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index ef9e2bb..94df758 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Jon Grimm <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index 794bb14..ce1ffd8 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
* Karl Knutson <karl@athena.chicago.il.us>
* Ardelle Fan <ardelle.fan@intel.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index e62c225..cd72ae5 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -155,13 +155,8 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
if (sp == asoc->peer.primary_path)
printl("*");
- if (sp->ipaddr.sa.sa_family == AF_INET)
- printl("%pI4 ", &sp->ipaddr.v4.sin_addr);
- else
- printl("%pI6 ", &sp->ipaddr.v6.sin6_addr);
-
- printl("%2u %8u %8u %8u %8u %8u ",
- sp->state, sp->cwnd, sp->ssthresh,
+ printl("%pISc %2u %8u %8u %8u %8u %8u ",
+ &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
sp->flight_size, sp->partial_bytes_acked,
sp->pathmtu);
}
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 62526c4..0c06421 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -22,16 +22,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
@@ -232,7 +226,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
sk = epb->sk;
if (!net_eq(sock_net(sk), seq_file_net(seq)))
continue;
- seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
+ seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
sctp_sk(sk)->type, sk->sk_state, hash,
epb->bind_addr.port,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
@@ -342,7 +336,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
continue;
seq_printf(seq,
"%8pK %8pK %-3d %-3d %-2d %-4d "
- "%4d %8d %8d %7d %5lu %-5d %5d ",
+ "%4d %8d %8d %7u %5lu %-5d %5d ",
assoc, sk, sctp_sk(sk)->type, sk->sk_state,
assoc->state, hash,
assoc->assoc_id,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 4a17494d..5e17092 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Sridhar Samudrala <sri@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1547,7 +1541,7 @@ module_exit(sctp_exit);
*/
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
-MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
+MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 362ae6e..01e9783 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -45,9 +42,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -68,8 +62,12 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
- __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+ __u8 flags, int paylen);
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen);
static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const struct sctp_chunk *init_chunk,
@@ -82,6 +80,28 @@ static int sctp_process_param(struct sctp_association *asoc,
static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
const void *data);
+/* Control chunk destructor */
+static void sctp_control_release_owner(struct sk_buff *skb)
+{
+ /* TODO: do memory release */
+}
+
+static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
+{
+ struct sctp_association *asoc = chunk->asoc;
+ struct sk_buff *skb = chunk->skb;
+
+ /* TODO: properly account for control chunks.
+ * To do it right we'll need:
+ * 1) the endpoint if the association isn't known.
+ * 2) proper memory accounting.
+ *
+ * For now, don't do anything.
+ */
+ skb->sk = asoc ? asoc->base.sk : NULL;
+ skb->destructor = sctp_control_release_owner;
+}
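For context on the TODO above: a fully accounted control chunk would use the standard skb write-accounting helper rather than a no-op destructor. A rough sketch of that direction is below (an assumption about future work, not what this patch implements; the missing endpoint fallback is the open point 1):

static void sctp_control_set_owner_w_accounted(struct sctp_chunk *chunk)
{
	struct sock *sk = chunk->asoc ? chunk->asoc->base.sk : NULL;

	if (!sk)
		return;	/* point 1: would need the endpoint's socket here */

	/* skb_set_owner_w() charges sk_wmem_alloc and installs
	 * sock_wfree() as the destructor, so the charge is released
	 * when the skb is freed.
	 */
	skb_set_owner_w(chunk->skb, sk);
}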
+
/* What was the inbound interface for this chunk? */
int sctp_chunk_iif(const struct sctp_chunk *chunk)
{
@@ -296,7 +316,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
* PLEASE DO NOT FIXME [This version does not support Host Name.]
*/
- retval = sctp_make_chunk(asoc, SCTP_CID_INIT, 0, chunksize);
+ retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize);
if (!retval)
goto nodata;
@@ -443,7 +463,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
num_ext);
/* Now allocate and fill out the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
+ retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
if (!retval)
goto nomem_chunk;
@@ -548,7 +568,7 @@ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
cookie_len = asoc->peer.cookie_len;
/* Build a cookie echo chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
+ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
if (!retval)
goto nodata;
retval->subh.cookie_hdr =
@@ -593,7 +613,7 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
+ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -641,8 +661,8 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
sctp_cwrhdr_t cwr;
cwr.lowest_tsn = htonl(lowest_tsn);
- retval = sctp_make_chunk(asoc, SCTP_CID_ECN_CWR, 0,
- sizeof(sctp_cwrhdr_t));
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0,
+ sizeof(sctp_cwrhdr_t));
if (!retval)
goto nodata;
@@ -675,8 +695,8 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
sctp_ecnehdr_t ecne;
ecne.lowest_tsn = htonl(lowest_tsn);
- retval = sctp_make_chunk(asoc, SCTP_CID_ECN_ECNE, 0,
- sizeof(sctp_ecnehdr_t));
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0,
+ sizeof(sctp_ecnehdr_t));
if (!retval)
goto nodata;
retval->subh.ecne_hdr =
@@ -712,7 +732,7 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
dp.ssn = htons(ssn);
chunk_len = sizeof(dp) + data_len;
- retval = sctp_make_chunk(asoc, SCTP_CID_DATA, flags, chunk_len);
+ retval = sctp_make_data(asoc, flags, chunk_len);
if (!retval)
goto nodata;
@@ -759,7 +779,7 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
+ sizeof(__u32) * num_dup_tsns;
/* Create the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_SACK, 0, len);
+ retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len);
if (!retval)
goto nodata;
@@ -838,8 +858,8 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
shut.cum_tsn_ack = htonl(ctsn);
- retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN, 0,
- sizeof(sctp_shutdownhdr_t));
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0,
+ sizeof(sctp_shutdownhdr_t));
if (!retval)
goto nodata;
@@ -857,7 +877,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -886,7 +906,7 @@ struct sctp_chunk *sctp_make_shutdown_complete(
*/
flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T;
- retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -925,7 +945,7 @@ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
flags = SCTP_CHUNK_FLAG_T;
}
- retval = sctp_make_chunk(asoc, SCTP_CID_ABORT, flags, hint);
+ retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -1117,7 +1137,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
struct sctp_chunk *retval;
sctp_sender_hb_info_t hbinfo;
- retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
+ retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
if (!retval)
goto nodata;
@@ -1145,7 +1165,7 @@ struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
+ retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
if (!retval)
goto nodata;
@@ -1177,8 +1197,8 @@ static struct sctp_chunk *sctp_make_op_error_space(
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0,
- sizeof(sctp_errhdr_t) + size);
+ retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0,
+ sizeof(sctp_errhdr_t) + size);
if (!retval)
goto nodata;
@@ -1248,7 +1268,7 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
if (unlikely(!hmac_desc))
return NULL;
- retval = sctp_make_chunk(asoc, SCTP_CID_AUTH, 0,
+ retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0,
hmac_desc->hmac_len + sizeof(sctp_authhdr_t));
if (!retval)
return NULL;
@@ -1351,8 +1371,8 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk)
/* Create a new chunk, setting the type and flags headers from the
* arguments, reserving enough space for a 'paylen' byte payload.
*/
-static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
- __u8 type, __u8 flags, int paylen)
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen)
{
struct sctp_chunk *retval;
sctp_chunkhdr_t *chunk_hdr;
@@ -1385,14 +1405,27 @@ static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
if (sctp_auth_send_cid(type, asoc))
retval->auth = 1;
- /* Set the skb to the belonging sock for accounting. */
- skb->sk = sk;
-
return retval;
nodata:
return NULL;
}
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+ __u8 flags, int paylen)
+{
+ return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen);
+}
+
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen)
+{
+ struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen);
+
+ if (chunk)
+ sctp_control_set_owner_w(chunk);
+
+ return chunk;
+}
/* Release the memory occupied by a chunk. */
static void sctp_chunk_destroy(struct sctp_chunk *chunk)
@@ -2733,7 +2766,7 @@ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
length += addrlen;
/* Create the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF, 0, length);
+ retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length);
if (!retval)
return NULL;
@@ -2917,7 +2950,7 @@ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *as
int length = sizeof(asconf) + vparam_len;
/* Create the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF_ACK, 0, length);
+ retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length);
if (!retval)
return NULL;
@@ -3448,7 +3481,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
hint = (nstreams + 1) * sizeof(__u32);
- retval = sctp_make_chunk(asoc, SCTP_CID_FWD_TSN, 0, hint);
+ retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint);
if (!retval)
return NULL;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9da6885..666c668 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -42,9 +39,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f6b7109..dfe3f36 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -45,9 +42,6 @@
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 84d98d8..c5999b2 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c6670d2..d5d5882 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -34,10 +34,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -52,9 +49,6 @@
* Ryan Layer <rmlayer@us.ibm.com>
* Anup Pemmaiah <pemmaiah@cc.usu.edu>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index da86035..6007124 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -24,16 +24,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 9a5c4c9..6b36561 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -25,10 +25,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Mingqin Liu <liuming@us.ibm.com>
@@ -36,9 +33,6 @@
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <net/sctp/structs.h>
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 8fdd160..e332efb 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -30,10 +30,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
* Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index b460195..fbda200 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -27,19 +27,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Jon Grimm <jgrimm@us.ibm.com>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 44a45db..81089ed 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -28,19 +28,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 04e3d47..1c1484e 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -27,18 +27,12 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 7762b9f..9c9caaa 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -442,7 +442,7 @@ static void svc_tcp_write_space(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
+ if (sk_stream_is_writeable(sk) && sock)
clear_bit(SOCK_NOSPACE, &sock->flags);
svc_write_space(sk);
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ddf0602..d6656d7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1602,7 +1602,7 @@ static void xs_tcp_write_space(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
/* from net/core/stream.c:sk_stream_write_space */
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ if (sk_stream_is_writeable(sk))
xs_write_space(sk);
read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c4ce243..86de99a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1479,7 +1479,8 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
MAX_SKB_FRAGS * PAGE_SIZE);
skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ msg->msg_flags & MSG_DONTWAIT, &err,
+ PAGE_ALLOC_COSTLY_ORDER);
if (skb == NULL)
goto out;
@@ -1596,6 +1597,10 @@ out:
return err;
}
+/* We use paged skbs for stream sockets, and limit occupancy to 32768
+ * bytes, with a minimum of a full page.
+ */
+#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
@@ -1609,6 +1614,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct scm_cookie tmp_scm;
bool fds_sent = false;
int max_level;
+ int data_len;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
@@ -1635,40 +1641,22 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto pipe_err;
while (sent < len) {
- /*
- * Optimisation for the fact that under 0.01% of X
- * messages typically need breaking up.
- */
-
- size = len-sent;
+ size = len - sent;
/* Keep two messages in the pipe so it schedules better */
- if (size > ((sk->sk_sndbuf >> 1) - 64))
- size = (sk->sk_sndbuf >> 1) - 64;
+ size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
- if (size > SKB_MAX_ALLOC)
- size = SKB_MAX_ALLOC;
-
- /*
- * Grab a buffer
- */
+ /* allow fallback to order-0 allocations */
+ size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
- skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
- &err);
+ data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
- if (skb == NULL)
+ skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
+ msg->msg_flags & MSG_DONTWAIT, &err,
+ get_order(UNIX_SKB_FRAGS_SZ));
+ if (!skb)
goto out_err;
- /*
- * If you pass two values to the sock_alloc_send_skb
- * it tries to grab the large buffer with GFP_NOFS
- * (which can fail easily), and if it fails grab the
- * fallback size buffer which is under a page and will
- * succeed. [Alan]
- */
- size = min_t(int, size, skb_tailroom(skb));
-
-
/* Only send the fds in the first buffer */
err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
if (err < 0) {
@@ -1678,7 +1666,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
max_level = err + 1;
fds_sent = true;
- err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ skb_put(skb, size - data_len);
+ skb->data_len = data_len;
+ skb->len = size;
+ err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
+ sent, size);
if (err) {
kfree_skb(skb);
goto out_err;
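To make the new sizing concrete: with 4 KiB pages, get_order(32768) is 3, so UNIX_SKB_FRAGS_SZ allows 32 KiB of page frags on top of the linear head. A small user-space sketch of the head/frag split used above (SKB_MAX_HEAD(0) is only approximated here, and the sk_sndbuf cap is omitted; both depend on the running kernel):

#include <stdio.h>

/* Illustrative stand-ins for the kernel macros used in
 * unix_stream_sendmsg(); real values are architecture-dependent.
 */
#define PAGE_SIZE		4096UL
#define SKB_MAX_HEAD0		(PAGE_SIZE - 320UL)	/* approximation of SKB_MAX_HEAD(0) */
#define UNIX_SKB_FRAGS_SZ	(PAGE_SIZE << 3)	/* get_order(32768) == 3 on 4 KiB pages */

int main(void)
{
	unsigned long want = 100000;	/* bytes the caller asked to send */
	unsigned long size, data_len;

	size = want;
	if (size > SKB_MAX_HEAD0 + UNIX_SKB_FRAGS_SZ)	/* cap as the patch does */
		size = SKB_MAX_HEAD0 + UNIX_SKB_FRAGS_SZ;
	data_len = size > SKB_MAX_HEAD0 ? size - SKB_MAX_HEAD0 : 0;

	printf("skb: %lu bytes linear, %lu bytes in page frags\n",
	       size - data_len, data_len);
	return 0;
}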
@@ -1890,6 +1882,11 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
return timeo;
}
+static unsigned int unix_skb_len(const struct sk_buff *skb)
+{
+ return skb->len - UNIXCB(skb).consumed;
+}
+
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
@@ -1977,8 +1974,8 @@ again:
}
skip = sk_peek_offset(sk, flags);
- while (skip >= skb->len) {
- skip -= skb->len;
+ while (skip >= unix_skb_len(skb)) {
+ skip -= unix_skb_len(skb);
last = skb;
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (!skb)
@@ -2005,8 +2002,9 @@ again:
sunaddr = NULL;
}
- chunk = min_t(unsigned int, skb->len - skip, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
+ chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
+ if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
+ msg->msg_iov, chunk)) {
if (copied == 0)
copied = -EFAULT;
break;
@@ -2016,14 +2014,14 @@ again:
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
- skb_pull(skb, chunk);
+ UNIXCB(skb).consumed += chunk;
sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
- if (skb->len)
+ if (unix_skb_len(skb))
break;
skb_unlink(skb, &sk->sk_receive_queue);
@@ -2107,7 +2105,7 @@ long unix_inq_len(struct sock *sk)
if (sk->sk_type == SOCK_STREAM ||
sk->sk_type == SOCK_SEQPACKET) {
skb_queue_walk(&sk->sk_receive_queue, skb)
- amount += skb->len;
+ amount += unix_skb_len(skb);
} else {
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
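[Editor's note] A minimal userspace sketch of the new sizing arithmetic in unix_stream_sendmsg(): each chunk is capped at half the send buffer (minus a small slack so two messages fit in the pipe), then at SKB_MAX_HEAD(0) plus UNIX_SKB_FRAGS_SZ, and anything beyond the linear head (data_len) is carried in page fragments by sock_alloc_send_pskb(). On the receive side, UNIXCB(skb).consumed and unix_skb_len() replace skb_pull(), since paged data cannot simply be pulled from the linear area. The constants below (PAGE_SIZE, the SKB_MAX_HEAD0 overhead, the sndbuf value) are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>

/* Illustrative stand-ins; the real values come from the kernel
 * (SKB_MAX_HEAD(0), PAGE_SIZE << get_order(32768), sk->sk_sndbuf). */
#define PAGE_SIZE         4096
#define SKB_MAX_HEAD0     (PAGE_SIZE - 512)   /* assumed skb overhead */
#define UNIX_SKB_FRAGS_SZ 32768

static void split_send(size_t len, int sndbuf)
{
	size_t sent = 0;

	while (sent < len) {
		int size = (int)(len - sent);
		int data_len;

		/* Keep two messages in the pipe so it schedules better. */
		if (size > (sndbuf >> 1) - 64)
			size = (sndbuf >> 1) - 64;

		/* Allow fallback to order-0 allocations: everything past
		 * the linear head goes into page fragments. */
		if (size > SKB_MAX_HEAD0 + UNIX_SKB_FRAGS_SZ)
			size = SKB_MAX_HEAD0 + UNIX_SKB_FRAGS_SZ;
		data_len = size > SKB_MAX_HEAD0 ? size - SKB_MAX_HEAD0 : 0;

		printf("chunk: %d linear + %d paged bytes\n",
		       size - data_len, data_len);
		sent += (size_t)size;
	}
}

int main(void)
{
	split_send(100000, 212992);   /* 212992 is a typical default sndbuf */
	return 0;
}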
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 4d93346..545c08b 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -96,8 +96,7 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
-
-#include "af_vsock.h"
+#include <net/af_vsock.h>
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
diff --git a/net/vmw_vsock/af_vsock.h b/net/vmw_vsock/af_vsock.h
deleted file mode 100644
index 7d64d36..0000000
--- a/net/vmw_vsock/af_vsock.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * VMware vSockets Driver
- *
- * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation version 2 and no later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __AF_VSOCK_H__
-#define __AF_VSOCK_H__
-
-#include <linux/kernel.h>
-#include <linux/workqueue.h>
-#include <linux/vm_sockets.h>
-
-#include "vsock_addr.h"
-
-#define LAST_RESERVED_PORT 1023
-
-#define vsock_sk(__sk) ((struct vsock_sock *)__sk)
-#define sk_vsock(__vsk) (&(__vsk)->sk)
-
-struct vsock_sock {
- /* sk must be the first member. */
- struct sock sk;
- struct sockaddr_vm local_addr;
- struct sockaddr_vm remote_addr;
- /* Links for the global tables of bound and connected sockets. */
- struct list_head bound_table;
- struct list_head connected_table;
- /* Accessed without the socket lock held. This means it can never be
- * modified outsided of socket create or destruct.
- */
- bool trusted;
- bool cached_peer_allow_dgram; /* Dgram communication allowed to
- * cached peer?
- */
- u32 cached_peer; /* Context ID of last dgram destination check. */
- const struct cred *owner;
- /* Rest are SOCK_STREAM only. */
- long connect_timeout;
- /* Listening socket that this came from. */
- struct sock *listener;
- /* Used for pending list and accept queue during connection handshake.
- * The listening socket is the head for both lists. Sockets created
- * for connection requests are placed in the pending list until they
- * are connected, at which point they are put in the accept queue list
- * so they can be accepted in accept(). If accept() cannot accept the
- * connection, it is marked as rejected so the cleanup function knows
- * to clean up the socket.
- */
- struct list_head pending_links;
- struct list_head accept_queue;
- bool rejected;
- struct delayed_work dwork;
- u32 peer_shutdown;
- bool sent_request;
- bool ignore_connecting_rst;
-
- /* Private to transport. */
- void *trans;
-};
-
-s64 vsock_stream_has_data(struct vsock_sock *vsk);
-s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
-struct sock *__vsock_create(struct net *net,
- struct socket *sock,
- struct sock *parent,
- gfp_t priority, unsigned short type);
-
-/**** TRANSPORT ****/
-
-struct vsock_transport_recv_notify_data {
- u64 data1; /* Transport-defined. */
- u64 data2; /* Transport-defined. */
- bool notify_on_block;
-};
-
-struct vsock_transport_send_notify_data {
- u64 data1; /* Transport-defined. */
- u64 data2; /* Transport-defined. */
-};
-
-struct vsock_transport {
- /* Initialize/tear-down socket. */
- int (*init)(struct vsock_sock *, struct vsock_sock *);
- void (*destruct)(struct vsock_sock *);
- void (*release)(struct vsock_sock *);
-
- /* Connections. */
- int (*connect)(struct vsock_sock *);
-
- /* DGRAM. */
- int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
- int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
- struct msghdr *msg, size_t len, int flags);
- int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
- struct iovec *, size_t len);
- bool (*dgram_allow)(u32 cid, u32 port);
-
- /* STREAM. */
- /* TODO: stream_bind() */
- ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *,
- size_t len, int flags);
- ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *,
- size_t len);
- s64 (*stream_has_data)(struct vsock_sock *);
- s64 (*stream_has_space)(struct vsock_sock *);
- u64 (*stream_rcvhiwat)(struct vsock_sock *);
- bool (*stream_is_active)(struct vsock_sock *);
- bool (*stream_allow)(u32 cid, u32 port);
-
- /* Notification. */
- int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
- int (*notify_poll_out)(struct vsock_sock *, size_t, bool *);
- int (*notify_recv_init)(struct vsock_sock *, size_t,
- struct vsock_transport_recv_notify_data *);
- int (*notify_recv_pre_block)(struct vsock_sock *, size_t,
- struct vsock_transport_recv_notify_data *);
- int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t,
- struct vsock_transport_recv_notify_data *);
- int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t,
- ssize_t, bool, struct vsock_transport_recv_notify_data *);
- int (*notify_send_init)(struct vsock_sock *,
- struct vsock_transport_send_notify_data *);
- int (*notify_send_pre_block)(struct vsock_sock *,
- struct vsock_transport_send_notify_data *);
- int (*notify_send_pre_enqueue)(struct vsock_sock *,
- struct vsock_transport_send_notify_data *);
- int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t,
- struct vsock_transport_send_notify_data *);
-
- /* Shutdown. */
- int (*shutdown)(struct vsock_sock *, int);
-
- /* Buffer sizes. */
- void (*set_buffer_size)(struct vsock_sock *, u64);
- void (*set_min_buffer_size)(struct vsock_sock *, u64);
- void (*set_max_buffer_size)(struct vsock_sock *, u64);
- u64 (*get_buffer_size)(struct vsock_sock *);
- u64 (*get_min_buffer_size)(struct vsock_sock *);
- u64 (*get_max_buffer_size)(struct vsock_sock *);
-
- /* Addressing. */
- u32 (*get_local_cid)(void);
-};
-
-/**** CORE ****/
-
-int vsock_core_init(const struct vsock_transport *t);
-void vsock_core_exit(void);
-
-/**** UTILS ****/
-
-void vsock_release_pending(struct sock *pending);
-void vsock_add_pending(struct sock *listener, struct sock *pending);
-void vsock_remove_pending(struct sock *listener, struct sock *pending);
-void vsock_enqueue_accept(struct sock *listener, struct sock *connected);
-void vsock_insert_connected(struct vsock_sock *vsk);
-void vsock_remove_bound(struct vsock_sock *vsk);
-void vsock_remove_connected(struct vsock_sock *vsk);
-struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
-struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
- struct sockaddr_vm *dst);
-void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
-
-#endif /* __AF_VSOCK_H__ */
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index ffc11df..9d69866 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -34,8 +34,8 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
+#include <net/af_vsock.h>
-#include "af_vsock.h"
#include "vmci_transport_notify.h"
static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index fd88ea8..ce6c962 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -19,8 +19,8 @@
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
-#include "vsock_addr.h"
-#include "af_vsock.h"
+#include <net/vsock_addr.h>
+#include <net/af_vsock.h>
/* If the packet format changes in a release then this should change too. */
#define VMCI_TRANSPORT_PACKET_VERSION 1
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c
index ec2611b..82486ee 100644
--- a/net/vmw_vsock/vsock_addr.c
+++ b/net/vmw_vsock/vsock_addr.c
@@ -17,8 +17,7 @@
#include <linux/socket.h>
#include <linux/stddef.h>
#include <net/sock.h>
-
-#include "vsock_addr.h"
+#include <net/vsock_addr.h>
void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port)
{
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h
deleted file mode 100644
index 9ccd531..0000000
--- a/net/vmw_vsock/vsock_addr.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * VMware vSockets Driver
- *
- * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation version 2 and no later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _VSOCK_ADDR_H_
-#define _VSOCK_ADDR_H_
-
-#include <linux/vm_sockets.h>
-
-void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port);
-int vsock_addr_validate(const struct sockaddr_vm *addr);
-bool vsock_addr_bound(const struct sockaddr_vm *addr);
-void vsock_addr_unbind(struct sockaddr_vm *addr);
-bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
- const struct sockaddr_vm *other);
-int vsock_addr_cast(const struct sockaddr *addr, size_t len,
- struct sockaddr_vm **out_addr);
-
-#endif
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a8c29fa..6715396 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -462,6 +462,14 @@ int wiphy_register(struct wiphy *wiphy)
return -EINVAL;
#endif
+ if (WARN_ON(wiphy->coalesce &&
+ (!wiphy->coalesce->n_rules ||
+ !wiphy->coalesce->n_patterns) &&
+ (!wiphy->coalesce->pattern_min_len ||
+ wiphy->coalesce->pattern_min_len >
+ wiphy->coalesce->pattern_max_len)))
+ return -EINVAL;
+
if (WARN_ON(wiphy->ap_sme_capa &&
!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
return -EINVAL;
@@ -668,6 +676,7 @@ void wiphy_unregister(struct wiphy *wiphy)
rdev_set_wakeup(rdev, false);
#endif
cfg80211_rdev_free_wowlan(rdev);
+ cfg80211_rdev_free_coalesce(rdev);
}
EXPORT_SYMBOL(wiphy_unregister);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a6b45bf..9ad43c6 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -79,6 +79,8 @@ struct cfg80211_registered_device {
/* netlink port which started critical protocol (0 means not started) */
u32 crit_proto_nlportid;
+ struct cfg80211_coalesce *coalesce;
+
/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */
struct wiphy wiphy __aligned(NETDEV_ALIGN);
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 30c4920..0553fd4 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -167,9 +167,12 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
* basic rates
*/
if (!setup->basic_rates) {
+ enum nl80211_bss_scan_width scan_width;
struct ieee80211_supported_band *sband =
rdev->wiphy.bands[setup->chandef.chan->band];
- setup->basic_rates = ieee80211_mandatory_rates(sband);
+ scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
+ setup->basic_rates = ieee80211_mandatory_rates(sband,
+ scan_width);
}
if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef))
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5f6e982..170c0ab 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -349,6 +349,11 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
+ [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
+ [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
+ [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
+ [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_U16 },
+ [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_U16 },
};
/* policy for the key attributes */
@@ -403,6 +408,14 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
[NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 },
};
+/* policy for coalesce rule attributes */
+static const struct nla_policy
+nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = {
+ [NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 },
+ [NL80211_ATTR_COALESCE_RULE_CONDITION] = { .type = NLA_U32 },
+ [NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED },
+};
+
/* policy for GTK rekey offload attributes */
static const struct nla_policy
nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
@@ -976,7 +989,7 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
return -ENOBUFS;
if (dev->wiphy.wowlan->n_patterns) {
- struct nl80211_wowlan_pattern_support pat = {
+ struct nl80211_pattern_support pat = {
.max_patterns = dev->wiphy.wowlan->n_patterns,
.min_pattern_len = dev->wiphy.wowlan->pattern_min_len,
.max_pattern_len = dev->wiphy.wowlan->pattern_max_len,
@@ -997,6 +1010,27 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
}
#endif
+static int nl80211_send_coalesce(struct sk_buff *msg,
+ struct cfg80211_registered_device *dev)
+{
+ struct nl80211_coalesce_rule_support rule;
+
+ if (!dev->wiphy.coalesce)
+ return 0;
+
+ rule.max_rules = dev->wiphy.coalesce->n_rules;
+ rule.max_delay = dev->wiphy.coalesce->max_delay;
+ rule.pat.max_patterns = dev->wiphy.coalesce->n_patterns;
+ rule.pat.min_pattern_len = dev->wiphy.coalesce->pattern_min_len;
+ rule.pat.max_pattern_len = dev->wiphy.coalesce->pattern_max_len;
+ rule.pat.max_pkt_offset = dev->wiphy.coalesce->max_pkt_offset;
+
+ if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule))
+ return -ENOBUFS;
+
+ return 0;
+}
+
static int nl80211_send_band_rateinfo(struct sk_buff *msg,
struct ieee80211_supported_band *sband)
{
@@ -1395,6 +1429,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
if (state->split) {
CMD(crit_proto_start, CRIT_PROTOCOL_START);
CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
+ if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
+ CMD(channel_switch, CHANNEL_SWITCH);
}
#ifdef CONFIG_NL80211_TESTMODE
@@ -1515,6 +1551,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
dev->wiphy.vht_capa_mod_mask))
goto nla_put_failure;
+ state->split_start++;
+ break;
+ case 10:
+ if (nl80211_send_coalesce(msg, dev))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -5580,6 +5622,111 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
return err;
}
+static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_csa_settings params;
+ /* csa_attrs is defined static to avoid wasting stack space - this
+ * function is called under the RTNL lock, so this should not be a problem.
+ */
+ static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1];
+ u8 radar_detect_width = 0;
+ int err;
+
+ if (!rdev->ops->channel_switch ||
+ !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
+ return -EOPNOTSUPP;
+
+ /* may add IBSS support later */
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
+ !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT])
+ return -EINVAL;
+
+ /* only important for AP; IBSS and mesh create IEs internally */
+ if (!info->attrs[NL80211_ATTR_CSA_IES])
+ return -EINVAL;
+
+ /* useless if AP is not running */
+ if (!wdev->beacon_interval)
+ return -EINVAL;
+
+ params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
+
+ err = nl80211_parse_beacon(info->attrs, &params.beacon_after);
+ if (err)
+ return err;
+
+ err = nla_parse_nested(csa_attrs, NL80211_ATTR_MAX,
+ info->attrs[NL80211_ATTR_CSA_IES],
+ nl80211_policy);
+ if (err)
+ return err;
+
+ err = nl80211_parse_beacon(csa_attrs, &params.beacon_csa);
+ if (err)
+ return err;
+
+ if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON])
+ return -EINVAL;
+
+ params.counter_offset_beacon =
+ nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+ if (params.counter_offset_beacon >= params.beacon_csa.tail_len)
+ return -EINVAL;
+
+ /* sanity check - counters should be the same */
+ if (params.beacon_csa.tail[params.counter_offset_beacon] !=
+ params.count)
+ return -EINVAL;
+
+ if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) {
+ params.counter_offset_presp =
+ nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+ if (params.counter_offset_presp >=
+ params.beacon_csa.probe_resp_len)
+ return -EINVAL;
+
+ if (params.beacon_csa.probe_resp[params.counter_offset_presp] !=
+ params.count)
+ return -EINVAL;
+ }
+
+ err = nl80211_parse_chandef(rdev, info, &params.chandef);
+ if (err)
+ return err;
+
+ if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+ return -EINVAL;
+
+ err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
+ if (err < 0) {
+ return err;
+ } else if (err) {
+ radar_detect_width = BIT(params.chandef.width);
+ params.radar_required = true;
+ }
+
+ err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+ params.chandef.chan,
+ CHAN_MODE_SHARED,
+ radar_detect_width);
+ if (err)
+ return err;
+
+ if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
+ params.block_tx = true;
+
+ return rdev_channel_switch(rdev, dev, &params);
+}
+
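[Editor's note] To illustrate the sanity check above (the byte at counter_offset_beacon inside the CSA beacon tail must hold the same countdown value as NL80211_ATTR_CH_SWITCH_COUNT), here is a hypothetical userspace helper that appends a Channel Switch Announcement element to the beacon tail and returns the offset it would report. The struct and function names are illustrative only; the element layout assumed here is the standard 802.11 CSA element (id 37: switch mode, new channel number, switch count).

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* 802.11 Channel Switch Announcement element: id 37, three octets
 * (switch mode, new channel number, switch count). */
struct csa_ie {
	uint8_t id;
	uint8_t len;
	uint8_t mode;
	uint8_t new_chan;
	uint8_t count;
};

/* Append a CSA element to the beacon tail and return the offset of the
 * count byte - the value that would go into NL80211_ATTR_CSA_C_OFF_BEACON. */
static size_t append_csa(uint8_t *tail, size_t tail_len,
			 uint8_t mode, uint8_t new_chan, uint8_t count)
{
	struct csa_ie ie = { 37, 3, mode, new_chan, count };

	memcpy(tail + tail_len, &ie, sizeof(ie));
	return tail_len + offsetof(struct csa_ie, count);
}

int main(void)
{
	uint8_t tail[64] = { 0 };   /* any existing tail IEs would come first */
	size_t off = append_csa(tail, 0, 1, 40, 5);

	/* nl80211_channel_switch() re-checks exactly this relation:
	 * tail[counter_offset_beacon] == CSA count. */
	printf("counter offset %zu, counter byte %u\n", off, tail[off]);
	return 0;
}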
static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
u32 seq, int flags,
struct cfg80211_registered_device *rdev,
@@ -5641,6 +5788,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
goto nla_put_failure;
if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
+ nla_put_u32(msg, NL80211_BSS_CHAN_WIDTH, res->scan_width) ||
nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
jiffies_to_msecs(jiffies - intbss->ts)))
goto nla_put_failure;
@@ -6321,6 +6469,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
switch (ibss.chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
break;
case NL80211_CHAN_WIDTH_20:
@@ -6348,6 +6498,19 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ memcpy(&ibss.ht_capa_mask,
+ nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
+ sizeof(ibss.ht_capa_mask));
+
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+ if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ return -EINVAL;
+ memcpy(&ibss.ht_capa,
+ nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
+ sizeof(ibss.ht_capa));
+ }
+
if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
!nl80211_parse_mcast_rate(rdev, ibss.mcast_rate,
nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
@@ -7596,12 +7759,11 @@ static int nl80211_send_wowlan_patterns(struct sk_buff *msg,
if (!nl_pat)
return -ENOBUFS;
pat_len = wowlan->patterns[i].pattern_len;
- if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
- DIV_ROUND_UP(pat_len, 8),
+ if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8),
wowlan->patterns[i].mask) ||
- nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
- pat_len, wowlan->patterns[i].pattern) ||
- nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET,
+ nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len,
+ wowlan->patterns[i].pattern) ||
+ nla_put_u32(msg, NL80211_PKTPAT_OFFSET,
wowlan->patterns[i].pkt_offset))
return -ENOBUFS;
nla_nest_end(msg, nl_pat);
@@ -7942,7 +8104,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
struct nlattr *pat;
int n_patterns = 0;
int rem, pat_len, mask_len, pkt_offset;
- struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];
+ struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
rem)
@@ -7961,26 +8123,25 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
rem) {
- nla_parse(pat_tb, MAX_NL80211_WOWLAN_PKTPAT,
- nla_data(pat), nla_len(pat), NULL);
+ nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
+ nla_len(pat), NULL);
err = -EINVAL;
- if (!pat_tb[NL80211_WOWLAN_PKTPAT_MASK] ||
- !pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN])
+ if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ !pat_tb[NL80211_PKTPAT_PATTERN])
goto error;
- pat_len = nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]);
+ pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]);
mask_len = DIV_ROUND_UP(pat_len, 8);
- if (nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]) !=
- mask_len)
+ if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len)
goto error;
if (pat_len > wowlan->pattern_max_len ||
pat_len < wowlan->pattern_min_len)
goto error;
- if (!pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET])
+ if (!pat_tb[NL80211_PKTPAT_OFFSET])
pkt_offset = 0;
else
pkt_offset = nla_get_u32(
- pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET]);
+ pat_tb[NL80211_PKTPAT_OFFSET]);
if (pkt_offset > wowlan->max_pkt_offset)
goto error;
new_triggers.patterns[i].pkt_offset = pkt_offset;
@@ -7994,11 +8155,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
new_triggers.patterns[i].pattern =
new_triggers.patterns[i].mask + mask_len;
memcpy(new_triggers.patterns[i].mask,
- nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]),
+ nla_data(pat_tb[NL80211_PKTPAT_MASK]),
mask_len);
new_triggers.patterns[i].pattern_len = pat_len;
memcpy(new_triggers.patterns[i].pattern,
- nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]),
+ nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
pat_len);
i++;
}
@@ -8037,6 +8198,264 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
}
#endif
+static int nl80211_send_coalesce_rules(struct sk_buff *msg,
+ struct cfg80211_registered_device *rdev)
+{
+ struct nlattr *nl_pats, *nl_pat, *nl_rule, *nl_rules;
+ int i, j, pat_len;
+ struct cfg80211_coalesce_rules *rule;
+
+ if (!rdev->coalesce->n_rules)
+ return 0;
+
+ nl_rules = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE);
+ if (!nl_rules)
+ return -ENOBUFS;
+
+ for (i = 0; i < rdev->coalesce->n_rules; i++) {
+ nl_rule = nla_nest_start(msg, i + 1);
+ if (!nl_rule)
+ return -ENOBUFS;
+
+ rule = &rdev->coalesce->rules[i];
+ if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY,
+ rule->delay))
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION,
+ rule->condition))
+ return -ENOBUFS;
+
+ nl_pats = nla_nest_start(msg,
+ NL80211_ATTR_COALESCE_RULE_PKT_PATTERN);
+ if (!nl_pats)
+ return -ENOBUFS;
+
+ for (j = 0; j < rule->n_patterns; j++) {
+ nl_pat = nla_nest_start(msg, j + 1);
+ if (!nl_pat)
+ return -ENOBUFS;
+ pat_len = rule->patterns[j].pattern_len;
+ if (nla_put(msg, NL80211_PKTPAT_MASK,
+ DIV_ROUND_UP(pat_len, 8),
+ rule->patterns[j].mask) ||
+ nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len,
+ rule->patterns[j].pattern) ||
+ nla_put_u32(msg, NL80211_PKTPAT_OFFSET,
+ rule->patterns[j].pkt_offset))
+ return -ENOBUFS;
+ nla_nest_end(msg, nl_pat);
+ }
+ nla_nest_end(msg, nl_pats);
+ nla_nest_end(msg, nl_rule);
+ }
+ nla_nest_end(msg, nl_rules);
+
+ return 0;
+}
+
+static int nl80211_get_coalesce(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct sk_buff *msg;
+ void *hdr;
+
+ if (!rdev->wiphy.coalesce)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
+ NL80211_CMD_GET_COALESCE);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (rdev->coalesce && nl80211_send_coalesce_rules(msg, rdev))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
+void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev)
+{
+ struct cfg80211_coalesce *coalesce = rdev->coalesce;
+ int i, j;
+ struct cfg80211_coalesce_rules *rule;
+
+ if (!coalesce)
+ return;
+
+ for (i = 0; i < coalesce->n_rules; i++) {
+ rule = &coalesce->rules[i];
+ for (j = 0; j < rule->n_patterns; j++)
+ kfree(rule->patterns[j].mask);
+ kfree(rule->patterns);
+ }
+ kfree(coalesce->rules);
+ kfree(coalesce);
+ rdev->coalesce = NULL;
+}
+
+static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
+ struct nlattr *rule,
+ struct cfg80211_coalesce_rules *new_rule)
+{
+ int err, i;
+ const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
+ struct nlattr *tb[NUM_NL80211_ATTR_COALESCE_RULE], *pat;
+ int rem, pat_len, mask_len, pkt_offset, n_patterns = 0;
+ struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
+
+ err = nla_parse(tb, NL80211_ATTR_COALESCE_RULE_MAX, nla_data(rule),
+ nla_len(rule), nl80211_coalesce_policy);
+ if (err)
+ return err;
+
+ if (tb[NL80211_ATTR_COALESCE_RULE_DELAY])
+ new_rule->delay =
+ nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_DELAY]);
+ if (new_rule->delay > coalesce->max_delay)
+ return -EINVAL;
+
+ if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION])
+ new_rule->condition =
+ nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]);
+ if (new_rule->condition != NL80211_COALESCE_CONDITION_MATCH &&
+ new_rule->condition != NL80211_COALESCE_CONDITION_NO_MATCH)
+ return -EINVAL;
+
+ if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN])
+ return -EINVAL;
+
+ nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
+ rem)
+ n_patterns++;
+ if (n_patterns > coalesce->n_patterns)
+ return -EINVAL;
+
+ new_rule->patterns = kcalloc(n_patterns, sizeof(new_rule->patterns[0]),
+ GFP_KERNEL);
+ if (!new_rule->patterns)
+ return -ENOMEM;
+
+ new_rule->n_patterns = n_patterns;
+ i = 0;
+
+ nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
+ rem) {
+ nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
+ nla_len(pat), NULL);
+ if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ !pat_tb[NL80211_PKTPAT_PATTERN])
+ return -EINVAL;
+ pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]);
+ mask_len = DIV_ROUND_UP(pat_len, 8);
+ if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len)
+ return -EINVAL;
+ if (pat_len > coalesce->pattern_max_len ||
+ pat_len < coalesce->pattern_min_len)
+ return -EINVAL;
+
+ if (!pat_tb[NL80211_PKTPAT_OFFSET])
+ pkt_offset = 0;
+ else
+ pkt_offset = nla_get_u32(pat_tb[NL80211_PKTPAT_OFFSET]);
+ if (pkt_offset > coalesce->max_pkt_offset)
+ return -EINVAL;
+ new_rule->patterns[i].pkt_offset = pkt_offset;
+
+ new_rule->patterns[i].mask =
+ kmalloc(mask_len + pat_len, GFP_KERNEL);
+ if (!new_rule->patterns[i].mask)
+ return -ENOMEM;
+ new_rule->patterns[i].pattern =
+ new_rule->patterns[i].mask + mask_len;
+ memcpy(new_rule->patterns[i].mask,
+ nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len);
+ new_rule->patterns[i].pattern_len = pat_len;
+ memcpy(new_rule->patterns[i].pattern,
+ nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len);
+ i++;
+ }
+
+ return 0;
+}
+
+static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
+ struct cfg80211_coalesce new_coalesce = {};
+ struct cfg80211_coalesce *n_coalesce;
+ int err, rem_rule, n_rules = 0, i, j;
+ struct nlattr *rule;
+ struct cfg80211_coalesce_rules *tmp_rule;
+
+ if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) {
+ cfg80211_rdev_free_coalesce(rdev);
+ rdev->ops->set_coalesce(&rdev->wiphy, NULL);
+ return 0;
+ }
+
+ nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
+ rem_rule)
+ n_rules++;
+ if (n_rules > coalesce->n_rules)
+ return -EINVAL;
+
+ new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]),
+ GFP_KERNEL);
+ if (!new_coalesce.rules)
+ return -ENOMEM;
+
+ new_coalesce.n_rules = n_rules;
+ i = 0;
+
+ nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
+ rem_rule) {
+ err = nl80211_parse_coalesce_rule(rdev, rule,
+ &new_coalesce.rules[i]);
+ if (err)
+ goto error;
+
+ i++;
+ }
+
+ err = rdev->ops->set_coalesce(&rdev->wiphy, &new_coalesce);
+ if (err)
+ goto error;
+
+ n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL);
+ if (!n_coalesce) {
+ err = -ENOMEM;
+ goto error;
+ }
+ cfg80211_rdev_free_coalesce(rdev);
+ rdev->coalesce = n_coalesce;
+
+ return 0;
+error:
+ for (i = 0; i < new_coalesce.n_rules; i++) {
+ tmp_rule = &new_coalesce.rules[i];
+ for (j = 0; j < tmp_rule->n_patterns; j++)
+ kfree(tmp_rule->patterns[j].mask);
+ kfree(tmp_rule->patterns);
+ }
+ kfree(new_coalesce.rules);
+
+ return err;
+}
+
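[Editor's note] A minimal userspace sketch of the pattern storage used by nl80211_parse_coalesce_rule() (and the WoWLAN path above): the mask carries one bit per pattern byte, so mask_len = DIV_ROUND_UP(pat_len, 8), and mask and pattern share a single allocation with the pattern starting right after the mask. The struct and function names here are illustrative, not kernel API. Freeing the mask pointer releases both, which is why cfg80211_rdev_free_coalesce() only frees patterns[j].mask.

#include <stdlib.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct pkt_pattern {
	unsigned char *mask;      /* one bit per pattern byte */
	unsigned char *pattern;   /* points into the same allocation */
	int pattern_len;
};

/* Copy a pattern and its mask into a single buffer, pattern right after
 * the mask, mirroring the layout used for coalesce/WoWLAN patterns. */
static int pattern_alloc(struct pkt_pattern *p, const unsigned char *pat,
			 int pat_len, const unsigned char *mask)
{
	int mask_len = DIV_ROUND_UP(pat_len, 8);

	p->mask = malloc((size_t)(mask_len + pat_len));
	if (!p->mask)
		return -1;
	p->pattern = p->mask + mask_len;
	memcpy(p->mask, mask, (size_t)mask_len);
	memcpy(p->pattern, pat, (size_t)pat_len);
	p->pattern_len = pat_len;
	return 0;               /* free(p->mask) later releases both */
}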
static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -9043,7 +9462,30 @@ static struct genl_ops nl80211_ops[] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
- }
+ },
+ {
+ .cmd = NL80211_CMD_GET_COALESCE,
+ .doit = nl80211_get_coalesce,
+ .policy = nl80211_policy,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_SET_COALESCE,
+ .doit = nl80211_set_coalesce,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_CHANNEL_SWITCH,
+ .doit = nl80211_channel_switch,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index a4073e8..44341bf 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -74,4 +74,6 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
enum nl80211_radar_event event,
struct net_device *netdev, gfp_t gfp);
+void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 9f15f0a..de870d4 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -923,4 +923,16 @@ static inline void rdev_crit_proto_stop(struct cfg80211_registered_device *rdev,
trace_rdev_return_void(&rdev->wiphy);
}
+static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_csa_settings *params)
+{
+ int ret;
+
+ trace_rdev_channel_switch(&rdev->wiphy, dev, params);
+ ret = rdev->ops->channel_switch(&rdev->wiphy, dev, params);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index ae8c186..ad1e406 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -651,6 +651,8 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
continue;
if (bss->pub.channel != new->pub.channel)
continue;
+ if (bss->pub.scan_width != new->pub.scan_width)
+ continue;
if (rcu_access_pointer(bss->pub.beacon_ies))
continue;
ies = rcu_access_pointer(bss->pub.ies);
@@ -870,11 +872,12 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
/* Returned bss is reference counted and must be cleaned up appropriately. */
struct cfg80211_bss*
-cfg80211_inform_bss(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- const u8 *bssid, u64 tsf, u16 capability,
- u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp)
+cfg80211_inform_bss_width(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ s32 signal, gfp_t gfp)
{
struct cfg80211_bss_ies *ies;
struct cfg80211_internal_bss tmp = {}, *res;
@@ -892,6 +895,7 @@ cfg80211_inform_bss(struct wiphy *wiphy,
memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
tmp.pub.channel = channel;
+ tmp.pub.scan_width = scan_width;
tmp.pub.signal = signal;
tmp.pub.beacon_interval = beacon_interval;
tmp.pub.capability = capability;
@@ -924,14 +928,15 @@ cfg80211_inform_bss(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss);
+EXPORT_SYMBOL(cfg80211_inform_bss_width);
/* Returned bss is reference counted and must be cleaned up appropriately. */
struct cfg80211_bss *
-cfg80211_inform_bss_frame(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp)
+cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ s32 signal, gfp_t gfp)
{
struct cfg80211_internal_bss tmp = {}, *res;
struct cfg80211_bss_ies *ies;
@@ -941,7 +946,8 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
offsetof(struct ieee80211_mgmt, u.beacon.variable));
- trace_cfg80211_inform_bss_frame(wiphy, channel, mgmt, len, signal);
+ trace_cfg80211_inform_bss_width_frame(wiphy, channel, scan_width, mgmt,
+ len, signal);
if (WARN_ON(!mgmt))
return NULL;
@@ -976,6 +982,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
tmp.pub.channel = channel;
+ tmp.pub.scan_width = scan_width;
tmp.pub.signal = signal;
tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
@@ -991,7 +998,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss_frame);
+EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
{
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index e1534baf..f0ebdcd 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1841,6 +1841,39 @@ TRACE_EVENT(rdev_crit_proto_stop,
WIPHY_PR_ARG, WDEV_PR_ARG)
);
+TRACE_EVENT(rdev_channel_switch,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_csa_settings *params),
+ TP_ARGS(wiphy, netdev, params),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ CHAN_DEF_ENTRY
+ __field(u16, counter_offset_beacon)
+ __field(u16, counter_offset_presp)
+ __field(bool, radar_required)
+ __field(bool, block_tx)
+ __field(u8, count)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ CHAN_DEF_ASSIGN(&params->chandef);
+ __entry->counter_offset_beacon = params->counter_offset_beacon;
+ __entry->counter_offset_presp = params->counter_offset_presp;
+ __entry->radar_required = params->radar_required;
+ __entry->block_tx = params->block_tx;
+ __entry->count = params->count;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
+ ", block_tx: %d, count: %u, radar_required: %d"
+ ", counter offsets (beacon/presp): %u/%u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
+ __entry->block_tx, __entry->count, __entry->radar_required,
+ __entry->counter_offset_beacon,
+ __entry->counter_offset_presp)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -2391,26 +2424,30 @@ TRACE_EVENT(cfg80211_get_bss,
__entry->capa_mask, __entry->capa_val)
);
-TRACE_EVENT(cfg80211_inform_bss_frame,
+TRACE_EVENT(cfg80211_inform_bss_width_frame,
TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
struct ieee80211_mgmt *mgmt, size_t len,
s32 signal),
- TP_ARGS(wiphy, channel, mgmt, len, signal),
+ TP_ARGS(wiphy, channel, scan_width, mgmt, len, signal),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_ENTRY
+ __field(enum nl80211_bss_scan_width, scan_width)
__dynamic_array(u8, mgmt, len)
__field(s32, signal)
),
TP_fast_assign(
WIPHY_ASSIGN;
CHAN_ASSIGN(channel);
+ __entry->scan_width = scan_width;
if (mgmt)
memcpy(__get_dynamic_array(mgmt), mgmt, len);
__entry->signal = signal;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "signal: %d",
- WIPHY_PR_ARG, CHAN_PR_ARG, __entry->signal)
+ TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d",
+ WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
+ __entry->signal)
);
DECLARE_EVENT_CLASS(cfg80211_bss_evt,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 74458b7..ce090c1 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -33,7 +33,8 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
}
EXPORT_SYMBOL(ieee80211_get_response_rate);
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
+ enum nl80211_bss_scan_width scan_width)
{
struct ieee80211_rate *bitrates;
u32 mandatory_rates = 0;
@@ -43,10 +44,15 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
if (WARN_ON(!sband))
return 1;
- if (sband->band == IEEE80211_BAND_2GHZ)
- mandatory_flag = IEEE80211_RATE_MANDATORY_B;
- else
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ if (scan_width == NL80211_BSS_CHAN_WIDTH_5 ||
+ scan_width == NL80211_BSS_CHAN_WIDTH_10)
+ mandatory_flag = IEEE80211_RATE_MANDATORY_G;
+ else
+ mandatory_flag = IEEE80211_RATE_MANDATORY_B;
+ } else {
mandatory_flag = IEEE80211_RATE_MANDATORY_A;
+ }
bitrates = sband->bitrates;
for (i = 0; i < sband->n_bitrates; i++)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index e52cab3..ad8cc7b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -308,7 +308,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
{
BUG_ON(!policy->walk.dead);
- if (del_timer(&policy->timer))
+ if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
BUG();
security_xfrm_policy_free(policy->security);
@@ -660,7 +660,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
xfrm_pol_hold(policy);
net->xfrm.policy_count[dir]++;
atomic_inc(&flow_cache_genid);
- rt_genid_bump(net);
+
+ /* The checks above guarantee family is either AF_INET or AF_INET6 */
+ if (policy->family == AF_INET)
+ rt_genid_bump_ipv4(net);
+ else
+ rt_genid_bump_ipv6(net);
+
if (delpol) {
xfrm_policy_requeue(delpol, policy);
__xfrm_policy_unlink(delpol, dir);
@@ -2126,8 +2132,6 @@ restart:
* have the xfrm_state's. We need to wait for KM to
* negotiate new SA's or bail out with error.*/
if (net->xfrm.sysctl_larval_drop) {
- /* EREMOTE tells the caller to generate
- * a one-shot blackhole route. */
dst_release(dst);
xfrm_pols_put(pols, drop_pols);
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 78f66fa..4f8ace8 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -499,7 +499,8 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
- tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
+ CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
setup_timer(&x->rtimer, xfrm_replay_timer_handler,
(unsigned long)x);
x->curlft.add_time = get_seconds();
@@ -990,11 +991,13 @@ void xfrm_state_insert(struct xfrm_state *x)
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
-static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
+static struct xfrm_state *__find_acq_core(struct net *net,
+ const struct xfrm_mark *m,
unsigned short family, u8 mode,
u32 reqid, u8 proto,
const xfrm_address_t *daddr,
- const xfrm_address_t *saddr, int create)
+ const xfrm_address_t *saddr,
+ int create)
{
unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
struct xfrm_state *x;
@@ -1399,9 +1402,9 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark,
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
-xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
- const xfrm_address_t *daddr, const xfrm_address_t *saddr,
- int create, unsigned short family)
+xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
+ u8 proto, const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr, int create, unsigned short family)
{
struct xfrm_state *x;