author     stephen hemminger <stephen@networkplumber.org>  2013-07-18 15:40:15 (GMT)
committer  David S. Miller <davem@davemloft.net>  2013-07-20 00:07:25 (GMT)
commit     3fc2de2faba387218bdf9dbc6b13f513ac3b060a (patch)
tree       5f87a0e7419248212efa433802b3a5cc39583dcb /drivers/net
parent     372675a4a9ac0a0af962d44dadeea69926ce45e0 (diff)
download   linux-fsl-qoriq-3fc2de2faba387218bdf9dbc6b13f513ac3b060a.tar.xz
vxlan: fix igmp races
There are two race conditions in the existing code for doing IGMP management in a workqueue in vxlan. First, the vxlan_group_used function checks the list of vxlans without any protection. Second, it is possible for an open followed by a close to occur before the IGMP work queue runs. To solve these, move the check into vxlan_open/stop so it is protected by RTNL, and split the work into two work structures so that there is no racy reference to underlying device state.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
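The shape of the fix is a common kernel pattern: perform the "am I the first/last user of this multicast group" check synchronously in ndo_open/ndo_stop, where RTNL is already held, and defer only the sleeping ip_mc_join_group()/ip_mc_leave_group() calls to dedicated, single-purpose work items. The condensed sketch below is illustrative only, not the actual driver code; names such as my_dev, my_wq and the stubbed my_group_used() are hypothetical, and it shows just the open/join half of the pattern.

    /* Condensed, illustrative sketch of the join half of the pattern. */
    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    struct my_dev {
            struct net_device *dev;
            struct work_struct igmp_join;   /* queued when first user comes up */
            struct work_struct igmp_leave;  /* queued when last user goes down */
    };

    static struct workqueue_struct *my_wq; /* created at module init */

    /* Walk the per-net device list; callers hold RTNL, so no extra locking. */
    static bool my_group_used(struct my_dev *md)
    {
            return false;                   /* stub for the real list walk */
    }

    static void my_igmp_join(struct work_struct *work)
    {
            struct my_dev *md = container_of(work, struct my_dev, igmp_join);

            /* ip_mc_join_group() would run here; it can sleep and takes
             * RTNL itself, which is why it is deferred to a workqueue. */
            dev_put(md->dev);               /* drop the ref taken in ndo_open */
    }

    static int my_open(struct net_device *dev)
    {
            struct my_dev *md = netdev_priv(dev);

            /* RTNL is held in ndo_open, so this check cannot race with
             * another device in the same group going up or down. */
            if (!my_group_used(md)) {
                    dev_hold(dev);          /* keep dev alive until the work runs */
                    queue_work(my_wq, &md->igmp_join);
            }
            return 0;
    }

The leave half is symmetric: ndo_stop performs the same RTNL-protected check and queues igmp_leave, so a device that is closed immediately after being opened can never be touched by a stale, shared work item. The actual patch follows below.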
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/vxlan.c  53
1 file changed, 38 insertions, 15 deletions
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f101034..f4c6db4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -136,7 +136,8 @@ struct vxlan_dev {
u32 flags; /* VXLAN_F_* below */
struct work_struct sock_work;
- struct work_struct igmp_work;
+ struct work_struct igmp_join;
+ struct work_struct igmp_leave;
unsigned long age_interval;
struct timer_list age_timer;
@@ -736,7 +737,6 @@ static bool vxlan_snoop(struct net_device *dev,
return false;
}
-
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
{
@@ -770,12 +770,13 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
queue_work(vxlan_wq, &vs->del_work);
}
-/* Callback to update multicast group membership.
- * Scheduled when vxlan goes up/down.
+/* Callback to update multicast group membership when first VNI on
+ * multicast address is brought up
+ * Done as workqueue because ip_mc_join_group acquires RTNL.
*/
-static void vxlan_igmp_work(struct work_struct *work)
+static void vxlan_igmp_join(struct work_struct *work)
{
- struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work);
+ struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vs->sock->sk;
@@ -785,10 +786,27 @@ static void vxlan_igmp_work(struct work_struct *work)
};
lock_sock(sk);
- if (vxlan_group_used(vn, vxlan->default_dst.remote_ip))
- ip_mc_join_group(sk, &mreq);
- else
- ip_mc_leave_group(sk, &mreq);
+ ip_mc_join_group(sk, &mreq);
+ release_sock(sk);
+
+ vxlan_sock_release(vn, vs);
+ dev_put(vxlan->dev);
+}
+
+/* Inverse of vxlan_igmp_join when last VNI is brought down */
+static void vxlan_igmp_leave(struct work_struct *work)
+{
+ struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
+ struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
+ struct vxlan_sock *vs = vxlan->vn_sock;
+ struct sock *sk = vs->sock->sk;
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
+ .imr_ifindex = vxlan->default_dst.remote_ifindex,
+ };
+
+ lock_sock(sk);
+ ip_mc_leave_group(sk, &mreq);
release_sock(sk);
vxlan_sock_release(vn, vs);
@@ -1359,6 +1377,7 @@ static void vxlan_uninit(struct net_device *dev)
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
@@ -1366,10 +1385,11 @@ static int vxlan_open(struct net_device *dev)
if (!vs)
return -ENOTCONN;
- if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
+ if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
+ ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
- queue_work(vxlan_wq, &vxlan->igmp_work);
+ queue_work(vxlan_wq, &vxlan->igmp_join);
}
if (vxlan->age_interval)
@@ -1400,13 +1420,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
- if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
+ if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
+ ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
- queue_work(vxlan_wq, &vxlan->igmp_work);
+ queue_work(vxlan_wq, &vxlan->igmp_leave);
}
del_timer_sync(&vxlan->age_timer);
@@ -1471,7 +1493,8 @@ static void vxlan_setup(struct net_device *dev)
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
- INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work);
+ INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
+ INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
init_timer_deferrable(&vxlan->age_timer);