author      Scott Wood <scottwood@freescale.com>   2013-10-29 20:03:43 (GMT)
committer   Scott Wood <scottwood@freescale.com>   2013-10-29 20:03:43 (GMT)
commit      ae60d5d27c429b13cf28a09ab8b9d30682433c5a (patch)
tree        16b67511ef66b0580c267a5438d1face3a3778e6 /net/core
parent      b095c5c2577aeedce2db847fa117596628d4e7cb (diff)
parent      d0ebef8230e267ec47d4d4a65fe3262e2ebb8026 (diff)
download    linux-fsl-qoriq-ae60d5d27c429b13cf28a09ab8b9d30682433c5a.tar.xz
Revert to 3.8 (no rt, no stable)

This is a merge from rtmerge, which has been similarly reverted.

Conflicts:
	drivers/crypto/caam/caamalg.c
	drivers/misc/Makefile
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c             113
-rw-r--r--  net/core/dev_addr_lists.c    6
-rw-r--r--  net/core/dst.c               1
-rw-r--r--  net/core/flow.c              2
-rw-r--r--  net/core/rtnetlink.c         7
-rw-r--r--  net/core/scm.c               4
-rw-r--r--  net/core/skbuff.c            6
-rw-r--r--  net/core/sock.c             11
-rw-r--r--  net/core/sock_diag.c         3
9 files changed, 45 insertions(+), 108 deletions(-)
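
The largest part of this revert is in dev.c: it drops the RT tree's devnet_rename_mutex and restores mainline's devnet_rename_seq seqcount, so readers of dev->name spin-retry instead of blocking a rename in progress. A minimal sketch of the restored read-side pattern, as it appears in dev_ifname() and sock_getbindtodevice() below (the helper name and buffer handling are illustrative, not the kernel's exact code):

#include <linux/seqlock.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/netdevice.h>

extern seqcount_t devnet_rename_seq;	/* write side bracketed in dev_change_name() */

/* Copy dev->name without blocking renames: snapshot the seqcount,
 * do the unlocked copy under RCU, and retry if a rename raced with us.
 */
static void read_dev_name(char *buf, const struct net_device *dev)
{
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&devnet_rename_seq);
		rcu_read_lock();
		strcpy(buf, dev->name);		/* may observe a torn rename... */
		rcu_read_unlock();
	} while (read_seqcount_retry(&devnet_rename_seq, seq));	/* ...so retry */
}
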
diff --git a/net/core/dev.c b/net/core/dev.c
index 67c8074..147a682 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -203,7 +203,7 @@ static struct list_head offload_base __read_mostly;
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
-DEFINE_MUTEX(devnet_rename_mutex);
+seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
@@ -225,14 +225,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ spin_lock(&sd->input_pkt_queue.lock);
#endif
}
static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
@@ -1093,11 +1093,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
if (dev->flags & IFF_UP)
return -EBUSY;
-
- mutex_lock(&devnet_rename_mutex);
+ write_seqcount_begin(&devnet_rename_seq);
if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
- mutex_unlock(&devnet_rename_mutex);
+ write_seqcount_end(&devnet_rename_seq);
return 0;
}
@@ -1105,7 +1104,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
err = dev_get_valid_name(net, dev, newname);
if (err < 0) {
- mutex_unlock(&devnet_rename_mutex);
+ write_seqcount_end(&devnet_rename_seq);
return err;
}
@@ -1113,11 +1112,11 @@ rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
- mutex_unlock(&devnet_rename_mutex);
+ write_seqcount_end(&devnet_rename_seq);
return ret;
}
- mutex_unlock(&devnet_rename_mutex);
+ write_seqcount_end(&devnet_rename_seq);
write_lock_bh(&dev_base_lock);
hlist_del_rcu(&dev->name_hlist);
@@ -1136,7 +1135,7 @@ rollback:
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
- mutex_lock(&devnet_rename_mutex);
+ write_seqcount_begin(&devnet_rename_seq);
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
} else {
@@ -1592,6 +1591,7 @@ void net_enable_timestamp(void)
return;
}
#endif
+ WARN_ON(in_interrupt());
static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);
@@ -1738,7 +1738,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb->mark = 0;
secpath_reset(skb);
nf_reset(skb);
- nf_reset_trace(skb);
return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1947,7 +1946,6 @@ static inline void __netif_reschedule(struct Qdisc *q)
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
void __netif_schedule(struct Qdisc *q)
@@ -1969,7 +1967,6 @@ void dev_kfree_skb_irq(struct sk_buff *skb)
sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);
@@ -2021,9 +2018,6 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
struct net_device *dev = skb->dev;
const char *driver = "";
- if (!net_ratelimit())
- return;
-
if (dev && dev->dev.parent)
driver = dev_driver_string(dev->dev.parent);
@@ -3084,7 +3078,6 @@ enqueue:
rps_unlock(sd);
local_irq_restore(flags);
- preempt_check_resched_rt();
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
@@ -3122,7 +3115,7 @@ int netif_rx(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
- migrate_disable();
+ preempt_disable();
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -3132,13 +3125,13 @@ int netif_rx(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
- migrate_enable();
+ preempt_enable();
} else
#endif
{
unsigned int qtail;
- ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
- put_cpu_light();
+ ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+ put_cpu();
}
return ret;
}
@@ -3148,44 +3141,16 @@ int netif_rx_ni(struct sk_buff *skb)
{
int err;
- local_bh_disable();
+ preempt_disable();
err = netif_rx(skb);
- local_bh_enable();
+ if (local_softirq_pending())
+ do_softirq();
+ preempt_enable();
return err;
}
EXPORT_SYMBOL(netif_rx_ni);
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * RT runs ksoftirqd as a real time thread and the root_lock is a
- * "sleeping spinlock". If the trylock fails then we can go into an
- * infinite loop when ksoftirqd preempted the task which actually
- * holds the lock, because we requeue q and raise NET_TX softirq
- * causing ksoftirqd to loop forever.
- *
- * It's safe to use spin_lock on RT here as softirqs run in thread
- * context and cannot deadlock against the thread which is holding
- * root_lock.
- *
- * On !RT the trylock might fail, but there we bail out from the
- * softirq loop after 10 attempts which we can't do on RT. And the
- * task holding root_lock cannot be preempted, so the only downside of
- * that trylock is that we need 10 loops to decide that we should have
- * given up in the first one :)
- */
-static inline int take_root_lock(spinlock_t *lock)
-{
- spin_lock(lock);
- return 1;
-}
-#else
-static inline int take_root_lock(spinlock_t *lock)
-{
- return spin_trylock(lock);
-}
-#endif
-
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -3224,7 +3189,7 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
root_lock = qdisc_lock(q);
- if (take_root_lock(root_lock)) {
+ if (spin_trylock(root_lock)) {
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
@@ -3338,7 +3303,6 @@ int netdev_rx_handler_register(struct net_device *dev,
if (dev->rx_handler)
return -EBUSY;
- /* Note: rx_handler_data must be set before rx_handler */
rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
rcu_assign_pointer(dev->rx_handler, rx_handler);
@@ -3359,11 +3323,6 @@ void netdev_rx_handler_unregister(struct net_device *dev)
ASSERT_RTNL();
RCU_INIT_POINTER(dev->rx_handler, NULL);
- /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
- * section has a guarantee to see a non NULL rx_handler_data
- * as well.
- */
- synchronize_net();
RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
@@ -3486,7 +3445,6 @@ ncls:
}
switch (rx_handler(&skb)) {
case RX_HANDLER_CONSUMED:
- ret = NET_RX_SUCCESS;
goto unlock;
case RX_HANDLER_ANOTHER:
goto another_round;
@@ -3595,7 +3553,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
- __skb_queue_tail(&sd->tofree_queue, skb);
+ kfree_skb(skb);
input_queue_head_incr(sd);
}
}
@@ -3604,13 +3562,10 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
- __skb_queue_tail(&sd->tofree_queue, skb);
+ kfree_skb(skb);
input_queue_head_incr(sd);
}
}
-
- if (!skb_queue_empty(&sd->tofree_queue))
- raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
static int napi_gro_complete(struct sk_buff *skb)
@@ -3969,7 +3924,6 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
} else
#endif
local_irq_enable();
- preempt_check_resched_rt();
}
static int process_backlog(struct napi_struct *napi, int quota)
@@ -4042,7 +3996,6 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
EXPORT_SYMBOL(__napi_schedule);
@@ -4117,17 +4070,10 @@ static void net_rx_action(struct softirq_action *h)
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
- struct sk_buff *skb;
void *have;
local_irq_disable();
- while ((skb = __skb_dequeue(&sd->tofree_queue))) {
- local_irq_enable();
- kfree_skb(skb);
- local_irq_disable();
- }
-
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
@@ -4250,6 +4196,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
struct net_device *dev;
struct ifreq ifr;
+ unsigned seq;
/*
* Fetch the caller's info block.
@@ -4258,18 +4205,19 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
- mutex_lock(&devnet_rename_mutex);
+retry:
+ seq = read_seqcount_begin(&devnet_rename_seq);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
if (!dev) {
rcu_read_unlock();
- mutex_unlock(&devnet_rename_mutex);
return -ENODEV;
}
strcpy(ifr.ifr_name, dev->name);
rcu_read_unlock();
- mutex_unlock(&devnet_rename_mutex);
+ if (read_seqcount_retry(&devnet_rename_seq, seq))
+ goto retry;
if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
return -EFAULT;
@@ -6597,7 +6545,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
- preempt_check_resched_rt();
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
@@ -6608,9 +6555,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
netif_rx(skb);
input_queue_head_incr(oldsd);
}
- while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
- kfree_skb(skb);
- }
return NOTIFY_OK;
}
@@ -6883,9 +6827,8 @@ static int __init net_dev_init(void)
struct softnet_data *sd = &per_cpu(softnet_data, i);
memset(sd, 0, sizeof(*sd));
- skb_queue_head_init_raw(&sd->input_pkt_queue);
- skb_queue_head_init_raw(&sd->process_queue);
- skb_queue_head_init_raw(&sd->tofree_queue);
+ skb_queue_head_init(&sd->input_pkt_queue);
+ skb_queue_head_init(&sd->process_queue);
sd->completion_queue = NULL;
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue = NULL;
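
Before the dev_addr_lists.c hunks: the netif_rx() changes above swap the RT tree's migrate_disable()/get_cpu_light() pair back to mainline's preempt_disable()/get_cpu(). A minimal sketch of the restored mainline shape, assuming CONFIG_RPS (get_rps_cpu() and enqueue_to_backlog() are the kernel's own helpers in dev.c; the standalone function name here is illustrative):

int netif_rx_sketch(struct sk_buff *skb)
{
	int ret;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();	/* pin this CPU while we steer the packet */
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);	/* get_cpu() disables preemption */
		put_cpu();
	}
	return ret;
}
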
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 7841d87..b079c7b 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -38,7 +38,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
ha->type = addr_type;
ha->refcount = 1;
ha->global_use = global;
- ha->synced = 0;
+ ha->synced = false;
list_add_tail_rcu(&ha->list, &list->list);
list->count++;
@@ -166,7 +166,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
addr_len, ha->type);
if (err)
break;
- ha->synced++;
+ ha->synced = true;
ha->refcount++;
} else if (ha->refcount == 1) {
__hw_addr_del(to_list, ha->addr, addr_len, ha->type);
@@ -187,7 +187,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
if (ha->synced) {
__hw_addr_del(to_list, ha->addr,
addr_len, ha->type);
- ha->synced--;
+ ha->synced = false;
__hw_addr_del(from_list, ha->addr,
addr_len, ha->type);
}
diff --git a/net/core/dst.c b/net/core/dst.c
index 35fd12f..ee6153e 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -179,7 +179,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst_init_metrics(dst, dst_default_metrics, true);
dst->expires = 0UL;
dst->path = dst;
- dst->from = NULL;
#ifdef CONFIG_XFRM
dst->xfrm = NULL;
#endif
diff --git a/net/core/flow.c b/net/core/flow.c
index 3bad824..b0901ee 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -329,7 +329,7 @@ static void flow_cache_flush_per_cpu(void *data)
struct flow_flush_info *info = data;
struct tasklet_struct *tasklet;
- tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
+ tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
tasklet->data = (unsigned long)info;
tasklet_schedule(tasklet);
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 055fb13..1868625 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -976,7 +976,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
* report anything.
*/
ivi.spoofchk = -1;
- memset(ivi.mac, 0, sizeof(ivi.mac));
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
break;
vf_mac.vf =
@@ -1068,7 +1067,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
cb->seq = net->dev_base_seq;
- if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
@@ -1924,7 +1923,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
u32 ext_filter_mask = 0;
u16 min_ifinfo_dump_size = 0;
- if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
@@ -2539,7 +2538,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
while (RTA_OK(attr, attrlen)) {
- unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
+ unsigned int flavor = attr->rta_type;
if (flavor) {
if (flavor > rta_max[sz_idx])
return -EINVAL;
diff --git a/net/core/scm.c b/net/core/scm.c
index 2dc6cda..905dcc6 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/security.h>
-#include <linux/pid_namespace.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
@@ -53,8 +52,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
if (!uid_valid(uid) || !gid_valid(gid))
return -EINVAL;
- if ((creds->pid == task_tgid_vnr(current) ||
- ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
+ if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) &&
((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1a9dda5..fe1a598 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -60,7 +60,6 @@
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
-#include <linux/locallock.h>
#include <net/protocol.h>
#include <net/dst.h>
@@ -348,7 +347,6 @@ struct netdev_alloc_cache {
unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
-static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
@@ -361,7 +359,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
int order;
unsigned long flags;
- local_lock_irqsave(netdev_alloc_lock, flags);
+ local_irq_save(flags);
nc = &__get_cpu_var(netdev_alloc_cache);
if (unlikely(!nc->frag.page)) {
refill:
@@ -395,7 +393,7 @@ recycle:
nc->frag.offset += fragsz;
nc->pagecnt_bias--;
end:
- local_unlock_irqrestore(netdev_alloc_lock, flags);
+ local_irq_restore(flags);
return data;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 2754c99..bc131d4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -571,6 +571,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
struct net *net = sock_net(sk);
struct net_device *dev;
char devname[IFNAMSIZ];
+ unsigned seq;
if (sk->sk_bound_dev_if == 0) {
len = 0;
@@ -581,19 +582,20 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
if (len < IFNAMSIZ)
goto out;
- mutex_lock(&devnet_rename_mutex);
+retry:
+ seq = read_seqcount_begin(&devnet_rename_seq);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
ret = -ENODEV;
if (!dev) {
rcu_read_unlock();
- mutex_unlock(&devnet_rename_mutex);
goto out;
}
strcpy(devname, dev->name);
rcu_read_unlock();
- mutex_unlock(&devnet_rename_mutex);
+ if (read_seqcount_retry(&devnet_rename_seq, seq))
+ goto retry;
len = strlen(devname) + 1;
@@ -2285,11 +2287,12 @@ void lock_sock_nested(struct sock *sk, int subclass)
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
- spin_unlock_bh(&sk->sk_lock.slock);
+ spin_unlock(&sk->sk_lock.slock);
/*
* The sk_lock has mutex_lock() semantics here:
*/
mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+ local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 750f44f..602cd63 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -121,9 +121,6 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (nlmsg_len(nlh) < sizeof(*req))
return -EINVAL;
- if (req->sdiag_family >= AF_MAX)
- return -EINVAL;
-
hndl = sock_diag_lock_handler(req->sdiag_family);
if (hndl == NULL)
err = -ENOENT;