author    Scott Wood <scottwood@freescale.com>  2014-05-14 18:19:12 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-05-14 18:37:18 (GMT)
commit    86ba38e6f5f2fbfe9b49e153ea89593b26482019 (patch)
tree      f99d2906b0eafca507f37289e68052fc105cc2dc /net/core
parent    07c8b57b111585a617b2b456497fc9b33c00743c (diff)
download  linux-fsl-qoriq-86ba38e6f5f2fbfe9b49e153ea89593b26482019.tar.xz
Reset to 3.12.19
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c        112
-rw-r--r--  net/core/netpoll.c      2
-rw-r--r--  net/core/rtnetlink.c   10
-rw-r--r--  net/core/skbuff.c       6
-rw-r--r--  net/core/sock.c         8
5 files changed, 44 insertions(+), 94 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index ab4df3d..b327975 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -175,7 +175,6 @@ static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
-static DEFINE_MUTEX(devnet_rename_mutex);
static inline void dev_base_seq_inc(struct net *net)
{
@@ -197,14 +196,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ spin_lock(&sd->input_pkt_queue.lock);
#endif
}
static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
@@ -827,8 +826,7 @@ retry:
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
- mutex_lock(&devnet_rename_mutex);
- mutex_unlock(&devnet_rename_mutex);
+ cond_resched();
goto retry;
}
@@ -1094,28 +1092,30 @@ int dev_change_name(struct net_device *dev, const char *newname)
if (dev->flags & IFF_UP)
return -EBUSY;
- mutex_lock(&devnet_rename_mutex);
- __write_seqcount_begin(&devnet_rename_seq);
+ write_seqcount_begin(&devnet_rename_seq);
- if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
- goto outunlock;
+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
+ write_seqcount_end(&devnet_rename_seq);
+ return 0;
+ }
memcpy(oldname, dev->name, IFNAMSIZ);
err = dev_get_valid_name(net, dev, newname);
- if (err < 0)
- goto outunlock;
+ if (err < 0) {
+ write_seqcount_end(&devnet_rename_seq);
+ return err;
+ }
rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
- err = ret;
- goto outunlock;
+ write_seqcount_end(&devnet_rename_seq);
+ return ret;
}
- __write_seqcount_end(&devnet_rename_seq);
- mutex_unlock(&devnet_rename_mutex);
+ write_seqcount_end(&devnet_rename_seq);
write_lock_bh(&dev_base_lock);
hlist_del_rcu(&dev->name_hlist);
@@ -1134,8 +1134,7 @@ rollback:
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
- mutex_lock(&devnet_rename_mutex);
- __write_seqcount_begin(&devnet_rename_seq);
+ write_seqcount_begin(&devnet_rename_seq);
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
} else {
@@ -1145,11 +1144,6 @@ rollback:
}
return err;
-
-outunlock:
- __write_seqcount_end(&devnet_rename_seq);
- mutex_unlock(&devnet_rename_mutex);
- return err;
}
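
The two rename hunks above drop the RT tree's devnet_rename_mutex and go back to the plain seqcount protocol: dev_change_name() brackets each update with write_seqcount_begin()/write_seqcount_end(), while readers in netdev_get_name() copy the name speculatively and retry (after cond_resched()) if the count moved. Below is a minimal userspace sketch of that protocol, with C11 atomics standing in for the kernel's seqcount_t; the helper names are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

static atomic_uint rename_seq;             /* stands in for devnet_rename_seq */
static char dev_name[16] = "eth0";

static unsigned seq_read_begin(void)
{
    unsigned s;
    /* an odd count means a writer is mid-update; the kernel reader
     * calls cond_resched() here rather than spinning */
    while ((s = atomic_load_explicit(&rename_seq, memory_order_acquire)) & 1)
        ;
    return s;
}

static int seq_read_retry(unsigned s)
{
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(&rename_seq, memory_order_relaxed) != s;
}

static void rename_dev(const char *newname)
{
    atomic_fetch_add_explicit(&rename_seq, 1, memory_order_release); /* odd: begin */
    strncpy(dev_name, newname, sizeof(dev_name) - 1);
    atomic_fetch_add_explicit(&rename_seq, 1, memory_order_release); /* even: end */
}

int main(void)
{
    char name[16];
    unsigned s;

    rename_dev("eth1");
    do {
        s = seq_read_begin();
        strcpy(name, dev_name);            /* speculative copy, may be torn */
    } while (seq_read_retry(s));           /* raced with a writer? try again */

    printf("%s\n", name);
    return 0;
}

The odd/even trick is the whole mechanism: readers never block writers, and any count change between begin and retry invalidates the copy.
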
/**
@@ -2138,7 +2132,6 @@ static inline void __netif_reschedule(struct Qdisc *q)
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
void __netif_schedule(struct Qdisc *q)
@@ -2160,7 +2153,6 @@ void dev_kfree_skb_irq(struct sk_buff *skb)
sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);
@@ -3212,7 +3204,6 @@ enqueue:
rps_unlock(sd);
local_irq_restore(flags);
- preempt_check_resched_rt();
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
@@ -3250,7 +3241,7 @@ int netif_rx(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
- migrate_disable();
+ preempt_disable();
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -3260,13 +3251,13 @@ int netif_rx(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
- migrate_enable();
+ preempt_enable();
} else
#endif
{
unsigned int qtail;
- ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
- put_cpu_light();
+ ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+ put_cpu();
}
return ret;
}
@@ -3276,44 +3267,16 @@ int netif_rx_ni(struct sk_buff *skb)
{
int err;
- local_bh_disable();
+ preempt_disable();
err = netif_rx(skb);
- local_bh_enable();
+ if (local_softirq_pending())
+ do_softirq();
+ preempt_enable();
return err;
}
EXPORT_SYMBOL(netif_rx_ni);
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * RT runs ksoftirqd as a real time thread and the root_lock is a
- * "sleeping spinlock". If the trylock fails then we can go into an
- * infinite loop when ksoftirqd preempted the task which actually
- * holds the lock, because we requeue q and raise NET_TX softirq
- * causing ksoftirqd to loop forever.
- *
- * It's safe to use spin_lock on RT here as softirqs run in thread
- * context and cannot deadlock against the thread which is holding
- * root_lock.
- *
- * On !RT the trylock might fail, but there we bail out from the
- * softirq loop after 10 attempts which we can't do on RT. And the
- * task holding root_lock cannot be preempted, so the only downside of
- * that trylock is that we need 10 loops to decide that we should have
- * given up in the first one :)
- */
-static inline int take_root_lock(spinlock_t *lock)
-{
- spin_lock(lock);
- return 1;
-}
-#else
-static inline int take_root_lock(spinlock_t *lock)
-{
- return spin_trylock(lock);
-}
-#endif
-
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -3352,7 +3315,7 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
root_lock = qdisc_lock(q);
- if (take_root_lock(root_lock)) {
+ if (spin_trylock(root_lock)) {
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
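
The hunk above restores plain spin_trylock() in net_tx_action() and deletes take_root_lock(), whose comment explains the RT hazard: with a sleeping root_lock, a failed trylock from ksoftirqd could loop forever against a preempted lock holder. The vanilla trylock-or-requeue shape is sketched below with a pthread spinlock standing in for qdisc_lock(q); all names and messages are illustrative only.

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t root_lock;        /* stands in for qdisc_lock(q) */

static void tx_action(void)
{
    if (pthread_spin_trylock(&root_lock) == 0) {
        puts("uncontended: run the qdisc now");
        pthread_spin_unlock(&root_lock);
    } else {
        /* kernel: leave __QDISC_STATE_SCHED set, requeue q and raise
         * NET_TX_SOFTIRQ so the attempt repeats later */
        puts("contended: retry from the next softirq");
    }
}

int main(void)
{
    pthread_spin_init(&root_lock, PTHREAD_PROCESS_PRIVATE);

    pthread_spin_lock(&root_lock);          /* simulate another CPU holding it */
    tx_action();                            /* contended path */
    pthread_spin_unlock(&root_lock);

    tx_action();                            /* fast path */
    pthread_spin_destroy(&root_lock);
    return 0;
}
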
@@ -3743,7 +3706,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
- __skb_queue_tail(&sd->tofree_queue, skb);
+ kfree_skb(skb);
input_queue_head_incr(sd);
}
}
@@ -3752,13 +3715,10 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
- __skb_queue_tail(&sd->tofree_queue, skb);
+ kfree_skb(skb);
input_queue_head_incr(sd);
}
}
-
- if (!skb_queue_empty(&sd->tofree_queue))
- raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
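
flush_backlog() goes back to freeing matching skbs inline with kfree_skb(); the RT tree instead unlinked them onto a per-CPU tofree_queue and raised NET_RX_SOFTIRQ to drain it later, because freeing under the raw queue lock is not RT-safe. A toy version of that unlink-then-drain pattern follows, with malloc/free and a singly linked list standing in for skbs and sk_buff_head; everything here is illustrative.

#include <stdio.h>
#include <stdlib.h>

struct skb { struct skb *next; int dev; };

/* RT-style deferred flush: unlink matching entries onto a tofree list
 * while "holding the queue lock", free them afterwards.  Mainline
 * simply calls free() at the unlink site instead. */
static struct skb *unlink_for_dev(struct skb **queue, int dev)
{
    struct skb *tofree = NULL, **pp = queue;

    while (*pp) {
        if ((*pp)->dev == dev) {
            struct skb *skb = *pp;
            *pp = skb->next;               /* __skb_unlink() */
            skb->next = tofree;            /* __skb_queue_tail(&tofree_queue) */
            tofree = skb;
        } else {
            pp = &(*pp)->next;
        }
    }
    return tofree;
}

static void free_list(struct skb *head)
{
    while (head) {
        struct skb *next = head->next;
        free(head);                        /* kfree_skb() */
        head = next;
    }
}

int main(void)
{
    struct skb *queue = NULL;

    for (int i = 0; i < 4; i++) {
        struct skb *skb = malloc(sizeof(*skb));
        skb->dev = i % 2;                  /* packets from two fake devices */
        skb->next = queue;
        queue = skb;
    }

    free_list(unlink_for_dev(&queue, 1));  /* "flush backlog" for dev 1 */
    free_list(queue);                      /* cleanup */
    puts("flushed");
    return 0;
}
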
static int napi_gro_complete(struct sk_buff *skb)
@@ -4115,7 +4075,6 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
} else
#endif
local_irq_enable();
- preempt_check_resched_rt();
}
static int process_backlog(struct napi_struct *napi, int quota)
@@ -4188,7 +4147,6 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
EXPORT_SYMBOL(__napi_schedule);
@@ -4318,17 +4276,10 @@ static void net_rx_action(struct softirq_action *h)
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
- struct sk_buff *skb;
void *have;
local_irq_disable();
- while ((skb = __skb_dequeue(&sd->tofree_queue))) {
- local_irq_enable();
- kfree_skb(skb);
- local_irq_disable();
- }
-
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
@@ -6447,7 +6398,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
- preempt_check_resched_rt();
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
@@ -6458,9 +6408,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
netif_rx(skb);
input_queue_head_incr(oldsd);
}
- while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
- kfree_skb(skb);
- }
return NOTIFY_OK;
}
@@ -6772,9 +6719,8 @@ static int __init net_dev_init(void)
struct softnet_data *sd = &per_cpu(softnet_data, i);
memset(sd, 0, sizeof(*sd));
- skb_queue_head_init_raw(&sd->input_pkt_queue);
- skb_queue_head_init_raw(&sd->process_queue);
- skb_queue_head_init_raw(&sd->tofree_queue);
+ skb_queue_head_init(&sd->input_pkt_queue);
+ skb_queue_head_init(&sd->process_queue);
sd->completion_queue = NULL;
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue = NULL;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 462cdc9..9b40f23 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -740,7 +740,7 @@ static bool pkt_is_ns(struct sk_buff *skb)
struct nd_msg *msg;
struct ipv6hdr *hdr;
- if (skb->protocol != htons(ETH_P_ARP))
+ if (skb->protocol != htons(ETH_P_IPV6))
return false;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
return false;
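
The netpoll hunk is not an RT revert; it appears to be the 3.12-stable fix for pkt_is_ns(), which is meant to recognize IPv6 neighbour solicitations but tested skb->protocol against ETH_P_ARP, a value an IPv6 frame can never carry. A small check of the corrected comparison, using the uapi constants (protocol values travel in network byte order):

#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <stdio.h>

/* mirrors the fixed test in pkt_is_ns(): only IPv6 frames qualify */
static int is_ipv6_frame(unsigned short protocol)   /* network byte order */
{
    return protocol == htons(ETH_P_IPV6);
}

int main(void)
{
    printf("IPv6 frame matches: %d\n", is_ipv6_frame(htons(ETH_P_IPV6)));
    printf("ARP frame matches:  %d\n", is_ipv6_frame(htons(ETH_P_ARP)));
    return 0;
}
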
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2a0e21d..37b492e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2014,12 +2014,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
struct net_device *dev,
u8 *addr, u32 pid, u32 seq,
- int type, unsigned int flags)
+ int type, unsigned int flags,
+ int nlflags)
{
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
if (!nlh)
return -EMSGSIZE;
@@ -2057,7 +2058,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
if (!skb)
goto errout;
- err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
+ err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
@@ -2282,7 +2283,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
portid, seq,
- RTM_NEWNEIGH, NTF_SELF);
+ RTM_NEWNEIGH, NTF_SELF,
+ NLM_F_MULTI);
if (err < 0)
return err;
skip:
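
The rtnetlink change likewise looks like a 3.12-stable fix rather than an RT revert: nlmsg_populate_fdb_fill() gains an nlflags argument so that NLM_F_MULTI is set only on the parts of a multipart dump (which userspace reads until NLMSG_DONE), not on a single rtnl_fdb_notify() notification, where the flag would leave listeners waiting for a terminator that never arrives. A sketch of that distinction with the uapi headers:

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <stdio.h>
#include <string.h>

static void fill_hdr(struct nlmsghdr *nlh, int type, int nlflags)
{
    memset(nlh, 0, sizeof(*nlh));
    nlh->nlmsg_len   = NLMSG_LENGTH(0);    /* header only, no payload here */
    nlh->nlmsg_type  = type;
    nlh->nlmsg_flags = nlflags;
}

int main(void)
{
    struct nlmsghdr dump_part, notify;

    /* one message of a dump reply: more follow, ending in NLMSG_DONE */
    fill_hdr(&dump_part, RTM_NEWNEIGH, NLM_F_MULTI);

    /* standalone notification: must not claim to be multipart */
    fill_hdr(&notify, RTM_NEWNEIGH, 0);

    printf("dump part flags %#x, notification flags %#x\n",
           dump_part.nlmsg_flags, notify.nlmsg_flags);
    return 0;
}
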
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da24627..21571dc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -62,7 +62,6 @@
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
-#include <linux/locallock.h>
#include <net/protocol.h>
#include <net/dst.h>
@@ -335,7 +334,6 @@ struct netdev_alloc_cache {
unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
-static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
@@ -344,7 +342,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
int order;
unsigned long flags;
- local_lock_irqsave(netdev_alloc_lock, flags);
+ local_irq_save(flags);
nc = &__get_cpu_var(netdev_alloc_cache);
if (unlikely(!nc->frag.page)) {
refill:
@@ -378,7 +376,7 @@ recycle:
nc->frag.offset += fragsz;
nc->pagecnt_bias--;
end:
- local_unlock_irqrestore(netdev_alloc_lock, flags);
+ local_irq_restore(flags);
return data;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index e57770c..ec228a3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2339,11 +2339,12 @@ void lock_sock_nested(struct sock *sk, int subclass)
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
- spin_unlock_bh(&sk->sk_lock.slock);
+ spin_unlock(&sk->sk_lock.slock);
/*
* The sk_lock has mutex_lock() semantics here:
*/
mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+ local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);
@@ -2358,10 +2359,13 @@ void release_sock(struct sock *sk)
if (sk->sk_backlog.tail)
__release_sock(sk);
+ /* Warning : release_cb() might need to release sk ownership,
+ * ie call sock_release_ownership(sk) before us.
+ */
if (sk->sk_prot->release_cb)
sk->sk_prot->release_cb(sk);
- sk->sk_lock.owned = 0;
+ sock_release_ownership(sk);
if (waitqueue_active(&sk->sk_lock.wq))
wake_up(&sk->sk_lock.wq);
spin_unlock_bh(&sk->sk_lock.slock);
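
The sock.c hunks restore the vanilla lock_sock()/release_sock() scheme: a briefly held spinlock (sk_lock.slock) guards an owned flag that acts as the real long-term lock, and release_cb() runs while the spinlock is held, so it must be able to drop ownership itself, which is why the code now uses sock_release_ownership() rather than a bare store. A userspace analogue of that ownership scheme is sketched below; pthread primitives stand in for slock and the wait queue, and the BH-disable ordering the hunk also touches has no userspace equivalent.

#include <pthread.h>
#include <stdio.h>

struct sock_lock {
    pthread_mutex_t slock;   /* stands in for sk_lock.slock */
    pthread_cond_t  wq;      /* stands in for sk_lock.wq */
    int             owned;   /* stands in for sk_lock.owned */
};

static void lock_sock(struct sock_lock *sk)
{
    pthread_mutex_lock(&sk->slock);
    while (sk->owned)                     /* kernel: __lock_sock() sleeps here */
        pthread_cond_wait(&sk->wq, &sk->slock);
    sk->owned = 1;
    pthread_mutex_unlock(&sk->slock);     /* spinlock dropped, ownership kept */
}

static void release_sock(struct sock_lock *sk)
{
    pthread_mutex_lock(&sk->slock);
    /* kernel: sk_prot->release_cb() runs here and may need to drop
     * ownership itself via sock_release_ownership(sk) */
    sk->owned = 0;                        /* sock_release_ownership(sk) */
    pthread_cond_signal(&sk->wq);         /* wake_up(&sk->sk_lock.wq) */
    pthread_mutex_unlock(&sk->slock);
}

int main(void)
{
    struct sock_lock sk = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

    lock_sock(&sk);
    puts("socket owned");
    release_sock(&sk);
    puts("socket released");
    return 0;
}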