author      Ingo Molnar <mingo@elte.hu>    2010-05-07 09:30:29 (GMT)
committer   Ingo Molnar <mingo@elte.hu>    2010-05-07 09:30:30 (GMT)
commit      cce913178118b0b36742eb7544c2b38a0c957ee7
tree        25a6d7b4e01fea2932e6e2962a75f7a3d8c19a4f /kernel
parent      d9f599e1e6d019968b35d2dc63074b9e8964fa69
parent      4fd38e4595e2f6c9d27732c042a0e16b2753049c
download    linux-cce913178118b0b36742eb7544c2b38a0c957ee7.tar.xz
Merge branch 'perf/urgent' into perf/core
Merge reason: Resolve patch dependency
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cgroup_freezer.c |  5
-rw-r--r--   kernel/perf_event.c     |  7
-rw-r--r--   kernel/sched.c          | 18
-rw-r--r--   kernel/workqueue.c      |  2
4 files changed, 25 insertions, 7 deletions
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index da5e139..e5c0244 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -205,9 +205,12 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 	 * No lock is needed, since the task isn't on tasklist yet,
 	 * so it can't be moved to another cgroup, which means the
 	 * freezer won't be removed and will be valid during this
-	 * function call.
+	 * function call. Nevertheless, apply RCU read-side critical
+	 * section to suppress RCU lockdep false positives.
 	 */
+	rcu_read_lock();
 	freezer = task_freezer(task);
+	rcu_read_unlock();
 
 	/*
 	 * The root cgroup is non-freezable, so we can skip the
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9dbe8cd..49d8be5 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -342,6 +342,9 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 	if (event->state > PERF_EVENT_STATE_OFF)
 		event->state = PERF_EVENT_STATE_OFF;
 
+	if (event->state > PERF_EVENT_STATE_FREE)
+		return;
+
 	/*
 	 * If this was a group event with sibling events then
 	 * upgrade the siblings to singleton events by adding them
@@ -1861,6 +1864,8 @@ int perf_event_release_kernel(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 
+	event->state = PERF_EVENT_STATE_FREE;
+
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 	perf_event_remove_from_context(event);
@@ -5021,7 +5026,7 @@ err_fput_free_put_context:
 
 err_free_put_context:
 	if (err < 0)
-		kfree(event);
+		free_event(event);
 
 err_put_context:
 	if (err < 0)
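The perf_event.c hunks above carry two related fixes: perf_event_release_kernel() marks the event PERF_EVENT_STATE_FREE before teardown so that list_del_event() only performs its sibling-upgrade work for events actually being released, and the failure path of perf_event_open() now disposes of a half-constructed event with free_event() rather than a bare kfree(), since by that point the event owns more than its own allocation. A minimal userspace sketch of that second pitfall; struct event, event_create(), and event_destroy() are hypothetical illustrations, not kernel API:

```c
/*
 * Illustrative userspace analogue, not kernel code: an object that
 * owns further resources must be torn down with its matching
 * destructor; a bare free() on the struct leaks what it owns.
 */
#include <stdlib.h>
#include <string.h>

struct event {
	char *label;	/* separately allocated, owned by the event */
};

static struct event *event_create(const char *label)
{
	struct event *ev = malloc(sizeof(*ev));

	if (!ev)
		return NULL;
	ev->label = strdup(label);
	if (!ev->label) {
		free(ev);	/* nothing else acquired yet, plain free is fine */
		return NULL;
	}
	return ev;
}

/* Matching destructor: releases everything the object acquired. */
static void event_destroy(struct event *ev)
{
	free(ev->label);	/* free(ev) alone would leak this */
	free(ev);
}

int main(void)
{
	struct event *ev = event_create("sample");

	if (!ev)
		return 1;
	/* failure path: release everything, not just the struct */
	event_destroy(ev);
	return 0;
}
```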
diff --git a/kernel/sched.c b/kernel/sched.c
index b0bbadc..b11b80a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }
 
 #else
@@ -3737,7 +3747,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the mutex owner just released it and exited.
 	 */
 	if (probe_kernel_address(&owner->cpu, cpu))
-		goto out;
+		return 0;
 #else
 	cpu = owner->cpu;
 #endif
@@ -3747,14 +3757,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the cpu field may no longer be valid.
 	 */
 	if (cpu >= nr_cpumask_bits)
-		goto out;
+		return 0;
 
 	/*
	 * We need to validate that we can do a
	 * get_cpu() and that we have the percpu area.
 	 */
 	if (!cpu_online(cpu))
-		goto out;
+		return 0;
 
 	rq = cpu_rq(cpu);
 
@@ -3773,7 +3783,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
 		cpu_relax();
 	}
-out:
+
 	return 1;
 }
 #endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dee4865..5bfb213 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -774,7 +774,7 @@ void flush_delayed_work(struct delayed_work *dwork)
 {
 	if (del_timer_sync(&dwork->timer)) {
 		struct cpu_workqueue_struct *cwq;
-		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
 		__queue_work(cwq, &dwork->work);
 		put_cpu();
 	}
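Both RCU hunks above (cgroup_freezer.c and sched.c) apply the same idiom: the object being read is pinned by other means and cannot actually go away, but it is reached through task_subsys_state(), an rcu_dereference() user, so CONFIG_PROVE_RCU flags the access unless it sits inside a read-side critical section. A minimal kernel-style sketch of the idiom; struct cfg and global_cfg are hypothetical, for illustration only:

```c
#include <linux/rcupdate.h>

/* Hypothetical RCU-protected object, not from the patch. */
struct cfg {
	int value;
};
static struct cfg __rcu *global_cfg;

static int read_cfg_value(void)
{
	struct cfg *c;
	int v;

	rcu_read_lock();			/* enter read-side critical section */
	c = rcu_dereference(global_cfg);	/* PROVE_RCU checks the lock is held here */
	v = c ? c->value : -1;
	rcu_read_unlock();

	return v;
}
```

rcu_read_lock()/rcu_read_unlock() are cheap (a no-op or a counter increment, depending on configuration), so wrapping an already-safe access this way silences the lockdep false positive at essentially no cost.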