author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2013-08-29 16:21:04 (GMT)
committer  Scott Wood <scottwood@freescale.com>  2014-05-14 18:37:45 (GMT)
commit     55d0c53740aa94a99615540ef5ab5afb98f5b55c (patch)
tree       652c2d38ab140d4914e61d7e05a04022c1e687b6 /include/linux
parent     10622edfde1dfb8717b43727828e651df45c5498 (diff)
download   linux-fsl-qoriq-55d0c53740aa94a99615540ef5ab5afb98f5b55c.tar.xz
ptrace: fix ptrace vs tasklist_lock race
As explained by Alexander Fyodorov <halcy@yandex.ru>:

|read_lock(&tasklist_lock) in ptrace_stop() is converted to mutex on RT kernel,
|and it can remove __TASK_TRACED from task->state (by moving it to
|task->saved_state). If parent does wait() on child followed by a sys_ptrace
|call, the following race can happen:
|
|- child sets __TASK_TRACED in ptrace_stop()
|- parent does wait() which eventually calls wait_task_stopped() and returns
|  child's pid
|- child blocks on read_lock(&tasklist_lock) in ptrace_stop() and moves
|  __TASK_TRACED flag to saved_state
|- parent calls sys_ptrace, which calls ptrace_check_attach() and
|  wait_task_inactive()

The patch is based on his initial patch where an additional check is added in
case the __TASK_TRACED moved to ->saved_state. The pi_lock is taken in case
the caller is interrupted between looking into ->state and ->saved_state.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
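A minimal, hypothetical sketch (not part of this commit) of the tracer-side
pattern the commit message describes; example_check_traced() is an invented
name, while task_is_traced() and wait_task_inactive() are the existing kernel
interfaces a path such as ptrace_check_attach() goes through:

#include <linux/sched.h>
#include <linux/errno.h>

/*
 * Illustration only: before this change, a caller like this could race on
 * PREEMPT_RT_FULL because the child parks __TASK_TRACED in ->saved_state
 * while sleeping on the mutex-converted tasklist_lock.  With the patch,
 * task_is_traced() also inspects ->saved_state under ->pi_lock.
 */
static int example_check_traced(struct task_struct *child)
{
	if (!task_is_traced(child))
		return -ESRCH;

	/* wait until the child is really off the CPU before touching it */
	if (!wait_task_inactive(child, __TASK_TRACED))
		return -ESRCH;

	return 0;
}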
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/sched.h | 48
 1 file changed, 45 insertions(+), 3 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 465e66d..230094d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -165,11 +165,8 @@ extern char ___assert_task_state[1 - 2*!!(
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED)
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task) ((task)->exit_state != 0)
-#define task_is_stopped_or_traced(task) \
- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0)
@@ -2414,6 +2411,51 @@ static inline int need_resched(void)
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}
+static inline bool __task_is_stopped_or_traced(struct task_struct *task)
+{
+ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
+ return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
+ return true;
+#endif
+ return false;
+}
+
+static inline bool task_is_stopped_or_traced(struct task_struct *task)
+{
+ bool traced_stopped;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+ traced_stopped = __task_is_stopped_or_traced(task);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+#else
+ traced_stopped = __task_is_stopped_or_traced(task);
+#endif
+ return traced_stopped;
+}
+
+static inline bool task_is_traced(struct task_struct *task)
+{
+ bool traced = false;
+
+ if (task->state & __TASK_TRACED)
+ return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* in case the task is sleeping on tasklist_lock */
+ raw_spin_lock_irq(&task->pi_lock);
+ if (task->state & __TASK_TRACED)
+ traced = true;
+ else if (task->saved_state & __TASK_TRACED)
+ traced = true;
+ raw_spin_unlock_irq(&task->pi_lock);
+#endif
+ return traced;
+}
+
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return