author     Andy Lutomirski <luto@amacapital.net>   2015-01-31 12:53:53 (GMT)
committer  Andy Lutomirski <luto@amacapital.net>   2015-02-01 12:02:53 (GMT)
commit     b926e6f61a26036ee9eabe6761483954d481ad25 (patch)
tree       0ed87a7d7acc4e8f6e1e443f032b2444fd8eaa1e /arch/x86
parent     772a9aca12567badb5b9caf2af249a5991f47ea8 (diff)
x86, traps: Fix ist_enter from userspace
context_tracking_user_exit() has no effect if in_interrupt() returns true, so ist_enter() didn't work. Fix it by calling exception_enter(), and thus context_tracking_user_exit(), before incrementing the preempt count.

This also adds an assertion that will catch the problem reliably if CONFIG_PROVE_RCU=y, to help prevent the bug from being reintroduced.

Link: http://lkml.kernel.org/r/261ebee6aee55a4724746d0d7024697013c40a08.1422709102.git.luto@amacapital.net
Fixes: 959274753857 ("x86, traps: Track entry into and exit from IST context")
Reported-and-tested-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
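
The ordering problem described above can be modeled in plain userspace C. The snippet below is only an illustration of the commit message: preempt_count, in_interrupt() and context_tracking_user_exit() here are simplified stand-ins for the real kernel primitives, not their actual implementations.

/*
 * Userspace model of the ordering bug (a sketch, not kernel code).
 */
#include <stdio.h>
#include <stdbool.h>

#define HARDIRQ_OFFSET (1UL << 16)	/* same idea as the kernel's hardirq bits */

static unsigned long preempt_count;
static bool in_user_context = true;	/* pretend the CPU trapped from user mode */

static bool in_interrupt(void)
{
	/* Simplified: any bits above the preemption byte count as "in interrupt". */
	return (preempt_count & ~0xffUL) != 0;
}

/* Mirrors the early-out described above: does nothing once in_interrupt() is true. */
static void context_tracking_user_exit(void)
{
	if (in_interrupt())
		return;
	in_user_context = false;	/* tell context tracking we are now in the kernel */
}

int main(void)
{
	/* Old ist_enter() ordering: bump the preempt count first ... */
	preempt_count += HARDIRQ_OFFSET;
	context_tracking_user_exit();	/* ... so this silently does nothing */
	printf("old order: still marked as user context? %s\n",
	       in_user_context ? "yes (bug)" : "no");

	/* Reset and try the fixed ordering. */
	preempt_count = 0;
	in_user_context = true;

	/* New ordering: leave user context first, then raise the preempt count. */
	context_tracking_user_exit();
	preempt_count += HARDIRQ_OFFSET;
	printf("new order: still marked as user context? %s\n",
	       in_user_context ? "yes (bug)" : "no");

	return 0;
}

With the old ordering the first printf reports that the task is still tracked as running in userspace; that stale state is what the rcu_lockdep_assert() added by this patch is meant to catch under CONFIG_PROVE_RCU.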
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/traps.c  25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 7176f84..c74f2f5 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -110,15 +110,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 
 enum ctx_state ist_enter(struct pt_regs *regs)
 {
-	/*
-	 * We are atomic because we're on the IST stack (or we're on x86_32,
-	 * in which case we still shouldn't schedule.
-	 */
-	preempt_count_add(HARDIRQ_OFFSET);
+	enum ctx_state prev_state;
 
 	if (user_mode_vm(regs)) {
 		/* Other than that, we're just an exception. */
-		return exception_enter();
+		prev_state = exception_enter();
 	} else {
 		/*
 		 * We might have interrupted pretty much anything.  In
@@ -127,12 +123,27 @@ enum ctx_state ist_enter(struct pt_regs *regs)
 		 * but we need to notify RCU.
 		 */
 		rcu_nmi_enter();
-		return IN_KERNEL;  /* the value is irrelevant. */
+		prev_state = IN_KERNEL;  /* the value is irrelevant. */
 	}
+
+	/*
+	 * We are atomic because we're on the IST stack (or we're on x86_32,
+	 * in which case we still shouldn't schedule).
+	 *
+	 * This must be after exception_enter(), because exception_enter()
+	 * won't do anything if in_interrupt() returns true.
+	 */
+	preempt_count_add(HARDIRQ_OFFSET);
+
+	/* This code is a bit fragile.  Test it. */
+	rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");
+
+	return prev_state;
 }
 
 void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
 {
+	/* Must be before exception_exit. */
 	preempt_count_sub(HARDIRQ_OFFSET);
 
 	if (user_mode_vm(regs))
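
For context, callers are expected to bracket their handler bodies with the two helpers. The sketch below is based only on the signatures visible in this diff; do_hypothetical_ist_trap() is illustrative and not a handler touched by this patch.

/* Illustrative only: a hypothetical IST exception handler. */
dotraplinkage void do_hypothetical_ist_trap(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	/* Leaves user context tracking if needed, then raises HARDIRQ_OFFSET. */
	prev_state = ist_enter(regs);

	/* ... handle the trap; scheduling is not allowed in here ... */

	/* Drops HARDIRQ_OFFSET first, then undoes the context-tracking side. */
	ist_exit(regs, prev_state);
}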