 arch/i386/kernel/process.c    |   29 +++++++++++++++++++++++++++++
 arch/x86_64/kernel/process.c  |   29 +++++++++++++++++++++++++++++
 include/asm-i386/tlbflush.h   |   12 +++++++-----
 include/asm-x86_64/tlbflush.h |   12 +++++++-----
 include/linux/seccomp.h       |   10 ++++++++++
 5 files changed, 82 insertions(+), 10 deletions(-)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 5f8cfa6..ba243a4 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -617,6 +617,33 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
 }
 
 /*
+ * Decide whether the context switch from prev to next needs to
+ * toggle the TSC disable bit (TSD) in cr4.
+ */
+static inline void disable_tsc(struct task_struct *prev_p,
+			       struct task_struct *next_p)
+{
+	struct thread_info *prev, *next;
+
+	/*
+	 * With SECCOMP=n, has_secure_computing() is a constant 0, so
+	 * gcc can eliminate the ->thread_info dereferences entirely.
+	 */
+	prev = prev_p->thread_info;
+	next = next_p->thread_info;
+
+	if (has_secure_computing(prev) || has_secure_computing(next)) {
+		/* slow path here */
+		if (has_secure_computing(prev) &&
+		    !has_secure_computing(next)) {
+			write_cr4(read_cr4() & ~X86_CR4_TSD);
+		} else if (!has_secure_computing(prev) &&
+			   has_secure_computing(next))
+			write_cr4(read_cr4() | X86_CR4_TSD);
+	}
+}
+
+/*
  * switch_to(x,y) should switch tasks from x to y.
  *
  * We fsave/fwait so that an exception goes off at the right time
@@ -695,6 +722,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
 		handle_io_bitmap(next, tss);
 
+	disable_tsc(prev_p, next_p);
+
 	return prev_p;
 }
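
The helper above only writes cr4 on a real transition into or out of
seccomp mode; the common context-switch path pays just one flag test per
side. Below is a user-space model of the four prev/next cases, with cr4
simulated as a plain variable since the real register is only accessible
at ring 0; sim_cr4 and sim_disable_tsc are made-up names for illustration.

/* User-space model of the disable_tsc() transition logic. */
#include <stdio.h>

#define X86_CR4_TSD 0x0004	/* CR4.TSD, bit 2: rdtsc faults at CPL > 0 */

static unsigned int sim_cr4;	/* stands in for the real %cr4 */

static void sim_disable_tsc(int prev_seccomp, int next_seccomp)
{
	if (prev_seccomp || next_seccomp) {
		/* slow path, as in the patch */
		if (prev_seccomp && !next_seccomp)
			sim_cr4 &= ~X86_CR4_TSD;	/* leaving seccomp */
		else if (!prev_seccomp && next_seccomp)
			sim_cr4 |= X86_CR4_TSD;		/* entering seccomp */
		/* seccomp -> seccomp: TSD already set, nothing to do */
	}
}

int main(void)
{
	int prev, next;

	/* walk all four prev -> next combinations */
	for (prev = 0; prev <= 1; prev++)
		for (next = 0; next <= 1; next++) {
			sim_cr4 = prev ? X86_CR4_TSD : 0;
			sim_disable_tsc(prev, next);
			printf("prev=%d next=%d -> TSD=%d\n",
			       prev, next, !!(sim_cr4 & X86_CR4_TSD));
		}
	return 0;
}
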
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 1d91271..7577f9d 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -482,6 +482,33 @@ out:
 }
 
 /*
+ * Decide whether the context switch from prev to next needs to
+ * toggle the TSC disable bit (TSD) in cr4.
+ */
+static inline void disable_tsc(struct task_struct *prev_p,
+			       struct task_struct *next_p)
+{
+	struct thread_info *prev, *next;
+
+	/*
+	 * With SECCOMP=n, has_secure_computing() is a constant 0, so
+	 * gcc can eliminate the ->thread_info dereferences entirely.
+	 */
+	prev = prev_p->thread_info;
+	next = next_p->thread_info;
+
+	if (has_secure_computing(prev) || has_secure_computing(next)) {
+		/* slow path here */
+		if (has_secure_computing(prev) &&
+		    !has_secure_computing(next)) {
+			write_cr4(read_cr4() & ~X86_CR4_TSD);
+		} else if (!has_secure_computing(prev) &&
+			   has_secure_computing(next))
+			write_cr4(read_cr4() | X86_CR4_TSD);
+	}
+}
+
+/*
* This special macro can be used to load a debugging register
*/
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)
@@ -599,6 +626,8 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		}
 	}
 
+	disable_tsc(prev_p, next_p);
+
 	return prev_p;
 }
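
Seen from user space on a patched kernel, the effect is that rdtsc
raises #GP (delivered as SIGSEGV) once a seccomp task is switched in
with CR4.TSD set. A minimal sketch of observing that; it assumes a
kernel carrying this patch and uses the later prctl(PR_SET_SECCOMP)
interface (kernels of this patch's vintage enabled strict mode by
writing to /proc/<pid>/seccomp instead).

/* Child enters SECCOMP_MODE_STRICT and runs rdtsc; parent reports
 * how it died. Expected on a patched kernel: killed by SIGSEGV. */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <linux/seccomp.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		unsigned int lo, hi;

		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT))
			_exit(1);
		/* with CR4.TSD set, this traps at CPL 3 */
		__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
		_exit(0);	/* only reached if rdtsc was not trapped */
	}

	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("child killed by signal %d (SIGSEGV expected)\n",
		       WTERMSIG(status));
	else
		printf("child exited %d; rdtsc was not trapped\n",
		       WEXITSTATUS(status));
	return 0;
}
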
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index f22fab0..ab216e1 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -22,16 +22,18 @@
  */
 #define __flush_tlb_global()						\
 	do {								\
-		unsigned int tmpreg;					\
+		unsigned int tmpreg, cr4, cr4_orig;			\
 									\
 		__asm__ __volatile__(					\
-			"movl %1, %%cr4;  # turn off PGE     \n"	\
+			"movl %%cr4, %2;  # save cr4         \n"	\
+			"movl %2, %1;                        \n"	\
+			"andl %3, %1;     # clear PGE bit    \n"	\
+			"movl %1, %%cr4;  # turn off PGE     \n"	\
 			"movl %%cr3, %0;                     \n"	\
 			"movl %0, %%cr3;  # flush TLB        \n"	\
 			"movl %2, %%cr4;  # turn PGE back on \n"	\
-			: "=&r" (tmpreg)				\
-			: "r" (mmu_cr4_features & ~X86_CR4_PGE),	\
-			  "r" (mmu_cr4_features)			\
+			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
+			: "i" (~X86_CR4_PGE)				\
 			: "memory");					\
 	} while (0)
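
This rewrite is what keeps the TLB-flush path from undoing the
context-switch half of the patch: restoring cr4 from the boot-time
mmu_cr4_features would silently clear a TSD bit that disable_tsc() had
set for the current task, while reading cr4 at runtime round-trips
whatever bits are live. A user-space model of the difference, with cr4
as a plain variable and flush_old/flush_new as made-up names:

/* Why __flush_tlb_global() must read %cr4 instead of restoring
 * mmu_cr4_features: the old sequence loses CR4.TSD. */
#include <stdio.h>

#define X86_CR4_TSD 0x0004	/* time stamp disable, bit 2 */
#define X86_CR4_PGE 0x0080	/* global page enable, bit 7 */

static unsigned int sim_cr4;
static const unsigned int mmu_cr4_features = X86_CR4_PGE; /* boot value */

static void flush_old(void)		/* old code */
{
	sim_cr4 = mmu_cr4_features & ~X86_CR4_PGE;
	/* ... reload %cr3 here to flush the TLB ... */
	sim_cr4 = mmu_cr4_features;	/* TSD silently lost */
}

static void flush_new(void)		/* new code */
{
	unsigned int cr4_orig = sim_cr4;	/* movl %%cr4, %2 */

	sim_cr4 = cr4_orig & ~X86_CR4_PGE;	/* andl; movl %1, %%cr4 */
	/* ... reload %cr3 here to flush the TLB ... */
	sim_cr4 = cr4_orig;			/* TSD preserved */
}

int main(void)
{
	sim_cr4 = mmu_cr4_features | X86_CR4_TSD; /* seccomp task running */
	flush_old();
	printf("old code: TSD after flush = %d\n",
	       !!(sim_cr4 & X86_CR4_TSD));

	sim_cr4 = mmu_cr4_features | X86_CR4_TSD;
	flush_new();
	printf("new code: TSD after flush = %d\n",
	       !!(sim_cr4 & X86_CR4_TSD));
	return 0;
}
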
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 2e811ac..0617423 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -22,16 +22,18 @@
  */
 #define __flush_tlb_global()						\
 	do {								\
-		unsigned long tmpreg;					\
+		unsigned long tmpreg, cr4, cr4_orig;			\
 									\
 		__asm__ __volatile__(					\
-			"movq %1, %%cr4;  # turn off PGE     \n"	\
+			"movq %%cr4, %2;  # save cr4         \n"	\
+			"movq %2, %1;                        \n"	\
+			"andq %3, %1;     # clear PGE bit    \n"	\
+			"movq %1, %%cr4;  # turn off PGE     \n"	\
 			"movq %%cr3, %0;  # flush TLB        \n"	\
 			"movq %0, %%cr3;                     \n"	\
 			"movq %2, %%cr4;  # turn PGE back on \n"	\
-			: "=&r" (tmpreg)				\
-			: "r" (mmu_cr4_features & ~X86_CR4_PGE),	\
-			  "r" (mmu_cr4_features)			\
+			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
+			: "i" (~X86_CR4_PGE)				\
 			: "memory");					\
 	} while (0)
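
The constraints do real work in both versions of this asm: "=&r" marks
each output early-clobbered, so gcc cannot assign it a register that
overlaps an input, and "i" folds ~X86_CR4_PGE into an instruction
immediate, so no register is spent on the constant. A tiny standalone
example of the same constraint pattern (x86 only; clear_bits and MASK
are invented for illustration):

/* "=&r" early clobber plus "i" immediate, outside the kernel. */
#include <stdio.h>

#define MASK 0x0080u

static unsigned int clear_bits(unsigned int val)
{
	unsigned int out, tmp;

	__asm__ __volatile__(
		"movl %2, %1\n\t"	/* tmp = val */
		"andl %3, %1\n\t"	/* tmp &= ~MASK ("i" immediate) */
		"movl %1, %0\n\t"	/* out = tmp */
		: "=&r" (out), "=&r" (tmp)	/* & : no overlap with inputs */
		: "r" (val), "i" (~MASK));
	return out;
}

int main(void)
{
	printf("%#x\n", clear_bits(0x00ffu));	/* prints 0x7f */
	return 0;
}
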
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 3a2702b..dc89116 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -19,6 +19,11 @@ static inline void secure_computing(int this_syscall)
 		__secure_computing(this_syscall);
 }
 
+static inline int has_secure_computing(struct thread_info *ti)
+{
+	return unlikely(test_ti_thread_flag(ti, TIF_SECCOMP));
+}
+
#else /* CONFIG_SECCOMP */
#if (__GNUC__ > 2)
@@ -28,6 +33,11 @@ static inline void secure_computing(int this_syscall)
#endif
#define secure_computing(x) do { } while (0)
+/* static inline to preserve typechecking */
+static inline int has_secure_computing(struct thread_info *ti)
+{
+	return 0;
+}
#endif /* CONFIG_SECCOMP */
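
The SECCOMP=n stub matters for the process.c hunks above: because it is
a static inline returning a constant 0 rather than an empty macro, call
sites keep full typechecking, yet gcc can still discard the whole
has_secure_computing() branch in disable_tsc() together with the
->thread_info loads. The same pattern outside the kernel, with made-up
names (CONFIG_FEATURE, has_feature); build with or without
-DCONFIG_FEATURE:

/* The 'static inline returning 0' stub pattern from seccomp.h. */
#include <stdio.h>

struct thread_info { unsigned long flags; };

#define TIF_FEATURE 0x1

#ifdef CONFIG_FEATURE
static inline int has_feature(struct thread_info *ti)
{
	return (ti->flags & TIF_FEATURE) != 0;
}
#else
/* static inline to preserve typechecking, as in seccomp.h */
static inline int has_feature(struct thread_info *ti)
{
	return 0;	/* compile-time 0: callers' branches become dead code */
}
#endif

int main(void)
{
	struct thread_info ti = { TIF_FEATURE };

	if (has_feature(&ti))
		printf("feature path\n");
	else
		printf("stubbed out\n");
	return 0;
}
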