Diffstat (limited to 'arch/x86/include/asm/mmu_context_64.h')
-rw-r--r--  arch/x86/include/asm/mmu_context_64.h  54
1 files changed, 54 insertions, 0 deletions
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
new file mode 100644
index 0000000..2675867
--- /dev/null
+++ b/arch/x86/include/asm/mmu_context_64.h
@@ -0,0 +1,54 @@
+#ifndef ASM_X86__MMU_CONTEXT_64_H
+#define ASM_X86__MMU_CONTEXT_64_H
+
+#include <asm/pda.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+ if (read_pda(mmu_state) == TLBSTATE_OK)
+ write_pda(mmu_state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned cpu = smp_processor_id();
+ if (likely(prev != next)) {
+ /* stop flush ipis for the previous mm */
+ cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+ write_pda(mmu_state, TLBSTATE_OK);
+ write_pda(active_mm, next);
+#endif
+ cpu_set(cpu, next->cpu_vm_mask);
+ load_cr3(next->pgd);
+
+ if (unlikely(next->context.ldt != prev->context.ldt))
+ load_LDT_nolock(&next->context);
+ }
+#ifdef CONFIG_SMP
+ else {
+ write_pda(mmu_state, TLBSTATE_OK);
+ if (read_pda(active_mm) != next)
+ BUG();
+ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+ /* We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
+ load_cr3(next->pgd);
+ load_LDT_nolock(&next->context);
+ }
+ }
+#endif
+}
+
+#define deactivate_mm(tsk, mm) \
+do { \
+ load_gs_index(0); \
+ asm volatile("movl %0,%%fs"::"r"(0)); \
+} while (0)
+
+#endif /* ASM_X86__MMU_CONTEXT_64_H */
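Context (not part of the patch above): the comment in the SMP branch of switch_mm() refers to leave_mm(), the flush-IPI side of the lazy TLB protocol. For reference, the corresponding x86_64 helper of this era lived in arch/x86/kernel/tlb_64.c and looked roughly like the sketch below; it removes the CPU from cpu_vm_mask so no further flush IPIs are sent, and points CR3 at swapper_pg_dir so the lazy CPU no longer references the old page tables. Treat this as an approximate reconstruction, not part of this change.

	void leave_mm(int cpu)
	{
		/* only valid while in lazy TLB mode */
		if (read_pda(mmu_state) == TLBSTATE_OK)
			BUG();
		/* stop flush IPIs for the borrowed mm on this CPU */
		cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
		/* drop CR3 references to page tables that may be freed */
		load_cr3(swapper_pg_dir);
	}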
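As for how these helpers are driven: the scheduler's context_switch() in kernel/sched.c chooses between enter_lazy_tlb() and switch_mm() depending on whether the incoming task owns an mm. The fragment below is a condensed sketch of that call site as it looked in kernels of this vintage, shown only to illustrate the calling convention.

	struct mm_struct *mm = next->mm;
	struct mm_struct *oldmm = prev->active_mm;

	if (unlikely(!mm)) {
		/* kernel thread: borrow the previous mm and enter lazy TLB mode */
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);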