From d6dd61c831226f9cd7750885da04d360d6455101 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Wed, 2 May 2007 19:27:14 +0200
Subject: [PATCH] x86: PARAVIRT: add hooks to intercept mm creation and destruction

Add hooks to allow a paravirt implementation to track the lifetime of
an mm.  Paravirtualization requires three hooks, but only two are
needed in common code.  They are:

arch_dup_mmap, which is called when a new mmap is created at fork

arch_exit_mmap, which is called when the last process reference to an
mm is dropped, which typically happens on exit and exec.

The third hook is activate_mm, which is called from the arch-specific
activate_mm() macro/function, and so doesn't need stub versions for
other architectures.  It's called when an mm is first used.

Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: Andi Kleen
Cc: linux-arch@vger.kernel.org
Cc: James Bottomley
Acked-by: Ingo Molnar

diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 2040a83..54cc14d 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -237,6 +237,10 @@ struct paravirt_ops paravirt_ops = {
 	.irq_enable_sysexit = native_irq_enable_sysexit,
 	.iret = native_iret,
 
+	.dup_mmap = paravirt_nop,
+	.exit_mmap = paravirt_nop,
+	.activate_mm = paravirt_nop,
+
 	.startup_ipi_hook = paravirt_nop,
 };
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index fe249e9..0bd7bd2 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 /*
  * Force a context reload. This is needed when we change the page
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index d1a65b1..f8755c8 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 void __check_kvm_seq(struct mm_struct *mm);
diff --git a/include/asm-arm26/mmu_context.h b/include/asm-arm26/mmu_context.h
index 1a929bf..16c821f 100644
--- a/include/asm-arm26/mmu_context.h
+++ b/include/asm-arm26/mmu_context.h
@@ -13,6 +13,8 @@
 #ifndef __ASM_ARM_MMU_CONTEXT_H
 #define __ASM_ARM_MMU_CONTEXT_H
 
+#include <asm-generic/mm_hooks.h>
+
 #define init_new_context(tsk,mm) 0
 #define destroy_context(mm) do { } while(0)
diff --git a/include/asm-avr32/mmu_context.h b/include/asm-avr32/mmu_context.h
index 31add1a..c37c391 100644
--- a/include/asm-avr32/mmu_context.h
+++ b/include/asm-avr32/mmu_context.h
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 /*
  * The MMU "context" consists of two things:
diff --git a/include/asm-cris/mmu_context.h b/include/asm-cris/mmu_context.h
index e6e659d..72ba08d 100644
--- a/include/asm-cris/mmu_context.h
+++ b/include/asm-cris/mmu_context.h
@@ -1,6 +1,8 @@
 #ifndef __CRIS_MMU_CONTEXT_H
 #define __CRIS_MMU_CONTEXT_H
 
+#include <asm-generic/mm_hooks.h>
+
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void get_mmu_context(struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
diff --git a/include/asm-frv/mmu_context.h b/include/asm-frv/mmu_context.h
index 72edcaa..c7daa39 100644
--- a/include/asm-frv/mmu_context.h
+++ b/include/asm-frv/mmu_context.h
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
new file mode 100644
index 0000000..67dea81
--- /dev/null
+++ b/include/asm-generic/mm_hooks.h
@@ -0,0 +1,18 @@
+/*
+ * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap, to
+ * be included in asm-FOO/mmu_context.h for any arch FOO which doesn't
+ * need to hook these.
+ */
+#ifndef _ASM_GENERIC_MM_HOOKS_H
+#define _ASM_GENERIC_MM_HOOKS_H
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+#endif	/* _ASM_GENERIC_MM_HOOKS_H */
diff --git a/include/asm-h8300/mmu_context.h b/include/asm-h8300/mmu_context.h
index 5c165f7..f44b730 100644
--- a/include/asm-h8300/mmu_context.h
+++ b/include/asm-h8300/mmu_context.h
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index e6aa30f..8198d1c 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -5,6 +5,16 @@
 #include
 #include
 #include
+#include <asm/paravirt.h>
+#ifndef CONFIG_PARAVIRT
+#include <asm-generic/mm_hooks.h>
+
+static inline void paravirt_activate_mm(struct mm_struct *prev,
+					struct mm_struct *next)
+{
+}
+#endif	/* !CONFIG_PARAVIRT */
+
 
 /*
  * Used for LDT copy/destruction.
@@ -65,7 +75,10 @@ static inline void switch_mm(struct mm_struct *prev,
 #define deactivate_mm(tsk, mm)			\
 	asm("movl %0,%%gs": :"r" (0));
 
-#define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL)
+#define activate_mm(prev, next)			\
+	do {					\
+		paravirt_activate_mm(prev, next); \
+		switch_mm((prev),(next),NULL);	\
+	} while(0);
 
 #endif
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index f93599d..61c03f1 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -119,6 +119,12 @@ struct paravirt_ops
 
 	void (*io_delay)(void);
 
+	void (*activate_mm)(struct mm_struct *prev,
+			    struct mm_struct *next);
+	void (*dup_mmap)(struct mm_struct *oldmm,
+			 struct mm_struct *mm);
+	void (*exit_mmap)(struct mm_struct *mm);
+
 #ifdef CONFIG_X86_LOCAL_APIC
 	void (*apic_write)(unsigned long reg, unsigned long v);
 	void (*apic_write_atomic)(unsigned long reg, unsigned long v);
@@ -395,6 +401,23 @@ static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 }
 #endif
 
+static inline void paravirt_activate_mm(struct mm_struct *prev,
+					struct mm_struct *next)
+{
+	paravirt_ops.activate_mm(prev, next);
+}
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+	paravirt_ops.dup_mmap(oldmm, mm);
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+	paravirt_ops.exit_mmap(mm);
+}
+
 #define __flush_tlb() paravirt_ops.flush_tlb_user()
 #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
 #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index b5c6508..cef2400 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -29,6 +29,7 @@
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 struct ia64_ctx {
 	spinlock_t lock;
diff --git a/include/asm-m32r/mmu_context.h b/include/asm-m32r/mmu_context.h
index 1f40d4a..91909e5 100644
--- a/include/asm-m32r/mmu_context.h
+++ b/include/asm-m32r/mmu_context.h
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 /*
  * Cache of MMU context last used.
diff --git a/include/asm-m68k/mmu_context.h b/include/asm-m68k/mmu_context.h
index 231d11b..894dacb 100644
--- a/include/asm-m68k/mmu_context.h
+++ b/include/asm-m68k/mmu_context.h
@@ -1,6 +1,7 @@
 #ifndef __M68K_MMU_CONTEXT_H
 #define __M68K_MMU_CONTEXT_H
 
+#include <asm-generic/mm_hooks.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/include/asm-m68knommu/mmu_context.h b/include/asm-m68knommu/mmu_context.h
index 6c077d3..9ccee42 100644
--- a/include/asm-m68knommu/mmu_context.h
+++ b/include/asm-m68knommu/mmu_context.h
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index fe065d6..65024ff 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -20,6 +20,7 @@
 #include
 #include
 #endif /* SMTC */
+#include <asm-generic/mm_hooks.h>
 
 /*
  * For the fast tlb miss handlers, we keep a per cpu array of pointers
diff --git a/include/asm-parisc/mmu_context.h b/include/asm-parisc/mmu_context.h
index 9c05836..bad6902 100644
--- a/include/asm-parisc/mmu_context.h
+++ b/include/asm-parisc/mmu_context.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/include/asm-powerpc/mmu_context.h b/include/asm-powerpc/mmu_context.h
index 083ac91..c0d7795 100644
--- a/include/asm-powerpc/mmu_context.h
+++ b/include/asm-powerpc/mmu_context.h
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 /*
  * Copyright (C) 2001 PPC 64 Team, IBM Corp
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index 2bc8589..a6441a0 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 /*
  * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 1d21da2..501cb9b 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -10,6 +10,8 @@
 #define __S390_MMU_CONTEXT_H
 
 #include
+#include <asm-generic/mm_hooks.h>
+
 /*
  * get a new mmu context.. S390 don't know about contexts.
 */
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 3420244..01acaaa 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 /*
  * The MMU "context" consists of two things:
diff --git a/include/asm-sh64/mmu_context.h b/include/asm-sh64/mmu_context.h
index 8c860da..507bf72 100644
--- a/include/asm-sh64/mmu_context.h
+++ b/include/asm-sh64/mmu_context.h
@@ -27,7 +27,7 @@ extern unsigned long mmu_context_cache;
 #include
-
+#include <asm-generic/mm_hooks.h>
 
 /* Current mm's pgd */
 extern pgd_t *mmu_pdtp_cache;
diff --git a/include/asm-sparc/mmu_context.h b/include/asm-sparc/mmu_context.h
index ed1e01d..671a997 100644
--- a/include/asm-sparc/mmu_context.h
+++ b/include/asm-sparc/mmu_context.h
@@ -5,6 +5,8 @@
 #ifndef __ASSEMBLY__
 
+#include <asm-generic/mm_hooks.h>
+
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 2337eb4..8d12903 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
index f709c78..9aa4b44 100644
--- a/include/asm-um/mmu_context.h
+++ b/include/asm-um/mmu_context.h
@@ -6,6 +6,8 @@
 #ifndef __UM_MMU_CONTEXT_H
 #define __UM_MMU_CONTEXT_H
 
+#include <asm-generic/mm_hooks.h>
+
 #include "linux/sched.h"
 #include "choose-mode.h"
 #include "um_mmu.h"
diff --git a/include/asm-v850/mmu_context.h b/include/asm-v850/mmu_context.h
index f521c80..01daacd 100644
--- a/include/asm-v850/mmu_context.h
+++ b/include/asm-v850/mmu_context.h
@@ -1,6 +1,8 @@
 #ifndef __V850_MMU_CONTEXT_H__
 #define __V850_MMU_CONTEXT_H__
 
+#include <asm-generic/mm_hooks.h>
+
 #define destroy_context(mm) ((void)0)
 #define init_new_context(tsk,mm) 0
 #define switch_mm(prev,next,tsk) ((void)0)
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index af03b9f..0cce83a 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 /*
  * possibly do the LDT unload here?
diff --git a/include/asm-xtensa/mmu_context.h b/include/asm-xtensa/mmu_context.h
index f14851f..92f9483 100644
--- a/include/asm-xtensa/mmu_context.h
+++ b/include/asm-xtensa/mmu_context.h
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <asm-generic/mm_hooks.h>
 
 #define XCHAL_MMU_ASID_BITS 8
diff --git a/kernel/fork.c b/kernel/fork.c
index 6af959c..ffccefb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -286,6 +286,8 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (retval)
 			goto out;
 	}
+	/* a new mm has just been created */
+	arch_dup_mmap(oldmm, mm);
 	retval = 0;
 out:
 	up_write(&mm->mmap_sem);
diff --git a/mm/mmap.c b/mm/mmap.c
index 84f997d..88da687 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <asm/mmu_context.h>
 
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags) (0)
@@ -1979,6 +1980,9 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long nr_accounted = 0;
 	unsigned long end;
 
+	/* mm's last user has gone, and its about to be pulled down */
+	arch_exit_mmap(mm);
+
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
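
For context, here is a minimal illustrative sketch (not part of this patch) of how a
hypothetical i386 paravirt backend might fill in the new hooks.  The "myhv" backend
and all myhv_* helpers are made-up placeholders; only the paravirt_ops field names and
the points at which the kernel calls them come from the patch above.

/*
 * Illustrative sketch only -- assumes a fictional "myhv" hypervisor
 * backend built with CONFIG_PARAVIRT.  A real backend would replace
 * the comment bodies with its own pagetable pinning/unpinning calls.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/paravirt.h>

static void myhv_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	/* fork(): the child mm's pagetables exist but are not in use
	 * yet; a hypervisor could register or pre-pin them here. */
}

static void myhv_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* first use of the mm: pin its pagetables before switch_mm()
	 * loads them into %cr3. */
}

static void myhv_exit_mmap(struct mm_struct *mm)
{
	/* last user of the mm is gone: unpin the pagetables so the
	 * normal teardown in exit_mmap() can free them. */
}

static void __init myhv_setup_mm_hooks(void)
{
	/* called from the backend's own early setup path */
	paravirt_ops.dup_mmap = myhv_dup_mmap;
	paravirt_ops.activate_mm = myhv_activate_mm;
	paravirt_ops.exit_mmap = myhv_exit_mmap;
}

If no backend overrides them, the defaults installed in arch/i386/kernel/paravirt.c
remain paravirt_nop, so a native kernel pays only an indirect call at each of the
three sites.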