Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/Kconfig                 |  2 |
-rw-r--r-- | arch/arm/include/asm/switch_to.h |  8 |
-rw-r--r-- | arch/arm/mm/highmem.c            | 41 |
3 files changed, 48 insertions, 3 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ef77778..1ad6fb6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1759,7 +1759,7 @@ config HAVE_ARCH_PFN_VALID
 
 config HIGHMEM
 	bool "High Memory Support"
-	depends on MMU && !PREEMPT_RT_FULL
+	depends on MMU
 	help
 	  The address space of ARM processors is only 4 Gigabytes large
 	  and it has to accommodate user address space, kernel address
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index c99e259..f3e3d80 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,13 @@
 
 #include <linux/thread_info.h>
 
+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
 /*
  * For v7 SMP cores running a preemptible kernel we may be pre-empted
  * during a TLB maintenance operation, so execute an inner-shareable dsb
@@ -22,6 +29,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 
 #define switch_to(prev,next,last)					\
 do {									\
+	switch_kmaps(prev, next);					\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 3688c8b..bd41dd8 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -38,6 +38,7 @@ EXPORT_SYMBOL(kunmap);
 
 void *kmap_atomic(struct page *page)
 {
+	pte_t pte = mk_pte(page, kmap_prot);
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
@@ -76,7 +77,10 @@ void *kmap_atomic(struct page *page)
 	 * in place, so the contained TLB flush ensures the TLB is updated
 	 * with the new mapping.
 	 */
-	set_top_pte(vaddr, mk_pte(page, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_top_pte(vaddr, pte);
 
 	return (void *)vaddr;
 }
@@ -93,6 +97,9 @@ void __kunmap_atomic(void *kvaddr)
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 #else
@@ -110,6 +117,7 @@ EXPORT_SYMBOL(__kunmap_atomic);
 
 void *kmap_atomic_pfn(unsigned long pfn)
 {
+	pte_t pte = pfn_pte(pfn, kmap_prot);
 	unsigned long vaddr;
 	int idx, type;
 
@@ -121,7 +129,10 @@ void *kmap_atomic_pfn(unsigned long pfn)
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_top_pte(vaddr)));
 #endif
-	set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_top_pte(vaddr, pte);
 
 	return (void *)vaddr;
 }
@@ -135,3 +146,29 @@ struct page *kmap_atomic_to_page(const void *ptr)
 
 	return pte_page(get_top_pte(vaddr));
 }
+
+#if defined CONFIG_PREEMPT_RT_FULL
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+	int i;
+
+	/*
+	 * Clear @prev's kmap_atomic mappings
+	 */
+	for (i = 0; i < prev_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+
+		set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0));
+	}
+	/*
+	 * Restore @next_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < next_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+
+		if (!pte_none(next_p->kmap_pte[i]))
+			set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
+				    next_p->kmap_pte[i]);
+	}
+}
+#endif
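
Note on the per-task state: the hunks above read and write current->kmap_pte[type] and prev_p->kmap_idx, but this diff does not add those fields; they come from a companion change to struct task_struct elsewhere in the RT series. The standalone sketch below is a plain userspace C model of the save/restore idea behind switch_kmaps(), not kernel code: all names and values are illustrative, and a single CPU is assumed so the fixmap index reduces to the slot number. Each task carries private copies of its kmap_atomic PTEs; on a context switch the outgoing task's slots are cleared from the shared mapping window and the incoming task's are replayed.

/*
 * Userspace sketch of the switch_kmaps() save/restore logic.
 * KM_TYPE_NR, pte_t and fixmap[] stand in for the real kernel
 * definitions; the PTE values in main() are arbitrary.
 */
#include <stdio.h>

#define KM_TYPE_NR 4                    /* kmap_atomic slots per CPU */

typedef unsigned long pte_t;            /* stand-in for the kernel pte_t */

struct task {
	int kmap_idx;                   /* number of live kmap_atomic slots */
	pte_t kmap_pte[KM_TYPE_NR];     /* per-task saved mappings */
};

static pte_t fixmap[KM_TYPE_NR];        /* models this CPU's kmap window */

/* Mirrors the two loops in switch_kmaps(): clear prev, replay next. */
static void switch_kmaps(struct task *prev, struct task *next)
{
	int i;

	for (i = 0; i < prev->kmap_idx; i++)
		fixmap[i] = 0;                  /* set_top_pte(..., __pte(0)) */

	for (i = 0; i < next->kmap_idx; i++)
		if (next->kmap_pte[i])          /* !pte_none() */
			fixmap[i] = next->kmap_pte[i];
}

int main(void)
{
	struct task a = { 2, { 0x1000, 0x2000 } };
	struct task b = { 1, { 0x9000 } };

	switch_kmaps(&b, &a);                   /* switch task A in */
	printf("running A: slot0=%#lx slot1=%#lx\n", fixmap[0], fixmap[1]);

	switch_kmaps(&a, &b);                   /* A -> B: A's mappings vanish */
	printf("running B: slot0=%#lx slot1=%#lx\n", fixmap[0], fixmap[1]);
	return 0;
}

Why the hook exists: with PREEMPT_RT_FULL, kmap_atomic() no longer disables preemption, so a task can be scheduled out while holding an atomic kmap, yet the fixmap slots it used are per CPU. Saving the PTEs per task and replaying them from switch_to() keeps the window consistent with whichever task runs next, and because the restore loop in the real code recomputes the index with smp_processor_id(), the mappings are re-established in the window of whatever CPU the task resumes on.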