Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/fault.c   |  2
-rw-r--r--  arch/arm/mm/highmem.c | 43
-rw-r--r--  arch/arm/mm/mm.h      |  1
-rw-r--r--  arch/arm/mm/mmap.c    |  6
-rw-r--r--  arch/arm/mm/mmu.c     |  7
5 files changed, 13 insertions, 46 deletions
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index b40d4ba..eb8830a 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -277,7 +277,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto no_context;
if (user_mode(regs))
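
Note: the hunk above swaps the RT-specific pagefault_disabled() test back to the mainline in_atomic() || !mm check. A minimal sketch (not from this commit; the helper name probe_user_byte is illustrative) of the kind of caller this path protects: code that disables pagefaults must get an exception-table fixup, never a sleep, if the access faults.

#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Illustrative only: pagefault_disable() bumps the preempt count, so
 * in_atomic() is true inside the window and do_page_fault() takes the
 * no_context path, resolving any fault via the exception tables
 * instead of sleeping on mm->mmap_sem.
 */
static unsigned long probe_user_byte(const void __user *uaddr, u8 *val)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(val, uaddr, 1);
	pagefault_enable();

	return ret;	/* non-zero: the fault was fixed up, not serviced */
}
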
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index bd41dd8..21b9e1b 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -38,7 +38,6 @@ EXPORT_SYMBOL(kunmap);
void *kmap_atomic(struct page *page)
{
- pte_t pte = mk_pte(page, kmap_prot);
unsigned int idx;
unsigned long vaddr;
void *kmap;
@@ -77,10 +76,7 @@ void *kmap_atomic(struct page *page)
* in place, so the contained TLB flush ensures the TLB is updated
* with the new mapping.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = pte;
-#endif
- set_top_pte(vaddr, pte);
+ set_top_pte(vaddr, mk_pte(page, kmap_prot));
return (void *)vaddr;
}
@@ -97,15 +93,12 @@ void __kunmap_atomic(void *kvaddr)
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = __pte(0);
-#endif
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ set_top_pte(vaddr, __pte(0));
#else
(void) idx; /* to kill a warning */
#endif
- set_top_pte(vaddr, __pte(0));
kmap_atomic_idx_pop();
} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
/* this address was obtained through kmap_high_get() */
@@ -117,7 +110,6 @@ EXPORT_SYMBOL(__kunmap_atomic);
void *kmap_atomic_pfn(unsigned long pfn)
{
- pte_t pte = pfn_pte(pfn, kmap_prot);
unsigned long vaddr;
int idx, type;
@@ -129,10 +121,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = pte;
-#endif
- set_top_pte(vaddr, pte);
+ set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
return (void *)vaddr;
}
@@ -146,29 +135,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
return pte_page(get_top_pte(vaddr));
}
-
-#if defined CONFIG_PREEMPT_RT_FULL
-void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-{
- int i;
-
- /*
- * Clear @prev's kmap_atomic mappings
- */
- for (i = 0; i < prev_p->kmap_idx; i++) {
- int idx = i + KM_TYPE_NR * smp_processor_id();
-
- set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0));
- }
- /*
- * Restore @next_p's kmap_atomic mappings
- */
- for (i = 0; i < next_p->kmap_idx; i++) {
- int idx = i + KM_TYPE_NR * smp_processor_id();
-
- if (!pte_none(next_p->kmap_pte[i]))
- set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
- next_p->kmap_pte[i]);
- }
-}
-#endif
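
Note: with the PREEMPT_RT_FULL bookkeeping removed, kmap_atomic() is again a plain per-CPU fixmap mapping that pins the task to the CPU until unmapped. A minimal usage sketch, assuming nothing beyond the stock highmem API:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Map a (possibly highmem) page, touch it, and unmap. Preemption is
 * disabled between the two calls, so no sleeping is allowed inside.
 */
static void zero_page_contents(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* installs the fixmap PTE */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* pops the kmap stack slot */
}
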
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9a..33eab61 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
pteval_t prot_pte;
+ pteval_t prot_pte_s2;
pmdval_t prot_l1;
pmdval_t prot_sect;
unsigned int domain;
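
Note: prot_pte_s2 carries the stage-2 (KVM guest) PTE attributes alongside the stage-1 ones. A one-line sketch of how a consumer turns it into a pgprot; this mirrors the build_mem_type_table() hunk further down, and assumes nothing beyond the standard __pgprot() wrapper:

	/* illustrative: derive the stage-2 device pgprot from the new field */
	pgprot_t s2_dev = __pgprot(mem_types[MT_DEVICE].prot_pte_s2);
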
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 304661d..5e85ed3 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
}
/*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
- return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+ return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
#ifdef CONFIG_STRICT_DEVMEM
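
Note: a worked instance of the new bound, assuming PAGE_SHIFT == 12. It reduces to the old hard-coded constant on non-LPAE kernels and scales up automatically with LPAE:

/*
 * Worked example, assuming PAGE_SHIFT == 12:
 *   non-LPAE: PHYS_MASK == 0xffffffff
 *             -> limit == (0xffffffff >> 12) + 1 == 0x00100000 pfns (4GiB),
 *                identical to the old hard-coded bound;
 *   LPAE:     PHYS_MASK == 0xffffffffff (40 bits)
 *             -> limit == 0x10000000 pfns (1TiB of physical space).
 */
static int pfn_range_in_phys(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
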
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b1d17ee..0222ba7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -229,12 +229,16 @@ __setup("noalign", noalign_setup);
#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
+ .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
+ s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+ L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
@@ -456,7 +460,8 @@ static void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
s2_pgprot = cp->pte_s2;
- hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
/*
* ARMv6 and above have extended page tables.
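
Note: s2_policy() is the helper in mmu.c that gates stage-2 attributes on LPAE. A hedged sketch of its shape, assuming the usual definition (stage-2 translation only exists with LPAE page tables, so the policy collapses to 0 on classic 2-level MMUs):

#ifdef CONFIG_ARM_LPAE
#define s2_policy(policy)	policy
#else
#define s2_policy(policy)	0
#endif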