author		Linus Torvalds <torvalds@linux-foundation.org>	2013-04-03 23:15:17 (GMT)
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-03 23:15:17 (GMT)
commit		66ade474237745a57b7e87da9a93c7ec69fd52bb (patch)
tree		a3e08728d8584fde056ec51f4b2249e3dd6032c5 /arch/arm/mm/mmu.c
parent		cbfa0e7204159515e63321142bcc2d6dcb854045 (diff)
parent		4e1db26a0b42e2b6e27c05d68adcc01709c2eed2 (diff)
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
 "Another round of ARM fixes, which include:
   - Fixing a problem with LPAE mapping sections
   - Reporting of some hwcaps on Krait CPUs
   - Avoiding repetitive warnings in the breakpoint code
   - Fixing a build error noticed on Dove platforms with PJ4 CPUs
   - Fix masking of level 2 cache revision.
   - Fixing timer-based udelay()
   - A larger fix for an erratum causing people major grief with Cortex A15 CPUs"

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7690/1: mm: fix CONFIG_LPAE typos
  ARM: 7689/1: add unwind annotations to ftrace asm
  ARM: 7685/1: delay: use private ticks_per_jiffy field for timer-based delay ops
  ARM: 7684/1: errata: Workaround for Cortex-A15 erratum 798181 (TLBI/DSB operations)
  ARM: 7682/1: cache-l2x0: fix masking of RTL revision numbering and set_debug init
  ARM: iWMMXt: always enable iWMMXt support with PJ4 CPUs
  ARM: 7681/1: hw_breakpoint: use warn_once to avoid spam from reset_ctrl_regs()
  ARM: 7678/1: Work around faulty ISAR0 register in some Krait CPUs
  ARM: 7680/1: Detect support for SDIV/UDIV from ISAR0 register
  ARM: 7679/1: Clear IDIVT hwcap if CONFIG_ARM_THUMB=n
  ARM: 7677/1: LPAE: Fix mapping in alloc_init_section for unaligned addresses
  ARM: KVM: vgic: take distributor lock on sync_hwstate path
  ARM: KVM: vgic: force EOIed LRs to the empty state
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	73
1 file changed, 47 insertions, 26 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e95a996..7897894 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
-				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+			unsigned long end, phys_addr_t phys,
+			const struct mem_type *type)
 {
-	pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
 	/*
-	 * Try a section mapping - end, addr and phys must all be aligned
-	 * to a section boundary. Note that PMDs refer to the individual
-	 * L1 entries, whereas PGDs refer to a group of L1 entries making
-	 * up one logical pointer to an L2 table.
+	 * In classic MMU format, puds and pmds are folded in to
+	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+	 * group of L1 entries making up one logical pointer to
+	 * an L2 table (2MB), where as PMDs refer to the individual
+	 * L1 entries (1MB). Hence increment to get the correct
+	 * offset for odd 1MB sections.
+	 * (See arch/arm/include/asm/pgtable-2level.h)
 	 */
-	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-		pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-		if (addr & SECTION_SIZE)
-			pmd++;
+	if (addr & SECTION_SIZE)
+		pmd++;
 #endif
+	do {
+		*pmd = __pmd(phys | type->prot_sect);
+		phys += SECTION_SIZE;
+	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-		do {
-			*pmd = __pmd(phys | type->prot_sect);
-			phys += SECTION_SIZE;
-		} while (pmd++, addr += SECTION_SIZE, addr != end);
+	flush_pmd_entry(pmd);
+}
 
-		flush_pmd_entry(p);
-	} else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+				      unsigned long end, phys_addr_t phys,
+				      const struct mem_type *type)
+{
+	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
+
+	do {
 		/*
-		 * No need to loop; pte's aren't interested in the
-		 * individual L1 entries.
+		 * With LPAE, we must loop over to map
+		 * all the pmds for the given range.
 		 */
-		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-	}
+		next = pmd_addr_end(addr, end);
+
+		/*
+		 * Try a section mapping - addr, next and phys must all be
+		 * aligned to a section boundary.
+		 */
+		if (type->prot_sect &&
+				((addr | next | phys) & ~SECTION_MASK) == 0) {
+			map_init_section(pmd, addr, next, phys, type);
+		} else {
+			alloc_init_pte(pmd, addr, next,
+					__phys_to_pfn(phys), type);
+		}
+
+		phys += next - addr;
+
+	} while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_section(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
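
For readers following the ARM: 7677/1 change above: the new alloc_init_pmd() walks the requested range one pmd at a time via pmd_addr_end() and only uses a section mapping when addr, next and phys are all section-aligned, falling back to alloc_init_pte() otherwise. The standalone sketch below is illustrative only, not kernel code: the constants, walk(), main() and the sample addresses are made-up stand-ins, and the kernel's additional type->prot_sect check is omitted. It merely shows how an unaligned range gets split into pte-mapped and section-mapped chunks.

/*
 * Illustrative userspace sketch (NOT kernel code) of the range walk in
 * alloc_init_pmd() above.  SECTION_SIZE/PMD_SIZE follow the classic
 * 2-level ARM layout; walk() and the sample range are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE	(1UL << 20)		/* 1MB section */
#define SECTION_MASK	(~(SECTION_SIZE - 1))
#define PMD_SIZE	(1UL << 21)		/* one Linux pmd covers 2MB */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Same idea as the kernel's pmd_addr_end(): clamp to the next pmd boundary. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + PMD_SIZE) & PMD_MASK;

	return (next - 1 < end - 1) ? next : end;
}

static void walk(unsigned long addr, unsigned long end, uint64_t phys)
{
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		/* Section mapping only when everything is section-aligned. */
		if (((addr | next | phys) & ~SECTION_MASK) == 0)
			printf("0x%08lx-0x%08lx -> section mapping of 0x%llx\n",
			       addr, next, (unsigned long long)phys);
		else
			printf("0x%08lx-0x%08lx -> pte mapping of 0x%llx\n",
			       addr, next, (unsigned long long)phys);

		phys += next - addr;
	} while (addr = next, addr != end);
}

int main(void)
{
	/* An unaligned start, the case the patch addresses. */
	walk(0xc0080000UL, 0xc0500000UL, 0x80080000ULL);
	return 0;
}

Compiled with any hosted C compiler, the sample range prints one pte-mapped chunk up to the first 2MB boundary followed by two section-mapped chunks, which is the mixed behaviour the patch restores for ranges that do not start on a section boundary.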