Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/Makefile               1
-rw-r--r--  arch/powerpc/mm/gup.c                 10
-rw-r--r--  arch/powerpc/mm/highmem.c             77
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c          4
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  17
-rw-r--r--  arch/powerpc/mm/pgtable.c              4
-rw-r--r--  arch/powerpc/mm/slb.c                 13
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c           2
8 files changed, 100 insertions, 28 deletions
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 2d2192e..3e68363 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index bc400c7..bc122a1 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -159,7 +159,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
int psize;
#endif
- pr_debug("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
+ pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
start &= PAGE_MASK;
addr = start;
@@ -170,7 +170,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
start, len)))
goto slow_irqon;
- pr_debug(" aligned: %lx .. %lx\n", start, end);
+ pr_devel(" aligned: %lx .. %lx\n", start, end);
#ifdef CONFIG_HUGETLB_PAGE
/* We bail out on slice boundary crossing when hugetlb is
@@ -234,7 +234,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
do {
VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
ptep = huge_pte_offset(mm, a);
- pr_debug(" %016lx: huge ptep %p\n", a, ptep);
+ pr_devel(" %016lx: huge ptep %p\n", a, ptep);
if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages,
&nr))
goto slow;
@@ -249,7 +249,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
#ifdef CONFIG_PPC64
VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
#endif
- pr_debug(" %016lx: normal pgd %p\n", addr,
+ pr_devel(" %016lx: normal pgd %p\n", addr,
(void *)pgd_val(pgd));
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
@@ -269,7 +269,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
slow:
local_irq_enable();
slow_irqon:
- pr_debug(" slow path ! nr = %d\n", nr);
+ pr_devel(" slow path ! nr = %d\n", nr);
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
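
The pr_debug() -> pr_devel() conversions above matter because pr_debug() can be
re-enabled at runtime by CONFIG_DYNAMIC_DEBUG, which is unwanted in a hot path
like get_user_pages_fast(); pr_devel() only emits output when the file defines
DEBUG. A simplified sketch of the distinction, assuming the printk.h definitions
of roughly this vintage (not the exact kernel macros):

#ifdef DEBUG
#define pr_devel(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_devel(fmt, ...) \
	({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif

With DEBUG undefined the call compiles away entirely, while the dead if (0)
branch still type-checks the format arguments.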
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
new file mode 100644
index 0000000..c2186c7
--- /dev/null
+++ b/arch/powerpc/mm/highmem.c
@@ -0,0 +1,77 @@
+/*
+ * highmem.c: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabytes of physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ *
+ * Reworked for PowerPC by various contributors. Moved from
+ * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+{
+ unsigned int idx;
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ debug_kmap_atomic(type);
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+ __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
+ local_flush_tlb_page(NULL, vaddr);
+
+ return (void*) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
+ pagefault_enable();
+ return;
+ }
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ /*
+ * force other mappings to Oops if they try to access
+ * this pte without first remapping it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_page(NULL, vaddr);
+#endif
+ pagefault_enable();
+}
+EXPORT_SYMBOL(kunmap_atomic);
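
A minimal usage sketch for the pair above, assuming the km_type-based API of
this era; copy_highpage_example() is a hypothetical caller, not part of this
patch (on powerpc, kmap_atomic(page, type) wraps kmap_atomic_prot(page, type,
kmap_prot)):

#include <linux/highmem.h>

/* Copy one (possibly highmem) page while in an atomic context. */
static void copy_highpage_example(struct page *dst, struct page *src)
{
	void *vfrom = kmap_atomic(src, KM_USER0);
	void *vto = kmap_atomic(dst, KM_USER1);

	copy_page(vto, vfrom);

	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}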
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9920d6a..c46ef2f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
- pmd_free_tlb(tlb, pmd);
+ pmd_free_tlb(tlb, pmd, start);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
- pud_free_tlb(tlb, pud);
+ pud_free_tlb(tlb, pud, start);
}
/*
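
The third argument added in the two hunks above matches the updated
pmd_free_tlb()/pud_free_tlb() interfaces, which now also take the virtual
address of the range being torn down. Assumed shape of the updated calls
(illustrative only; the helpers are arch-provided and may be macros rather
than functions):

	pmd_free_tlb(tlb, pmd, start);	/* start: address covered by the freed pmd page */
	pud_free_tlb(tlb, pud, start);	/* likewise for the pud page */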
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 8343986..b1a727d 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -89,7 +89,7 @@ static unsigned int steal_context_smp(unsigned int id)
id = first_context;
continue;
}
- pr_debug("[%d] steal context %d from mm @%p\n",
+ pr_devel("[%d] steal context %d from mm @%p\n",
smp_processor_id(), id, mm);
/* Mark this mm as having no context anymore */
@@ -126,7 +126,7 @@ static unsigned int steal_context_up(unsigned int id)
/* Pick up the victim mm */
mm = context_mm[id];
- pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+ pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
@@ -180,7 +180,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
spin_lock(&context_lock);
#ifndef DEBUG_STEAL_ONLY
- pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+ pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
cpu, next, next->context.active, next->context.id);
#endif
@@ -189,7 +189,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
next->context.active++;
if (prev) {
#ifndef DEBUG_STEAL_ONLY
- pr_debug(" old context %p active was: %d\n",
+ pr_devel(" old context %p active was: %d\n",
prev, prev->context.active);
#endif
WARN_ON(prev->context.active < 1);
@@ -217,6 +217,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
id = steal_context_smp(id);
if (id == MMU_NO_CONTEXT)
goto again;
+ goto stolen;
}
#endif /* CONFIG_SMP */
id = steal_context_up(id);
@@ -236,7 +237,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
next->context.id = id;
#ifndef DEBUG_STEAL_ONLY
- pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+ pr_devel("[%d] picked up new id %d, nrf is now %d\n",
cpu, id, nr_free_contexts);
#endif
@@ -247,7 +248,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
* local TLB for it and unmark it before we use it
*/
if (test_bit(id, stale_map[cpu])) {
- pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+ pr_devel("[%d] flushing stale context %d for mm @%p !\n",
cpu, id, next);
local_flush_tlb_mm(next);
@@ -314,13 +315,13 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+ pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+ pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
break;
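
The new goto in switch_mmu_context() ensures that a context id obtained from
steal_context_smp() is used directly instead of falling through into
steal_context_up() as well. A sketch of the resulting allocation flow,
reconstructed from the hunk context above (not a verbatim copy of the
function):

	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;	/* lost the race, retry */
			goto stolen;		/* added: skip the UP steal path */
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}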
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index ae1d67c..627767d 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -129,12 +129,12 @@ static pte_t do_dcache_icache_coherency(pte_t pte)
page = pfn_to_page(pfn);
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
- pr_debug("do_dcache_icache_coherency... flushing\n");
+ pr_devel("do_dcache_icache_coherency... flushing\n");
flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags);
}
else
- pr_debug("do_dcache_icache_coherency... already clean\n");
+ pr_devel("do_dcache_icache_coherency... already clean\n");
return __pte(pte_val(pte) | _PAGE_HWEXEC);
}
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 3b52c80..5b7038f 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -14,8 +14,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#undef DEBUG
-
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -27,11 +25,6 @@
#include <linux/compiler.h>
#include <asm/udbg.h>
-#ifdef DEBUG
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG pr_debug
-#endif
extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);
@@ -285,13 +278,13 @@ void slb_initialize(void)
patch_slb_encoding(slb_compare_rr_to_size,
mmu_slb_size);
- DBG("SLB: linear LLP = %04lx\n", linear_llp);
- DBG("SLB: io LLP = %04lx\n", io_llp);
+ pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
+ pr_devel("SLB: io LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
patch_slb_encoding(slb_miss_kernel_load_vmemmap,
SLB_VSID_KERNEL | vmemmap_llp);
- DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+ pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
}
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1be1b5e..937eb90 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -72,7 +72,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/
if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
- psize = get_slice_psize(mm, addr);;
+ psize = get_slice_psize(mm, addr);
#else
BUG();
psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */