Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h      10
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h      60
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h         30
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h    1

4 files changed, 45 insertions(+), 56 deletions(-)
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index c9500d6..580cf73 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -3,7 +3,8 @@
#include <linux/threads.h>
-#define PTE_NONCACHE_NUM 0 /* dummy for now to share code w/ppc64 */
+/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
+#define MAX_PGTABLE_INDEX_SIZE 0
extern void __bad_pte(pmd_t *pmd);
@@ -36,11 +37,10 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-static inline void pgtable_free(pgtable_free_t pgf)
+static inline void pgtable_free(void *table, unsigned index_size)
{
- void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
-
- free_page((unsigned long)p);
+ BUG_ON(index_size); /* 32-bit doesn't use this */
+ free_page((unsigned long)table);
}
#define check_pgt_cache() do { } while (0)
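
[Editorial note, not part of the patch: the 32-bit convention above is that every
pagetable level is a single page from get_free_page(), so callers always pass an
index_size of 0. A minimal illustrative sketch of the resulting call pattern:]

	/* Illustrative sketch only: on 32-bit a pagetable is one page,
	 * so index_size is always 0 and pgtable_free() reduces to
	 * free_page().  Passing a nonzero index_size would hit the
	 * BUG_ON() above. */
	void *table = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (table)
		pgtable_free(table, 0);	/* frees the page again */
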
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index e6f069c..5c1cd73 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -11,27 +11,39 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
+
#ifndef CONFIG_PPC_SUBPAGE_PROT
static inline void subpage_prot_free(pgd_t *pgd) {}
#endif
extern struct kmem_cache *pgtable_cache[];
-
-#define PGD_CACHE_NUM 0
-#define PUD_CACHE_NUM 1
-#define PMD_CACHE_NUM 1
-#define HUGEPTE_CACHE_NUM 2
-#define PTE_NONCACHE_NUM 7 /* from GFP rather than kmem_cache */
+#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
subpage_prot_free(pgd);
- kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
#ifndef CONFIG_PPC_64K_PAGES
@@ -40,13 +52,13 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
GFP_KERNEL|__GFP_REPEAT);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
- kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
+ kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -78,13 +90,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+ return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
GFP_KERNEL|__GFP_REPEAT);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
- kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
+ kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -107,24 +119,22 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
return page;
}
-static inline void pgtable_free(pgtable_free_t pgf)
+static inline void pgtable_free(void *table, unsigned index_size)
{
- void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
- int cachenum = pgf.val & PGF_CACHENUM_MASK;
-
- if (cachenum == PTE_NONCACHE_NUM)
- free_page((unsigned long)p);
- else
- kmem_cache_free(pgtable_cache[cachenum], p);
+ if (!index_size)
+ free_page((unsigned long)table);
+ else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
}
-#define __pmd_free_tlb(tlb, pmd,addr) \
- pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
- PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd, addr) \
+ pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr) \
- pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
- PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
+ pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
+
#endif /* CONFIG_PPC_64K_PAGES */
#define check_pgt_cache() do { } while (0)
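
[Editorial note, not part of the patch: the MAX_PGTABLE_INDEX_SIZE comment earlier
in this file requires every pagetable to leave enough low zero bits to hold an
index size of up to 0xf, and says the value doubles as a mask. A hedged sketch of
the pack/unpack step this alignment enables; the deferred-free code that actually
does this lives under arch/powerpc/mm and is not part of this diff, and the helper
names pgf_pack()/pgf_unpack() are hypothetical:]

	/* Pack: OR the index size into the table pointer's low bits.
	 * A shift of up to 0xf fits because every table is aligned to
	 * at least 16 bytes. */
	static inline unsigned long pgf_pack(void *table, unsigned shift)
	{
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		return (unsigned long)table | shift;
	}

	/* Unpack: MAX_PGTABLE_INDEX_SIZE doubles as the mask, which is
	 * why it must be one less than a power of two. */
	static inline void pgf_unpack(unsigned long pgf,
				      void **table, unsigned *shift)
	{
		*table = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
		*shift = pgf & MAX_PGTABLE_INDEX_SIZE;
	}
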
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index f2e812d..abe8532 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -24,25 +24,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
__free_page(ptepage);
}
-typedef struct pgtable_free {
- unsigned long val;
-} pgtable_free_t;
-
-/* This needs to be big enough to allow for MMU_PAGE_COUNT + 2 to be stored
- * and small enough to fit in the low bits of any naturally aligned page
- * table cache entry. Arbitrarily set to 0x1f, that should give us some
- * room to grow
- */
-#define PGF_CACHENUM_MASK 0x1f
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
- unsigned long mask)
-{
- BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
- return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
@@ -50,12 +31,12 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
#endif
#ifdef CONFIG_SMP
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
extern void pte_free_finish(void);
#else /* CONFIG_SMP */
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
- pgtable_free(pgf);
+ pgtable_free(table, shift);
}
static inline void pte_free_finish(void) { }
#endif /* !CONFIG_SMP */
@@ -63,12 +44,9 @@ static inline void pte_free_finish(void) { }
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
unsigned long address)
{
- pgtable_free_t pgf = pgtable_free_cache(page_address(ptepage),
- PTE_NONCACHE_NUM,
- PTE_TABLE_SIZE-1);
tlb_flush_pgtable(tlb, address);
pgtable_page_dtor(ptepage);
- pgtable_free_tlb(tlb, pgf);
+ pgtable_free_tlb(tlb, page_address(ptepage), 0);
}
#endif /* __KERNEL__ */
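
[Editorial note, not part of the patch: on SMP the new pgtable_free_tlb()
signature lets the deferred-free path carry the index size alongside the table.
A hedged sketch of what the consuming side could look like; the real
implementation lives in arch/powerpc/mm/pgtable.c and is not shown in this diff,
and pgf_unpack() is the hypothetical helper sketched above:]

	/* Sketch only: after the TLB flush has made the table
	 * unreachable, split the packed word back apart and hand it to
	 * pgtable_free(); a shift of 0 routes PTE pages to
	 * free_page(), everything else to its kmem_cache. */
	static void pte_free_deferred(unsigned long pgf)
	{
		void *table;
		unsigned shift;

		pgf_unpack(pgf, &table, &shift);
		pgtable_free(table, shift);
	}
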
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 806abe7..8697d65 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -354,6 +354,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
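
[Editorial note, not part of the patch: the pgtable_cache_add() declaration above
is the hook used to create one kmem_cache per distinct index size. A hedged
sketch of what such a constructor could look like; the real body lives under
arch/powerpc/mm and is not part of this diff, so treat the details below as
assumptions:]

	/* Sketch: make object size and alignment both
	 * (sizeof(void *) << shift), which sizes the table correctly
	 * and guarantees the low zero bits needed to encode the index
	 * size.  Levels with equal index sizes naturally share one
	 * cache. */
	void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
	{
		unsigned long table_size = sizeof(void *) << shift;
		char *name;

		BUG_ON(!shift || shift > MAX_PGTABLE_INDEX_SIZE);
		if (PGT_CACHE(shift))
			return;	/* a cache of this size already exists */

		name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
		pgtable_cache[shift - 1] =
			kmem_cache_create(name, table_size, table_size,
					  0, ctor);
		BUG_ON(!PGT_CACHE(shift));
	}
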