Diffstat (limited to 'include/asm-powerpc')
 include/asm-powerpc/mmu.h         | 13 +++++++++++++
 include/asm-powerpc/paca.h        |  1 +
 include/asm-powerpc/pgtable-4k.h  |  2 ++
 include/asm-powerpc/pgtable-64k.h |  2 ++
 include/asm-powerpc/pgtable.h     | 10 ++--------
 5 files changed, 20 insertions(+), 8 deletions(-)
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 8853974..3a5ebe2 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -165,6 +165,16 @@ struct mmu_psize_def
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_io_psize;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
#ifdef CONFIG_HUGETLB_PAGE
/*
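
The new mmu_ci_restrictions flag works together with the per-context user_psize field added further down: when the CPU can do 64k normal pages but not 64k cache-inhibited pages, the first cache-inhibited mapping a process creates forces that process back to 4k pages. A minimal sketch of how a fault path might act on the flag (the helper name and exact logic are illustrative, not this patch's code):

/* Illustrative sketch: demote a process to 4k pages when it creates
 * a cache-inhibited mapping and mmu_ci_restrictions is set. The
 * function name is hypothetical; the identifiers are from the
 * headers in this diff and the surrounding powerpc code. */
static void demote_segments_if_needed(struct mm_struct *mm, pte_t pte)
{
	if (!mmu_ci_restrictions || !(pte_val(pte) & _PAGE_NO_CACHE))
		return;
	if (mm->context.user_psize == MMU_PAGE_64K) {
		mm->context.user_psize = MMU_PAGE_4K;
		mm->context.sllp = SLB_VSID_USER |
			mmu_psize_defs[MMU_PAGE_4K].sllp;
		/* Drop any stale 64k SLB entries for this mm. */
		slb_flush_and_rebolt();
	}
}
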
@@ -256,6 +266,7 @@ extern long iSeries_hpte_insert(unsigned long hpte_group,
extern void stabs_alloc(void);
extern void slb_initialize(void);
+extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);
#endif /* __ASSEMBLY__ */
@@ -359,6 +370,8 @@ typedef unsigned long mm_context_id_t;
typedef struct {
mm_context_id_t id;
+ u16 user_psize; /* page size index */
+ u16 sllp; /* SLB entry page size encoding */
#ifdef CONFIG_HUGETLB_PAGE
u16 low_htlb_areas, high_htlb_areas;
#endif
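
The two new u16 fields cache, per address space, the page size index and the matching SLB page-size encoding, so the SLB miss handler can load the encoding directly instead of indexing mmu_psize_defs on every miss. A plausible initialization, assuming it sits wherever new contexts are created (the placement is an assumption):

	/* Start every new context at the global default page size. */
	mm->context.user_psize = mmu_virtual_psize;
	mm->context.sllp = SLB_VSID_USER |
			   mmu_psize_defs[mmu_virtual_psize].sllp;
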
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index c17fd54..1740635 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -81,6 +81,7 @@ struct paca_struct {
* on the linear mapping */
mm_context_t context;
+ u16 vmalloc_sllp;
u16 slb_cache[SLB_CACHE_ENTRIES];
u16 slb_cache_ptr;
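
vmalloc_sllp mirrors the per-mm sllp cache above, but for the kernel's vmalloc region: keeping it in the PACA puts it one load away from the SLB miss handler. A hedged sketch of how it might be refreshed when mmu_vmalloc_psize is demoted (the surrounding function is assumed, not shown in this diff):

	/* Refresh this CPU's cached vmalloc SLB encoding, then rebuild
	 * the bolted SLB entries so the new size takes effect. */
	get_paca()->vmalloc_sllp =
		SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_flush_and_rebolt();
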
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index b2e1862..e703615 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -78,6 +78,8 @@
#define pte_iterate_hashed_end() } while(0)
+#define pte_pagesize_index(pte) MMU_PAGE_4K
+
/*
* 4-level page tables related bits
*/
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 6539150..4b7126c 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -90,6 +90,8 @@
#define pte_iterate_hashed_end() } while(0); } } while(0)
+#define pte_pagesize_index(pte) \
+ (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
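
With 4k pages the index is a compile-time constant, while the 64k variant tests _PAGE_COMBO, the bit marking a 64k PTE that is actually backed by 4k hash entries; both forms take a raw PTE value. A hypothetical caller dispatching on the recorded size (the __hash_page_* dispatch is an assumption about the caller, not part of this patch):

	/* ea, access, vsid, ptep, trap and local come from the
	 * surrounding fault-handling context. */
	unsigned int psize = pte_pagesize_index(pte_val(*ptep));

	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
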
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index e9f1f46..260a0fa 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -47,8 +47,8 @@ struct mm_struct;
/*
* Define the address range of the vmalloc VM area.
*/
-#define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE (0x80000000000UL)
+#define VMALLOC_START ASM_CONST(0xD000000000000000)
+#define VMALLOC_SIZE ASM_CONST(0x80000000000)
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
/*
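
Replacing the ul-suffixed literals with ASM_CONST makes VMALLOC_START and VMALLOC_SIZE usable from assembler sources (such as the SLB miss handler) as well as from C. For reference, the usual powerpc definition of the macro looks like this (quoted from memory, so treat it as a sketch):

/* Expands to a bare constant under the assembler, and to an
 * unsigned long constant when compiled as C. */
#ifdef __ASSEMBLY__
#define ASM_CONST(x)	x
#else
#define __ASM_CONST(x)	x##UL
#define ASM_CONST(x)	__ASM_CONST(x)
#endif
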
@@ -413,12 +413,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
flush_tlb_pending();
}
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-
-#ifdef CONFIG_PPC_64K_PAGES
- if (mmu_virtual_psize != MMU_PAGE_64K)
- pte = __pte(pte_val(pte) | _PAGE_COMBO);
-#endif /* CONFIG_PPC_64K_PAGES */
-
*ptep = pte;
}
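
The deleted hunk set _PAGE_COMBO on every PTE whenever the single global mmu_virtual_psize was not 64k. With the page size now tracked per process in mm->context.user_psize, a global test at set_pte_at() time is no longer meaningful; the natural place to decide between 64k PTEs and 4k subpage ("combo") hash entries is the hash fault path, which already knows the faulting mm. That the fault path takes over this role is an inference from the headers above, not something visible in this diff.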