-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h  |  1
-rw-r--r--  arch/powerpc/mm/hash64_4k.c                 | 13
-rw-r--r--  arch/powerpc/mm/hash64_64k.c                | 35
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c             | 22
-rw-r--r--  arch/powerpc/mm/hugepage-hash64.c           | 13
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c        |  4
6 files changed, 21 insertions, 67 deletions
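
All of the hash fault paths below open-code the same Linux PTE to HPTE flag conversion; the patch replaces each copy with a call to htab_convert_pte_flags(), which is made non-static in hash_utils_64.c and declared in hash.h. For orientation, here is a sketch of how the consolidated helper reads after the patch, pieced together from the hash_utils_64.c hunk further down; the trailing return that ORs in HPTE_R_C and HPTE_R_M is not visible in that hunk and is assumed from the unchanged "Always add C bit" context lines.

unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = 0;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;
	/*
	 * PP bits: kernel mappings (SLB key 0) use PP 00; user mappings
	 * (key 1) use 0x2, or 0x3 when the page is read-only.
	 */
	if (pteflags & _PAGE_USER) {
		rflags |= 0x2;
		if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
			rflags |= 0x1;
	}
	/* Always add "C" bit for perf; memory coherence is always enabled */
	return rflags | HPTE_R_C | HPTE_R_M;	/* assumed from context lines */
}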
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index e4ea9d7..9c212449 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -236,6 +236,7 @@ extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
pmd_t *pmdp,
unsigned long clr,
unsigned long set);
+extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 3b49c6f..ee86313 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -53,18 +53,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
* PP bits. _PAGE_USER is already PP bit 0x2, so we only
* need to add in 0x1 if it's a read-only user page
*/
- rflags = new_pte & _PAGE_USER;
- if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
- (new_pte & _PAGE_DIRTY)))
- rflags |= 0x1;
- /*
- * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
- */
- rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
- /*
- * Always add C and Memory coherence bit
- */
- rflags |= HPTE_R_C | HPTE_R_M;
+ rflags = htab_convert_pte_flags(new_pte);
/*
* Add in WIMG bits
*/
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index fc0898e..b14280e 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -85,22 +85,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
* Handle the subpage protection bits
*/
subpg_pte = new_pte & ~subpg_prot;
- /*
- * PP bits. _PAGE_USER is already PP bit 0x2, so we only
- * need to add in 0x1 if it's a read-only user page
- */
- rflags = subpg_pte & _PAGE_USER;
- if ((subpg_pte & _PAGE_USER) && !((subpg_pte & _PAGE_RW) &&
- (subpg_pte & _PAGE_DIRTY)))
- rflags |= 0x1;
- /*
- * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
- */
- rflags |= ((subpg_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
- /*
- * Always add C and Memory coherence bit
- */
- rflags |= HPTE_R_C | HPTE_R_M;
+ rflags = htab_convert_pte_flags(subpg_pte);
/*
* Add in WIMG bits
*/
@@ -271,22 +256,8 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
new_pte |= _PAGE_DIRTY;
} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
old_pte, new_pte));
- /*
- * PP bits. _PAGE_USER is already PP bit 0x2, so we only
- * need to add in 0x1 if it's a read-only user page
- */
- rflags = new_pte & _PAGE_USER;
- if ((new_pte & _PAGE_USER) && !((new_pte & _PAGE_RW) &&
- (new_pte & _PAGE_DIRTY)))
- rflags |= 0x1;
- /*
- * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
- */
- rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
- /*
- * Always add C and Memory coherence bit
- */
- rflags |= HPTE_R_C | HPTE_R_M;
+
+ rflags = htab_convert_pte_flags(new_pte);
/*
* Add in WIMG bits
*/
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index cab2deb..6c67bd0 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -159,20 +159,26 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
},
};
-static unsigned long htab_convert_pte_flags(unsigned long pteflags)
+unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
- unsigned long rflags = pteflags & 0x1fa;
+ unsigned long rflags = 0;
/* _PAGE_EXEC -> NOEXEC */
if ((pteflags & _PAGE_EXEC) == 0)
rflags |= HPTE_R_N;
-
- /* PP bits. PAGE_USER is already PP bit 0x2, so we only
- * need to add in 0x1 if it's a read-only user page
+	/*
+	 * PP bits:
+	 * Linux uses SLB key 0 for kernel and key 1 for user mappings.
+	 * Kernel areas are mapped with PP bits 00 and there is no
+	 * kernel read-only mapping (_PAGE_KERNEL_RO).
+	 * User areas are mapped with 0x2 and user read-only areas
+	 * with 0x3.
*/
- if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
- (pteflags & _PAGE_DIRTY)))
- rflags |= 1;
+ if (pteflags & _PAGE_USER) {
+ rflags |= 0x2;
+ if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
+ rflags |= 0x1;
+ }
/*
* Always add "C" bit for perf. Memory coherence is always enabled
*/
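
A minimal standalone sketch of the PP-bit selection introduced above, handy for sanity-checking the mapping described in the new comment. The _PAGE_* values here are placeholders, not the real arch/powerpc definitions; only the relative logic matters.

/* Standalone sketch; _PAGE_* values are placeholders, not kernel ones. */
#include <stdio.h>

#define _PAGE_USER  0x1UL	/* placeholder value */
#define _PAGE_RW    0x2UL	/* placeholder value */
#define _PAGE_DIRTY 0x4UL	/* placeholder value */

/* Mirrors the PP-bit branch of htab_convert_pte_flags() shown above */
static unsigned long pp_bits(unsigned long pteflags)
{
	unsigned long pp = 0;

	if (pteflags & _PAGE_USER) {
		pp |= 0x2;			/* user access */
		if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
			pp |= 0x1;		/* user read-only */
	}
	return pp;				/* kernel mappings keep PP = 00 */
}

int main(void)
{
	printf("kernel mapping:  PP = 0x%lx\n", pp_bits(0));
	printf("user read-only:  PP = 0x%lx\n", pp_bits(_PAGE_USER));
	printf("user read-write: PP = 0x%lx\n",
	       pp_bits(_PAGE_USER | _PAGE_RW | _PAGE_DIRTY));
	return 0;
}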
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 4d87122..91fcac6 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -54,18 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
new_pmd |= _PAGE_DIRTY;
} while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
old_pmd, new_pmd));
- /*
- * PP bits. _PAGE_USER is already PP bit 0x2, so we only
- * need to add in 0x1 if it's a read-only user page
- */
- rflags = new_pmd & _PAGE_USER;
- if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
- (new_pmd & _PAGE_DIRTY)))
- rflags |= 0x1;
- /*
- * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
- */
- rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
+ rflags = htab_convert_pte_flags(new_pmd);
#if 0
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 7584e84..304c852 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -59,10 +59,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
new_pte |= _PAGE_DIRTY;
} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
old_pte, new_pte));
+ rflags = htab_convert_pte_flags(new_pte);
- rflags = 0x2 | (!(new_pte & _PAGE_RW));
- /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
- rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
sz = ((1UL) << shift);
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
/* No CPU has hugepages but lacks no execute, so we