author    David Gibson <david@gibson.dropbear.id.au>    2005-09-06 04:59:47 (GMT)
committer Paul Mackerras <paulus@samba.org>             2005-09-06 06:57:46 (GMT)
commit    14b34661615ec036ab4c91637913706e4caccc93
tree      98915b1889422383a85186d8455ecca69fa2327b
parent    0fdf0b8634055b016f7b93cfcdea2eb9091f0271
[PATCH] Invert sense of SLB class bit
Currently, we set the class bit in kernel SLB entries, and clear it on
user SLB entries.  On POWER5, ERAT entries created in real mode have
the class bit clear.  So to avoid flushing kernel ERAT entries on each
context switch, this patch inverts our usage of the class bit, setting
it on user SLB entries and clearing it on kernel SLB entries.

Booted on POWER5 and G5.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
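As background (an illustrative sketch, not part of the patch): the class bit appears in two different encodings, as SLB_VSID_C in the VSID word written by slbmte, and as SLBIE_C in the effective-address operand passed to slbie. The small standalone C program below just prints how the slbie operand for a user segment is assembled after this change; the SID_SHIFT value and the example ESID are assumptions for illustration only.

    #include <stdio.h>

    #define SID_SHIFT  28                       /* 256MB segments */
    #define SLB_VSID_C 0x0000000000000080ULL    /* class bit in the SLB VSID word */
    #define SLBIE_C    0x0000000008000000ULL    /* class bit in the slbie operand */

    int main(void)
    {
        unsigned long long esid = 1;    /* hypothetical user ESID */

        /* After this patch, user SLB entries carry the class bit, so the
         * matching slbie operand must set SLBIE_C as well; kernel entries
         * are invalidated with the bit clear. */
        unsigned long long user_rb = (esid << SID_SHIFT) | SLBIE_C;

        printf("slbie operand for user segment: 0x%016llx\n", user_rb);
        return 0;
    }

The two constants differ because the class bit sits at different offsets in the two operand encodings. Note that the old entry.S code set the same bit inline with "oris r0,r6,0x0800" (0x0800 shifted left 16 bits is 0x08000000, i.e. SLBIE_C); that oris can simply be dropped now that kernel entries have the class bit clear.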
 arch/ppc64/kernel/entry.S   | 11 +++++------
 arch/ppc64/mm/hugetlbpage.c |  6 ++++--
 arch/ppc64/mm/slb.c         |  4 ++--
 include/asm-ppc64/mmu.h     |  6 ++++--
 4 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
index b61572e..bf99b4a 100644
--- a/arch/ppc64/kernel/entry.S
+++ b/arch/ppc64/kernel/entry.S
@@ -400,15 +400,14 @@ BEGIN_FTR_SECTION
cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
cror eq,4*cr1+eq,eq
beq 2f /* if yes, don't slbie it */
- oris r0,r6,0x0800 /* set C (class) bit */
/* Bolt in the new stack SLB entry */
ld r7,KSP_VSID(r4) /* Get new stack's VSID */
- oris r6,r6,(SLB_ESID_V)@h
- ori r6,r6,(SLB_NUM_BOLTED-1)@l
- slbie r0
- slbie r0 /* Workaround POWER5 < DD2.1 issue */
- slbmte r7,r6
+ oris r0,r6,(SLB_ESID_V)@h
+ ori r0,r0,(SLB_NUM_BOLTED-1)@l
+ slbie r6
+ slbie r6 /* Workaround POWER5 < DD2.1 issue */
+ slbmte r7,r0
isync
2:
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index e7833c8..338771e 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -144,7 +144,8 @@ static void flush_low_segments(void *parm)
for (i = 0; i < NUM_LOW_AREAS; i++) {
if (! (areas & (1U << i)))
continue;
- asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
+ asm volatile("slbie %0"
+ : : "r" ((i << SID_SHIFT) | SLBIE_C));
}
asm volatile("isync" : : : "memory");
@@ -164,7 +165,8 @@ static void flush_high_segments(void *parm)
continue;
for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
asm volatile("slbie %0"
- :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+ :: "r" (((i << HTLB_AREA_SHIFT)
+ + (j << SID_SHIFT)) | SLBIE_C));
}
asm volatile("isync" : : : "memory");
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
index 244150a..0473953 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/ppc64/mm/slb.c
@@ -87,8 +87,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
int i;
asm volatile("isync" : : : "memory");
for (i = 0; i < offset; i++) {
- esid_data = (unsigned long)get_paca()->slb_cache[i]
- << SID_SHIFT;
+ esid_data = ((unsigned long)get_paca()->slb_cache[i]
+ << SID_SHIFT) | SLBIE_C;
asm volatile("slbie %0" : : "r" (esid_data));
}
asm volatile("isync" : : : "memory");
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index ad36bb2..7bc42eb 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -54,8 +54,10 @@ extern char initial_stab[];
#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LS ASM_CONST(0x0000000000000070) /* size of largepage */
-#define SLB_VSID_KERNEL (SLB_VSID_KP|SLB_VSID_C)
-#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS)
+#define SLB_VSID_KERNEL (SLB_VSID_KP)
+#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C (0x08000000)
/*
* Hash table