author     Scott Wood <scottwood@freescale.com>  2012-06-12 05:52:47 (GMT)
committer  Emil Medve <Emilian.Medve@Freescale.com>  2013-03-21 18:42:15 (GMT)
commit     7f14ca3731ae3707b90f2bfa1b3ef5156afa3a03 (patch)
tree       3697075f8986662f76465b5e7322a2ef08473c92 /arch/powerpc/mm
parent     0dcd97576a8a922fcb524d821b12c5128b9f516d (diff)
powerpc/e6500: add tlb spinlock to non-tablewalk tlb miss
We need this because we didn't implement "tlbsrx.".

Properly testing this will require a multithreaded user program that
generates a lot of TLB misses for a relatively small number of pages.

Signed-off-by: Scott Wood <scottwood@freescale.com>
(cherry picked from commit 7e4474fef3d817f9dee339bb61eb0c6f6b700eea)
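[Annotation] Since e6500 lacks tlbsrx., two hardware threads on one core can
miss on the same EA, and without a reservation the second tlbwe would install
a duplicate entry. The patch therefore takes a per-core lock byte, probes with
tlbsx, and bails if the entry already became valid. A rough, runnable C
analogue of that flow; every name here (tlb_miss_refill, tlb_probe_valid,
tlb_write_entry, fake_tlb) is a made-up illustration, not a kernel symbol:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FAKE_TLB_WAYS 4

static atomic_uchar tlb_lock;                 /* ~ the PACA_TLB_LOCK byte */
static unsigned long fake_tlb[FAKE_TLB_WAYS]; /* stand-in for real TLB entries */

static bool tlb_probe_valid(unsigned long ea) /* ~ tlbsx + MAS1_VALID check */
{
	for (int i = 0; i < FAKE_TLB_WAYS; i++)
		if (fake_tlb[i] == ea)
			return true;
	return false;
}

static void tlb_write_entry(unsigned long ea) /* ~ page table walk + tlbwe */
{
	static int next;
	fake_tlb[next++ % FAKE_TLB_WAYS] = ea;
}

static void tlb_miss_refill(unsigned long ea)
{
	/* take the per-core lock (~ the lbarx/stbcx. loop in the patch) */
	while (atomic_exchange_explicit(&tlb_lock, 1, memory_order_acquire))
		;
	/* another thread may have refilled this EA while we waited; a
	 * second write would create a duplicate entry, so probe first */
	if (!tlb_probe_valid(ea))
		tlb_write_entry(ea);
	/* ~ isync + stb: the entry is in place before the lock clears */
	atomic_store_explicit(&tlb_lock, 0, memory_order_release);
}

int main(void)
{
	tlb_miss_refill(0x1000);
	tlb_miss_refill(0x1000); /* second miss, same EA: probe hits, no dup */
	printf("entry present: %d\n", (int)tlb_probe_valid(0x1000));
	return 0;
}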
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S  |  70
1 file changed, 61 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index c726c16..f9b7738 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -110,6 +110,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
ori r10,r10,_PAGE_PRESENT
oris r11,r10,_PAGE_ACCESSED@h
+#ifdef CONFIG_FSL_THREADS
+ ld r10,PACA_TLB_PER_CORE_PTR(r13)
+#endif
+
TLB_MISS_STATS_SAVE_INFO_BOLTED
bne tlb_miss_kernel_bolted
@@ -123,23 +127,53 @@ tlb_miss_common_bolted:
* r14 = page table base
* r13 = PACA
* r11 = PTE permission mask
- * r10 = crap (free to use)
+ * r10 = tlb_per_core ptr
*/
+
+#ifdef CONFIG_FSL_THREADS
+ /*
+ * Search if we already have an entry for that virtual
+ * address, and if we do, bail out.
+ */
+ mtocrf 0x01,r10
+ addi r10,r10,PACA_TLB_LOCK-1 /* -1 to compensate for low bit set */
+ bf 31,1f /* no lock if TLB_PER_CORE_HAS_LOCK clear */
+2: lbarx r15,0,r10
+ cmpdi r15,0
+ bne 3f
+ li r15,1
+ stbcx. r15,0,r10
+ bne 2b
+ .subsection 1
+3: lbz r15,0(r10)
+ cmpdi r15,0
+ bne 3b
+ b 2b
+ .previous
+1:
+#endif
+
rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
cmpldi cr0,r14,0
clrrdi r15,r15,3
beq tlb_miss_fault_bolted /* No PGDIR, bail */
-BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and search for existing entry. Then load
- * the entry.
- */
- PPC_TLBSRX_DOT(0,R16)
+#ifdef CONFIG_FSL_THREADS
+ mfspr r10,SPRN_MAS2
+ tlbsx 0,r16
ldx r14,r14,r15 /* grab pgd entry */
- beq normal_tlb_miss_done /* tlb exists already, bail */
-MMU_FTR_SECTION_ELSE
+ mfspr r15,SPRN_MAS1
+ andis. r15,r15,MAS1_VALID@h
+ bne tlb_miss_done_bolted /* tlb exists already, bail */
+
+ /* Undo MAS-damage from the tlbsx */
+ mfspr r15,SPRN_MAS1
+ oris r15,r15,MAS1_VALID@h
+ mtspr SPRN_MAS1,r15
+ mtspr SPRN_MAS2,r10
+#else
ldx r14,r14,r15 /* grab pgd entry */
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
+#endif
#ifndef CONFIG_PPC_64K_PAGES
rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
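[Annotation] The lbarx/stbcx. sequence in the hunk above is a byte-sized
test-and-set with the contended path moved out of line (.subsection 1):
waiters spin on plain lbz loads and only retry the store-conditional once the
byte reads zero, so they don't keep invalidating each other's reservations,
and the uncontended miss path stays straight-line code. A C11 sketch of the
same shape (byte_lock_acquire is a made-up name):

#include <stdatomic.h>

static void byte_lock_acquire(atomic_uchar *lock)
{
	for (;;) {
		/* fast path: lbarx/stbcx. pair ~ byte-sized atomic exchange */
		if (!atomic_exchange_explicit(lock, 1, memory_order_acquire))
			return;
		/* slow path (the .subsection 1 block): read-only spin until
		 * the holder clears the byte, then retry the exchange */
		while (atomic_load_explicit(lock, memory_order_relaxed))
			;
	}
}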
@@ -192,6 +226,20 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
mtspr SPRN_MAS7_MAS3,r15
tlbwe
+tlb_miss_done_bolted:
+ .macro tlb_unlock_bolted
+#ifdef CONFIG_FSL_THREADS
+ ld r10,PACA_TLB_PER_CORE_PTR(r13)
+ bf 31,1f
+ li r15,0
+ isync
+ stb r15,PACA_TLB_LOCK-1(r10)
+1:
+#endif
+ .endm
+
+ tlb_unlock_bolted
+
TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
tlb_epilog_bolted
rfi
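[Annotation] tlb_unlock_bolted mirrors the acquisition: per the patch's own
comments, the low bit of the tlb_per_core pointer carries the
TLB_PER_CORE_HAS_LOCK flag (tested via mtocrf/bf 31), and single-threaded
cores skip the lock entirely; the release itself is an isync followed by a
plain byte store. Roughly, with made-up names (has_lock stands in for that
flag bit):

#include <stdatomic.h>
#include <stdbool.h>

static void tlb_unlock(atomic_uchar *lock, bool has_lock)
{
	if (!has_lock) /* core doesn't use the lock (the bf 31,1f case) */
		return;
	/* ~ isync + stb: the tlbwe completes before the byte clears */
	atomic_store_explicit(lock, 0, memory_order_release);
}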
@@ -208,6 +256,7 @@ tlb_miss_kernel_bolted:
beq+ tlb_miss_common_bolted
tlb_miss_fault_bolted:
+ tlb_unlock_bolted
/* We need to check if it was an instruction miss */
andi. r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
bne itlb_miss_fault_bolted
@@ -229,6 +278,9 @@ itlb_miss_fault_bolted:
TLB_MISS_STATS_SAVE_INFO_BOLTED
bne- itlb_miss_fault_bolted
+#ifdef CONFIG_FSL_THREADS
+ ld r10,PACA_TLB_PER_CORE_PTR(r13)
+#endif
li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */
/* We do the user/kernel test for the PID here along with the RW test