author      David S. Miller <davem@davemloft.net>          2006-03-22 08:49:59 (GMT)
committer   David S. Miller <davem@sunset.davemloft.net>   2006-03-22 09:15:14 (GMT)
commit      dcc1e8dd88d4bc55e32a26dad7633d20ffe606d2 (patch)
tree        a47592213d94f918867d3dd81bb91dac3e727dea /arch/sparc64/kernel
parent      14778d9072e53d2171f66ffd9657daff41acfaed (diff)
download    linux-dcc1e8dd88d4bc55e32a26dad7633d20ffe606d2.tar.xz
[SPARC64]: Add a secondary TSB for hugepage mappings.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--   arch/sparc64/kernel/sun4v_tlb_miss.S    39
-rw-r--r--   arch/sparc64/kernel/traps.c             21
-rw-r--r--   arch/sparc64/kernel/tsb.S              210
3 files changed, 202 insertions(+), 68 deletions(-)
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index ab23ddb..b731881 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -29,15 +29,15 @@
*
* index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
* tsb_base = tsb_reg & ~0x7UL;
- * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+ * tsb_index = ((vaddr >> HASH_SHIFT) & index_mask);
* tsb_ptr = tsb_base + (tsb_index * 16);
*/
-#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
and TSB_PTR, 0x7, TMP1; \
mov 512, TMP2; \
andn TSB_PTR, 0x7, TSB_PTR; \
sllx TMP2, TMP1, TMP2; \
- srlx VADDR, PAGE_SHIFT, TMP1; \
+ srlx VADDR, HASH_SHIFT, TMP1; \
sub TMP2, 1, TMP2; \
and TMP1, TMP2, TMP1; \
sllx TMP1, 4, TMP1; \
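
For reference, the pointer arithmetic COMPUTE_TSB_PTR performs is exactly the pseudocode in the comment above; here is a minimal C sketch of it (the function name and signature are illustrative, not kernel API):

/* C sketch of COMPUTE_TSB_PTR.  tsb_reg packs the TSB base address in
 * the upper bits and a size field in bits 2:0; entries are 16 bytes.
 */
static unsigned long compute_tsb_ptr(unsigned long tsb_reg,
				     unsigned long vaddr,
				     unsigned int hash_shift)
{
	unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	unsigned long tsb_base   = tsb_reg & ~0x7UL;
	unsigned long tsb_index  = (vaddr >> hash_shift) & index_mask;

	return tsb_base + tsb_index * 16;	/* 16-byte TSB entries */
}

Parameterizing the macro on HASH_SHIFT is what lets the same code index both the base TSB (hashed by PAGE_SHIFT) and the new huge-page TSB (hashed by HPAGE_SHIFT).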
@@ -53,7 +53,7 @@ sun4v_itlb_miss:
LOAD_ITLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
- COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+ COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -99,7 +99,7 @@ sun4v_dtlb_miss:
LOAD_DTLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
- COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+ COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -171,21 +171,26 @@ sun4v_dtsb_miss:
/* fallthrough */
- /* Create TSB pointer into %g1. This is something like:
- *
- * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
- * tsb_base = tsb_reg & ~0x7UL;
- * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
- * tsb_ptr = tsb_base + (tsb_index * 16);
- */
sun4v_tsb_miss_common:
- COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+ COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
- /* Branch directly to page table lookup. We have SCRATCHPAD_MMU_MISS
- * still in %g2, so it's quite trivial to get at the PGD PHYS value
- * so we can preload it into %g7.
- */
sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+#ifdef CONFIG_HUGETLB_PAGE
+ mov SCRATCHPAD_UTSBREG2, %g5
+ ldxa [%g5] ASI_SCRATCHPAD, %g5
+ cmp %g5, -1
+ be,pt %xcc, 80f
+ nop
+ COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+
+ /* That clobbered %g2; reload it. */
+ ldxa [%g0] ASI_SCRATCHPAD, %g2
+ sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+80: stx %g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif
+
ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath
ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
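
The hugepage branch added above reads a second TSB register from the scratchpad and stashes either a computed huge-TSB entry address or -1 into tsb_huge_temp. A hedged C sketch of that control flow, reusing compute_tsb_ptr() from the earlier sketch (the hash shift of 22, i.e. 4MB huge pages, is an assumption for illustration):

/* Flow sketch of the CONFIG_HUGETLB_PAGE block in sun4v_tsb_miss_common.
 * 'utsbreg2' stands for the value read from SCRATCHPAD_UTSBREG2.
 */
static unsigned long huge_tsb_entry(unsigned long utsbreg2,
				    unsigned long vaddr)
{
	/* -1 means this process has no huge-page TSB; it is stored
	 * unchanged so the PTE fastpath can test it again later.
	 */
	if (utsbreg2 == -1UL)
		return utsbreg2;
	/* Result is stored into TRAP_PER_CPU_TSB_HUGE_TEMP. */
	return compute_tsb_ptr(utsbreg2, vaddr, 22 /* assumed HPAGE_SHIFT */);
}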
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 7f7dba0..df612e4 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2482,6 +2482,7 @@ void init_cur_cpu_trap(struct thread_info *t)
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
/* Only invoked on boot processor. */
void __init trap_init(void)
@@ -2535,9 +2536,27 @@ void __init trap_init(void)
(TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
(TRAP_PER_CPU_CPU_LIST_PA !=
- offsetof(struct trap_per_cpu, cpu_list_pa)))
+ offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+ (TRAP_PER_CPU_TSB_HUGE !=
+ offsetof(struct trap_per_cpu, tsb_huge)) ||
+ (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+ offsetof(struct trap_per_cpu, tsb_huge_temp)))
trap_per_cpu_offsets_are_bolixed_dave();
+ if ((TSB_CONFIG_TSB !=
+ offsetof(struct tsb_config, tsb)) ||
+ (TSB_CONFIG_RSS_LIMIT !=
+ offsetof(struct tsb_config, tsb_rss_limit)) ||
+ (TSB_CONFIG_NENTRIES !=
+ offsetof(struct tsb_config, tsb_nentries)) ||
+ (TSB_CONFIG_REG_VAL !=
+ offsetof(struct tsb_config, tsb_reg_val)) ||
+ (TSB_CONFIG_MAP_VADDR !=
+ offsetof(struct tsb_config, tsb_map_vaddr)) ||
+ (TSB_CONFIG_MAP_PTE !=
+ offsetof(struct tsb_config, tsb_map_pte)))
+ tsb_config_offsets_are_bolixed_dave();
+
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
*/
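
The new checks extend an existing link-time assertion idiom: each condition compares a constant hard-coded on the assembly side against offsetof() on the C structure. Both operands are compile-time constants, so the compiler folds the test away when the layouts agree; a mismatch leaves behind a call to a deliberately undefined function and the build fails at link time. A self-contained sketch of the pattern (the structure, offset, and function names here are illustrative):

#include <stddef.h>

/* Offset the assembly code assumes; must match the C layout. */
#define SKETCH_TSB_OFF 0x00

struct sketch_tsb_config {
	void *tsb;		/* expected at offset SKETCH_TSB_OFF */
};

/* Never defined anywhere: a surviving call is a link error. */
extern void sketch_offsets_are_bolixed(void);

static inline void sketch_check_offsets(void)
{
	if (SKETCH_TSB_OFF != offsetof(struct sketch_tsb_config, tsb))
		sketch_offsets_are_bolixed();
}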
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 118baea..a0c8ba5 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -3,8 +3,13 @@
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
+#include <linux/config.h>
+
#include <asm/tsb.h>
#include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
+#include <asm/mmu.h>
.text
.align 32
@@ -34,34 +39,124 @@ tsb_miss_itlb:
ldxa [%g4] ASI_IMMU, %g4
/* At this point we have:
- * %g1 -- TSB entry address
+ * %g1 -- PAGE_SIZE TSB entry address
* %g3 -- FAULT_CODE_{D,I}TLB
* %g4 -- missing virtual address
* %g6 -- TAG TARGET (vaddr >> 22)
*/
tsb_miss_page_table_walk:
- TRAP_LOAD_PGD_PHYS(%g7, %g5)
+ TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
- /* And now we have the PGD base physical address in %g7. */
-tsb_miss_page_table_walk_sun4v_fastpath:
- USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+ /* Before committing to a full page table walk,
+ * check the huge page TSB.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+
+661: ldx [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
+ nop
+ .section .sun4v_2insn_patch, "ax"
+ .word 661b
+ mov SCRATCHPAD_UTSBREG2, %g5
+ ldxa [%g5] ASI_SCRATCHPAD, %g5
+ .previous
+
+ cmp %g5, -1
+ be,pt %xcc, 80f
+ nop
+
+ /* We need an aligned pair of registers containing 2 values
+ * which can be easily rematerialized. %g6 and %g7 foot the
+ * bill just nicely. We'll save %g6 away into %g2 for the
+ * huge page TSB TAG comparison.
+ *
+ * Perform a huge page TSB lookup.
+ */
+ mov %g6, %g2
+ and %g5, 0x7, %g6
+ mov 512, %g7
+ andn %g5, 0x7, %g5
+ sllx %g7, %g6, %g7
+ srlx %g4, HPAGE_SHIFT, %g6
+ sub %g7, 1, %g7
+ and %g6, %g7, %g6
+ sllx %g6, 4, %g6
+ add %g5, %g6, %g5
+
+ TSB_LOAD_QUAD(%g5, %g6)
+ cmp %g6, %g2
+ be,a,pt %xcc, tsb_tlb_reload
+ mov %g7, %g5
+
+ /* No match, remember the huge page TSB entry address,
+ * and restore %g6 and %g7.
+ */
+ TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
+ srlx %g4, 22, %g6
+80: stx %g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+
+#endif
+
+ ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
/* At this point we have:
* %g1 -- TSB entry address
* %g3 -- FAULT_CODE_{D,I}TLB
- * %g5 -- physical address of PTE in Linux page tables
+ * %g4 -- missing virtual address
* %g6 -- TAG TARGET (vaddr >> 22)
+ * %g7 -- page table physical address
+ *
+ * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
+ * TSB lack a matching entry.
*/
-tsb_reload:
- TSB_LOCK_TAG(%g1, %g2, %g7)
+tsb_miss_page_table_walk_sun4v_fastpath:
+ USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
/* Load and check PTE. */
ldxa [%g5] ASI_PHYS_USE_EC, %g5
- mov 1, %g7
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
- brgez,a,pn %g5, tsb_do_fault
- TSB_STORE(%g1, %g7)
+ brgez,pn %g5, tsb_do_fault
+ nop
+
+#ifdef CONFIG_HUGETLB_PAGE
+661: sethi %uhi(_PAGE_SZALL_4U), %g7
+ sllx %g7, 32, %g7
+ .section .sun4v_2insn_patch, "ax"
+ .word 661b
+ mov _PAGE_SZALL_4V, %g7
+ nop
+ .previous
+
+ and %g5, %g7, %g2
+
+661: sethi %uhi(_PAGE_SZHUGE_4U), %g7
+ sllx %g7, 32, %g7
+ .section .sun4v_2insn_patch, "ax"
+ .word 661b
+ mov _PAGE_SZHUGE_4V, %g7
+ nop
+ .previous
+
+ cmp %g2, %g7
+ bne,pt %xcc, 60f
+ nop
+
+ /* It is a huge page; use the huge page TSB entry address we
+ * calculated above.
+ */
+ TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
+ ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
+ cmp %g2, -1
+ movne %xcc, %g2, %g1
+60:
+#endif
+ /* At this point we have:
+ * %g1 -- TSB entry address
+ * %g3 -- FAULT_CODE_{D,I}TLB
+ * %g5 -- valid PTE
+ * %g6 -- TAG TARGET (vaddr >> 22)
+ */
+tsb_reload:
+ TSB_LOCK_TAG(%g1, %g2, %g7)
TSB_WRITE(%g1, %g5, %g6)
/* Finally, load TLB and return from trap. */
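
The fastpath above classifies the freshly loaded PTE by its page-size field and, for huge pages, swaps in the TSB entry address remembered in tsb_huge_temp. A hedged C sketch of that selection (the masks are passed in rather than pulled from the real headers, so the sketch stays independent of the actual _PAGE_SZALL/_PAGE_SZHUGE values):

/* Pick which TSB entry to lock and rewrite for this PTE.  szall_mask
 * corresponds to _PAGE_SZALL_4U/_4V, szhuge_val to _PAGE_SZHUGE_4U/_4V;
 * base_entry and huge_entry mirror %g1 and the tsb_huge_temp slot.
 */
static unsigned long pick_tsb_entry(unsigned long pte,
				    unsigned long szall_mask,
				    unsigned long szhuge_val,
				    unsigned long base_entry,
				    unsigned long huge_entry)
{
	if ((pte & szall_mask) == szhuge_val && huge_entry != -1UL)
		return huge_entry;	/* reload the huge-page TSB */
	return base_entry;		/* otherwise the base TSB */
}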
@@ -240,10 +335,9 @@ tsb_flush:
* schedule() time.
*
* %o0: page table physical address
- * %o1: TSB register value
- * %o2: TSB virtual address
- * %o3: TSB mapping locked PTE
- * %o4: Hypervisor TSB descriptor physical address
+ * %o1: TSB base config pointer
+ * %o2: TSB huge config pointer, or NULL if none
+ * %o3: Hypervisor TSB descriptor physical address
*
* We have to run this whole thing with interrupts
* disabled so that the current cpu doesn't change
* due to preemption.
@@ -253,63 +347,79 @@ tsb_flush:
.globl __tsb_context_switch
.type __tsb_context_switch,#function
__tsb_context_switch:
- rdpr %pstate, %o5
- wrpr %o5, PSTATE_IE, %pstate
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+
+ TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
- ldub [%g6 + TI_CPU], %g1
- sethi %hi(trap_block), %g2
- sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1
- or %g2, %lo(trap_block), %g2
- add %g2, %g1, %g2
stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
- sethi %hi(tlb_type), %g1
- lduw [%g1 + %lo(tlb_type)], %g1
- cmp %g1, 3
- bne,pt %icc, 1f
+ ldx [%o1 + TSB_CONFIG_REG_VAL], %o0
+ brz,pt %o2, 1f
+ mov -1, %g3
+
+ ldx [%o2 + TSB_CONFIG_REG_VAL], %g3
+
+1: stx %g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
+
+ sethi %hi(tlb_type), %g2
+ lduw [%g2 + %lo(tlb_type)], %g2
+ cmp %g2, 3
+ bne,pt %icc, 50f
nop
/* Hypervisor TSB switch. */
- mov SCRATCHPAD_UTSBREG1, %g1
- stxa %o1, [%g1] ASI_SCRATCHPAD
- mov -1, %g2
- mov SCRATCHPAD_UTSBREG2, %g1
- stxa %g2, [%g1] ASI_SCRATCHPAD
-
- /* Save away %o5's %pstate, we have to use %o5 for
- * the hypervisor call.
- */
- mov %o5, %g1
+ mov SCRATCHPAD_UTSBREG1, %o5
+ stxa %o0, [%o5] ASI_SCRATCHPAD
+ mov SCRATCHPAD_UTSBREG2, %o5
+ stxa %g3, [%o5] ASI_SCRATCHPAD
+
+ mov 2, %o0
+ cmp %g3, -1
+ move %xcc, 1, %o0
mov HV_FAST_MMU_TSB_CTXNON0, %o5
- mov 1, %o0
- mov %o4, %o1
+ mov %o3, %o1
ta HV_FAST_TRAP
- /* Finish up and restore %o5. */
+ /* Finish up. */
ba,pt %xcc, 9f
- mov %g1, %o5
+ nop
/* SUN4U TSB switch. */
-1: mov TSB_REG, %g1
- stxa %o1, [%g1] ASI_DMMU
+50: mov TSB_REG, %o5
+ stxa %o0, [%o5] ASI_DMMU
membar #Sync
- stxa %o1, [%g1] ASI_IMMU
+ stxa %o0, [%o5] ASI_IMMU
membar #Sync
-2: brz %o2, 9f
- nop
+2: ldx [%o1 + TSB_CONFIG_MAP_VADDR], %o4
+ brz %o4, 9f
+ ldx [%o1 + TSB_CONFIG_MAP_PTE], %o5
sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2
- mov TLB_TAG_ACCESS, %g1
+ mov TLB_TAG_ACCESS, %g3
lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
- stxa %o2, [%g1] ASI_DMMU
+ stxa %o4, [%g3] ASI_DMMU
membar #Sync
sllx %g2, 3, %g2
- stxa %o3, [%g2] ASI_DTLB_DATA_ACCESS
+ stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+ brz,pt %o2, 9f
+ nop
+
+ ldx [%o2 + TSB_CONFIG_MAP_VADDR], %o4
+ ldx [%o2 + TSB_CONFIG_MAP_PTE], %o5
+ mov TLB_TAG_ACCESS, %g3
+ stxa %o4, [%g3] ASI_DMMU
+ membar #Sync
+ sub %g2, (1 << 3), %g2
+ stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS
membar #Sync
+
9:
- wrpr %o5, %pstate
+ wrpr %g1, %pstate
retl
nop
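
Finally, the reworked __tsb_context_switch takes tsb_config pointers instead of raw register/PTE values. A flow sketch under the same caveats (the struct fields follow the TSB_CONFIG_* offsets asserted in traps.c above; the scratchpad, hypervisor, and TLB writes are reduced to commented stand-ins rather than real kernel calls):

/* Illustrative C rendering of __tsb_context_switch's decision flow. */
struct sketch_tsb_cfg {
	unsigned long tsb_reg_val;	/* TSB_CONFIG_REG_VAL */
	unsigned long tsb_map_vaddr;	/* TSB_CONFIG_MAP_VADDR */
	unsigned long tsb_map_pte;	/* TSB_CONFIG_MAP_PTE */
};

static void sketch_tsb_context_switch(unsigned long pgd_paddr,
				      struct sketch_tsb_cfg *base,
				      struct sketch_tsb_cfg *huge,
				      int is_hypervisor)
{
	unsigned long huge_reg = huge ? huge->tsb_reg_val : -1UL;

	/* trap_block[cpu].pgd_paddr = pgd_paddr;
	 * trap_block[cpu].tsb_huge  = huge_reg;
	 */

	if (is_hypervisor) {
		/* Write base->tsb_reg_val to SCRATCHPAD_UTSBREG1 and
		 * huge_reg to SCRATCHPAD_UTSBREG2, then make the
		 * HV_FAST_MMU_TSB_CTXNON0 call with one TSB descriptor
		 * if huge_reg == -1, two otherwise.
		 */
	} else {
		/* Write base->tsb_reg_val to the D/I-MMU TSB_REG, then,
		 * if base->tsb_map_vaddr is set, install a locked DTLB
		 * entry for the base TSB mapping and, when 'huge' is
		 * non-NULL, one for the huge TSB in the slot below it.
		 */
	}
}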