/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
*/

#include <linux/config.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>

	.text
	.align		32

/*
 * On a second level vpte miss, check whether the original fault is to the OBP
 * range (note that this is only possible for instruction misses; data misses
 * to the OBP range do not use vpte).  If so, go back directly to the faulting
 * address: we want to read %tpc, since otherwise we have no way of knowing
 * the 8K-aligned faulting address if we are using a >8K kernel page size.
 * This also ensures no vpte range addresses are dropped into the TLB while
 * OBP is executing (see the inherit_locked_prom_mappings() rant).
 */
sparc64_vpte_nucleus:
	/* Note that kvmap below has verified that the address is
	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
	 * here we need only check if it is an OBP address or not.
	 */
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kern_vpte
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, vpte_insn_obp
	 nop

	/* These two instructions are patched by paging_init().  */
kern_vpte:
	sethi		%hi(swapper_pgd_zero), %g5
	lduw		[%g5 + %lo(swapper_pgd_zero)], %g5

	/* With kernel PGD in %g5, branch back into dtlb_backend.  */
	ba,pt		%xcc, sparc64_kpte_continue
	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment.  */

vpte_noent:
	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
	 * skip over the trap instruction so that the top level
	 * TLB miss handler will think this %g5 value is just an
	 * invalid PTE, thus branching to full fault processing.
	 */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_DMMU
	done

vpte_insn_obp:
	/* Behave as if we are at TL0.  */
	wrpr		%g0, 1, %tl
	rdpr		%tpc, %g4	/* Find original faulting iaddr */
	srlx		%g4, 13, %g4	/* Throw out context bits */
	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */

	/* Restore previous TAG_ACCESS.  */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_IMMU

	sethi		%hi(prom_trans), %g5
	or		%g5, %lo(prom_trans), %g5
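	/* Each prom_trans[] entry is three 8-byte words: virtual base,
	 * mapping length, and TTE data.  A zero base terminates the
	 * table, in which case we bail to longpath.
	 */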

1:	ldx		[%g5 + 0x00], %g6	! base
	brz,a,pn	%g6, longpath		! no more entries, fail
	 mov		TLB_SFSR, %g1		! and restore %g1
	ldx		[%g5 + 0x08], %g1	! len
	add		%g6, %g1, %g1		! end
	cmp		%g6, %g4
	bgu,pt		%xcc, 2f
	 cmp		%g4, %g1
	bgeu,pt		%xcc, 2f
	 ldx		[%g5 + 0x10], %g1	! PTE

	/* TLB load, restore %g1, and return from trap.  */
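	/* The final TTE is the entry's TTE data plus the offset of the
	 * faulting address within the mapping (%g4 - base).
	 */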
	sub		%g4, %g6, %g6
	add		%g1, %g6, %g5
	mov		TLB_SFSR, %g1
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry

2:	ba,pt		%xcc, 1b
	 add		%g5, (3 * 8), %g5	! next entry

kvmap_do_obp:
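	/* Data-side twin of the search in vpte_insn_obp above: scan
	 * prom_trans[] for the faulting address and load the matching
	 * TTE into the D-TLB instead of the I-TLB.
	 */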
	sethi		%hi(prom_trans), %g5
	or		%g5, %lo(prom_trans), %g5
	srlx		%g4, 13, %g4
	sllx		%g4, 13, %g4

1:	ldx		[%g5 + 0x00], %g6	! base
	brz,a,pn	%g6, longpath		! no more entries, fail
	 mov		TLB_SFSR, %g1		! and restore %g1
	ldx		[%g5 + 0x08], %g1	! len
	add		%g6, %g1, %g1		! end
	cmp		%g6, %g4
	bgu,pt		%xcc, 2f
	 cmp		%g4, %g1
	bgeu,pt		%xcc, 2f
	 ldx		[%g5 + 0x10], %g1	! PTE

	/* TLB load, restore %g1, and return from trap.  */
	sub		%g4, %g6, %g6
	add		%g1, %g6, %g5
	mov		TLB_SFSR, %g1
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry

2:	ba,pt		%xcc, 1b
	 add		%g5, (3 * 8), %g5	! next entry

/*
 * On a first level data miss, check whether this is to the OBP range (note
 * that such accesses can be made by the PROM, as well as by the kernel when
 * using prom_getproperty on "address"), and if so, do not use vpte access ...
 * rather, use the information saved during inherit_prom_mappings() using the
 * 8K page size.
 */
	.align		32
kvmap:
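	/* %g4 holds the faulting virtual address.  Addresses with bit 63
	 * set belong to the linear kernel mapping and are handled right
	 * here; everything else goes through kvmap_nonlinear below.
	 */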
	brgez,pn	%g4, kvmap_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	.globl		kvmap_linear_patch
kvmap_linear_patch:
#endif
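	/* For a linear address the TTE is formed by XORing the vaddr with
	 * the value our caller set up in %g2.  kvmap_linear_patch is
	 * exported so that, with CONFIG_DEBUG_PAGEALLOC, boot code can
	 * patch this branch away and force the page table walk below.
	 */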
	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5

#ifdef CONFIG_DEBUG_PAGEALLOC
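	/* Three-level software walk of swapper_pg_dir: the 32-bit PGD and
	 * PMD entries are shifted left by 11 bits to form the physical
	 * address of the next level, which is then read with
	 * ASI_PHYS_USE_EC.  A zero entry at any level is a real fault.
	 */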
	sethi		%hi(swapper_pg_dir), %g5
	or		%g5, %lo(swapper_pg_dir), %g5
	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	andn		%g6, 0x3, %g6
	lduw		[%g5 + %g6], %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x3, %g6
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - PMD_SHIFT, %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x7, %g6
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 nop
	ba,a,pt		%xcc, kvmap_load
#endif

kvmap_nonlinear:
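	/* Non-linear kernel addresses must lie within
	 * [MODULES_VADDR, VMALLOC_END); anything outside that window is a
	 * genuine fault.
	 */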
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, longpath
	 mov		(VMALLOC_END >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, longpath
	 nop

kvmap_check_obp:
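	/* OBP mappings live between LOW_OBP_ADDRESS and 4GB; any other
	 * non-linear address is treated as a vmalloc/module address.
	 */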
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_do_obp
	 nop

kvmap_vmalloc_addr:
	/* If we get here, a vmalloc addr was accessed, load kernel VPTE.  */
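	/* %g3 (VPTE base) and %g6 (VPTE offset) were computed by the code
	 * that branched here; a non-negative PTE (valid bit clear) sends
	 * us to full fault processing.
	 */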
	ldxa		[%g3 + %g6] ASI_N, %g5
	brgez,pn	%g5, longpath
	 nop

kvmap_load:
	/* PTE is valid, load into TLB and return from trap.  */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry