path: root/arch/powerpc/platforms/powermac/pmac_cache.S
blob: fb977de6b704a88a4a27c3567bb68bc5334a84a8

/*
 * This file contains low-level cache management functions
 * used for sleep and CPU speed changes on Apple machines.
 * (In fact the only thing that is Apple-specific is that we assume
 * that we can read from ROM at physical address 0xfff00000.)
 *
 *    Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
 *                       Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>

/*
 * Flush and disable all data caches (dL1, L2, L3). This is used
 * when going to sleep, when doing a PMU-based cpufreq transition,
 * or when "offlining" a CPU on SMP machines. This code is overly
 * paranoid, but I've had enough issues with various CPU revs and
 * bugs that I decided it was worth being over-cautious.
 */

_GLOBAL(flush_disable_caches)
#ifndef CONFIG_6xx
	blr
#else
BEGIN_FTR_SECTION
	b	flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1

/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
	mflr	r10

	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop DST streams */
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Stop DPM */
	mfspr	r8,SPRN_HID0		/* Save SPRN_HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Disp-flush the L1. We have a weird problem here that I never
	 * totally figured out: on the 750FX, using the ROM for the flush
	 * results in a non-working flush. We work around it by flushing
	 * from RAM on that CPU until I finally understand what's going
	 * on. --BenH
	 */

	/* ROM base by default */
	lis	r4,0xfff0
	mfpvr	r3
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x7000
	bne+	1f
	/* RAM base on 750FX */
	li	r4,0
1:	li	r5,0x4000		/* 0x4000 lines * 32 bytes = 512kB */
	mtctr	r5
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* Disable / invalidate / enable L1 data */
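	/* The L1 is invalidated and re-enabled here; it is only disabled
	 * for good further down, once the L2 has been flushed.
	 */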
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
	mtspr	SPRN_HID0,r3
	sync
	isync
	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3,(HID0_DCI|HID0_ICFI)
	mtspr	SPRN_HID0,r3
	sync

	/* Save the current L2CR value in r5 */
	mfspr	r5,SPRN_L2CR
	/* Set to data-only (pre-745x bit) */
	oris	r3,r5,L2CR_L2DO@h
	b	2f
	/* When disabling L2, code must be in L1 */
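	/* The forward/backward branches below first run through the
	 * 32-byte-aligned block so the whole line is in the instruction
	 * cache; the mtspr and the sync/isync around it then execute
	 * without any instruction fetch having to go through the L2
	 * while it is being reconfigured.
	 */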
	.balign 32
1:	mtspr	SPRN_L2CR,r3
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	/* Disp-flush the L2. The interesting thing here is that the L2 can
	 * be up to 2MB ... so using the ROM, we'll end up wrapping back to
	 * memory, but that is probably fine. We disp-flush over 4MB to be
	 * safe.
	 */
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	dcbf	0,r4
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* now disable L2 */
	rlwinm	r5,r5,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r5
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	/* Invalidate the L2. This is pre-745x, so we clear the L2I bit ourselves */
	oris	r4,r5,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync

	/* Wait for the invalidation to complete */
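	/* L2CR[L2IP] (bit 31, the LSB) stays set while the invalidate
	 * is still in progress.
	 */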
1:	mfspr	r3,SPRN_L2CR
	rlwinm.	r0,r3,0,31,31
	bne	1b

	/* Clear L2I */
	xoris	r4,r4,L2CR_L2I@h
	sync
	mtspr	SPRN_L2CR,r4
	sync

	/* now disable the L1 data and instruction caches */
	mfspr	r0,SPRN_HID0
	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
	mtspr	SPRN_HID0,r0
	sync
	isync

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mfspr	r0,SPRN_HID0
	rlwimi	r0,r8,0,11,11		/* restore HID0[DPM] from the value saved in r8 */
	mtspr	SPRN_HID0,r0
	sync

	/* restore DR and EE */
	sync
	mtmsr	r11
	isync

	mtlr	r10
	blr

/* This code is for 745x processors */
flush_disable_745x:
	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop prefetch streams */
	DSSALL
	sync

	/* Disable L2 prefetching */
	mfspr	r0,SPRN_MSSCR0
	rlwinm	r0,r0,0,0,29
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
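	/* flush the data cache line at physical address 0 a few times */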
	lis	r4,0
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4

	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few MB to "help". The problem isn't totally
	 * fixed by this in theory, but at least, in practice, I couldn't
	 * reproduce it even with a big hammer...
	 */

	lis	r4,0x0002		/* 0x20000 lines * 32 bytes = 4MB */
	mtctr	r4
	li	r4,0
1:	lwz	r0,0(r4)
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

	/* Flush and disable the L1 data cache */
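	/* All ways but one are locked via the low byte of LDSTCR (a set
	 * bit locks the corresponding way of the 8-way L1); the unlocked
	 * way is then displacement-flushed with 128 loads from ROM
	 * (128 lines * 32 bytes), and this repeats for each way.
	 */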
	mfspr	r6,SPRN_LDSTCR
	lis	r3,0xfff0	/* read from ROM for displacement flush */
	li	r4,0xfe		/* start with only way 0 unlocked */
	li	r5,128		/* 128 lines in each way */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
2:	lwz	r0,0(r3)	/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30	/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff		/* all done? */
	bne	1b
	/* now unlock the L1 data cache */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync

	/* Flush the L2 cache using the hardware assist */
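	/* L2CR[L2E] is the most-significant bit, so a signed compare with
	 * zero tells us whether the L2 is enabled at all.
	 */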
	mfspr	r3,SPRN_L2CR
	cmpwi	r3,0		/* check if it is enabled first */
	bge	4f
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
	b	2f
	/* When disabling/locking L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r0	/* lock the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0	/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR	/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync
	rlwinm	r3,r3,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r3	/* disable the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	oris	r4,r3,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync
1:	mfspr	r4,SPRN_L2CR
	andis.	r0,r4,L2CR_L2I@h
	bne	1b
	sync

BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
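	/* As with the L2, L3CR[L3E] is the most-significant bit, so the
	 * "bge" below skips the flush when the L3 is not enabled.
	 */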
4:	mfspr	r3,SPRN_L3CR
	cmpwi	r3,0		/* check if it is enabled */
	bge	6f
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	sync
	mtspr	SPRN_L3CR,r0	/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	sync
	mtspr	SPRN_L3CR,r0	/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR	/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	sync
	mtspr	SPRN_L3CR,r3	/* disable the L3 cache */
	sync
	ori	r4,r3,L3CR_L3I
	mtspr	SPRN_L3CR,r4
1:	mfspr	r4,SPRN_L3CR
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0	/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11		/* restore DR and EE */
	isync
	blr
#endif	/* CONFIG_6xx */