author		Paul Mackerras <paulus@samba.org>	2006-03-26 23:22:28 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-03-26 23:22:28 -0500
commit		0eb4cb9b16aba6d610a0716503b96d299b308d44 (patch)
tree		3a8b8ed7e3f4b5bde9b690b17aca7926ba9bef90 /arch/ppc/kernel
parent		fbd7740fdfdf9475f92287a84085a1913541cd5d (diff)
powerpc: Move l2cr.S over to arch/powerpc
No functional changes, but call it l2cr_6xx.S since it is specific to 6xx-family (including G3/750 and G4/74xx) processors.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/ppc/kernel')
-rw-r--r--	arch/ppc/kernel/Makefile	4
-rw-r--r--	arch/ppc/kernel/l2cr.S		471
2 files changed, 2 insertions(+), 473 deletions(-)
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 1b2c7458a3d0..3d50a2686839 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -13,7 +13,7 @@ extra-y += vmlinux.lds
 obj-y := entry.o traps.o time.o misc.o \
 		setup.o \
 		ppc_htab.o
-obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
+obj-$(CONFIG_6xx) += cpu_setup_6xx.o
 obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
 obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
@@ -34,7 +34,7 @@ endif
 # These are here while we do the architecture merge
 
 else
-obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
+obj-$(CONFIG_6xx) += cpu_setup_6xx.o
 obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
deleted file mode 100644
index d7f4e982b539..000000000000
--- a/arch/ppc/kernel/l2cr.S
+++ /dev/null
@@ -1,471 +0,0 @@
/*
	L2CR functions
	Copyright © 1997-1998 by PowerLogix R & D, Inc.

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
	Thur, Dec. 12, 1998.
	- First public release, contributed by PowerLogix.
	***********
	Sat, Aug. 7, 1999.
	- Terry: Made sure code disabled interrupts before running.  (Previously
			it was assumed interrupts were already disabled.)
	- Terry: Updated for tentative G4 support.  4MB of memory is now flushed
			instead of 2MB.  (Probably only 3MB is necessary.)
	- Terry: Updated for workaround to HID0[DPM] processor bug
			during global invalidates.
	***********
	Thu, July 13, 2000.
	- Terry: Added isync to correct for an errata.

	22 August 2001.
	- DanM: Finally added the 7450 patch I've had for the past
		several months.  The L2CR is similar, but I'm going
		to assume the user of these functions knows what they
		are doing.

	Author: Terry Greeniaus (tgree@phys.ualberta.ca)
	Please e-mail updates to this file to me, thanks!
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/cache.h>
#include <asm/page.h>

/* Usage:

	When setting the L2CR register, you must do a few special
	things.  If you are enabling the cache, you must perform a
	global invalidate.  If you are disabling the cache, you must
	flush the cache contents first.  This routine takes care of
	doing these things.  When first enabling the cache, make sure
	you pass in the L2CR you want, as well as passing in the
	global invalidate bit set.  A global invalidate will only be
	performed if the L2I bit is set in applyThis.  When enabling
	the cache, you should also set the L2E bit in applyThis.  If
	you want to modify the L2CR contents after the cache has been
	enabled, the recommended procedure is to first call
	_set_L2CR(0) to disable the cache and then call it again with
	the new values for L2CR.  Examples:

	_set_L2CR(0)		- disables the cache
	_set_L2CR(0xB3A04000)	- enables my G3 upgrade card:
				- L2E set to turn on the cache
				- L2SIZ set to 1MB
				- L2CLK set to 1:1
				- L2RAM set to pipelined synchronous late-write
				- L2I set to perform a global invalidation
				- L2OH set to 0.5 ns
				- L2DF set because this upgrade card
				  requires it

	A similar call should work for your card.  You need to know
	the correct settings for your card and then place them in the
	fields I have outlined above.  Other fields support optional
	features, such as L2DO, which caches only data, or L2TS, which
	causes cache pushes from the L1 cache to go to the L2 cache
	instead of to main memory.

IMPORTANT:
	Starting with the 7450, the bits in this register have moved
	or behave differently.  The Enable, Parity Enable, Size,
	and L2 Invalidate are the only bits that have not moved.
	The size is read-only for these processors with internal L2
	cache, and the invalidate is a control as well as status.
		-- Dan

*/
/*
 * Summary: this procedure ignores the L2I bit in the value passed in,
 * flushes the cache if it was already enabled, always invalidates the
 * cache, then enables the cache if the L2E bit is set in the value
 * passed in.
 *	-- paulus.
 */
_GLOBAL(_set_L2CR)
	/* Make sure this is a 750 or 7400 chip */
BEGIN_FTR_SECTION
	li	r3,-1
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)

	mflr	r9

	/* Stop DST streams */
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Before we perform the global invalidation, we must disable dynamic
	 * power management via HID0[DPM] to work around a processor bug where
	 * DPM can possibly interfere with the state machine in the processor
	 * that invalidates the L2 cache tags.
	 */
	mfspr	r8,SPRN_HID0		/* Save HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r4,SPRN_L2CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,11,9		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,1,31		/* Turn off the enable bit */

	/* Check to see if we need to flush */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache.  First, read the first 4MB of memory (physical) to
	 * put new data in the cache.  (Actually we only need
	 * the size of the L2 cache plus the size of the L1 cache, but 4MB will
	 * cover everything just to be safe.)
	 */

	/**** Might be a good idea to set L2DO here - to prevent instructions
	      from getting into the cache.  But since we invalidate
	      the next time we enable the cache it doesn't really matter.
	      Don't do this unless you accommodate all processor variations.
	      The bit moved on the 7450.
	 ****/

BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure
	 * the L2 prefetch engines are idle.  As the errata text explains,
	 * we can't be sure they are; we just hope very hard
	 * that this will be enough.  At least I noticed Apple
	 * doesn't even bother doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
	rlwinm	r4,r4,0,0,29
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* TODO: use HW flush assist when available */

	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,r0,r4
	addi	r4,r4,32	/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32	/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L2CR configuration bits (and switch L2 off) */
	/* CPU errata: Make sure the mtspr below is already in the
	 * L1 icache
	 */
	b	20f
	.balign	L1_CACHE_BYTES
22:
	sync
	mtspr	SPRN_L2CR,r3
	sync
	b	23f
20:
	b	21f
21:	sync
	isync
	b	22b

23:
	/* Perform a global invalidation */
	oris	r3,r3,0x0020
	sync
	mtspr	SPRN_L2CR,r3
	sync
	isync			/* For errata */

BEGIN_FTR_SECTION
	/* On the 7450, we wait for the L2I bit to clear. */
10:	mfspr	r3,SPRN_L2CR
	andis.	r4,r3,0x0020
	bne	10b
	b	11f
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* Wait for the invalidation to complete */
3:	mfspr	r3,SPRN_L2CR
	rlwinm.	r4,r3,0,31,31
	bne	3b

11:	rlwinm	r3,r3,0,11,9		/* Turn off the L2I bit */
	sync
	mtspr	SPRN_L2CR,r3
	sync

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,0x8000
	mtspr	SPRN_L2CR,r3
	sync

	/* Enable L2 HW prefetch on 744x/745x */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
4:

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mtspr	1008,r8
	sync

	/* Restore MSR (restores EE and DR bits to original state) */
	SYNC
	mtmsr	r7
	isync

	mtlr	r9
	blr

_GLOBAL(_get_L2CR)
	/* Return the L2CR contents */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L2CR
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	blr


/*
 * Here is a similar routine for dealing with the L3 cache
 * on the 745x family of chips
 */

_GLOBAL(_set_L3CR)
	/* Make sure this is a 745x chip */
BEGIN_FTR_SECTION
	li	r3,-1
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Stop DST streams */
	DSSALL
	sync

	/* Get the current enable bit of the L3CR into r4 */
	mfspr	r4,SPRN_L3CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,22,20		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,2,31		/* Turn off the enable & PE bits */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	/* Check to see if we need to flush */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache. */

	/* TODO: use HW flush assist */

	lis	r4,0x0008
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,r0,r4
	dcbf	0,r4
	addi	r4,r4,32	/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L3CR configuration bits (and switch L3 off) */
	sync
	mtspr	SPRN_L3CR,r3
	sync

	oris	r3,r3,L3CR_L3RES@h		/* Set reserved bit 5 */
	mtspr	SPRN_L3CR,r3
	sync
	oris	r3,r3,L3CR_L3CLKEN@h		/* Set clken */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Perform a global invalidation */
	ori	r3,r3,0x0400
	sync
	mtspr	SPRN_L3CR,r3
	sync
	isync

	/* We wait for the L3I bit to clear. */
10:	mfspr	r3,SPRN_L3CR
	andi.	r4,r3,0x0400
	bne	10b

	/* Clear CLKEN */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Restore MSR (restores EE and DR bits to original state) */
4:	SYNC
	mtmsr	r7
	isync
	blr

_GLOBAL(_get_L3CR)
	/* Return the L3CR contents */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L3CR
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	blr

/* --- End of PowerLogix code ---
 */

/* __flush_disable_L1() - Flush and disable the L1 cache
 *
 * Clobbers r0, r3, ctr, cr0.
 * Must be called with interrupts disabled and the MMU enabled.
 */
_GLOBAL(__flush_disable_L1)
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Load the counter with 0x4000 cache lines (512kB) and
	 * load the cache with data
	 */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	dcbf	0,r3
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	sync

	/* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,18,15
	mtspr	SPRN_HID0,r3
	sync
	isync
	blr

/* __inval_enable_L1() - Invalidate and enable the L1 cache
 *
 * Assumes the L1 is already disabled and MSR:EE is off.
 *
 * Clobbers r3.
 */
_GLOBAL(__inval_enable_L1)
	/* Enable and then flash-invalidate the instruction & data caches */
	mfspr	r3,SPRN_HID0
	ori	r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3, HID0_ICFI|HID0_DCI
	mtspr	SPRN_HID0,r3
	sync

	blr
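
For readers coming to this listing from C, here is a minimal sketch of how a caller might drive the _set_L2CR/_get_L2CR entry points above, following the procedure described in the usage comment (disable and flush first, then rewrite the register with the enable and invalidate bits set). The prototypes, the EX_* constants, and example_reprogram_l2() are illustrative assumptions, not declarations taken from this commit; the kernel's real prototypes live in the arch headers and may differ.

/* Assumed prototypes for the assembly entry points above (illustrative only).
 * Both routines pass and return values in r3; _set_L2CR returns -1 on CPUs
 * without an L2CR (see the BEGIN_FTR_SECTION check at the top of _set_L2CR).
 */
extern int _set_L2CR(unsigned long val);
extern unsigned long _get_L2CR(void);

/* Bit values taken from the instructions in the listing above. */
#define EX_L2CR_L2E	0x80000000UL	/* enable bit, set via "oris r3,r3,0x8000" */
#define EX_L2CR_L2I	0x00200000UL	/* global invalidate, set via "oris r3,r3,0x0020" */

/* Hypothetical helper: reprogram the L2 the way the usage comment recommends:
 * call _set_L2CR(0) to flush and disable, then write the new value with the
 * enable and invalidate bits set.
 */
static void example_reprogram_l2(unsigned long new_l2cr)
{
	if (_set_L2CR(0) == -1)		/* flushes and disables the L2 */
		return;			/* this CPU has no L2CR */
	_set_L2CR(new_l2cr | EX_L2CR_L2E | EX_L2CR_L2I);
}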