author		Paul Mackerras <paulus@samba.org>	2005-10-10 08:36:14 -0400
committer	Paul Mackerras <paulus@samba.org>	2005-10-10 08:36:14 -0400
commit		9994a33865f4d55c44c9731c01e1f891543278de (patch)
tree		77d8fe580493dbf9ce1820a703c482fba291b6b9 /arch/powerpc/kernel/misc_64.S
parent		06d67d54741a5bfefa31945ef195dfa748c29025 (diff)
powerpc: Introduce entry_{32,64}.S, misc_{32,64}.S, systbl.S
The system call table has been consolidated into systbl.S. We have
separate 32-bit and 64-bit versions of entry.S and misc.S since the
code is sufficiently different that merging them is not worthwhile.
Some common bits will be extracted in the future.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/misc_64.S')
-rw-r--r--	arch/powerpc/kernel/misc_64.S	898
 1 file changed, 898 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
new file mode 100644
index 000000000000..8fe295693c09
--- /dev/null
+++ b/arch/powerpc/kernel/misc_64.S
@@ -0,0 +1,898 @@
/*
 * arch/powerpc/kernel/misc_64.S
 *
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

	.text

/*
 * Returns (address we are running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */

_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	LOADADDR(r4,1b)
	subf	r3,r4,r3
	mtlr	r0
	blr

/*
 * add_reloc_offset(x) returns x + reloc_offset().
 */
_GLOBAL(add_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	LOADADDR(r4,1b)
	subf	r5,r4,r5
	add	r3,r3,r5
	mtlr	r0
	blr
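
/*
 * Hedged usage sketch (not code from this commit): early boot C code
 * that must dereference a global before the kernel runs at its linked
 * address can apply the offset by hand.  boot_flags and the wrapper
 * below are invented names, for illustration only.
 *
 *	extern unsigned long boot_flags;
 *
 *	unsigned long early_boot_flags(void)
 *	{
 *		return *(unsigned long *)
 *			add_reloc_offset((unsigned long)&boot_flags);
 *	}
 */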

_GLOBAL(get_msr)
	mfmsr	r3
	blr

_GLOBAL(get_dar)
	mfdar	r3
	blr

_GLOBAL(get_srr0)
	mfsrr0	r3
	blr

_GLOBAL(get_srr1)
	mfsrr1	r3
	blr

_GLOBAL(get_sp)
	mr	r3,r1
	blr

#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-112(r3)
	mr	r1,r3
	bl	.__do_softirq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

_GLOBAL(call_handle_IRQ_event)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-112(r6)
	mr	r1,r6
	bl	.handle_IRQ_event
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr
#endif /* CONFIG_IRQSTACKS */
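
/*
 * Both helpers above share one pattern: carve a stack frame at the top
 * of a separate IRQ stack (whose base is passed in, in r3 or r6),
 * switch r1 to it, call the C handler, then unwind through the saved
 * back-chain word at 0(r1).  A hedged sketch of the C side (the
 * softirq_ctx name is an assumption, not something defined here):
 *
 *	call_do_softirq(softirq_ctx[smp_processor_id()]);
 */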

/*
 * To be called by C code which needs to do some operations with the MMU
 * disabled. Note that interrupts have to be disabled by the caller
 * prior to calling us. The called code _MUST_ be in the RMO, and part
 * of the linear mapping, as we don't attempt to translate the stack
 * pointer at all. The function is called with the stack switched to
 * this CPU's emergency stack.
 *
 * prototype is void *call_with_mmu_off(void *func, void *data);
 *
 * the called function is expected to be of the form
 *
 * void *called(void *data);
 */
_GLOBAL(call_with_mmu_off)
	mflr	r0			/* get link, save it on stackframe */
	std	r0,16(r1)
	mr	r5,r1			/* save old stack ptr */
	ld	r1,PACAEMERGSP(r13)	/* get emerg. stack */
	subi	r1,r1,STACK_FRAME_OVERHEAD
	std	r0,16(r1)		/* save link on emerg. stack */
	std	r5,0(r1)		/* save old stack ptr in backchain */
	ld	r3,0(r3)		/* get to real function ptr (assume same TOC) */
	bl	2f			/* we need LR to return, continue at label 2 */

	ld	r0,16(r1)		/* we return here from the call, get LR and */
	ld	r1,0(r1)		/* .. old stack ptr */
	mtspr	SPRN_SRR0,r0		/* and get back to virtual mode with these */
	mfmsr	r4
	ori	r4,r4,MSR_IR|MSR_DR
	mtspr	SPRN_SRR1,r4
	rfid

2:	mtspr	SPRN_SRR0,r3		/* coming from above, enter real mode */
	mr	r3,r4			/* get parameter */
	mfmsr	r0
	ori	r0,r0,MSR_IR|MSR_DR
	xori	r0,r0,MSR_IR|MSR_DR
	mtspr	SPRN_SRR1,r0
	rfid
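
/*
 * Hedged usage sketch for the prototype documented above.  Since ELF
 * ABI function pointers point at function descriptors, passing an
 * ordinary C function pointer as 'func' matches the ld r3,0(r3)
 * descriptor dereference above.  probe_real and probe_data are
 * invented names:
 *
 *	void *probe_real(void *data);	(must live in the RMO)
 *
 *	local_irq_disable();
 *	ret = call_with_mmu_off((void *)probe_real, probe_data);
 */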


	.section	".toc","aw"
PPC64_CACHES:
	.tc		ppc64_caches[TC],ppc64_caches
	.section	".text"

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *   flush all bytes from start through stop-1 inclusive
 */

_KPROBE(__flush_icache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get cache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	1b
	sync

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6
	add	r6,r6,r7
	bdnz	2b
	isync
	blr
	.previous .text
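
/*
 * Hedged usage sketch: the canonical caller is code that stores
 * instructions and must make them visible to instruction fetch,
 * e.g. (addr and new_insn are illustrative names):
 *
 *	*(unsigned int *)addr = new_insn;
 *	flush_icache_range(addr, addr + 4);
 */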
/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	blr

/*
 * Like above, but works on non-mapped physical addresses.
 * Use only for non-LPAR setups!  It also assumes real mode
 * is cacheable.  Used for flushing out the DART before using
 * it as uncacheable memory.
 *
 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_phys_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mfmsr	r5			/* Disable MMU Data Relocation */
	ori	r0,r5,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	mtmsr	r5			/* Re-enable MMU Data Relocation */
	sync
	isync
	blr
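
/*
 * Note on the sequence above: "ori rD,rS,MSR_DR; xori rD,rD,MSR_DR"
 * first forces the MSR_DR bit on and then flips it, i.e. it clears
 * data relocation unconditionally, whatever DR's previous state was,
 * while preserving every other MSR bit.
 */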

_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync
	isync
	mtctr	r8
0:	dcbf	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	blr
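
/*
 * Note: the flush routines above use dcbst, which writes a modified
 * line back to memory but leaves it valid in the cache, while
 * flush_inval_dcache_range uses dcbf, which writes back and also
 * invalidates -- appropriate when the memory is about to change
 * behind the cache's back (e.g. by DMA).
 */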


/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT		/* Page align */
	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6
	add	r6,r6,r5
	bdnz	0b
	sync

/* Now invalidate the icache */

	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
	mtctr	r4
1:	icbi	0,r3
	add	r3,r3,r5
	bdnz	1b
	isync
	blr
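
/*
 * Hedged usage sketch: callers hand this the kernel virtual address of
 * the page to flush, along the lines of
 *
 *	__flush_dcache_icache(page_address(page));
 */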

/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 */
_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbz	r5,0(r3)
	eieio
	stbu	r5,1(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbzu	r5,1(r4)
	stb	r5,0(r3)
	bdnz	00b
	sync
	blr

_GLOBAL(_insw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhbrx	r5,0,r3
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

_GLOBAL(_outsw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
	sthbrx	r5,0,r3
	bdnz	00b
	sync
	blr

_GLOBAL(_insl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwbrx	r5,0,r3
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

_GLOBAL(_outsl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
	stwbrx	r5,0,r3
	bdnz	00b
	sync
	blr

/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhz	r5,0(r3)
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
	sth	r5,0(r3)
	bdnz	00b
	sync
	blr

_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwz	r5,0(r3)
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
	stw	r5,0(r3)
	bdnz	00b
	sync
	blr
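
/*
 * Hedged usage sketch for the string ops above: a driver draining a
 * 16-bit little-endian device FIFO through the usual insw() wrapper
 * (fifo_port and buf are illustrative names):
 *
 *	unsigned short buf[64];
 *	insw(fifo_port, buf, 64);	(reads 64 halfwords, byte-reversed
 *					 by the lhbrx in _insw above)
 */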


_GLOBAL(cvt_fd)
	lfd	0,0(r5)		/* load up fpscr value */
	mtfsf	0xff,0
	lfs	0,0(r3)
	stfd	0,0(r4)
	mffs	0		/* save new fpscr value */
	stfd	0,0(r5)
	blr

_GLOBAL(cvt_df)
	lfd	0,0(r5)		/* load up fpscr value */
	mtfsf	0xff,0
	lfd	0,0(r3)
	stfs	0,0(r4)
	mffs	0		/* save new fpscr value */
	stfd	0,0(r5)
	blr
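
/*
 * Hedged note on the two converters above: r3 is the source, r4 the
 * destination, and r5 points at a doubleword holding the FPSCR image,
 * which is loaded first and stored back afterwards so the caller's
 * rounding mode and status bits are applied and preserved.  Plausible
 * C prototypes (assumed, not declared in this file):
 *
 *	void cvt_fd(float *from, double *to, double *fpscr);
 *	void cvt_df(double *from, float *to, double *fpscr);
 */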

/*
 * identify_cpu: identify the CPU we are running on and
 * call that CPU's setup_cpu function.
 * In:	r3 = base of the cpu_specs array
 *	r4 = address of cur_cpu_spec
 *	r5 = relocation offset
 */
_GLOBAL(identify_cpu)
	mfpvr	r7
1:
	lwz	r8,CPU_SPEC_PVR_MASK(r3)
	and	r8,r8,r7
	lwz	r9,CPU_SPEC_PVR_VALUE(r3)
	cmplw	0,r9,r8
	beq	1f
	addi	r3,r3,CPU_SPEC_ENTRY_SIZE
	b	1b
1:
	sub	r0,r3,r5
	std	r0,0(r4)
	ld	r4,CPU_SPEC_SETUP(r3)
	add	r4,r4,r5
	ld	r4,0(r4)
	add	r4,r4,r5
	mtctr	r4
	/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
	mr	r4,r3
	mr	r3,r5
	bctr
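
/*
 * Note: the PVR match loop above has no explicit bound; it relies on
 * cpu_specs ending in a catch-all entry (a pvr_mask that accepts any
 * PVR) so the walk always terminates with a match.
 */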

/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nops over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 */
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	LOADADDR(r6,cur_cpu_spec)
	sub	r6,r6,r3
	ld	r4,0(r6)
	sub	r4,r4,r3
	ld	r4,CPU_SPEC_FEATURES(r4)
	/* Get the fixup table */
	LOADADDR(r6,__start___ftr_fixup)
	sub	r6,r6,r3
	LOADADDR(r7,__stop___ftr_fixup)
	sub	r7,r7,r3
	/* Do the fixup */
1:	cmpld	r6,r7
	bgelr
	addi	r6,r6,32
	ld	r8,-32(r6)	/* mask */
	and	r8,r8,r4
	ld	r9,-24(r6)	/* value */
	cmpld	r8,r9
	beq	1b
	ld	r8,-16(r6)	/* section begin */
	ld	r9,-8(r6)	/* section end */
	subf.	r9,r8,r9
	beq	1b
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2
	mtctr	r9
	sub	r8,r8,r3
	lis	r0,0x60000000@h	/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f
	dcbst	0,r8		/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync			/* additional sync needed on g4 */
	isync
	b	1b
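
/*
 * The 32-byte stride above walks an array of fixup records whose
 * layout, read off the -32/-24/-16/-8 offsets, corresponds to
 * something like the (hedged) C view below; a section is nopped out
 * only when (features & mask) != value:
 *
 *	struct ftr_fixup {
 *		unsigned long mask;	(CPU_FTR bits to test)
 *		unsigned long value;	(expected masked value)
 *		unsigned long start;	(first insn of the section)
 *		unsigned long end;	(one past the last insn)
 *	};
 */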

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
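
/*
 * Hedged C-level view of the two real-mode accessors above, with the
 * argument order read off the register usage (address in r3 for the
 * read; data in r3 and address in r4 for the write):
 *
 *	u8   real_readb(volatile u8 *addr);
 *	void real_writeb(u8 data, volatile u8 *addr);
 */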

/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	std	r29,-24(r1)
	std	r30,-16(r1)
	stdu	r1,-STACK_FRAME_OVERHEAD(r1)
	mr	r29,r3
	mr	r30,r4
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,(CLONE_UNTRACED>>16)
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpdi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
	ld	r2,8(r29)
	ld	r29,0(r29)
	mtlr	r29		/* fn addr in lr */
	mr	r3,r30		/* load arg and call fn */
	blrl
	li	r0,__NR_exit	/* exit after child exits */
	li	r3,0
	sc
1:	addi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r29,-24(r1)
	ld	r30,-16(r1)
	blr
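
/*
 * Hedged usage sketch: a caller forks a kernel-only thread that shares
 * the kernel address space (do_work and arg are illustrative names);
 * CLONE_VM and CLONE_UNTRACED are OR-ed in by the stub itself:
 *
 *	pid = kernel_thread(do_work, arg, 0);
 */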

/*
 * disable_kernel_fp()
 * Disable the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_FP_LG),1
	rldicl	r3,r0,(MSR_FP_LG+1),0
	mtmsrd	r3		/* disable use of fpu now */
	isync
	blr

#ifdef CONFIG_ALTIVEC

#if 0 /* this has no callers for now */
/*
 * disable_kernel_altivec()
 * Disable the VMX.
 */
_GLOBAL(disable_kernel_altivec)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_VEC_LG),1
	rldicl	r3,r0,(MSR_VEC_LG+1),0
	mtmsrd	r3		/* disable use of VMX now */
	isync
	blr
#endif /* 0 */

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5		/* enable use of VMX now */
	isync
	cmpdi	0,r3,0
	beqlr-			/* if no previous owner, done */
	addi	r3,r3,THREAD	/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3	/* disable VMX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_altivec@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_ALTIVEC */

_GLOBAL(__setup_cpu_power3)
	blr

_GLOBAL(execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves.
 *
 * Physical (hardware) cpu id should be in r3.
 */
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC		/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	bnea	0x60
#endif
	b	99b

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * set paca id to -1 to say we got here
 * switch to real mode
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	li	r4,-1
	sth	r4,PACAHWCPUID(r13)	/* let others know we left */
	bl	real_mode
	b	.kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid


/*
 * kexec_sequence(newstack, start, image, control, clear_all())
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-112(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-112-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* spare */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1

	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	.kexec_copy_flush	/* (image) */

	/* turn off mmu */
	bl	real_mode

	/* clear out hardware hash page table and tlb */
	ld	r5,0(r27)		/* deref function descriptor */
	mtctr	r5
	bctrl				/* ppc_md.hash_clear_all(void); */

/*
 *   kexec image calling is:
 *      the first 0x100 bytes of the entry point are copied to 0
 *
 *      all slaves branch to slave = 0x60 (absolute)
 *              slave(phys_cpu_id);
 *
 *      master goes to start = entry point
 *              start(phys_cpu_id, start, 0);
 *
 *
 *   a wrapper is needed to call existing kernels, here is an approximate
 *   description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */

	/* copy 0x100 bytes starting at start to 0 */
	li	r3,0
	mr	r4,r30
	li	r5,0x100
	li	r6,0
	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(5)
	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */