author    Linus Torvalds <torvalds@linux-foundation.org>    2008-12-28 19:54:33 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-12-28 19:54:33 -0500
commit    3c92ec8ae91ecf59d88c798301833d7cf83f2179 (patch)
tree      08a38cd3523c42bd49882f17cd501fd879e7ca1c /arch/powerpc/kernel/misc_32.S
parent    c4c9f0183b7c4e97836e8fecbb67898b06c47e78 (diff)
parent    ca9153a3a2a7556d091dfe080e42b0e67881fff6 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (144 commits)
  powerpc/44x: Support 16K/64K base page sizes on 44x
  powerpc: Force memory size to be a multiple of PAGE_SIZE
  powerpc/32: Wire up the trampoline code for kdump
  powerpc/32: Add the ability for a classic ppc kernel to be loaded at 32M
  powerpc/32: Allow __ioremap on RAM addresses for kdump kernel
  powerpc/32: Setup OF properties for kdump
  powerpc/32/kdump: Implement crash_setup_regs() using ppc_save_regs()
  powerpc: Prepare xmon_save_regs for use with kdump
  powerpc: Remove default kexec/crash_kernel ops assignments
  powerpc: Make default kexec/crash_kernel ops implicit
  powerpc: Setup OF properties for ppc32 kexec
  powerpc/pseries: Fix cpu hotplug
  powerpc: Fix KVM build on ppc440
  powerpc/cell: add QPACE as a separate Cell platform
  powerpc/cell: fix build breakage with CONFIG_SPUFS disabled
  powerpc/mpc5200: fix error paths in PSC UART probe function
  powerpc/mpc5200: add rts/cts handling in PSC UART driver
  powerpc/mpc5200: Make PSC UART driver update serial errors counters
  powerpc/mpc5200: Remove obsolete code from mpc5200 MDIO driver
  powerpc/mpc5200: Add MDMA/UDMA support to MPC5200 ATA driver
  ...

Fix trivial conflict in drivers/char/Makefile as per Paul's directions
Diffstat (limited to 'arch/powerpc/kernel/misc_32.S')
-rw-r--r--  arch/powerpc/kernel/misc_32.S  238
1 file changed, 7 insertions, 231 deletions
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 5c33bc14bd9f..15f28e0de78d 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -29,6 +29,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
 #include <asm/kexec.h>
+#include <asm/bug.h>
 
 	.text
 
@@ -271,231 +272,6 @@ _GLOBAL(real_writeb)
 
 #endif /* CONFIG_40x */
 
-/*
- * Flush MMU TLB
- */
-#ifndef CONFIG_FSL_BOOKE
-_GLOBAL(_tlbil_all)
-_GLOBAL(_tlbil_pid)
-#endif
-_GLOBAL(_tlbia)
-#if defined(CONFIG_40x)
-	sync			/* Flush to memory before changing mapping */
-	tlbia
-	isync			/* Flush shadow TLB */
-#elif defined(CONFIG_44x)
-	li	r3,0
-	sync
-
-	/* Load high watermark */
-	lis	r4,tlb_44x_hwater@ha
-	lwz	r5,tlb_44x_hwater@l(r4)
-
-1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
-	addi	r3,r3,1
-	cmpw	0,r3,r5
-	ble	1b
-
-	isync
-#elif defined(CONFIG_FSL_BOOKE)
-	/* Invalidate all entries in TLB0 */
-	li	r3, 0x04
-	tlbivax	0,3
-	/* Invalidate all entries in TLB1 */
-	li	r3, 0x0c
-	tlbivax	0,3
-	msync
-#ifdef CONFIG_SMP
-	tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
-	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
-	lwz	r8,TI_CPU(r8)
-	oris	r8,r8,10
-	mfmsr	r10
-	SYNC
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
-	rlwinm	r0,r0,0,28,26		/* clear DR */
-	mtmsr	r0
-	SYNC_601
-	isync
-	lis	r9,mmu_hash_lock@h
-	ori	r9,r9,mmu_hash_lock@l
-	tophys(r9,r9)
-10:	lwarx	r7,0,r9
-	cmpwi	0,r7,0
-	bne-	10b
-	stwcx.	r8,0,r9
-	bne-	10b
-	sync
-	tlbia
-	sync
-	TLBSYNC
-	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
-	mtmsr	r10
-	SYNC_601
-	isync
-#else /* CONFIG_SMP */
-	sync
-	tlbia
-	sync
-#endif /* CONFIG_SMP */
-#endif /* ! defined(CONFIG_40x) */
-	blr
-
-/*
- * Flush MMU TLB for a particular address
- */
-#ifndef CONFIG_FSL_BOOKE
-_GLOBAL(_tlbil_va)
-#endif
-_GLOBAL(_tlbie)
-#if defined(CONFIG_40x)
-	/* We run the search with interrupts disabled because we have to change
-	 * the PID and I don't want to preempt when that happens.
-	 */
-	mfmsr	r5
-	mfspr	r6,SPRN_PID
-	wrteei	0
-	mtspr	SPRN_PID,r4
-	tlbsx.	r3, 0, r3
-	mtspr	SPRN_PID,r6
-	wrtee	r5
-	bne	10f
-	sync
-	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
-	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
-	 * the TLB entry. */
-	tlbwe	r3, r3, TLB_TAG
-	isync
-10:
-
-#elif defined(CONFIG_44x)
-	mfspr	r5,SPRN_MMUCR
-	rlwimi	r5,r4,0,24,31			/* Set TID */
-
-	/* We have to run the search with interrupts disabled, even critical
-	 * and debug interrupts (in fact the only critical exceptions we have
-	 * are debug and machine check).  Otherwise an interrupt which causes
-	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r4
-	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
-	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r4,r6
-	mtmsr	r6
-	mtspr	SPRN_MMUCR,r5
-	tlbsx.	r3, 0, r3
-	mtmsr	r4
-	bne	10f
-	sync
-	/* There are only 64 TLB entries, so r3 < 64,
-	 * which means bit 22, is clear.  Since 22 is
-	 * the V bit in the TLB_PAGEID, loading this
-	 * value will invalidate the TLB entry.
-	 */
-	tlbwe	r3, r3, PPC44x_TLB_PAGEID
-	isync
-10:
-#elif defined(CONFIG_FSL_BOOKE)
-	rlwinm	r4, r3, 0, 0, 19
-	ori	r5, r4, 0x08	/* TLBSEL = 1 */
-	tlbivax	0, r4
-	tlbivax	0, r5
-	msync
-#if defined(CONFIG_SMP)
-	tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
-	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
-	lwz	r8,TI_CPU(r8)
-	oris	r8,r8,11
-	mfmsr	r10
-	SYNC
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
-	rlwinm	r0,r0,0,28,26		/* clear DR */
-	mtmsr	r0
-	SYNC_601
-	isync
-	lis	r9,mmu_hash_lock@h
-	ori	r9,r9,mmu_hash_lock@l
-	tophys(r9,r9)
-10:	lwarx	r7,0,r9
-	cmpwi	0,r7,0
-	bne-	10b
-	stwcx.	r8,0,r9
-	bne-	10b
-	eieio
-	tlbie	r3
-	sync
-	TLBSYNC
-	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
-	mtmsr	r10
-	SYNC_601
-	isync
-#else /* CONFIG_SMP */
-	tlbie	r3
-	sync
-#endif /* CONFIG_SMP */
-#endif /* ! CONFIG_40x */
-	blr
-
-#if defined(CONFIG_FSL_BOOKE)
-/*
- * Flush MMU TLB, but only on the local processor (no broadcast)
- */
-_GLOBAL(_tlbil_all)
-#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
-			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
-	li	r3,(MMUCSR0_TLBFI)@l
-	mtspr	SPRN_MMUCSR0, r3
-1:
-	mfspr	r3,SPRN_MMUCSR0
-	andi.	r3,r3,MMUCSR0_TLBFI@l
-	bne	1b
-	blr
-
-/*
- * Flush MMU TLB for a particular process id, but only on the local processor
- * (no broadcast)
- */
-_GLOBAL(_tlbil_pid)
-/* we currently do an invalidate all since we don't have per pid invalidate */
-	li	r3,(MMUCSR0_TLBFI)@l
-	mtspr	SPRN_MMUCSR0, r3
-1:
-	mfspr	r3,SPRN_MMUCSR0
-	andi.	r3,r3,MMUCSR0_TLBFI@l
-	bne	1b
-	msync
-	isync
-	blr
-
-/*
- * Flush MMU TLB for a particular address, but only on the local processor
- * (no broadcast)
- */
-_GLOBAL(_tlbil_va)
-	mfmsr	r10
-	wrteei	0
-	slwi	r4,r4,16
-	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
-	tlbsx	0,r3
-	mfspr	r4,SPRN_MAS1		/* check valid */
-	andis.	r3,r4,MAS1_VALID@h
-	beq	1f
-	rlwinm	r4,r4,0,1,31
-	mtspr	SPRN_MAS1,r4
-	tlbwe
-	msync
-	isync
-1:	wrtee	r10
-	blr
-#endif /* CONFIG_FSL_BOOKE */
-
 
 /*
  * Flush instruction cache.
@@ -650,8 +426,8 @@ _GLOBAL(__flush_dcache_icache)
 BEGIN_FTR_SECTION
 	blr
 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
-	rlwinm	r3,r3,0,0,19			/* Get page base address */
-	li	r4,4096/L1_CACHE_BYTES		/* Number of lines in a page */
+	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
+	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
 	mtctr	r4
 	mr	r6,r3
 0:	dcbst	0,r3				/* Write line to ram */
@@ -691,8 +467,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	rlwinm	r0,r10,0,28,26			/* clear DR */
 	mtmsr	r0
 	isync
-	rlwinm	r3,r3,0,0,19			/* Get page base address */
-	li	r4,4096/L1_CACHE_BYTES		/* Number of lines in a page */
+	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
+	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
 	mtctr	r4
 	mr	r6,r3
 0:	dcbst	0,r3				/* Write line to ram */
@@ -716,7 +492,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
  * void clear_pages(void *page, int order) ;
  */
 _GLOBAL(clear_pages)
-	li	r0,4096/L1_CACHE_BYTES
+	li	r0,PAGE_SIZE/L1_CACHE_BYTES
 	slw	r0,r0,r4
 	mtctr	r0
 #ifdef CONFIG_8xx
@@ -774,7 +550,7 @@ _GLOBAL(copy_page)
 	dcbt	r5,r4
 	li	r11,L1_CACHE_BYTES+4
 #endif /* MAX_COPY_PREFETCH */
-	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
+	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
 	crclr	4*cr0+eq
 2:
 	mtctr	r0