author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-12-18 14:13:42 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-12-20 22:21:16 -0500
commit		2a4aca1144394653269720ffbb5a325a77abd5fa (patch)
tree		553bbcbb294ac5923f72430b7317b5c80a27141c /arch/powerpc
parent		f048aace29e007f2b642097e2da8231e0e9cce2d (diff)
powerpc/mm: Split low level tlb invalidate for nohash processors
Currently, the various forms of low level TLB invalidations are all
implemented in misc_32.S for 32-bit processors, in a fairly scary mess
of #ifdef's and with interesting duplication such as a whole bunch of
code for FSL _tlbie and _tlbia which are no longer used.

This moves things around such that _tlbie is now defined in
hash_low_32.S and is only used by the 32-bit hash code, and all nohash
CPUs use the various _tlbil_* forms that are now moved to a new file,
tlb_nohash_low.S.

I moved all the definitions for that stuff out of
include/asm/tlbflush.h as they are really internal mm stuff, into
mm/mmu_decl.h.

The code should have no functional changes. I kept some variants inline
for trivial forms on things like 40x and 8xx.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
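For orientation before the diff: a minimal sketch of the caller-visible
result on nohash CPUs. The _tlbil_* prototypes are the real ones this
patch moves into arch/powerpc/mm/mmu_decl.h; the two wrapper functions
are hypothetical, for illustration only.

	/* Hypothetical callers -- not part of this patch. */
	#include "mmu_decl.h"		/* _tlbil_all/_tlbil_pid/_tlbil_va */

	static void drop_one_page(unsigned long ea, unsigned int pid)
	{
		_tlbil_va(ea, pid);	/* invalidate one translation, local CPU */
	}

	static void drop_context(unsigned int pid)
	{
		_tlbil_pid(pid);	/* invalidate everything tagged with a PID */
	}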
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/tlbflush.h	 14
-rw-r--r--	arch/powerpc/kernel/misc_32.S		233
-rw-r--r--	arch/powerpc/kvm/powerpc.c		  2
-rw-r--r--	arch/powerpc/mm/Makefile		  3
-rw-r--r--	arch/powerpc/mm/hash_low_32.S		 76
-rw-r--r--	arch/powerpc/mm/mmu_decl.h		 48
-rw-r--r--	arch/powerpc/mm/tlb_nohash_low.S	165
7 files changed, 292 insertions, 249 deletions
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 8c39b27c1ed7..abbe3419d1dd 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -33,17 +33,6 @@
 
 #define MMU_NO_CONTEXT		((unsigned int)-1)
 
-extern void _tlbil_all(void);
-extern void _tlbil_pid(unsigned int pid);
-extern void _tlbil_va(unsigned long address, unsigned int pid);
-extern void _tlbivax_bcast(unsigned long address, unsigned int pid);
-
-#if defined(CONFIG_40x) || defined(CONFIG_8xx)
-#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
-#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
-extern void _tlbia(void);
-#endif
-
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
@@ -65,9 +54,6 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 /*
  * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
  */
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 2c2ab89f0b64..ae0d084b6a24 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -272,239 +272,6 @@ _GLOBAL(real_writeb)
 
 #endif /* CONFIG_40x */
 
-/*
- * Flush MMU TLB
- */
-#ifndef CONFIG_FSL_BOOKE
-_GLOBAL(_tlbil_all)
-_GLOBAL(_tlbil_pid)
-#endif
-_GLOBAL(_tlbia)
-#if defined(CONFIG_40x)
-	sync			/* Flush to memory before changing mapping */
-	tlbia
-	isync			/* Flush shadow TLB */
-#elif defined(CONFIG_44x)
-	li	r3,0
-	sync
-
-	/* Load high watermark */
-	lis	r4,tlb_44x_hwater@ha
-	lwz	r5,tlb_44x_hwater@l(r4)
-
-1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
-	addi	r3,r3,1
-	cmpw	0,r3,r5
-	ble	1b
-
-	isync
-#elif defined(CONFIG_FSL_BOOKE)
-	/* Invalidate all entries in TLB0 */
-	li	r3, 0x04
-	tlbivax	0,3
-	/* Invalidate all entries in TLB1 */
-	li	r3, 0x0c
-	tlbivax	0,3
-	msync
-#ifdef CONFIG_SMP
-	tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
-	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
-	lwz	r8,TI_CPU(r8)
-	oris	r8,r8,10
-	mfmsr	r10
-	SYNC
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
-	rlwinm	r0,r0,0,28,26		/* clear DR */
-	mtmsr	r0
-	SYNC_601
-	isync
-	lis	r9,mmu_hash_lock@h
-	ori	r9,r9,mmu_hash_lock@l
-	tophys(r9,r9)
-10:	lwarx	r7,0,r9
-	cmpwi	0,r7,0
-	bne-	10b
-	stwcx.	r8,0,r9
-	bne-	10b
-	sync
-	tlbia
-	sync
-	TLBSYNC
-	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
-	mtmsr	r10
-	SYNC_601
-	isync
-#else /* CONFIG_SMP */
-	sync
-	tlbia
-	sync
-#endif /* CONFIG_SMP */
-#endif /* ! defined(CONFIG_40x) */
-	blr
-
-/*
- * Flush MMU TLB for a particular address
- */
-#ifndef CONFIG_FSL_BOOKE
-_GLOBAL(_tlbil_va)
-#endif
-_GLOBAL(_tlbie)
-#if defined(CONFIG_40x)
-	/* We run the search with interrupts disabled because we have to change
-	 * the PID and I don't want to preempt when that happens.
-	 */
-	mfmsr	r5
-	mfspr	r6,SPRN_PID
-	wrteei	0
-	mtspr	SPRN_PID,r4
-	tlbsx.	r3, 0, r3
-	mtspr	SPRN_PID,r6
-	wrtee	r5
-	bne	10f
-	sync
-	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
-	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
-	 * the TLB entry. */
-	tlbwe	r3, r3, TLB_TAG
-	isync
-10:
-
-#elif defined(CONFIG_44x)
-	mfspr	r5,SPRN_MMUCR
-	rlwimi	r5,r4,0,24,31			/* Set TID */
-
-	/* We have to run the search with interrupts disabled, even critical
-	 * and debug interrupts (in fact the only critical exceptions we have
-	 * are debug and machine check).  Otherwise an interrupt which causes
-	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r4
-	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
-	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r4,r6
-	mtmsr	r6
-	mtspr	SPRN_MMUCR,r5
-	tlbsx.	r3, 0, r3
-	mtmsr	r4
-	bne	10f
-	sync
-	/* There are only 64 TLB entries, so r3 < 64,
-	 * which means bit 22, is clear.  Since 22 is
-	 * the V bit in the TLB_PAGEID, loading this
-	 * value will invalidate the TLB entry.
-	 */
-	tlbwe	r3, r3, PPC44x_TLB_PAGEID
-	isync
-10:
-#elif defined(CONFIG_FSL_BOOKE)
-	rlwinm	r4, r3, 0, 0, 19
-	ori	r5, r4, 0x08	/* TLBSEL = 1 */
-	tlbivax	0, r4
-	tlbivax	0, r5
-	msync
-#if defined(CONFIG_SMP)
-	tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
-	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
-	lwz	r8,TI_CPU(r8)
-	oris	r8,r8,11
-	mfmsr	r10
-	SYNC
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
-	rlwinm	r0,r0,0,28,26		/* clear DR */
-	mtmsr	r0
-	SYNC_601
-	isync
-	lis	r9,mmu_hash_lock@h
-	ori	r9,r9,mmu_hash_lock@l
-	tophys(r9,r9)
-10:	lwarx	r7,0,r9
-	cmpwi	0,r7,0
-	bne-	10b
-	stwcx.	r8,0,r9
-	bne-	10b
-	eieio
-	tlbie	r3
-	sync
-	TLBSYNC
-	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
-	mtmsr	r10
-	SYNC_601
-	isync
-#else /* CONFIG_SMP */
-	tlbie	r3
-	sync
-#endif /* CONFIG_SMP */
-#endif /* ! CONFIG_40x */
-	blr
-
-#if defined(CONFIG_FSL_BOOKE)
-/*
- * Flush MMU TLB, but only on the local processor (no broadcast)
- */
-_GLOBAL(_tlbil_all)
-#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
-			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
-	li	r3,(MMUCSR0_TLBFI)@l
-	mtspr	SPRN_MMUCSR0, r3
-1:
-	mfspr	r3,SPRN_MMUCSR0
-	andi.	r3,r3,MMUCSR0_TLBFI@l
-	bne	1b
-	blr
-
-/*
- * Flush MMU TLB for a particular process id, but only on the local processor
- * (no broadcast)
- */
-_GLOBAL(_tlbil_pid)
-/* we currently do an invalidate all since we don't have per pid invalidate */
-	li	r3,(MMUCSR0_TLBFI)@l
-	mtspr	SPRN_MMUCSR0, r3
-1:
-	mfspr	r3,SPRN_MMUCSR0
-	andi.	r3,r3,MMUCSR0_TLBFI@l
-	bne	1b
-	msync
-	isync
-	blr
-
-/*
- * Flush MMU TLB for a particular address, but only on the local processor
- * (no broadcast)
- */
-_GLOBAL(_tlbil_va)
-	mfmsr	r10
-	wrteei	0
-	slwi	r4,r4,16
-	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
-	tlbsx	0,r3
-	mfspr	r4,SPRN_MAS1		/* check valid */
-	andis.	r3,r4,MAS1_VALID@h
-	beq	1f
-	rlwinm	r4,r4,0,1,31
-	mtspr	SPRN_MAS1,r4
-	tlbwe
-	msync
-	isync
-1:	wrtee	r10
-	blr
-#endif /* CONFIG_FSL_BOOKE */
-
-/*
- * Nobody implements this yet
- */
-_GLOBAL(_tlbivax_bcast)
-1:	trap
-	EMIT_BUG_ENTRY	1b,__FILE__,__LINE__,0;
-	blr
-
 
 /*
  * Flush instruction cache.
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index fda9baada132..eb955d755c9a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -330,7 +330,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	/* XXX It would be nice to differentiate between heavyweight exit and
 	 * sched_out here, since we could avoid the TLB flush for heavyweight
 	 * exits. */
-	_tlbia();
+	_tlbil_all();
 }
 
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index af987df8d5a3..953cc4a1cde5 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -9,7 +9,8 @@ endif
 obj-y				:= fault.o mem.o pgtable.o \
 				   init_$(CONFIG_WORD_SIZE).o \
 				   pgtable_$(CONFIG_WORD_SIZE).o
-obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o
+obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o \
+				   tlb_nohash_low.o
 hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
 obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o \
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index c5536b8b37a9..c8eac22a8f00 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -633,3 +633,79 @@ _GLOBAL(flush_hash_patch_B)
 	SYNC_601
 	isync
 	blr
+
+/*
+ * Flush an entry from the TLB
+ */
+_GLOBAL(_tlbie)
+#ifdef CONFIG_SMP
+	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
+	lwz	r8,TI_CPU(r8)
+	oris	r8,r8,11
+	mfmsr	r10
+	SYNC
+	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r0,0,28,26		/* clear DR */
+	mtmsr	r0
+	SYNC_601
+	isync
+	lis	r9,mmu_hash_lock@h
+	ori	r9,r9,mmu_hash_lock@l
+	tophys(r9,r9)
+10:	lwarx	r7,0,r9
+	cmpwi	0,r7,0
+	bne-	10b
+	stwcx.	r8,0,r9
+	bne-	10b
+	eieio
+	tlbie	r3
+	sync
+	TLBSYNC
+	li	r0,0
+	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	mtmsr	r10
+	SYNC_601
+	isync
+#else /* CONFIG_SMP */
+	tlbie	r3
+	sync
+#endif /* CONFIG_SMP */
+	blr
+
+/*
+ * Flush the entire TLB. 603/603e only
+ */
+_GLOBAL(_tlbia)
+#if defined(CONFIG_SMP)
+	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
+	lwz	r8,TI_CPU(r8)
+	oris	r8,r8,10
+	mfmsr	r10
+	SYNC
+	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r0,0,28,26		/* clear DR */
+	mtmsr	r0
+	SYNC_601
+	isync
+	lis	r9,mmu_hash_lock@h
+	ori	r9,r9,mmu_hash_lock@l
+	tophys(r9,r9)
+10:	lwarx	r7,0,r9
+	cmpwi	0,r7,0
+	bne-	10b
+	stwcx.	r8,0,r9
+	bne-	10b
+	sync
+	tlbia
+	sync
+	TLBSYNC
+	li	r0,0
+	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	mtmsr	r10
+	SYNC_601
+	isync
+#else /* CONFIG_SMP */
+	sync
+	tlbia
+	sync
+#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index b4344fd30f2a..4314b39b6faf 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -22,10 +22,58 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 
+#ifdef CONFIG_PPC_MMU_NOHASH
+
+/*
+ * On 40x and 8xx, we directly inline tlbia and tlbivax
+ */
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+static inline void _tlbil_all(void)
+{
+	asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+static inline void _tlbil_pid(unsigned int pid)
+{
+	asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+#else /* CONFIG_40x || CONFIG_8xx */
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+#endif /* !(CONFIG_40x || CONFIG_8xx) */
+
+/*
+ * On 8xx, we directly inline tlbie, on others, it's extern
+ */
+#ifdef CONFIG_8xx
+static inline void _tlbil_va(unsigned long address, unsigned int pid)
+{
+	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
+}
+#else /* CONFIG_8xx */
+extern void _tlbil_va(unsigned long address, unsigned int pid);
+#endif /* CONFIG_8xx */
+
+/*
+ * As of today, we don't support tlbivax broadcast on any
+ * implementation. When that becomes the case, this will be
+ * an extern.
+ */
+static inline void _tlbivax_bcast(unsigned long address, unsigned int pid)
+{
+	BUG();
+}
+
+#else /* CONFIG_PPC_MMU_NOHASH */
+
 extern void hash_preload(struct mm_struct *mm, unsigned long ea,
 			 unsigned long access, unsigned long trap);
 
 
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+#endif /* CONFIG_PPC_MMU_NOHASH */
+
 #ifdef CONFIG_PPC32
 extern void mapin_ram(void);
 extern int map_page(unsigned long va, phys_addr_t pa, int flags);
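Worth spelling out about the hunk above: the inlined 40x/8xx variants
ignore their pid argument, since tlbia invalidates every entry, so a
per-PID request silently widens to a full-TLB flush on those cores. A
minimal sketch, assuming a kernel build where mm/mmu_decl.h is in scope
(the function name is illustrative, not from this patch):

	static inline void invalidate_mm_context(unsigned int pid)
	{
		/* 40x/8xx: inline "sync; tlbia; isync" (pid unused);
		 * 44x/FSL BookE: out-of-line code in mm/tlb_nohash_low.S */
		_tlbil_pid(pid);
	}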
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
new file mode 100644
index 000000000000..763c59fe0076
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -0,0 +1,165 @@
+/*
+ * This file contains low-level functions for performing various
+ * types of TLB invalidations on various processors with no hash
+ * table.
+ *
+ * This file implements the following functions for all no-hash
+ * processors. Some aren't implemented for some variants. Some
+ * are inline in tlbflush.h
+ *
+ *	- tlbil_va
+ *	- tlbil_pid
+ *	- tlbil_all
+ *	- tlbivax_bcast (not yet)
+ *
+ * Code mostly moved over from misc_32.S
+ *
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+#if defined(CONFIG_40x)
+
+/*
+ * 40x implementation needs only tlbil_va
+ */
+_GLOBAL(_tlbil_va)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
+	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
+	bne	1f
+	sync
+	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
+	 * clear. Since 25 is the V bit in the TLB_TAG, loading this value
+	 * will invalidate the TLB entry. */
+	tlbwe	r3, r3, TLB_TAG
+	isync
+1:	blr
+
+#elif defined(CONFIG_8xx)
+
+/*
+ * Nothing to do for 8xx, everything is inline
+ */
+
+#elif defined(CONFIG_44x)
+
+/*
+ * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
+ * of the TLB for everything else.
+ */
+_GLOBAL(_tlbil_va)
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31			/* Set TID */
+
+	/* We have to run the search with interrupts disabled, even critical
+	 * and debug interrupts (in fact the only critical exceptions we have
+	 * are debug and machine check).  Otherwise an interrupt which causes
+	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
+	mfmsr	r4
+	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
+	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
+	andc	r6,r4,r6
+	mtmsr	r6
+	mtspr	SPRN_MMUCR,r5
+	tlbsx.	r3, 0, r3
+	mtmsr	r4
+	bne	1f
+	sync
+	/* There are only 64 TLB entries, so r3 < 64,
+	 * which means bit 22, is clear.  Since 22 is
+	 * the V bit in the TLB_PAGEID, loading this
+	 * value will invalidate the TLB entry.
+	 */
+	tlbwe	r3, r3, PPC44x_TLB_PAGEID
+	isync
+1:	blr
+
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+	li	r3,0
+	sync
+
+	/* Load high watermark */
+	lis	r4,tlb_44x_hwater@ha
+	lwz	r5,tlb_44x_hwater@l(r4)
+
+1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
+	addi	r3,r3,1
+	cmpw	0,r3,r5
+	ble	1b
+
+	isync
+	blr
+
+#elif defined(CONFIG_FSL_BOOKE)
+/*
+ * FSL BookE implementations. Currently _pid and _all are the
+ * same. This will change when tlbilx is actually supported and
+ * performs invalidate-by-PID. This change will be driven by
+ * mmu_features conditional
+ */
+
+/*
+ * Flush MMU TLB on the local processor
+ */
+_GLOBAL(_tlbil_pid)
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+	li	r3,(MMUCSR0_TLBFI)@l
+	mtspr	SPRN_MMUCSR0, r3
+1:
+	mfspr	r3,SPRN_MMUCSR0
+	andi.	r3,r3,MMUCSR0_TLBFI@l
+	bne	1b
+	msync
+	isync
+	blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+	mfmsr	r10
+	wrteei	0
+	slwi	r4,r4,16
+	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
+	tlbsx	0,r3
+	mfspr	r4,SPRN_MAS1		/* check valid */
+	andis.	r3,r4,MAS1_VALID@h
+	beq	1f
+	rlwinm	r4,r4,0,1,31
+	mtspr	SPRN_MAS1,r4
+	tlbwe
+	msync
+	isync
+1:	wrtee	r10
+	blr
+#else
+#error Unsupported processor type !
+#endif
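To close the loop, a hedged sketch of how a per-page flush can reach
_tlbil_va() on a nohash CPU. The wrapper is illustrative only (the real
dispatch lives in mm/tlb_nohash.c, untouched by this patch), and it
assumes mm->context.id carries the hardware PID, with MMU_NO_CONTEXT
marking an mm whose context has not been allocated yet:

	static void example_flush_page(struct mm_struct *mm, unsigned long vmaddr)
	{
		unsigned int pid = mm->context.id;	/* assumed layout */

		if (pid != MMU_NO_CONTEXT)
			_tlbil_va(vmaddr, pid);	/* local-only; _tlbivax_bcast is
						 * the future SMP broadcast and
						 * currently traps */
	}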