diff options
author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-12-18 14:13:42 -0500 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2008-12-20 22:21:16 -0500 |
commit | 2a4aca1144394653269720ffbb5a325a77abd5fa (patch) | |
tree | 553bbcbb294ac5923f72430b7317b5c80a27141c /arch/powerpc/mm/mmu_decl.h | |
parent | f048aace29e007f2b642097e2da8231e0e9cce2d (diff) |
powerpc/mm: Split low level tlb invalidate for nohash processors
Currently, the various forms of low level TLB invalidations are all
implemented in misc_32.S for 32-bit processors, in a fairly scary
mess of #ifdef's and with interesting duplication such as a whole
bunch of code for FSL _tlbie and _tlbia which are no longer used.
This moves things around such that _tlbie is now defined in
hash_low_32.S and is only used by the 32-bit hash code, and all
nohash CPUs use the various _tlbil_* forms that are now moved to
a new file, tlb_nohash_low.S.
I moved all the definitions for that stuff out of
include/asm/tlbflush.h as they are really internal mm stuff, into
mm/mmu_decl.h
The code should have no functional changes. I kept some variants
inline for trivial forms on things like 40x and 8xx.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/mmu_decl.h')
-rw-r--r-- | arch/powerpc/mm/mmu_decl.h | 48 |
1 file changed, 48 insertions, 0 deletions
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index b4344fd30f2..4314b39b6fa 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h | |||
@@ -22,10 +22,58 @@ | |||
22 | #include <asm/tlbflush.h> | 22 | #include <asm/tlbflush.h> |
23 | #include <asm/mmu.h> | 23 | #include <asm/mmu.h> |
24 | 24 | ||
25 | #ifdef CONFIG_PPC_MMU_NOHASH | ||
26 | |||
27 | /* | ||
28 | * On 40x and 8xx, we directly inline tlbia and tlbivax | ||
29 | */ | ||
30 | #if defined(CONFIG_40x) || defined(CONFIG_8xx) | ||
31 | static inline void _tlbil_all(void) | ||
32 | { | ||
33 | asm volatile ("sync; tlbia; isync" : : : "memory"); | ||
34 | } | ||
35 | static inline void _tlbil_pid(unsigned int pid) | ||
36 | { | ||
37 | asm volatile ("sync; tlbia; isync" : : : "memory"); | ||
38 | } | ||
39 | #else /* CONFIG_40x || CONFIG_8xx */ | ||
40 | extern void _tlbil_all(void); | ||
41 | extern void _tlbil_pid(unsigned int pid); | ||
42 | #endif /* !(CONFIG_40x || CONFIG_8xx) */ | ||
43 | |||
44 | /* | ||
45 | * On 8xx, we directly inline tlbie, on others, it's extern | ||
46 | */ | ||
47 | #ifdef CONFIG_8xx | ||
48 | static inline void _tlbil_va(unsigned long address, unsigned int pid) | ||
49 | { | ||
50 | asm volatile ("tlbie %0; sync" : : "r" (address) : "memory"); | ||
51 | } | ||
52 | #else /* CONFIG_8xx */ | ||
53 | extern void _tlbil_va(unsigned long address, unsigned int pid); | ||
54 | #endif /* CONFIG_8xx */ | ||
55 | |||
56 | /* | ||
57 | * As of today, we don't support tlbivax broadcast on any | ||
58 | * implementation. When that becomes the case, this will be | ||
59 | * an extern. | ||
60 | */ | ||
61 | static inline void _tlbivax_bcast(unsigned long address, unsigned int pid) | ||
62 | { | ||
63 | BUG(); | ||
64 | } | ||
65 | |||
66 | #else /* CONFIG_PPC_MMU_NOHASH */ | ||
67 | |||
25 | extern void hash_preload(struct mm_struct *mm, unsigned long ea, | 68 | extern void hash_preload(struct mm_struct *mm, unsigned long ea, |
26 | unsigned long access, unsigned long trap); | 69 | unsigned long access, unsigned long trap); |
27 | 70 | ||
28 | 71 | ||
72 | extern void _tlbie(unsigned long address); | ||
73 | extern void _tlbia(void); | ||
74 | |||
75 | #endif /* CONFIG_PPC_MMU_NOHASH */ | ||
76 | |||
29 | #ifdef CONFIG_PPC32 | 77 | #ifdef CONFIG_PPC32 |
30 | extern void mapin_ram(void); | 78 | extern void mapin_ram(void); |
31 | extern int map_page(unsigned long va, phys_addr_t pa, int flags); | 79 | extern int map_page(unsigned long va, phys_addr_t pa, int flags); |