diff options
author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-12-18 14:13:42 -0500 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2008-12-20 22:21:16 -0500 |
commit | 2a4aca1144394653269720ffbb5a325a77abd5fa (patch) | |
tree | 553bbcbb294ac5923f72430b7317b5c80a27141c /arch/powerpc/include | |
parent | f048aace29e007f2b642097e2da8231e0e9cce2d (diff) |
powerpc/mm: Split low level tlb invalidate for nohash processors
Currently, the various forms of low level TLB invalidations are all
implemented in misc_32.S for 32-bit processors, in a fairly scary
mess of #ifdef's and with interesting duplication such as a whole
bunch of code for FSL _tlbie and _tlbia which are no longer used.
This moves things around such that _tlbie is now defined in
hash_low_32.S and is only used by the 32-bit hash code, and all
nohash CPUs use the various _tlbil_* forms that are now moved to
a new file, tlb_nohash_low.S.
I moved all the definitions for that stuff out of
include/asm/tlbflush.h and into mm/mmu_decl.h, as they are
really internal mm stuff.
The code should have no functional changes. I kept some variants
inline for trivial forms on things like 40x and 8xx.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r-- | arch/powerpc/include/asm/tlbflush.h | 14 |
1 file changed, 0 insertions, 14 deletions
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index 8c39b27c1ed7..abbe3419d1dd 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h | |||
@@ -33,17 +33,6 @@ | |||
33 | 33 | ||
34 | #define MMU_NO_CONTEXT ((unsigned int)-1) | 34 | #define MMU_NO_CONTEXT ((unsigned int)-1) |
35 | 35 | ||
36 | extern void _tlbil_all(void); | ||
37 | extern void _tlbil_pid(unsigned int pid); | ||
38 | extern void _tlbil_va(unsigned long address, unsigned int pid); | ||
39 | extern void _tlbivax_bcast(unsigned long address, unsigned int pid); | ||
40 | |||
41 | #if defined(CONFIG_40x) || defined(CONFIG_8xx) | ||
42 | #define _tlbia() asm volatile ("tlbia; sync" : : : "memory") | ||
43 | #else /* CONFIG_44x || CONFIG_FSL_BOOKE */ | ||
44 | extern void _tlbia(void); | ||
45 | #endif | ||
46 | |||
47 | extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | 36 | extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
48 | unsigned long end); | 37 | unsigned long end); |
49 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | 38 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); |
@@ -65,9 +54,6 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); | |||
65 | /* | 54 | /* |
66 | * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx | 55 | * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx |
67 | */ | 56 | */ |
68 | extern void _tlbie(unsigned long address); | ||
69 | extern void _tlbia(void); | ||
70 | |||
71 | extern void flush_tlb_mm(struct mm_struct *mm); | 57 | extern void flush_tlb_mm(struct mm_struct *mm); |
72 | extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); | 58 | extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); |
73 | extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); | 59 | extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); |