author    | David Gibson <david@gibson.dropbear.id.au> | 2007-04-23 23:09:12 -0400
committer | Paul Mackerras <paulus@samba.org>          | 2007-04-24 08:08:56 -0400
commit    | 621023072524fc0155ed16490255e1ea3aa11585 (patch)
tree      | 77fec16321fe72ef75532c4f07ffee004b57bbfe
parent    | 687304014f7ca8e2fbb3feaefef356b4a0da65ad (diff)
[POWERPC] Cleanup and fix breakage in tlbflush.h
BenH's commit a741e67969577163a4cfc78d7fd2753219087ef1 in powerpc.git,
although (AFAICT) only intended to affect ppc64, also has side-effects
which break 44x. I think 40x, 8xx and Freescale Book E are also
affected, though I haven't tested them.
The problem lies in unconditionally removing flush_tlb_pending() from
the versions of flush_tlb_mm(), flush_tlb_range() and
flush_tlb_kernel_range() used on ppc64 - which are also used on the
embedded platforms mentioned above.
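For illustration, this is roughly what those shared definitions became once flush_tlb_pending() was dropped (condensed from the old tlbflush.h as it appears in the diff below): on 44x and the other software-TLB parts, flush_tlb_mm() and flush_tlb_range() degenerate into no-ops, so stale translations are never invalidated.

```c
/* Old shared stubs, used for CONFIG_PPC64 and also for the
 * 4xx/8xx/Freescale Book E software-TLB parts.  With the
 * flush_tlb_pending() calls removed, these no longer invalidate
 * anything on the embedded CPUs. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}
```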
The patch below cleans up the convoluted #ifdef logic in tlbflush.h,
in the process restoring the necessary flushes for the software TLB
platforms. There are three sets of definitions for the flushing
hooks: the software TLB versions (revised to avoid using names which
appear to be related to TLB batching), the 32-bit hash-based versions
(external functions) and the 64-bit hash-based versions (which
implement batching).
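In outline, the reworked header selects exactly one of those three blocks; the following is a condensed sketch of the structure visible in the tlbflush.h diff below.

```c
#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/* software-loaded TLBs: inline helpers calling _tlbie()/_tlbia(),
 * so every flush takes effect immediately */
#elif defined(CONFIG_PPC32)
/* 32-bit hash MMU (6xx/7xx/7xxx): out-of-line flush_tlb_*() functions */
#else
/* 64-bit hash MMU: no-op inlines; the real work is done by the
 * batching machinery (flush_tlb_pending() and friends) */
#endif
```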
It also moves the declaration of update_mmu_cache() to always be in
tlbflush.h (previously it was in tlbflush.h except for PPC64, where it
was in pgtable.h).
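Concretely, the declaration now common to all platforms (as it appears in the new tlbflush.h in the diff below) is:

```c
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
```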
Booted on Ebony (440GP) and compiled for 64-bit and 32-bit
multiplatform.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r-- | arch/powerpc/mm/mmu_decl.h     |   1
-rw-r--r-- | include/asm-powerpc/pgtable.h  |  10
-rw-r--r-- | include/asm-powerpc/tlbflush.h | 135
3 files changed, 81 insertions, 65 deletions
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index ee55e0bb28bc..9c4538bb04b0 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -19,6 +19,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  */
+#include <linux/mm.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index c7142c7e0e05..19edb6982b81 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -448,16 +448,6 @@ extern pgd_t swapper_pg_dir[];
 
 extern void paging_init(void);
 
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to put a corresponding HPTE into the hash table
- * ahead of time, instead of waiting for the inevitable extra
- * hash-table miss exception.
- */
-struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
 /* Encode and de-code a swap entry */
 #define __swp_type(entry)		(((entry).val >> 1) & 0x3f)
 #define __swp_offset(entry)		((entry).val >> 8)
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 0bc5a5e506be..86e6266a028b 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -17,10 +17,73 @@
  */
 #ifdef __KERNEL__
 
-
 struct mm_struct;
+struct vm_area_struct;
+
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+extern void _tlbie(unsigned long address);
+
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
+#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
+extern void _tlbia(void);
+#endif
 
-#ifdef CONFIG_PPC64
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+					 unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	_tlbia();
+}
+
+#elif defined(CONFIG_PPC32)
+/*
+ * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else
+/*
+ * TLB flushing for 64-bit has-MMU CPUs
+ */
 
 #include <linux/percpu.h>
 #include <asm/page.h>
@@ -67,89 +130,51 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
 			    int local);
 extern void flush_hash_range(unsigned long number, int local);
 
-#else /* CONFIG_PPC64 */
-
-#include <linux/mm.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-/*
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
- * flush_tlb_kernel_range are best implemented as tlbia vs
- * specific tlbie's
- */
-
-#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
-#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
-#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
-#define flush_tlb_pending()	_tlbia()
-#endif
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* CONFIG_PPC64 */
-
-#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
-	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-#ifndef CONFIG_PPC64
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-#ifndef CONFIG_PPC64
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
 }
 
-#else /* 6xx, 7xx, 7xxx cpus */
-
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
 #endif
 
 /*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+/*
  * This is called in munmap when we have freed up some page-table
  * pages. We don't need to do anything here, there's nothing special
  * about our page-table pages. -- paulus
  */
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 }
 