Diffstat (limited to 'arch')

-rw-r--r--  arch/arm/include/asm/tlb.h  | 27 ++++-----------------------
-rw-r--r--  arch/ia64/include/asm/tlb.h | 41 ++++++++---------------------------------

2 files changed, 12 insertions(+), 56 deletions(-)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 99a19512ee26..bdf2b8458ec1 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,18 +33,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)	0
-#else
-#define tlb_fast_mode(tlb)	1
-#endif
-
 #define MMU_GATHER_BUNDLE	8
 
 /*
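The hunk above deletes the mode selector itself: tlb_fast_mode() chose between two page-freeing strategies, and with it gone every ARM configuration takes the batched path that SMP and ARMv7 already used. A condensed sketch of what the macro used to select, paraphrased from the two branches this patch removes (not a literal quote of either file):

	if (tlb_fast_mode(tlb)) {
		/* UP, no speculative prefetch: once the local TLB entry
		 * is gone nothing can reach the page, so free it now. */
		free_page_and_swap_cache(page);
	} else {
		/* Another CPU (or the ARMv7 prefetcher) may still hold a
		 * stale entry: batch the page and free it only after
		 * tlb_flush_mmu() has invalidated the TLB. */
		tlb->pages[tlb->nr++] = page;
	}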
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-		if (tlb->pages == tlb->local)
-			__tlb_alloc_page(tlb);
-	}
+	free_pages_and_swap_cache(tlb->pages, tlb->nr);
+	tlb->nr = 0;
+	if (tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
 }
 
 static inline void
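Note the ordering the simplified tlb_flush_mmu() preserves, which is the whole point of batching (comments added here for annotation, not present in the source):

	tlb_flush(tlb);                                 /* 1: invalidate stale TLB entries */
	free_pages_and_swap_cache(tlb->pages, tlb->nr); /* 2: only then recycle the pages  */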
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	tlb->pages[tlb->nr++] = page;
 	VM_BUG_ON(tlb->nr > tlb->max);
 	return tlb->max - tlb->nr;
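With the early-return fast path gone, __tlb_remove_page() always batches, and its return value is simply the number of free batch slots remaining: zero tells the caller to flush. A rough caller-side sketch of how the mmu_gather API of this era is typically driven; the function names are the generic kernel API, but the loop body here is illustrative, not taken from this patch:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);            /* 0: not a full-mm teardown   */
	/* ... for each page being unmapped ... */
	if (!__tlb_remove_page(&tlb, page))     /* 0 => batch is full          */
		tlb_flush_mmu(&tlb);            /* flush TLBs, free the batch  */
	/* ... */
	tlb_finish_mmu(&tlb, start, end);       /* final flush and cleanup     */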
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index c3ffe3e54edc..ef3a9de01954 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -46,12 +46,6 @@
 #include <asm/tlbflush.h>
 #include <asm/machvec.h>
 
-#ifdef CONFIG_SMP
-# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
-#else
-# define tlb_fast_mode(tlb)	(1)
-#endif
-
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
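Unlike ARM's compile-time constant, ia64's SMP build tested a run-time sentinel: fast mode meant nr held ~0U rather than a real page count. The sentinel was planted by tlb_gather_mmu(), in a line a later hunk of this patch removes:

	/* the value the macro above tested; assigned in tlb_gather_mmu()
	 * and deleted further down in this patch: */
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;

Killing the macro therefore also means killing the sentinel everywhere nr is set or interpreted, which is what the remaining ia64 hunks do.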
@@ -60,7 +54,7 @@
 
 struct mmu_gather {
 	struct mm_struct	*mm;
-	unsigned int		nr;	/* == ~0U => fast mode */
+	unsigned int		nr;
 	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 static inline void
 ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
+	unsigned long i;
 	unsigned int nr;
 
 	if (!tlb->need_flush)
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
-	if (!tlb_fast_mode(tlb)) {
-		unsigned long i;
-		tlb->nr = 0;
-		tlb->start_addr = ~0UL;
-		for (i = 0; i < nr; ++i)
-			free_page_and_swap_cache(tlb->pages[i]);
-	}
+
+	tlb->nr = 0;
+	tlb->start_addr = ~0UL;
+	for (i = 0; i < nr; ++i)
+		free_page_and_swap_cache(tlb->pages[i]);
 }
 
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
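Two details worth calling out. The earlier hunk hoists `unsigned long i;` to function scope because its declaration previously lived inside the block removed here. And the old guard was load-bearing, not merely an optimization: in fast mode nr held the ~0U sentinel, so running the freeing loop unguarded would have walked far past tlb->pages[]. Only because the sentinel is gone (nr is now always a real count, initialized to 0 below) is the unconditional loop safe:

	/* pre-patch hazard, had the guard been dropped on its own: */
	nr = tlb->nr;                   /* == ~0U in fast mode             */
	for (i = 0; i < nr; ++i)        /* would read far past tlb->pages[] */
		free_page_and_swap_cache(tlb->pages[i]);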
@@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
-	/*
-	 * Use fast mode if only 1 CPU is online.
-	 *
-	 * It would be tempting to turn on fast-mode for full_mm_flush as well. But this
-	 * doesn't work because of speculative accesses and software prefetching: the page
-	 * table of "mm" may (and usually is) the currently active page table and even
-	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
-	 * compiler might use speculation or lfetch.fault on what happens to be a valid
-	 * user-space address. This in turn could trigger a TLB miss fault (or a VHPT
-	 * walk) and re-insert a TLB entry we just removed. Slow mode avoids such
-	 * problems. (We could make fast-mode work by switching the current task to a
-	 * different "mm" during the shootdown.) --davidm 08/02/2002
-	 */
-	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+	tlb->nr = 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
 }
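The deleted block also takes with it the 2002 davidm comment, which recorded why fast mode was never extended to full_mm_flush: speculative accesses such as lfetch.fault against the still-active page table could re-insert a just-purged TLB entry. With fast mode removed outright the caveat is moot, and initialization collapses to constants. For reference, the state tlb_gather_mmu() now leaves behind on every configuration, condensed from the post-patch lines above with annotation comments added here:

	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);      /* small on-stack batch to start */
	tlb->pages = tlb->local;
	tlb->nr = 0;                            /* always a real count now       */
	tlb->fullmm = full_mm_flush;
	tlb->start_addr = ~0UL;                 /* "no flush range yet" marker   */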
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	if (!tlb->nr && tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 