author     Peter Zijlstra <a.p.zijlstra@chello.nl>         2011-05-24 20:12:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-25 11:39:20 -0400
commit     9547d01bfb9c351dc19067f8a4cea9d3955f4125 (patch)
tree       3c32521dbbf380471e1eef3e11ae656b24164255 /mm/memory.c
parent     88c22088bf235f50b09a10bd9f022b0472bcb6b5 (diff)
mm: uninline large generic tlb.h functions
Some of these functions have grown beyond inline sanity, move them
out-of-line.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Requested-by: Andrew Morton <akpm@linux-foundation.org>
Requested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
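
For readers following the diff below: the newly out-of-line functions operate on the mmu_gather batching structures that this patch series defines in include/asm-generic/tlb.h. The sketch below is an approximation of those definitions for reference only; it is not part of this commit, and the authoritative layout is the header introduced by the companion "mmu_gather rework" patches.

/* Approximate sketch, for orientation only (see include/asm-generic/tlb.h). */
struct mmu_gather_batch {
        struct mmu_gather_batch *next;   /* singly linked list of batches */
        unsigned int            nr;      /* pages gathered so far in this batch */
        unsigned int            max;     /* capacity of pages[] */
        struct page             *pages[0];
};

/* Each additional batch occupies one page, so it holds roughly
 * (PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *) page pointers.
 */
#define MAX_GATHER_BATCH \
        ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))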
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  124
1 file changed, 122 insertions(+), 2 deletions(-)
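
As a rough usage sketch (not part of this patch; the caller below is hypothetical, and real callers such as the munmap/exit paths walk page tables rather than taking a page array), the functions added below are driven like this:

/* Hypothetical caller, for illustration only: gather a set of pages from
 * @mm and free them in batches via the generic mmu_gather API.
 */
static void example_unmap_pages(struct mm_struct *mm, struct page **pages,
                                int nr, unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;
        int i;

        tlb_gather_mmu(&tlb, mm, false);        /* false: not a full-mm teardown */

        for (i = 0; i < nr; i++) {
                /* __tlb_remove_page() returns the number of free slots left;
                 * 0 means the batch list is full, so flush the TLB and free
                 * what has been gathered before continuing.
                 */
                if (!__tlb_remove_page(&tlb, pages[i]))
                        tlb_flush_mmu(&tlb);
        }

        tlb_finish_mmu(&tlb, start, end);       /* final flush, free extra batches */
}

The tlb_remove_page() helper in asm-generic/tlb.h wraps this same full-batch check for ordinary callers.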
diff --git a/mm/memory.c b/mm/memory.c
index 7bbe4d3df756..b73f677f0bb1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -182,7 +182,7 @@ void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
 {
         __sync_task_rss_stat(task, mm);
 }
-#else
+#else /* SPLIT_RSS_COUNTING */
 
 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
@@ -191,8 +191,128 @@ static void check_sync_rss_stat(struct task_struct *task)
 {
 }
 
+#endif /* SPLIT_RSS_COUNTING */
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+
+static int tlb_next_batch(struct mmu_gather *tlb)
+{
+        struct mmu_gather_batch *batch;
+
+        batch = tlb->active;
+        if (batch->next) {
+                tlb->active = batch->next;
+                return 1;
+        }
+
+        batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+        if (!batch)
+                return 0;
+
+        batch->next = NULL;
+        batch->nr = 0;
+        batch->max = MAX_GATHER_BATCH;
+
+        tlb->active->next = batch;
+        tlb->active = batch;
+
+        return 1;
+}
+
+/* tlb_gather_mmu
+ *      Called to initialize an (on-stack) mmu_gather structure for page-table
+ *      tear-down from @mm. The @fullmm argument is used when @mm is without
+ *      users and we're going to destroy the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+{
+        tlb->mm = mm;
+
+        tlb->fullmm = fullmm;
+        tlb->need_flush = 0;
+        tlb->fast_mode = (num_possible_cpus() == 1);
+        tlb->local.next = NULL;
+        tlb->local.nr = 0;
+        tlb->local.max = ARRAY_SIZE(tlb->__pages);
+        tlb->active = &tlb->local;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+        tlb->batch = NULL;
+#endif
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+        struct mmu_gather_batch *batch;
+
+        if (!tlb->need_flush)
+                return;
+        tlb->need_flush = 0;
+        tlb_flush(tlb);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+        tlb_table_flush(tlb);
 #endif
 
+        if (tlb_fast_mode(tlb))
+                return;
+
+        for (batch = &tlb->local; batch; batch = batch->next) {
+                free_pages_and_swap_cache(batch->pages, batch->nr);
+                batch->nr = 0;
+        }
+        tlb->active = &tlb->local;
+}
+
+/* tlb_finish_mmu
+ *      Called at the end of the shootdown operation to free up any resources
+ *      that were required.
+ */
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+        struct mmu_gather_batch *batch, *next;
+
+        tlb_flush_mmu(tlb);
+
+        /* keep the page table cache within bounds */
+        check_pgt_cache();
+
+        for (batch = tlb->local.next; batch; batch = next) {
+                next = batch->next;
+                free_pages((unsigned long)batch, 0);
+        }
+        tlb->local.next = NULL;
+}
+
+/* __tlb_remove_page
+ *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
+ *      handling the additional races in SMP caused by other CPUs caching valid
+ *      mappings in their TLBs. Returns the number of free page slots left.
+ *      When out of page slots we must call tlb_flush_mmu().
+ */
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+        struct mmu_gather_batch *batch;
+
+        tlb->need_flush = 1;
+
+        if (tlb_fast_mode(tlb)) {
+                free_page_and_swap_cache(page);
+                return 1; /* avoid calling tlb_flush_mmu() */
+        }
+
+        batch = tlb->active;
+        batch->pages[batch->nr++] = page;
+        if (batch->nr == batch->max) {
+                if (!tlb_next_batch(tlb))
+                        return 0;
+        }
+        VM_BUG_ON(batch->nr > batch->max);
+
+        return batch->max - batch->nr;
+}
+
+#endif /* HAVE_GENERIC_MMU_GATHER */
+
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 
 /*
@@ -268,7 +388,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
         tlb_table_flush(tlb);
 }
 
-#endif
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
 /*
  * If a p?d_bad entry is found while walking page tables, report