Diffstat (limited to 'mm/percpu-vm.c')

 mm/percpu-vm.c | 27 +--------------------------
 1 file changed, 1 insertion(+), 26 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index d9e0b615492e..edf709793318 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -265,7 +265,7 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * @size: size of the area to populate in bytes
  *
  * For each cpu, populate and map pages [@page_start,@page_end) into
- * @chunk. The area is cleared on return.
+ * @chunk.
  *
  * CONTEXT:
  * pcpu_alloc_mutex, does GFP_KERNEL allocation.
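The dropped sentence is a contract change: pcpu_populate_chunk() no longer guarantees that the populated area is zeroed before it returns. The clearing presumably moves to the common caller in mm/percpu.c, which this file-limited view does not show; as a minimal sketch only, mirroring the memset loop removed later in this diff, a caller-side clear would look roughly like:

	/*
	 * Illustrative sketch, not part of this patch: zero the freshly
	 * populated area in every possible CPU's copy of the chunk, now
	 * that pcpu_populate_chunk() no longer does it.
	 */
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);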
@@ -276,18 +276,8 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	int page_end = PFN_UP(off + size);
 	int free_end = page_start, unmap_end = page_start;
 	struct page **pages;
-	unsigned int cpu;
 	int rs, re, rc;
 
-	/* quick path, check whether all pages are already there */
-	rs = page_start;
-	pcpu_next_pop(chunk, &rs, &re, page_end);
-	if (rs == page_start && re == page_end)
-		goto clear;
-
-	/* need to allocate and map pages, this chunk can't be immutable */
-	WARN_ON(chunk->immutable);
-
 	pages = pcpu_get_pages(chunk);
 	if (!pages)
 		return -ENOMEM;
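The block deleted above was pcpu_populate_chunk()'s fast path: pcpu_next_pop() advances *rs/*re to the next populated region, so getting back exactly [page_start, page_end) meant every page was already backed and the function could skip straight to the (also removed) clear: label. WARN_ON(chunk->immutable) caught attempts to populate an immutable chunk. With both gone, that filtering presumably happens at the call site instead. A sketch under that assumption, using an unpopulated-region iterator in the spirit of the pcpu_for_each_pop_region() helper seen further down (the exact helper used by the caller is an assumption here):

	/*
	 * Illustrative sketch of a caller in mm/percpu.c (not shown in this
	 * file-limited view): only populate regions that actually lack
	 * pages, and keep the immutable-chunk assertion at this level.
	 */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		WARN_ON(chunk->immutable);

		rc = pcpu_populate_chunk(chunk, rs << PAGE_SHIFT,
					 (re - rs) << PAGE_SHIFT);
		if (rc)
			break;	/* caller unwinds however it sees fit */
	}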
@@ -308,10 +298,6 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	}
 	pcpu_post_map_flush(chunk, page_start, page_end);
 
-	bitmap_set(chunk->populated, page_start, page_end - page_start);
-clear:
-	for_each_possible_cpu(cpu)
-		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
 	return 0;
 
 err_unmap:
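Besides the clearing loop (see the sketch after the first hunk), the bookkeeping that marked the range as populated also leaves the function, and the clear: label goes away with its only user, the removed fast path. Assuming the caller now updates chunk->populated after a successful populate, the moved bookkeeping amounts to:

	/*
	 * Illustrative sketch, caller side: record that the pages in
	 * [page_start, page_end) are now backed so later walks of
	 * chunk->populated see them.
	 */
	bitmap_set(chunk->populated, page_start, page_end - page_start);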
@@ -345,15 +331,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	struct page **pages;
 	int rs, re;
 
-	/* quick path, check whether it's empty already */
-	rs = page_start;
-	pcpu_next_unpop(chunk, &rs, &re, page_end);
-	if (rs == page_start && re == page_end)
-		return;
-
-	/* immutable chunks can't be depopulated */
-	WARN_ON(chunk->immutable);
-
 	/*
 	 * If control reaches here, there must have been at least one
 	 * successful population attempt so the temp pages array must
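pcpu_depopulate_chunk() loses the mirror-image fast path: pcpu_next_unpop() returning exactly [page_start, page_end) meant nothing in the range was populated, so there was nothing to unmap or free, and the WARN_ON() guarded the same immutability invariant as on the populate side. Assuming the guard moves to the caller rather than disappearing, a minimal sketch:

	/*
	 * Illustrative sketch, caller side: skip ranges with nothing
	 * populated and keep the immutable-chunk assertion before asking
	 * pcpu_depopulate_chunk() to do the actual unmap/free work.
	 */
	rs = page_start;
	pcpu_next_unpop(chunk, &rs, &re, page_end);
	if (rs != page_start || re != page_end) {
		WARN_ON(chunk->immutable);
		pcpu_depopulate_chunk(chunk, off, size);
	}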
@@ -372,8 +349,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 
 	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
 		pcpu_free_pages(chunk, pages, rs, re);
-
-	bitmap_clear(chunk->populated, page_start, page_end - page_start);
 }
 
 static struct pcpu_chunk *pcpu_create_chunk(void)
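As on the populate side, the chunk->populated bookkeeping moves out of the function: once the pages have been unmapped and freed, clearing the corresponding bits is presumably left to the caller. A sketch under that assumption:

	/*
	 * Illustrative sketch, caller side: after pcpu_depopulate_chunk()
	 * returns, drop the range from the populated bitmap.
	 */
	bitmap_clear(chunk->populated, page_start, page_end - page_start);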