Diffstat (limited to 'mm')
 -rw-r--r--  mm/percpu-km.c |  6
 -rw-r--r--  mm/percpu-vm.c | 57
 -rw-r--r--  mm/percpu.c    | 19
 3 files changed, 28 insertions(+), 54 deletions(-)
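The patch below moves region iteration out of the chunk populate/depopulate operations and into their callers: pcpu_populate_chunk() and pcpu_depopulate_chunk() now take a contiguous [page_start, page_end) page range instead of a byte-based (off, size) pair, and the callers in mm/percpu.c walk the unpopulated/populated regions themselves. For orientation, here is a minimal standalone model (plain userspace C, not kernel code; all names illustrative) of the region walk that pcpu_for_each_unpop_region() performs over a chunk's populated bitmap:

	#include <stdio.h>

	#define NR_PAGES 16

	/* populated[i] != 0 means page i already has backing memory */
	static const int populated[NR_PAGES] = {
		1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1,
	};

	int main(void)
	{
		int rs = 0, re;

		while (rs < NR_PAGES) {
			/* skip populated pages to find the start of a hole */
			while (rs < NR_PAGES && populated[rs])
				rs++;
			if (rs == NR_PAGES)
				break;
			/* extend to the end of the hole */
			re = rs;
			while (re < NR_PAGES && !populated[re])
				re++;
			/* a kernel caller would populate [rs, re) here */
			printf("unpopulated region: [%d, %d)\n", rs, re);
			rs = re;
		}
		return 0;
	}

Running this prints [2, 5), [6, 8) and [11, 15); the kernel iterators do essentially the same walk over chunk->populated.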
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 9a9096f08867..a6e34bc1aff4 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -33,12 +33,14 @@
 
 #include <linux/log2.h>
 
-static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
+static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
+			       int page_start, int page_end)
 {
 	return 0;
 }
 
-static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
+				  int page_start, int page_end)
 {
 	/* nada */
 }
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index edf709793318..538998a137d2 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -261,8 +261,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
 /**
  * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
  * @chunk: chunk of interest
- * @off: offset to the area to populate
- * @size: size of the area to populate in bytes
+ * @page_start: the start page
+ * @page_end: the end page
  *
  * For each cpu, populate and map pages [@page_start,@page_end) into
  * @chunk.
@@ -270,66 +270,43 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * CONTEXT:
  * pcpu_alloc_mutex, does GFP_KERNEL allocation.
  */
-static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
+static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
+			       int page_start, int page_end)
 {
-	int page_start = PFN_DOWN(off);
-	int page_end = PFN_UP(off + size);
-	int free_end = page_start, unmap_end = page_start;
 	struct page **pages;
-	int rs, re, rc;
 
 	pages = pcpu_get_pages(chunk);
 	if (!pages)
 		return -ENOMEM;
 
-	/* alloc and map */
-	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
-		rc = pcpu_alloc_pages(chunk, pages, rs, re);
-		if (rc)
-			goto err_free;
-		free_end = re;
-	}
+	if (pcpu_alloc_pages(chunk, pages, page_start, page_end))
+		return -ENOMEM;
 
-	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
-		rc = pcpu_map_pages(chunk, pages, rs, re);
-		if (rc)
-			goto err_unmap;
-		unmap_end = re;
+	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
+		pcpu_free_pages(chunk, pages, page_start, page_end);
+		return -ENOMEM;
 	}
 	pcpu_post_map_flush(chunk, page_start, page_end);
 
 	return 0;
-
-err_unmap:
-	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
-	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
-		pcpu_unmap_pages(chunk, pages, rs, re);
-	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
-err_free:
-	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
-		pcpu_free_pages(chunk, pages, rs, re);
-	return rc;
 }
 
 /**
  * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
  * @chunk: chunk to depopulate
- * @off: offset to the area to depopulate
- * @size: size of the area to depopulate in bytes
+ * @page_start: the start page
+ * @page_end: the end page
  *
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
- * from @chunk. If @flush is true, vcache is flushed before unmapping
- * and tlb after.
+ * from @chunk.
  *
  * CONTEXT:
  * pcpu_alloc_mutex.
  */
-static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
+				  int page_start, int page_end)
 {
-	int page_start = PFN_DOWN(off);
-	int page_end = PFN_UP(off + size);
 	struct page **pages;
-	int rs, re;
 
 	/*
 	 * If control reaches here, there must have been at least one
@@ -342,13 +319,11 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	/* unmap and free */
 	pcpu_pre_unmap_flush(chunk, page_start, page_end);
 
-	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
-		pcpu_unmap_pages(chunk, pages, rs, re);
+	pcpu_unmap_pages(chunk, pages, page_start, page_end);
 
 	/* no need to flush tlb, vmalloc will handle it lazily */
 
-	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
-		pcpu_free_pages(chunk, pages, rs, re);
+	pcpu_free_pages(chunk, pages, page_start, page_end);
 }
 
 static struct pcpu_chunk *pcpu_create_chunk(void)
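Two things are worth noting in the pcpu-vm.c changes above. First, the new contract: callers guarantee that the [page_start, page_end) range passed to pcpu_populate_chunk() is entirely unpopulated (and fully populated for pcpu_depopulate_chunk()), which is why the old partial-progress unwinding via free_end/unmap_end and the err_unmap/err_free labels collapses into a single free-what-was-just-allocated error path. Second, the pcpu_for_each_unpop_region()/pcpu_for_each_pop_region() iterators the callers now use are defined in mm/percpu.c and are not part of this diff; judging from the pcpu_next_unpop()/pcpu_next_pop() calls visible in the mm/percpu.c hunks below, they plausibly look something like this sketch (not the verbatim kernel macros):

	/* Sketch only; the real macros live in mm/percpu.c. pcpu_next_unpop()
	 * and pcpu_next_pop() advance *rs/*re to the bounds of the next
	 * unpopulated/populated run of chunk->populated below 'end'. */
	#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		     \
		for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end));  \
		     (rs) < (re);						     \
		     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

	#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		     \
		for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));    \
		     (rs) < (re);						     \
		     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))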
diff --git a/mm/percpu.c b/mm/percpu.c
index 6087384f6ef0..fe5de97d7caa 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -807,20 +807,17 @@ area_found:
 	page_start = PFN_DOWN(off);
 	page_end = PFN_UP(off + size);
 
-	rs = page_start;
-	pcpu_next_pop(chunk, &rs, &re, page_end);
-
-	if (rs != page_start || re != page_end) {
+	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 		WARN_ON(chunk->immutable);
 
-		if (pcpu_populate_chunk(chunk, off, size)) {
+		if (pcpu_populate_chunk(chunk, rs, re)) {
			spin_lock_irqsave(&pcpu_lock, flags);
			pcpu_free_area(chunk, off);
			err = "failed to populate";
			goto fail_unlock;
		}
 
-		bitmap_set(chunk->populated, page_start, page_end - page_start);
+		bitmap_set(chunk->populated, rs, re - rs);
 	}
 
 	mutex_unlock(&pcpu_alloc_mutex);
@@ -919,12 +916,12 @@ static void pcpu_reclaim(struct work_struct *work)
 	spin_unlock_irq(&pcpu_lock);
 
 	list_for_each_entry_safe(chunk, next, &todo, list) {
-		int rs = 0, re;
-
-		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(pcpu_unit_size));
-		if (rs || re != PFN_UP(pcpu_unit_size))
-			pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
+		int rs, re;
 
+		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
+			pcpu_depopulate_chunk(chunk, rs, re);
+			bitmap_clear(chunk->populated, rs, re - rs);
+		}
 		pcpu_destroy_chunk(chunk);
 	}
 
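One last detail in the reclaim hunk: the loop bound changes from the byte-based PFN_UP(pcpu_unit_size) to the page-based pcpu_unit_pages, since the new iterator works in page indices. A unit is always a whole number of pages (pcpu_unit_size is pcpu_unit_pages shifted by PAGE_SHIFT), so the two bounds agree; a quick standalone check with hypothetical values, not kernel code:

	#include <assert.h>

	#define PAGE_SHIFT	12			/* assumed 4K pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

	int main(void)
	{
		/* example values; in the kernel pcpu_unit_size is
		 * pcpu_unit_pages << PAGE_SHIFT, i.e. page aligned */
		unsigned long pcpu_unit_pages = 32;
		unsigned long pcpu_unit_size  = pcpu_unit_pages << PAGE_SHIFT;

		assert(PFN_UP(pcpu_unit_size) == pcpu_unit_pages);
		return 0;
	}

The reclaim loop also now clears chunk->populated per depopulated region, keeping the bitmap in sync with the per-region depopulate calls it replaces.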