author		Tejun Heo <tj@kernel.org>	2014-09-02 14:46:01 -0400
committer	Tejun Heo <tj@kernel.org>	2014-09-02 14:46:01 -0400
commit		dca496451bddea9aa87b7510dc2eb413d1a19dfd (patch)
tree		c1d926663786fd919d3a9d19659ce80696d7558f /mm
parent		cdb4cba5a3c9fa27240d04f4f8dad316b10d995b (diff)
percpu: move common parts out of pcpu_[de]populate_chunk()
percpu-vm and percpu-km implement separate versions of
pcpu_[de]populate_chunk(), and some parts which are or should be common
currently live in the specific implementations.  Make the following
changes.

* Allocated area clearing is moved from the pcpu_populate_chunk()
  implementations to pcpu_alloc().  This makes percpu-km's version a
  noop.

* Quick exit tests in pcpu_[de]populate_chunk() of percpu-vm are moved
  to their respective callers so that they are applied to percpu-km
  too.  This doesn't make any meaningful difference as both functions
  are noops for percpu-km; however, this is more consistent and will
  help implementing atomic allocation support.

Signed-off-by: Tejun Heo <tj@kernel.org>
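For readers unfamiliar with the populated-bitmap helpers, the following is a
minimal userspace sketch of the quick-exit test this patch hoists out of
pcpu_populate_chunk() into its callers (pcpu_alloc() and pcpu_reclaim()).
The populated[] array and next_pop() helper are simplified stand-ins for
chunk->populated and the kernel's pcpu_next_pop(); they are illustrative
assumptions, not the kernel implementation.  The idea is the same: if the
first populated region starting at page_start already spans the whole
requested range, the populate step can be skipped.

/*
 * Userspace sketch of the quick-exit check (simplified analogue,
 * not the kernel code).
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 16

static bool populated[NR_PAGES];	/* stand-in for chunk->populated */

/* Find the first populated region at or after *rs, bounded by end. */
static void next_pop(int *rs, int *re, int end)
{
	while (*rs < end && !populated[*rs])
		(*rs)++;
	*re = *rs;
	while (*re < end && populated[*re])
		(*re)++;
}

int main(void)
{
	int page_start = 2, page_end = 6;
	int rs, re, i;

	/* pages 2..5 are already populated -> populate can be skipped */
	for (i = 2; i < 6; i++)
		populated[i] = true;

	rs = page_start;
	next_pop(&rs, &re, page_end);

	if (rs != page_start || re != page_end)
		printf("range [%d,%d) needs populating\n",
		       page_start, page_end);
	else
		printf("range [%d,%d) already populated, skip\n",
		       page_start, page_end);
	return 0;
}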
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu-km.c	 5
-rw-r--r--	mm/percpu-vm.c	27
-rw-r--r--	mm/percpu.c	39
3 files changed, 31 insertions(+), 40 deletions(-)
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 89633fefc6a2..9a9096f08867 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -35,11 +35,6 @@
 
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 {
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
-
 	return 0;
 }
 
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index d9e0b615492e..edf709793318 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -265,7 +265,7 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * @size: size of the area to populate in bytes
  *
  * For each cpu, populate and map pages [@page_start,@page_end) into
- * @chunk.  The area is cleared on return.
+ * @chunk.
  *
  * CONTEXT:
  * pcpu_alloc_mutex, does GFP_KERNEL allocation.
@@ -276,18 +276,8 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	int page_end = PFN_UP(off + size);
 	int free_end = page_start, unmap_end = page_start;
 	struct page **pages;
-	unsigned int cpu;
 	int rs, re, rc;
 
-	/* quick path, check whether all pages are already there */
-	rs = page_start;
-	pcpu_next_pop(chunk, &rs, &re, page_end);
-	if (rs == page_start && re == page_end)
-		goto clear;
-
-	/* need to allocate and map pages, this chunk can't be immutable */
-	WARN_ON(chunk->immutable);
-
 	pages = pcpu_get_pages(chunk);
 	if (!pages)
 		return -ENOMEM;
@@ -308,10 +298,6 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	}
 	pcpu_post_map_flush(chunk, page_start, page_end);
 
-	bitmap_set(chunk->populated, page_start, page_end - page_start);
-clear:
-	for_each_possible_cpu(cpu)
-		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
 	return 0;
 
 err_unmap:
@@ -345,15 +331,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	struct page **pages;
 	int rs, re;
 
-	/* quick path, check whether it's empty already */
-	rs = page_start;
-	pcpu_next_unpop(chunk, &rs, &re, page_end);
-	if (rs == page_start && re == page_end)
-		return;
-
-	/* immutable chunks can't be depopulated */
-	WARN_ON(chunk->immutable);
-
 	/*
 	 * If control reaches here, there must have been at least one
 	 * successful population attempt so the temp pages array must
@@ -372,8 +349,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 
 	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
 		pcpu_free_pages(chunk, pages, rs, re);
-
-	bitmap_clear(chunk->populated, page_start, page_end - page_start);
 }
 
 static struct pcpu_chunk *pcpu_create_chunk(void)
diff --git a/mm/percpu.c b/mm/percpu.c
index da997f9800bd..6087384f6ef0 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -709,7 +709,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
 	const char *err;
-	int slot, off, new_alloc;
+	int slot, off, new_alloc, cpu;
+	int page_start, page_end, rs, re;
 	unsigned long flags;
 	void __percpu *ptr;
 
@@ -802,17 +803,32 @@ restart:
 area_found:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 
-	/* populate, map and clear the area */
-	if (pcpu_populate_chunk(chunk, off, size)) {
-		spin_lock_irqsave(&pcpu_lock, flags);
-		pcpu_free_area(chunk, off);
-		err = "failed to populate";
-		goto fail_unlock;
+	/* populate if not all pages are already there */
+	page_start = PFN_DOWN(off);
+	page_end = PFN_UP(off + size);
+
+	rs = page_start;
+	pcpu_next_pop(chunk, &rs, &re, page_end);
+
+	if (rs != page_start || re != page_end) {
+		WARN_ON(chunk->immutable);
+
+		if (pcpu_populate_chunk(chunk, off, size)) {
+			spin_lock_irqsave(&pcpu_lock, flags);
+			pcpu_free_area(chunk, off);
+			err = "failed to populate";
+			goto fail_unlock;
+		}
+
+		bitmap_set(chunk->populated, page_start, page_end - page_start);
 	}
 
 	mutex_unlock(&pcpu_alloc_mutex);
 
-	/* return address relative to base address */
+	/* clear the areas and return address relative to base address */
+	for_each_possible_cpu(cpu)
+		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
+
 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
 	kmemleak_alloc_percpu(ptr, size);
 	return ptr;
@@ -903,7 +919,12 @@ static void pcpu_reclaim(struct work_struct *work)
 	spin_unlock_irq(&pcpu_lock);
 
 	list_for_each_entry_safe(chunk, next, &todo, list) {
-		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
+		int rs = 0, re;
+
+		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(pcpu_unit_size));
+		if (rs || re != PFN_UP(pcpu_unit_size))
+			pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
+
 		pcpu_destroy_chunk(chunk);
 	}
 