author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-10 07:26:02 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-10 07:26:02 -0400
commit     c798360cd1438090d51eeaa8e67985da11362eba (patch)
tree       0107d3b9ee7476264c3357287787d393545bd2d9 /mm
parent     b211e9d7c861bdb37b86d6384da9edfb80949ceb (diff)
parent     6ae833c7fe0c6ef1f0ab13cc775da230d6f4c256 (diff)
Merge branch 'for-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu updates from Tejun Heo:
 "A lot of activities on percpu front.  Notable changes are...

 - percpu allocator now can take @gfp.  If @gfp doesn't contain
   GFP_KERNEL, it tries to allocate from what's already available to the
   allocator and a work item tries to keep the reserve around certain
   level so that these atomic allocations usually succeed.

   This will replace the ad-hoc percpu memory pool used by blk-throttle
   and also be used by the planned blkcg support for writeback IOs.

   Please note that I noticed a bug in how @gfp is interpreted while
   preparing this pull request and applied the fix 6ae833c7fe0c
   ("percpu: fix how @gfp is interpreted by the percpu allocator") just
   now.

 - percpu_ref now uses longs for percpu and global counters instead of
   ints.  It leads to more sparse packing of the percpu counters on
   64bit machines but the overhead should be negligible and this allows
   using percpu_ref for refcnting pages and in-memory objects directly.

 - The switching between percpu and single counter modes of a
   percpu_ref is made independent of putting the base ref and a
   percpu_ref can now optionally be initialized in single or killed
   mode.  This allows avoiding percpu shutdown latency for cases where
   the refcounted objects may be synchronously created and destroyed in
   rapid succession with only a fraction of them reaching fully
   operational status (SCSI probing does this when combined with blk-mq
   support).  It's also planned to be used to implement forced single
   mode to detect underflow more timely for debugging.

 There's a separate branch percpu/for-3.18-consistent-ops which cleans
 up the duplicate percpu accessors.  That branch causes a number of
 conflicts with s390 and other trees.  I'll send a separate pull
 request w/ resolutions once other branches are merged"

* 'for-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (33 commits)
  percpu: fix how @gfp is interpreted by the percpu allocator
  blk-mq, percpu_ref: start q->mq_usage_counter in atomic mode
  percpu_ref: make INIT_ATOMIC and switch_to_atomic() sticky
  percpu_ref: add PERCPU_REF_INIT_* flags
  percpu_ref: decouple switching to percpu mode and reinit
  percpu_ref: decouple switching to atomic mode and killing
  percpu_ref: add PCPU_REF_DEAD
  percpu_ref: rename things to prepare for decoupling percpu/atomic mode switch
  percpu_ref: replace pcpu_ prefix with percpu_
  percpu_ref: minor code and comment updates
  percpu_ref: relocate percpu_ref_reinit()
  Revert "blk-mq, percpu_ref: implement a kludge for SCSI blk-mq stall during probe"
  Revert "percpu: free percpu allocation info for uniprocessor system"
  percpu-refcount: make percpu_ref based on longs instead of ints
  percpu-refcount: improve WARN messages
  percpu: fix locking regression in the failure path of pcpu_alloc()
  percpu-refcount: add @gfp to percpu_ref_init()
  proportions: add @gfp to init functions
  percpu_counter: add @gfp to percpu_counter_init()
  percpu_counter: make percpu_counters_lock irq-safe
  ...
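The caller-visible change in the mm/ conversions below is the new @gfp argument
on the counter init helpers. A minimal sketch of how a hypothetical user would
adopt it follows; demo_stats, demo_stats_init and demo_stats_exit are
illustrative names only, and percpu_counter_destroy() is assumed from the
existing percpu_counter API (it is not part of this diff):

        #include <linux/percpu_counter.h>
        #include <linux/gfp.h>

        /* Hypothetical object, used only for illustration. */
        struct demo_stats {
                struct percpu_counter events;
        };

        static int demo_stats_init(struct demo_stats *s)
        {
                /*
                 * percpu_counter_init() now takes @gfp as its third
                 * argument; process-context callers simply pass
                 * GFP_KERNEL, as the mm/ conversions below do.
                 */
                return percpu_counter_init(&s->events, 0, GFP_KERNEL);
        }

        static void demo_stats_exit(struct demo_stats *s)
        {
                percpu_counter_destroy(&s->events);
        }
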
Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c     |    4
-rw-r--r--  mm/mmap.c            |    2
-rw-r--r--  mm/nommu.c           |    2
-rw-r--r--  mm/page-writeback.c  |    2
-rw-r--r--  mm/percpu-km.c       |   16
-rw-r--r--  mm/percpu-vm.c       |  162
-rw-r--r--  mm/percpu.c          |  526
-rw-r--r--  mm/shmem.c           |    2
8 files changed, 477 insertions, 239 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index b27714f1b40f..12a992b62576 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -455,7 +455,7 @@ int bdi_init(struct backing_dev_info *bdi)
455 bdi_wb_init(&bdi->wb, bdi); 455 bdi_wb_init(&bdi->wb, bdi);
456 456
457 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) { 457 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
458 err = percpu_counter_init(&bdi->bdi_stat[i], 0); 458 err = percpu_counter_init(&bdi->bdi_stat[i], 0, GFP_KERNEL);
459 if (err) 459 if (err)
460 goto err; 460 goto err;
461 } 461 }
@@ -470,7 +470,7 @@ int bdi_init(struct backing_dev_info *bdi)
470 bdi->write_bandwidth = INIT_BW; 470 bdi->write_bandwidth = INIT_BW;
471 bdi->avg_write_bandwidth = INIT_BW; 471 bdi->avg_write_bandwidth = INIT_BW;
472 472
473 err = fprop_local_init_percpu(&bdi->completions); 473 err = fprop_local_init_percpu(&bdi->completions, GFP_KERNEL);
474 474
475 if (err) { 475 if (err) {
476err: 476err:
diff --git a/mm/mmap.c b/mm/mmap.c
index 16d19b48e2ad..93d28c7e5420 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3202,7 +3202,7 @@ void __init mmap_init(void)
3202{ 3202{
3203 int ret; 3203 int ret;
3204 3204
3205 ret = percpu_counter_init(&vm_committed_as, 0); 3205 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3206 VM_BUG_ON(ret); 3206 VM_BUG_ON(ret);
3207} 3207}
3208 3208
diff --git a/mm/nommu.c b/mm/nommu.c
index a881d9673c6b..bd1808e194a7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -539,7 +539,7 @@ void __init mmap_init(void)
539{ 539{
540 int ret; 540 int ret;
541 541
542 ret = percpu_counter_init(&vm_committed_as, 0); 542 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
543 VM_BUG_ON(ret); 543 VM_BUG_ON(ret);
544 vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC); 544 vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
545} 545}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 35ca7102d421..ff24c9d83112 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1777,7 +1777,7 @@ void __init page_writeback_init(void)
1777 writeback_set_ratelimit(); 1777 writeback_set_ratelimit();
1778 register_cpu_notifier(&ratelimit_nb); 1778 register_cpu_notifier(&ratelimit_nb);
1779 1779
1780 fprop_global_init(&writeout_completions); 1780 fprop_global_init(&writeout_completions, GFP_KERNEL);
1781} 1781}
1782 1782
1783/** 1783/**
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 89633fefc6a2..10e3d0b8a86d 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -33,17 +33,14 @@
33 33
34#include <linux/log2.h> 34#include <linux/log2.h>
35 35
36static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) 36static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
37 int page_start, int page_end)
37{ 38{
38 unsigned int cpu;
39
40 for_each_possible_cpu(cpu)
41 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
42
43 return 0; 39 return 0;
44} 40}
45 41
46static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size) 42static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
43 int page_start, int page_end)
47{ 44{
48 /* nada */ 45 /* nada */
49} 46}
@@ -70,6 +67,11 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
70 67
71 chunk->data = pages; 68 chunk->data = pages;
72 chunk->base_addr = page_address(pages) - pcpu_group_offsets[0]; 69 chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
70
71 spin_lock_irq(&pcpu_lock);
72 pcpu_chunk_populated(chunk, 0, nr_pages);
73 spin_unlock_irq(&pcpu_lock);
74
73 return chunk; 75 return chunk;
74} 76}
75 77
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 51108165f829..538998a137d2 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -20,46 +20,25 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
20} 20}
21 21
22/** 22/**
23 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap 23 * pcpu_get_pages - get temp pages array
24 * @chunk: chunk of interest 24 * @chunk: chunk of interest
25 * @bitmapp: output parameter for bitmap
26 * @may_alloc: may allocate the array
27 * 25 *
28 * Returns pointer to array of pointers to struct page and bitmap, 26 * Returns pointer to array of pointers to struct page which can be indexed
29 * both of which can be indexed with pcpu_page_idx(). The returned 27 * with pcpu_page_idx(). Note that there is only one array and accesses
30 * array is cleared to zero and *@bitmapp is copied from 28 * should be serialized by pcpu_alloc_mutex.
31 * @chunk->populated. Note that there is only one array and bitmap
32 * and access exclusion is the caller's responsibility.
33 *
34 * CONTEXT:
35 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
36 * Otherwise, don't care.
37 * 29 *
38 * RETURNS: 30 * RETURNS:
39 * Pointer to temp pages array on success, NULL on failure. 31 * Pointer to temp pages array on success.
40 */ 32 */
41static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk, 33static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc)
42 unsigned long **bitmapp,
43 bool may_alloc)
44{ 34{
45 static struct page **pages; 35 static struct page **pages;
46 static unsigned long *bitmap;
47 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); 36 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
48 size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
49 sizeof(unsigned long);
50
51 if (!pages || !bitmap) {
52 if (may_alloc && !pages)
53 pages = pcpu_mem_zalloc(pages_size);
54 if (may_alloc && !bitmap)
55 bitmap = pcpu_mem_zalloc(bitmap_size);
56 if (!pages || !bitmap)
57 return NULL;
58 }
59 37
60 bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); 38 lockdep_assert_held(&pcpu_alloc_mutex);
61 39
62 *bitmapp = bitmap; 40 if (!pages)
41 pages = pcpu_mem_zalloc(pages_size);
63 return pages; 42 return pages;
64} 43}
65 44
@@ -67,7 +46,6 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
67 * pcpu_free_pages - free pages which were allocated for @chunk 46 * pcpu_free_pages - free pages which were allocated for @chunk
68 * @chunk: chunk pages were allocated for 47 * @chunk: chunk pages were allocated for
69 * @pages: array of pages to be freed, indexed by pcpu_page_idx() 48 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
70 * @populated: populated bitmap
71 * @page_start: page index of the first page to be freed 49 * @page_start: page index of the first page to be freed
72 * @page_end: page index of the last page to be freed + 1 50 * @page_end: page index of the last page to be freed + 1
73 * 51 *
@@ -75,8 +53,7 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
75 * The pages were allocated for @chunk. 53 * The pages were allocated for @chunk.
76 */ 54 */
77static void pcpu_free_pages(struct pcpu_chunk *chunk, 55static void pcpu_free_pages(struct pcpu_chunk *chunk,
78 struct page **pages, unsigned long *populated, 56 struct page **pages, int page_start, int page_end)
79 int page_start, int page_end)
80{ 57{
81 unsigned int cpu; 58 unsigned int cpu;
82 int i; 59 int i;
@@ -95,7 +72,6 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
95 * pcpu_alloc_pages - allocates pages for @chunk 72 * pcpu_alloc_pages - allocates pages for @chunk
96 * @chunk: target chunk 73 * @chunk: target chunk
97 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() 74 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
98 * @populated: populated bitmap
99 * @page_start: page index of the first page to be allocated 75 * @page_start: page index of the first page to be allocated
100 * @page_end: page index of the last page to be allocated + 1 76 * @page_end: page index of the last page to be allocated + 1
101 * 77 *
@@ -104,8 +80,7 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
104 * content of @pages and will pass it verbatim to pcpu_map_pages(). 80 * content of @pages and will pass it verbatim to pcpu_map_pages().
105 */ 81 */
106static int pcpu_alloc_pages(struct pcpu_chunk *chunk, 82static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
107 struct page **pages, unsigned long *populated, 83 struct page **pages, int page_start, int page_end)
108 int page_start, int page_end)
109{ 84{
110 const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; 85 const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
111 unsigned int cpu, tcpu; 86 unsigned int cpu, tcpu;
@@ -164,7 +139,6 @@ static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
164 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk 139 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
165 * @chunk: chunk of interest 140 * @chunk: chunk of interest
166 * @pages: pages array which can be used to pass information to free 141 * @pages: pages array which can be used to pass information to free
167 * @populated: populated bitmap
168 * @page_start: page index of the first page to unmap 142 * @page_start: page index of the first page to unmap
169 * @page_end: page index of the last page to unmap + 1 143 * @page_end: page index of the last page to unmap + 1
170 * 144 *
@@ -175,8 +149,7 @@ static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
175 * proper pre/post flush functions. 149 * proper pre/post flush functions.
176 */ 150 */
177static void pcpu_unmap_pages(struct pcpu_chunk *chunk, 151static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
178 struct page **pages, unsigned long *populated, 152 struct page **pages, int page_start, int page_end)
179 int page_start, int page_end)
180{ 153{
181 unsigned int cpu; 154 unsigned int cpu;
182 int i; 155 int i;
@@ -192,8 +165,6 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
192 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), 165 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
193 page_end - page_start); 166 page_end - page_start);
194 } 167 }
195
196 bitmap_clear(populated, page_start, page_end - page_start);
197} 168}
198 169
199/** 170/**
@@ -228,7 +199,6 @@ static int __pcpu_map_pages(unsigned long addr, struct page **pages,
228 * pcpu_map_pages - map pages into a pcpu_chunk 199 * pcpu_map_pages - map pages into a pcpu_chunk
229 * @chunk: chunk of interest 200 * @chunk: chunk of interest
230 * @pages: pages array containing pages to be mapped 201 * @pages: pages array containing pages to be mapped
231 * @populated: populated bitmap
232 * @page_start: page index of the first page to map 202 * @page_start: page index of the first page to map
233 * @page_end: page index of the last page to map + 1 203 * @page_end: page index of the last page to map + 1
234 * 204 *
@@ -236,13 +206,11 @@ static int __pcpu_map_pages(unsigned long addr, struct page **pages,
236 * caller is responsible for calling pcpu_post_map_flush() after all 206 * caller is responsible for calling pcpu_post_map_flush() after all
237 * mappings are complete. 207 * mappings are complete.
238 * 208 *
239 * This function is responsible for setting corresponding bits in 209 * This function is responsible for setting up whatever is necessary for
240 * @chunk->populated bitmap and whatever is necessary for reverse 210 * reverse lookup (addr -> chunk).
241 * lookup (addr -> chunk).
242 */ 211 */
243static int pcpu_map_pages(struct pcpu_chunk *chunk, 212static int pcpu_map_pages(struct pcpu_chunk *chunk,
244 struct page **pages, unsigned long *populated, 213 struct page **pages, int page_start, int page_end)
245 int page_start, int page_end)
246{ 214{
247 unsigned int cpu, tcpu; 215 unsigned int cpu, tcpu;
248 int i, err; 216 int i, err;
@@ -253,18 +221,12 @@ static int pcpu_map_pages(struct pcpu_chunk *chunk,
253 page_end - page_start); 221 page_end - page_start);
254 if (err < 0) 222 if (err < 0)
255 goto err; 223 goto err;
256 }
257 224
258 /* mapping successful, link chunk and mark populated */ 225 for (i = page_start; i < page_end; i++)
259 for (i = page_start; i < page_end; i++) {
260 for_each_possible_cpu(cpu)
261 pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], 226 pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
262 chunk); 227 chunk);
263 __set_bit(i, populated);
264 } 228 }
265
266 return 0; 229 return 0;
267
268err: 230err:
269 for_each_possible_cpu(tcpu) { 231 for_each_possible_cpu(tcpu) {
270 if (tcpu == cpu) 232 if (tcpu == cpu)
@@ -299,123 +261,69 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
299/** 261/**
300 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk 262 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
301 * @chunk: chunk of interest 263 * @chunk: chunk of interest
302 * @off: offset to the area to populate 264 * @page_start: the start page
303 * @size: size of the area to populate in bytes 265 * @page_end: the end page
304 * 266 *
305 * For each cpu, populate and map pages [@page_start,@page_end) into 267 * For each cpu, populate and map pages [@page_start,@page_end) into
306 * @chunk. The area is cleared on return. 268 * @chunk.
307 * 269 *
308 * CONTEXT: 270 * CONTEXT:
309 * pcpu_alloc_mutex, does GFP_KERNEL allocation. 271 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
310 */ 272 */
311static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) 273static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
274 int page_start, int page_end)
312{ 275{
313 int page_start = PFN_DOWN(off);
314 int page_end = PFN_UP(off + size);
315 int free_end = page_start, unmap_end = page_start;
316 struct page **pages; 276 struct page **pages;
317 unsigned long *populated;
318 unsigned int cpu;
319 int rs, re, rc;
320
321 /* quick path, check whether all pages are already there */
322 rs = page_start;
323 pcpu_next_pop(chunk, &rs, &re, page_end);
324 if (rs == page_start && re == page_end)
325 goto clear;
326 277
327 /* need to allocate and map pages, this chunk can't be immutable */ 278 pages = pcpu_get_pages(chunk);
328 WARN_ON(chunk->immutable);
329
330 pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
331 if (!pages) 279 if (!pages)
332 return -ENOMEM; 280 return -ENOMEM;
333 281
334 /* alloc and map */ 282 if (pcpu_alloc_pages(chunk, pages, page_start, page_end))
335 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 283 return -ENOMEM;
336 rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
337 if (rc)
338 goto err_free;
339 free_end = re;
340 }
341 284
342 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 285 if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
343 rc = pcpu_map_pages(chunk, pages, populated, rs, re); 286 pcpu_free_pages(chunk, pages, page_start, page_end);
344 if (rc) 287 return -ENOMEM;
345 goto err_unmap;
346 unmap_end = re;
347 } 288 }
348 pcpu_post_map_flush(chunk, page_start, page_end); 289 pcpu_post_map_flush(chunk, page_start, page_end);
349 290
350 /* commit new bitmap */
351 bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
352clear:
353 for_each_possible_cpu(cpu)
354 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
355 return 0; 291 return 0;
356
357err_unmap:
358 pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
359 pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
360 pcpu_unmap_pages(chunk, pages, populated, rs, re);
361 pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
362err_free:
363 pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
364 pcpu_free_pages(chunk, pages, populated, rs, re);
365 return rc;
366} 292}
367 293
368/** 294/**
369 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk 295 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
370 * @chunk: chunk to depopulate 296 * @chunk: chunk to depopulate
371 * @off: offset to the area to depopulate 297 * @page_start: the start page
372 * @size: size of the area to depopulate in bytes 298 * @page_end: the end page
373 * 299 *
374 * For each cpu, depopulate and unmap pages [@page_start,@page_end) 300 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
375 * from @chunk. If @flush is true, vcache is flushed before unmapping 301 * from @chunk.
376 * and tlb after.
377 * 302 *
378 * CONTEXT: 303 * CONTEXT:
379 * pcpu_alloc_mutex. 304 * pcpu_alloc_mutex.
380 */ 305 */
381static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size) 306static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
307 int page_start, int page_end)
382{ 308{
383 int page_start = PFN_DOWN(off);
384 int page_end = PFN_UP(off + size);
385 struct page **pages; 309 struct page **pages;
386 unsigned long *populated;
387 int rs, re;
388
389 /* quick path, check whether it's empty already */
390 rs = page_start;
391 pcpu_next_unpop(chunk, &rs, &re, page_end);
392 if (rs == page_start && re == page_end)
393 return;
394
395 /* immutable chunks can't be depopulated */
396 WARN_ON(chunk->immutable);
397 310
398 /* 311 /*
399 * If control reaches here, there must have been at least one 312 * If control reaches here, there must have been at least one
400 * successful population attempt so the temp pages array must 313 * successful population attempt so the temp pages array must
401 * be available now. 314 * be available now.
402 */ 315 */
403 pages = pcpu_get_pages_and_bitmap(chunk, &populated, false); 316 pages = pcpu_get_pages(chunk);
404 BUG_ON(!pages); 317 BUG_ON(!pages);
405 318
406 /* unmap and free */ 319 /* unmap and free */
407 pcpu_pre_unmap_flush(chunk, page_start, page_end); 320 pcpu_pre_unmap_flush(chunk, page_start, page_end);
408 321
409 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) 322 pcpu_unmap_pages(chunk, pages, page_start, page_end);
410 pcpu_unmap_pages(chunk, pages, populated, rs, re);
411 323
412 /* no need to flush tlb, vmalloc will handle it lazily */ 324 /* no need to flush tlb, vmalloc will handle it lazily */
413 325
414 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) 326 pcpu_free_pages(chunk, pages, page_start, page_end);
415 pcpu_free_pages(chunk, pages, populated, rs, re);
416
417 /* commit new bitmap */
418 bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
419} 327}
420 328
421static struct pcpu_chunk *pcpu_create_chunk(void) 329static struct pcpu_chunk *pcpu_create_chunk(void)
diff --git a/mm/percpu.c b/mm/percpu.c
index da997f9800bd..014bab65e0ff 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -76,6 +76,10 @@
76 76
77#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ 77#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
78#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ 78#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
79#define PCPU_ATOMIC_MAP_MARGIN_LOW 32
80#define PCPU_ATOMIC_MAP_MARGIN_HIGH 64
81#define PCPU_EMPTY_POP_PAGES_LOW 2
82#define PCPU_EMPTY_POP_PAGES_HIGH 4
79 83
80#ifdef CONFIG_SMP 84#ifdef CONFIG_SMP
81/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 85/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
@@ -102,12 +106,16 @@ struct pcpu_chunk {
102 int free_size; /* free bytes in the chunk */ 106 int free_size; /* free bytes in the chunk */
103 int contig_hint; /* max contiguous size hint */ 107 int contig_hint; /* max contiguous size hint */
104 void *base_addr; /* base address of this chunk */ 108 void *base_addr; /* base address of this chunk */
109
105 int map_used; /* # of map entries used before the sentry */ 110 int map_used; /* # of map entries used before the sentry */
106 int map_alloc; /* # of map entries allocated */ 111 int map_alloc; /* # of map entries allocated */
107 int *map; /* allocation map */ 112 int *map; /* allocation map */
113 struct work_struct map_extend_work;/* async ->map[] extension */
114
108 void *data; /* chunk data */ 115 void *data; /* chunk data */
109 int first_free; /* no free below this */ 116 int first_free; /* no free below this */
110 bool immutable; /* no [de]population allowed */ 117 bool immutable; /* no [de]population allowed */
118 int nr_populated; /* # of populated pages */
111 unsigned long populated[]; /* populated bitmap */ 119 unsigned long populated[]; /* populated bitmap */
112}; 120};
113 121
@@ -151,38 +159,33 @@ static struct pcpu_chunk *pcpu_first_chunk;
151static struct pcpu_chunk *pcpu_reserved_chunk; 159static struct pcpu_chunk *pcpu_reserved_chunk;
152static int pcpu_reserved_chunk_limit; 160static int pcpu_reserved_chunk_limit;
153 161
162static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
163static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
164
165static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
166
154/* 167/*
155 * Synchronization rules. 168 * The number of empty populated pages, protected by pcpu_lock. The
156 * 169 * reserved chunk doesn't contribute to the count.
157 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
158 * protects allocation/reclaim paths, chunks, populated bitmap and
159 * vmalloc mapping. The latter is a spinlock and protects the index
160 * data structures - chunk slots, chunks and area maps in chunks.
161 *
162 * During allocation, pcpu_alloc_mutex is kept locked all the time and
163 * pcpu_lock is grabbed and released as necessary. All actual memory
164 * allocations are done using GFP_KERNEL with pcpu_lock released. In
165 * general, percpu memory can't be allocated with irq off but
166 * irqsave/restore are still used in alloc path so that it can be used
167 * from early init path - sched_init() specifically.
168 *
169 * Free path accesses and alters only the index data structures, so it
170 * can be safely called from atomic context. When memory needs to be
171 * returned to the system, free path schedules reclaim_work which
172 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
173 * reclaimed, release both locks and frees the chunks. Note that it's
174 * necessary to grab both locks to remove a chunk from circulation as
175 * allocation path might be referencing the chunk with only
176 * pcpu_alloc_mutex locked.
177 */ 170 */
178static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */ 171static int pcpu_nr_empty_pop_pages;
179static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
180 172
181static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ 173/*
174 * Balance work is used to populate or destroy chunks asynchronously. We
175 * try to keep the number of populated free pages between
176 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
177 * empty chunk.
178 */
179static void pcpu_balance_workfn(struct work_struct *work);
180static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
181static bool pcpu_async_enabled __read_mostly;
182static bool pcpu_atomic_alloc_failed;
182 183
183/* reclaim work to release fully free chunks, scheduled from free path */ 184static void pcpu_schedule_balance_work(void)
184static void pcpu_reclaim(struct work_struct *work); 185{
185static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); 186 if (pcpu_async_enabled)
187 schedule_work(&pcpu_balance_work);
188}
186 189
187static bool pcpu_addr_in_first_chunk(void *addr) 190static bool pcpu_addr_in_first_chunk(void *addr)
188{ 191{
@@ -315,6 +318,38 @@ static void pcpu_mem_free(void *ptr, size_t size)
315} 318}
316 319
317/** 320/**
321 * pcpu_count_occupied_pages - count the number of pages an area occupies
322 * @chunk: chunk of interest
323 * @i: index of the area in question
324 *
325 * Count the number of pages chunk's @i'th area occupies. When the area's
326 * start and/or end address isn't aligned to page boundary, the straddled
327 * page is included in the count iff the rest of the page is free.
328 */
329static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
330{
331 int off = chunk->map[i] & ~1;
332 int end = chunk->map[i + 1] & ~1;
333
334 if (!PAGE_ALIGNED(off) && i > 0) {
335 int prev = chunk->map[i - 1];
336
337 if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
338 off = round_down(off, PAGE_SIZE);
339 }
340
341 if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
342 int next = chunk->map[i + 1];
343 int nend = chunk->map[i + 2] & ~1;
344
345 if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
346 end = round_up(end, PAGE_SIZE);
347 }
348
349 return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
350}
351
352/**
318 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot 353 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
319 * @chunk: chunk of interest 354 * @chunk: chunk of interest
320 * @oslot: the previous slot it was on 355 * @oslot: the previous slot it was on
@@ -342,9 +377,14 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
342/** 377/**
343 * pcpu_need_to_extend - determine whether chunk area map needs to be extended 378 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
344 * @chunk: chunk of interest 379 * @chunk: chunk of interest
380 * @is_atomic: the allocation context
345 * 381 *
346 * Determine whether area map of @chunk needs to be extended to 382 * Determine whether area map of @chunk needs to be extended. If
347 * accommodate a new allocation. 383 * @is_atomic, only the amount necessary for a new allocation is
384 * considered; however, async extension is scheduled if the left amount is
385 * low. If !@is_atomic, it aims for more empty space. Combined, this
386 * ensures that the map is likely to have enough available space to
387 * accomodate atomic allocations which can't extend maps directly.
348 * 388 *
349 * CONTEXT: 389 * CONTEXT:
350 * pcpu_lock. 390 * pcpu_lock.
@@ -353,15 +393,26 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
353 * New target map allocation length if extension is necessary, 0 393 * New target map allocation length if extension is necessary, 0
354 * otherwise. 394 * otherwise.
355 */ 395 */
356static int pcpu_need_to_extend(struct pcpu_chunk *chunk) 396static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
357{ 397{
358 int new_alloc; 398 int margin, new_alloc;
399
400 if (is_atomic) {
401 margin = 3;
402
403 if (chunk->map_alloc <
404 chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
405 pcpu_async_enabled)
406 schedule_work(&chunk->map_extend_work);
407 } else {
408 margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
409 }
359 410
360 if (chunk->map_alloc >= chunk->map_used + 3) 411 if (chunk->map_alloc >= chunk->map_used + margin)
361 return 0; 412 return 0;
362 413
363 new_alloc = PCPU_DFL_MAP_ALLOC; 414 new_alloc = PCPU_DFL_MAP_ALLOC;
364 while (new_alloc < chunk->map_used + 3) 415 while (new_alloc < chunk->map_used + margin)
365 new_alloc *= 2; 416 new_alloc *= 2;
366 417
367 return new_alloc; 418 return new_alloc;
@@ -418,11 +469,76 @@ out_unlock:
418 return 0; 469 return 0;
419} 470}
420 471
472static void pcpu_map_extend_workfn(struct work_struct *work)
473{
474 struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
475 map_extend_work);
476 int new_alloc;
477
478 spin_lock_irq(&pcpu_lock);
479 new_alloc = pcpu_need_to_extend(chunk, false);
480 spin_unlock_irq(&pcpu_lock);
481
482 if (new_alloc)
483 pcpu_extend_area_map(chunk, new_alloc);
484}
485
486/**
487 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
488 * @chunk: chunk the candidate area belongs to
489 * @off: the offset to the start of the candidate area
490 * @this_size: the size of the candidate area
491 * @size: the size of the target allocation
492 * @align: the alignment of the target allocation
493 * @pop_only: only allocate from already populated region
494 *
495 * We're trying to allocate @size bytes aligned at @align. @chunk's area
496 * at @off sized @this_size is a candidate. This function determines
497 * whether the target allocation fits in the candidate area and returns the
498 * number of bytes to pad after @off. If the target area doesn't fit, -1
499 * is returned.
500 *
501 * If @pop_only is %true, this function only considers the already
502 * populated part of the candidate area.
503 */
504static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
505 int size, int align, bool pop_only)
506{
507 int cand_off = off;
508
509 while (true) {
510 int head = ALIGN(cand_off, align) - off;
511 int page_start, page_end, rs, re;
512
513 if (this_size < head + size)
514 return -1;
515
516 if (!pop_only)
517 return head;
518
519 /*
520 * If the first unpopulated page is beyond the end of the
521 * allocation, the whole allocation is populated;
522 * otherwise, retry from the end of the unpopulated area.
523 */
524 page_start = PFN_DOWN(head + off);
525 page_end = PFN_UP(head + off + size);
526
527 rs = page_start;
528 pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
529 if (rs >= page_end)
530 return head;
531 cand_off = re * PAGE_SIZE;
532 }
533}
534
421/** 535/**
422 * pcpu_alloc_area - allocate area from a pcpu_chunk 536 * pcpu_alloc_area - allocate area from a pcpu_chunk
423 * @chunk: chunk of interest 537 * @chunk: chunk of interest
424 * @size: wanted size in bytes 538 * @size: wanted size in bytes
425 * @align: wanted align 539 * @align: wanted align
540 * @pop_only: allocate only from the populated area
541 * @occ_pages_p: out param for the number of pages the area occupies
426 * 542 *
427 * Try to allocate @size bytes area aligned at @align from @chunk. 543 * Try to allocate @size bytes area aligned at @align from @chunk.
428 * Note that this function only allocates the offset. It doesn't 544 * Note that this function only allocates the offset. It doesn't
@@ -437,7 +553,8 @@ out_unlock:
437 * Allocated offset in @chunk on success, -1 if no matching area is 553 * Allocated offset in @chunk on success, -1 if no matching area is
438 * found. 554 * found.
439 */ 555 */
440static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) 556static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
557 bool pop_only, int *occ_pages_p)
441{ 558{
442 int oslot = pcpu_chunk_slot(chunk); 559 int oslot = pcpu_chunk_slot(chunk);
443 int max_contig = 0; 560 int max_contig = 0;
@@ -453,11 +570,11 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
453 if (off & 1) 570 if (off & 1)
454 continue; 571 continue;
455 572
456 /* extra for alignment requirement */
457 head = ALIGN(off, align) - off;
458
459 this_size = (p[1] & ~1) - off; 573 this_size = (p[1] & ~1) - off;
460 if (this_size < head + size) { 574
575 head = pcpu_fit_in_area(chunk, off, this_size, size, align,
576 pop_only);
577 if (head < 0) {
461 if (!seen_free) { 578 if (!seen_free) {
462 chunk->first_free = i; 579 chunk->first_free = i;
463 seen_free = true; 580 seen_free = true;
@@ -526,6 +643,7 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
526 chunk->free_size -= size; 643 chunk->free_size -= size;
527 *p |= 1; 644 *p |= 1;
528 645
646 *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
529 pcpu_chunk_relocate(chunk, oslot); 647 pcpu_chunk_relocate(chunk, oslot);
530 return off; 648 return off;
531 } 649 }
@@ -541,6 +659,7 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
541 * pcpu_free_area - free area to a pcpu_chunk 659 * pcpu_free_area - free area to a pcpu_chunk
542 * @chunk: chunk of interest 660 * @chunk: chunk of interest
543 * @freeme: offset of area to free 661 * @freeme: offset of area to free
662 * @occ_pages_p: out param for the number of pages the area occupies
544 * 663 *
545 * Free area starting from @freeme to @chunk. Note that this function 664 * Free area starting from @freeme to @chunk. Note that this function
546 * only modifies the allocation map. It doesn't depopulate or unmap 665 * only modifies the allocation map. It doesn't depopulate or unmap
@@ -549,7 +668,8 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
549 * CONTEXT: 668 * CONTEXT:
550 * pcpu_lock. 669 * pcpu_lock.
551 */ 670 */
552static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) 671static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
672 int *occ_pages_p)
553{ 673{
554 int oslot = pcpu_chunk_slot(chunk); 674 int oslot = pcpu_chunk_slot(chunk);
555 int off = 0; 675 int off = 0;
@@ -580,6 +700,8 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
580 *p = off &= ~1; 700 *p = off &= ~1;
581 chunk->free_size += (p[1] & ~1) - off; 701 chunk->free_size += (p[1] & ~1) - off;
582 702
703 *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
704
583 /* merge with next? */ 705 /* merge with next? */
584 if (!(p[1] & 1)) 706 if (!(p[1] & 1))
585 to_free++; 707 to_free++;
@@ -620,6 +742,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
620 chunk->map_used = 1; 742 chunk->map_used = 1;
621 743
622 INIT_LIST_HEAD(&chunk->list); 744 INIT_LIST_HEAD(&chunk->list);
745 INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
623 chunk->free_size = pcpu_unit_size; 746 chunk->free_size = pcpu_unit_size;
624 chunk->contig_hint = pcpu_unit_size; 747 chunk->contig_hint = pcpu_unit_size;
625 748
@@ -634,6 +757,50 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
634 pcpu_mem_free(chunk, pcpu_chunk_struct_size); 757 pcpu_mem_free(chunk, pcpu_chunk_struct_size);
635} 758}
636 759
760/**
761 * pcpu_chunk_populated - post-population bookkeeping
762 * @chunk: pcpu_chunk which got populated
763 * @page_start: the start page
764 * @page_end: the end page
765 *
766 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
767 * the bookkeeping information accordingly. Must be called after each
768 * successful population.
769 */
770static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
771 int page_start, int page_end)
772{
773 int nr = page_end - page_start;
774
775 lockdep_assert_held(&pcpu_lock);
776
777 bitmap_set(chunk->populated, page_start, nr);
778 chunk->nr_populated += nr;
779 pcpu_nr_empty_pop_pages += nr;
780}
781
782/**
783 * pcpu_chunk_depopulated - post-depopulation bookkeeping
784 * @chunk: pcpu_chunk which got depopulated
785 * @page_start: the start page
786 * @page_end: the end page
787 *
788 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
789 * Update the bookkeeping information accordingly. Must be called after
790 * each successful depopulation.
791 */
792static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
793 int page_start, int page_end)
794{
795 int nr = page_end - page_start;
796
797 lockdep_assert_held(&pcpu_lock);
798
799 bitmap_clear(chunk->populated, page_start, nr);
800 chunk->nr_populated -= nr;
801 pcpu_nr_empty_pop_pages -= nr;
802}
803
637/* 804/*
638 * Chunk management implementation. 805 * Chunk management implementation.
639 * 806 *
@@ -695,21 +862,23 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
695 * @size: size of area to allocate in bytes 862 * @size: size of area to allocate in bytes
696 * @align: alignment of area (max PAGE_SIZE) 863 * @align: alignment of area (max PAGE_SIZE)
697 * @reserved: allocate from the reserved chunk if available 864 * @reserved: allocate from the reserved chunk if available
865 * @gfp: allocation flags
698 * 866 *
699 * Allocate percpu area of @size bytes aligned at @align. 867 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
700 * 868 * contain %GFP_KERNEL, the allocation is atomic.
701 * CONTEXT:
702 * Does GFP_KERNEL allocation.
703 * 869 *
704 * RETURNS: 870 * RETURNS:
705 * Percpu pointer to the allocated area on success, NULL on failure. 871 * Percpu pointer to the allocated area on success, NULL on failure.
706 */ 872 */
707static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved) 873static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
874 gfp_t gfp)
708{ 875{
709 static int warn_limit = 10; 876 static int warn_limit = 10;
710 struct pcpu_chunk *chunk; 877 struct pcpu_chunk *chunk;
711 const char *err; 878 const char *err;
712 int slot, off, new_alloc; 879 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
880 int occ_pages = 0;
881 int slot, off, new_alloc, cpu, ret;
713 unsigned long flags; 882 unsigned long flags;
714 void __percpu *ptr; 883 void __percpu *ptr;
715 884
@@ -728,7 +897,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
728 return NULL; 897 return NULL;
729 } 898 }
730 899
731 mutex_lock(&pcpu_alloc_mutex);
732 spin_lock_irqsave(&pcpu_lock, flags); 900 spin_lock_irqsave(&pcpu_lock, flags);
733 901
734 /* serve reserved allocations from the reserved chunk if available */ 902 /* serve reserved allocations from the reserved chunk if available */
@@ -740,16 +908,18 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
740 goto fail_unlock; 908 goto fail_unlock;
741 } 909 }
742 910
743 while ((new_alloc = pcpu_need_to_extend(chunk))) { 911 while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
744 spin_unlock_irqrestore(&pcpu_lock, flags); 912 spin_unlock_irqrestore(&pcpu_lock, flags);
745 if (pcpu_extend_area_map(chunk, new_alloc) < 0) { 913 if (is_atomic ||
914 pcpu_extend_area_map(chunk, new_alloc) < 0) {
746 err = "failed to extend area map of reserved chunk"; 915 err = "failed to extend area map of reserved chunk";
747 goto fail_unlock_mutex; 916 goto fail;
748 } 917 }
749 spin_lock_irqsave(&pcpu_lock, flags); 918 spin_lock_irqsave(&pcpu_lock, flags);
750 } 919 }
751 920
752 off = pcpu_alloc_area(chunk, size, align); 921 off = pcpu_alloc_area(chunk, size, align, is_atomic,
922 &occ_pages);
753 if (off >= 0) 923 if (off >= 0)
754 goto area_found; 924 goto area_found;
755 925
@@ -764,13 +934,15 @@ restart:
764 if (size > chunk->contig_hint) 934 if (size > chunk->contig_hint)
765 continue; 935 continue;
766 936
767 new_alloc = pcpu_need_to_extend(chunk); 937 new_alloc = pcpu_need_to_extend(chunk, is_atomic);
768 if (new_alloc) { 938 if (new_alloc) {
939 if (is_atomic)
940 continue;
769 spin_unlock_irqrestore(&pcpu_lock, flags); 941 spin_unlock_irqrestore(&pcpu_lock, flags);
770 if (pcpu_extend_area_map(chunk, 942 if (pcpu_extend_area_map(chunk,
771 new_alloc) < 0) { 943 new_alloc) < 0) {
772 err = "failed to extend area map"; 944 err = "failed to extend area map";
773 goto fail_unlock_mutex; 945 goto fail;
774 } 946 }
775 spin_lock_irqsave(&pcpu_lock, flags); 947 spin_lock_irqsave(&pcpu_lock, flags);
776 /* 948 /*
@@ -780,74 +952,134 @@ restart:
780 goto restart; 952 goto restart;
781 } 953 }
782 954
783 off = pcpu_alloc_area(chunk, size, align); 955 off = pcpu_alloc_area(chunk, size, align, is_atomic,
956 &occ_pages);
784 if (off >= 0) 957 if (off >= 0)
785 goto area_found; 958 goto area_found;
786 } 959 }
787 } 960 }
788 961
789 /* hmmm... no space left, create a new chunk */
790 spin_unlock_irqrestore(&pcpu_lock, flags); 962 spin_unlock_irqrestore(&pcpu_lock, flags);
791 963
792 chunk = pcpu_create_chunk(); 964 /*
793 if (!chunk) { 965 * No space left. Create a new chunk. We don't want multiple
794 err = "failed to allocate new chunk"; 966 * tasks to create chunks simultaneously. Serialize and create iff
795 goto fail_unlock_mutex; 967 * there's still no empty chunk after grabbing the mutex.
968 */
969 if (is_atomic)
970 goto fail;
971
972 mutex_lock(&pcpu_alloc_mutex);
973
974 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
975 chunk = pcpu_create_chunk();
976 if (!chunk) {
977 mutex_unlock(&pcpu_alloc_mutex);
978 err = "failed to allocate new chunk";
979 goto fail;
980 }
981
982 spin_lock_irqsave(&pcpu_lock, flags);
983 pcpu_chunk_relocate(chunk, -1);
984 } else {
985 spin_lock_irqsave(&pcpu_lock, flags);
796 } 986 }
797 987
798 spin_lock_irqsave(&pcpu_lock, flags); 988 mutex_unlock(&pcpu_alloc_mutex);
799 pcpu_chunk_relocate(chunk, -1);
800 goto restart; 989 goto restart;
801 990
802area_found: 991area_found:
803 spin_unlock_irqrestore(&pcpu_lock, flags); 992 spin_unlock_irqrestore(&pcpu_lock, flags);
804 993
805 /* populate, map and clear the area */ 994 /* populate if not all pages are already there */
806 if (pcpu_populate_chunk(chunk, off, size)) { 995 if (!is_atomic) {
807 spin_lock_irqsave(&pcpu_lock, flags); 996 int page_start, page_end, rs, re;
808 pcpu_free_area(chunk, off); 997
809 err = "failed to populate"; 998 mutex_lock(&pcpu_alloc_mutex);
810 goto fail_unlock; 999
1000 page_start = PFN_DOWN(off);
1001 page_end = PFN_UP(off + size);
1002
1003 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
1004 WARN_ON(chunk->immutable);
1005
1006 ret = pcpu_populate_chunk(chunk, rs, re);
1007
1008 spin_lock_irqsave(&pcpu_lock, flags);
1009 if (ret) {
1010 mutex_unlock(&pcpu_alloc_mutex);
1011 pcpu_free_area(chunk, off, &occ_pages);
1012 err = "failed to populate";
1013 goto fail_unlock;
1014 }
1015 pcpu_chunk_populated(chunk, rs, re);
1016 spin_unlock_irqrestore(&pcpu_lock, flags);
1017 }
1018
1019 mutex_unlock(&pcpu_alloc_mutex);
811 } 1020 }
812 1021
813 mutex_unlock(&pcpu_alloc_mutex); 1022 if (chunk != pcpu_reserved_chunk)
1023 pcpu_nr_empty_pop_pages -= occ_pages;
1024
1025 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1026 pcpu_schedule_balance_work();
1027
1028 /* clear the areas and return address relative to base address */
1029 for_each_possible_cpu(cpu)
1030 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
814 1031
815 /* return address relative to base address */
816 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 1032 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
817 kmemleak_alloc_percpu(ptr, size); 1033 kmemleak_alloc_percpu(ptr, size);
818 return ptr; 1034 return ptr;
819 1035
820fail_unlock: 1036fail_unlock:
821 spin_unlock_irqrestore(&pcpu_lock, flags); 1037 spin_unlock_irqrestore(&pcpu_lock, flags);
822fail_unlock_mutex: 1038fail:
823 mutex_unlock(&pcpu_alloc_mutex); 1039 if (!is_atomic && warn_limit) {
824 if (warn_limit) { 1040 pr_warning("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n",
825 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, " 1041 size, align, is_atomic, err);
826 "%s\n", size, align, err);
827 dump_stack(); 1042 dump_stack();
828 if (!--warn_limit) 1043 if (!--warn_limit)
829 pr_info("PERCPU: limit reached, disable warning\n"); 1044 pr_info("PERCPU: limit reached, disable warning\n");
830 } 1045 }
1046 if (is_atomic) {
1047 /* see the flag handling in pcpu_blance_workfn() */
1048 pcpu_atomic_alloc_failed = true;
1049 pcpu_schedule_balance_work();
1050 }
831 return NULL; 1051 return NULL;
832} 1052}
833 1053
834/** 1054/**
835 * __alloc_percpu - allocate dynamic percpu area 1055 * __alloc_percpu_gfp - allocate dynamic percpu area
836 * @size: size of area to allocate in bytes 1056 * @size: size of area to allocate in bytes
837 * @align: alignment of area (max PAGE_SIZE) 1057 * @align: alignment of area (max PAGE_SIZE)
1058 * @gfp: allocation flags
838 * 1059 *
839 * Allocate zero-filled percpu area of @size bytes aligned at @align. 1060 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
840 * Might sleep. Might trigger writeouts. 1061 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
841 * 1062 * be called from any context but is a lot more likely to fail.
842 * CONTEXT:
843 * Does GFP_KERNEL allocation.
844 * 1063 *
845 * RETURNS: 1064 * RETURNS:
846 * Percpu pointer to the allocated area on success, NULL on failure. 1065 * Percpu pointer to the allocated area on success, NULL on failure.
847 */ 1066 */
1067void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1068{
1069 return pcpu_alloc(size, align, false, gfp);
1070}
1071EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1072
1073/**
1074 * __alloc_percpu - allocate dynamic percpu area
1075 * @size: size of area to allocate in bytes
1076 * @align: alignment of area (max PAGE_SIZE)
1077 *
1078 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1079 */
848void __percpu *__alloc_percpu(size_t size, size_t align) 1080void __percpu *__alloc_percpu(size_t size, size_t align)
849{ 1081{
850 return pcpu_alloc(size, align, false); 1082 return pcpu_alloc(size, align, false, GFP_KERNEL);
851} 1083}
852EXPORT_SYMBOL_GPL(__alloc_percpu); 1084EXPORT_SYMBOL_GPL(__alloc_percpu);
853 1085
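
As the new kernel-doc above notes, dropping GFP_KERNEL from @gfp turns this
into a non-blocking allocation served from already-populated pages. A hedged
sketch of a caller relying on that follows; demo_lock and demo_setup_stats are
illustrative names, and GFP_NOWAIT stands in for any mask that does not
include GFP_KERNEL:

        #include <linux/percpu.h>
        #include <linux/spinlock.h>
        #include <linux/gfp.h>

        static DEFINE_SPINLOCK(demo_lock);      /* illustrative lock */

        static unsigned long __percpu *demo_setup_stats(void)
        {
                unsigned long __percpu *stats;

                spin_lock(&demo_lock);
                /*
                 * No GFP_KERNEL in @gfp: the allocator only carves the area
                 * out of already-populated chunks and never sleeps, so this
                 * is safe with the spinlock held.  It can fail more often;
                 * the balance work refills the reserve of empty populated
                 * pages afterwards.
                 */
                stats = __alloc_percpu_gfp(sizeof(unsigned long),
                                           __alignof__(unsigned long),
                                           GFP_NOWAIT);
                spin_unlock(&demo_lock);

                /* caller checks for NULL and releases with free_percpu() */
                return stats;
        }
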
@@ -869,44 +1101,121 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
869 */ 1101 */
870void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1102void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
871{ 1103{
872 return pcpu_alloc(size, align, true); 1104 return pcpu_alloc(size, align, true, GFP_KERNEL);
873} 1105}
874 1106
875/** 1107/**
876 * pcpu_reclaim - reclaim fully free chunks, workqueue function 1108 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
877 * @work: unused 1109 * @work: unused
878 * 1110 *
879 * Reclaim all fully free chunks except for the first one. 1111 * Reclaim all fully free chunks except for the first one.
880 *
881 * CONTEXT:
882 * workqueue context.
883 */ 1112 */
884static void pcpu_reclaim(struct work_struct *work) 1113static void pcpu_balance_workfn(struct work_struct *work)
885{ 1114{
886 LIST_HEAD(todo); 1115 LIST_HEAD(to_free);
887 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1]; 1116 struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
888 struct pcpu_chunk *chunk, *next; 1117 struct pcpu_chunk *chunk, *next;
1118 int slot, nr_to_pop, ret;
889 1119
1120 /*
1121 * There's no reason to keep around multiple unused chunks and VM
1122 * areas can be scarce. Destroy all free chunks except for one.
1123 */
890 mutex_lock(&pcpu_alloc_mutex); 1124 mutex_lock(&pcpu_alloc_mutex);
891 spin_lock_irq(&pcpu_lock); 1125 spin_lock_irq(&pcpu_lock);
892 1126
893 list_for_each_entry_safe(chunk, next, head, list) { 1127 list_for_each_entry_safe(chunk, next, free_head, list) {
894 WARN_ON(chunk->immutable); 1128 WARN_ON(chunk->immutable);
895 1129
896 /* spare the first one */ 1130 /* spare the first one */
897 if (chunk == list_first_entry(head, struct pcpu_chunk, list)) 1131 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
898 continue; 1132 continue;
899 1133
900 list_move(&chunk->list, &todo); 1134 list_move(&chunk->list, &to_free);
901 } 1135 }
902 1136
903 spin_unlock_irq(&pcpu_lock); 1137 spin_unlock_irq(&pcpu_lock);
904 1138
905 list_for_each_entry_safe(chunk, next, &todo, list) { 1139 list_for_each_entry_safe(chunk, next, &to_free, list) {
906 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size); 1140 int rs, re;
1141
1142 pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1143 pcpu_depopulate_chunk(chunk, rs, re);
1144 spin_lock_irq(&pcpu_lock);
1145 pcpu_chunk_depopulated(chunk, rs, re);
1146 spin_unlock_irq(&pcpu_lock);
1147 }
907 pcpu_destroy_chunk(chunk); 1148 pcpu_destroy_chunk(chunk);
908 } 1149 }
909 1150
1151 /*
1152 * Ensure there are certain number of free populated pages for
1153 * atomic allocs. Fill up from the most packed so that atomic
1154 * allocs don't increase fragmentation. If atomic allocation
1155 * failed previously, always populate the maximum amount. This
1156 * should prevent atomic allocs larger than PAGE_SIZE from keeping
1157 * failing indefinitely; however, large atomic allocs are not
1158 * something we support properly and can be highly unreliable and
1159 * inefficient.
1160 */
1161retry_pop:
1162 if (pcpu_atomic_alloc_failed) {
1163 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1164 /* best effort anyway, don't worry about synchronization */
1165 pcpu_atomic_alloc_failed = false;
1166 } else {
1167 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1168 pcpu_nr_empty_pop_pages,
1169 0, PCPU_EMPTY_POP_PAGES_HIGH);
1170 }
1171
1172 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1173 int nr_unpop = 0, rs, re;
1174
1175 if (!nr_to_pop)
1176 break;
1177
1178 spin_lock_irq(&pcpu_lock);
1179 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1180 nr_unpop = pcpu_unit_pages - chunk->nr_populated;
1181 if (nr_unpop)
1182 break;
1183 }
1184 spin_unlock_irq(&pcpu_lock);
1185
1186 if (!nr_unpop)
1187 continue;
1188
1189 /* @chunk can't go away while pcpu_alloc_mutex is held */
1190 pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1191 int nr = min(re - rs, nr_to_pop);
1192
1193 ret = pcpu_populate_chunk(chunk, rs, rs + nr);
1194 if (!ret) {
1195 nr_to_pop -= nr;
1196 spin_lock_irq(&pcpu_lock);
1197 pcpu_chunk_populated(chunk, rs, rs + nr);
1198 spin_unlock_irq(&pcpu_lock);
1199 } else {
1200 nr_to_pop = 0;
1201 }
1202
1203 if (!nr_to_pop)
1204 break;
1205 }
1206 }
1207
1208 if (nr_to_pop) {
1209 /* ran out of chunks to populate, create a new one and retry */
1210 chunk = pcpu_create_chunk();
1211 if (chunk) {
1212 spin_lock_irq(&pcpu_lock);
1213 pcpu_chunk_relocate(chunk, -1);
1214 spin_unlock_irq(&pcpu_lock);
1215 goto retry_pop;
1216 }
1217 }
1218
910 mutex_unlock(&pcpu_alloc_mutex); 1219 mutex_unlock(&pcpu_alloc_mutex);
911} 1220}
912 1221
@@ -924,7 +1233,7 @@ void free_percpu(void __percpu *ptr)
924 void *addr; 1233 void *addr;
925 struct pcpu_chunk *chunk; 1234 struct pcpu_chunk *chunk;
926 unsigned long flags; 1235 unsigned long flags;
927 int off; 1236 int off, occ_pages;
928 1237
929 if (!ptr) 1238 if (!ptr)
930 return; 1239 return;
@@ -938,7 +1247,10 @@ void free_percpu(void __percpu *ptr)
938 chunk = pcpu_chunk_addr_search(addr); 1247 chunk = pcpu_chunk_addr_search(addr);
939 off = addr - chunk->base_addr; 1248 off = addr - chunk->base_addr;
940 1249
941 pcpu_free_area(chunk, off); 1250 pcpu_free_area(chunk, off, &occ_pages);
1251
1252 if (chunk != pcpu_reserved_chunk)
1253 pcpu_nr_empty_pop_pages += occ_pages;
942 1254
943 /* if there are more than one fully free chunks, wake up grim reaper */ 1255 /* if there are more than one fully free chunks, wake up grim reaper */
944 if (chunk->free_size == pcpu_unit_size) { 1256 if (chunk->free_size == pcpu_unit_size) {
@@ -946,7 +1258,7 @@ void free_percpu(void __percpu *ptr)
946 1258
947 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 1259 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
948 if (pos != chunk) { 1260 if (pos != chunk) {
949 schedule_work(&pcpu_reclaim_work); 1261 pcpu_schedule_balance_work();
950 break; 1262 break;
951 } 1263 }
952 } 1264 }
@@ -1336,11 +1648,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1336 */ 1648 */
1337 schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 1649 schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1338 INIT_LIST_HEAD(&schunk->list); 1650 INIT_LIST_HEAD(&schunk->list);
1651 INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
1339 schunk->base_addr = base_addr; 1652 schunk->base_addr = base_addr;
1340 schunk->map = smap; 1653 schunk->map = smap;
1341 schunk->map_alloc = ARRAY_SIZE(smap); 1654 schunk->map_alloc = ARRAY_SIZE(smap);
1342 schunk->immutable = true; 1655 schunk->immutable = true;
1343 bitmap_fill(schunk->populated, pcpu_unit_pages); 1656 bitmap_fill(schunk->populated, pcpu_unit_pages);
1657 schunk->nr_populated = pcpu_unit_pages;
1344 1658
1345 if (ai->reserved_size) { 1659 if (ai->reserved_size) {
1346 schunk->free_size = ai->reserved_size; 1660 schunk->free_size = ai->reserved_size;
@@ -1364,11 +1678,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1364 if (dyn_size) { 1678 if (dyn_size) {
1365 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 1679 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1366 INIT_LIST_HEAD(&dchunk->list); 1680 INIT_LIST_HEAD(&dchunk->list);
1681 INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
1367 dchunk->base_addr = base_addr; 1682 dchunk->base_addr = base_addr;
1368 dchunk->map = dmap; 1683 dchunk->map = dmap;
1369 dchunk->map_alloc = ARRAY_SIZE(dmap); 1684 dchunk->map_alloc = ARRAY_SIZE(dmap);
1370 dchunk->immutable = true; 1685 dchunk->immutable = true;
1371 bitmap_fill(dchunk->populated, pcpu_unit_pages); 1686 bitmap_fill(dchunk->populated, pcpu_unit_pages);
1687 dchunk->nr_populated = pcpu_unit_pages;
1372 1688
1373 dchunk->contig_hint = dchunk->free_size = dyn_size; 1689 dchunk->contig_hint = dchunk->free_size = dyn_size;
1374 dchunk->map[0] = 1; 1690 dchunk->map[0] = 1;
@@ -1379,6 +1695,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1379 1695
1380 /* link the first chunk in */ 1696 /* link the first chunk in */
1381 pcpu_first_chunk = dchunk ?: schunk; 1697 pcpu_first_chunk = dchunk ?: schunk;
1698 pcpu_nr_empty_pop_pages +=
1699 pcpu_count_occupied_pages(pcpu_first_chunk, 1);
1382 pcpu_chunk_relocate(pcpu_first_chunk, -1); 1700 pcpu_chunk_relocate(pcpu_first_chunk, -1);
1383 1701
1384 /* we're done */ 1702 /* we're done */
@@ -1932,8 +2250,6 @@ void __init setup_per_cpu_areas(void)
1932 2250
1933 if (pcpu_setup_first_chunk(ai, fc) < 0) 2251 if (pcpu_setup_first_chunk(ai, fc) < 0)
1934 panic("Failed to initialize percpu areas."); 2252 panic("Failed to initialize percpu areas.");
1935
1936 pcpu_free_alloc_info(ai);
1937} 2253}
1938 2254
1939#endif /* CONFIG_SMP */ 2255#endif /* CONFIG_SMP */
@@ -1967,3 +2283,15 @@ void __init percpu_init_late(void)
1967 spin_unlock_irqrestore(&pcpu_lock, flags); 2283 spin_unlock_irqrestore(&pcpu_lock, flags);
1968 } 2284 }
1969} 2285}
2286
2287/*
2288 * Percpu allocator is initialized early during boot when neither slab or
2289 * workqueue is available. Plug async management until everything is up
2290 * and running.
2291 */
2292static int __init percpu_enable_async(void)
2293{
2294 pcpu_async_enabled = true;
2295 return 0;
2296}
2297subsys_initcall(percpu_enable_async);
diff --git a/mm/shmem.c b/mm/shmem.c
index 4fad61bb41e5..cd6fc7590e54 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2995,7 +2995,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
2995#endif 2995#endif
2996 2996
2997 spin_lock_init(&sbinfo->stat_lock); 2997 spin_lock_init(&sbinfo->stat_lock);
2998 if (percpu_counter_init(&sbinfo->used_blocks, 0)) 2998 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
2999 goto failed; 2999 goto failed;
3000 sbinfo->free_inodes = sbinfo->max_inodes; 3000 sbinfo->free_inodes = sbinfo->max_inodes;
3001 3001