author     Dennis Zhou <dennisszhou@gmail.com>   2018-02-16 13:09:58 -0500
committer  Tejun Heo <tj@kernel.org>             2018-02-18 08:33:01 -0500
commit     554fef1c39ee148623a496e04569dabb11463406 (patch)
tree       f1d187aad200c97bdf4b0b9f2c1c66735deab08a
parent     47504ee04b9241548ae2c28be7d0b01cff3b7aa6 (diff)
percpu: allow select gfp to be passed to underlying allocators
The prior patch added support for passing gfp flags through to the
underlying allocators. This patch allows users to pass along gfp flags
(currently only __GFP_NORETRY and __GFP_NOWARN) to the underlying
allocators. This should allow users to decide if they are ok with
failing allocations, recovering in a more graceful way.

Additionally, gfp passing was done as additional flags in the previous
patch. Instead, change this to caller-passed semantics. GFP_KERNEL is
also removed as the default flag. It continues to be used for
internally caused underlying percpu allocations.

V2:
Removed gfp_percpu_mask in favor of doing it inline.
Removed GFP_KERNEL as a default flag for __alloc_percpu_gfp.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Suggested-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
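For illustration only (not part of this commit): after this change a caller
can request a fail-fast per-CPU allocation and handle failure itself. The
struct and function names below are hypothetical; alloc_percpu_gfp() and the
whitelisted flags are the existing interface.

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU payload; any type works the same way. */
struct foo_stats {
	u64 packets;
	u64 bytes;
};

/*
 * __GFP_NORETRY and __GFP_NOWARN are now forwarded to the backing
 * page/slab allocators, so the allocation fails fast and quietly
 * instead of retrying or warning; the caller checks for NULL.
 */
static struct foo_stats __percpu *foo_stats_alloc(void)
{
	return alloc_percpu_gfp(struct foo_stats,
				GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
}

Callers that want the old behavior simply keep passing GFP_KERNEL; the extra
flags only opt in to a more graceful failure mode.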
-rw-r--r--  mm/percpu-km.c |  2
-rw-r--r--  mm/percpu-vm.c |  4
-rw-r--r--  mm/percpu.c    | 16
3 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 0d88d7bd5706..38de70ab1a0d 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -56,7 +56,7 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 	if (!chunk)
 		return NULL;
 
-	pages = alloc_pages(gfp | GFP_KERNEL, order_base_2(nr_pages));
+	pages = alloc_pages(gfp, order_base_2(nr_pages));
 	if (!pages) {
 		pcpu_free_chunk(chunk);
 		return NULL;
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 0af71eb2fff0..d8078de912de 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -37,7 +37,7 @@ static struct page **pcpu_get_pages(void)
 	lockdep_assert_held(&pcpu_alloc_mutex);
 
 	if (!pages)
-		pages = pcpu_mem_zalloc(pages_size, 0);
+		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
 	return pages;
 }
 
@@ -86,7 +86,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 	unsigned int cpu, tcpu;
 	int i;
 
-	gfp |= GFP_KERNEL | __GFP_HIGHMEM;
+	gfp |= __GFP_HIGHMEM;
 
 	for_each_possible_cpu(cpu) {
 		for (i = page_start; i < page_end; i++) {
diff --git a/mm/percpu.c b/mm/percpu.c
index f97443d488a8..fa3f854634a1 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -454,9 +454,6 @@ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
  * This is to facilitate passing through whitelisted flags.  The
  * returned memory is always zeroed.
  *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
- *
  * RETURNS:
  * Pointer to the allocated area on success, NULL on failure.
  */
@@ -466,10 +463,9 @@ static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
 		return NULL;
 
 	if (size <= PAGE_SIZE)
-		return kzalloc(size, gfp | GFP_KERNEL);
+		return kzalloc(size, gfp);
 	else
-		return __vmalloc(size, gfp | GFP_KERNEL | __GFP_ZERO,
-				 PAGE_KERNEL);
+		return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
 }
 
 /**
@@ -1344,6 +1340,8 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 				 gfp_t gfp)
 {
+	/* whitelisted flags that can be passed to the backing allocators */
+	gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 	bool do_warn = !(gfp & __GFP_NOWARN);
 	static int warn_limit = 10;
@@ -1426,7 +1424,7 @@ restart:
 	}
 
 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
-		chunk = pcpu_create_chunk(0);
+		chunk = pcpu_create_chunk(pcpu_gfp);
 		if (!chunk) {
 			err = "failed to allocate new chunk";
 			goto fail;
@@ -1455,7 +1453,7 @@ area_found:
 				      page_start, page_end) {
 		WARN_ON(chunk->immutable);
 
-		ret = pcpu_populate_chunk(chunk, rs, re, 0);
+		ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
 
 		spin_lock_irqsave(&pcpu_lock, flags);
 		if (ret) {
@@ -1576,7 +1574,7 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 static void pcpu_balance_workfn(struct work_struct *work)
 {
 	/* gfp flags passed to underlying allocators */
-	const gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
 	LIST_HEAD(to_free);
 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
 	struct pcpu_chunk *chunk, *next;