 mm/percpu-km.c |  2 +-
 mm/percpu-vm.c |  4 ++--
 mm/percpu.c    | 16 +++++++---------
 3 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 0d88d7bd5706..38de70ab1a0d 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -56,7 +56,7 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 	if (!chunk)
 		return NULL;
 
-	pages = alloc_pages(gfp | GFP_KERNEL, order_base_2(nr_pages));
+	pages = alloc_pages(gfp, order_base_2(nr_pages));
 	if (!pages) {
 		pcpu_free_chunk(chunk);
 		return NULL;
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 0af71eb2fff0..d8078de912de 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -37,7 +37,7 @@ static struct page **pcpu_get_pages(void)
 	lockdep_assert_held(&pcpu_alloc_mutex);
 
 	if (!pages)
-		pages = pcpu_mem_zalloc(pages_size, 0);
+		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
 	return pages;
 }
 
@@ -86,7 +86,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 	unsigned int cpu, tcpu;
 	int i;
 
-	gfp |= GFP_KERNEL | __GFP_HIGHMEM;
+	gfp |= __GFP_HIGHMEM;
 
 	for_each_possible_cpu(cpu) {
 		for (i = page_start; i < page_end; i++) {
diff --git a/mm/percpu.c b/mm/percpu.c
index f97443d488a8..fa3f854634a1 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -454,9 +454,6 @@ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
  * This is to facilitate passing through whitelisted flags.  The
  * returned memory is always zeroed.
  *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
- *
  * RETURNS:
  * Pointer to the allocated area on success, NULL on failure.
  */
@@ -466,10 +463,9 @@ static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
 		return NULL;
 
 	if (size <= PAGE_SIZE)
-		return kzalloc(size, gfp | GFP_KERNEL);
+		return kzalloc(size, gfp);
 	else
-		return __vmalloc(size, gfp | GFP_KERNEL | __GFP_ZERO,
-				 PAGE_KERNEL);
+		return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
 }
 
 /**
@@ -1344,6 +1340,8 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 				 gfp_t gfp)
 {
+	/* whitelisted flags that can be passed to the backing allocators */
+	gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 	bool do_warn = !(gfp & __GFP_NOWARN);
 	static int warn_limit = 10;
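
The new pcpu_gfp mask above is the whole whitelisting mechanism: only GFP_KERNEL, __GFP_NORETRY and __GFP_NOWARN may reach the backing allocators. A minimal sketch of the effect (illustrative helper, not part of the patch):

	/* Illustrative only: same mask pcpu_alloc() now applies to its gfp. */
	static inline gfp_t pcpu_whitelist_gfp(gfp_t gfp)
	{
		return gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	}

	/*
	 * pcpu_whitelist_gfp(GFP_KERNEL | __GFP_NOWARN)
	 *	-> GFP_KERNEL | __GFP_NOWARN	(both whitelisted, kept)
	 * pcpu_whitelist_gfp(GFP_KERNEL | __GFP_HIGHMEM)
	 *	-> GFP_KERNEL			(__GFP_HIGHMEM is dropped here;
	 *					 percpu-vm re-adds it itself)
	 */
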
@@ -1426,7 +1424,7 @@ restart:
 	}
 
 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
-		chunk = pcpu_create_chunk(0);
+		chunk = pcpu_create_chunk(pcpu_gfp);
 		if (!chunk) {
 			err = "failed to allocate new chunk";
 			goto fail;
@@ -1455,7 +1453,7 @@ area_found:
 					   page_start, page_end) {
 			WARN_ON(chunk->immutable);
 
-			ret = pcpu_populate_chunk(chunk, rs, re, 0);
+			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
 
 			spin_lock_irqsave(&pcpu_lock, flags);
 			if (ret) {
@@ -1576,7 +1574,7 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 static void pcpu_balance_workfn(struct work_struct *work)
 {
 	/* gfp flags passed to underlying allocators */
-	const gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
 	LIST_HEAD(to_free);
 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
 	struct pcpu_chunk *chunk, *next;
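
Callers reach this path through the regular percpu API. A hedged usage sketch, assuming the existing alloc_percpu_gfp() wrapper (which feeds its gfp argument into pcpu_alloc()): with this change, whitelisted flags such as __GFP_NOWARN propagate to the backing page/vmalloc allocations instead of being overridden by a hard-coded GFP_KERNEL.

	#include <linux/types.h>
	#include <linux/gfp.h>
	#include <linux/percpu.h>

	/* Hypothetical caller, for illustration only. */
	static u64 __percpu *example_counters;

	static int example_init(void)
	{
		/*
		 * GFP_KERNEL and __GFP_NOWARN are both on the whitelist, so
		 * the backing allocations inherit them as well and any
		 * failure stays quiet.
		 */
		example_counters = alloc_percpu_gfp(u64, GFP_KERNEL | __GFP_NOWARN);
		if (!example_counters)
			return -ENOMEM;
		return 0;
	}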