diff options
Diffstat (limited to 'mm/percpu-vm.c')
 mm/percpu-vm.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 9158e5a81391..d8078de912de 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -37,7 +37,7 @@ static struct page **pcpu_get_pages(void)
 	lockdep_assert_held(&pcpu_alloc_mutex);
 
 	if (!pages)
-		pages = pcpu_mem_zalloc(pages_size);
+		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
 	return pages;
 }
 
@@ -73,18 +73,21 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
  * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
  * @page_start: page index of the first page to be allocated
  * @page_end: page index of the last page to be allocated + 1
+ * @gfp: allocation flags passed to the underlying allocator
  *
  * Allocate pages [@page_start,@page_end) into @pages for all units.
  * The allocation is for @chunk.  Percpu core doesn't care about the
  * content of @pages and will pass it verbatim to pcpu_map_pages().
  */
 static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
-			    struct page **pages, int page_start, int page_end)
+			    struct page **pages, int page_start, int page_end,
+			    gfp_t gfp)
 {
-	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM;
 	unsigned int cpu, tcpu;
 	int i;
 
+	gfp |= __GFP_HIGHMEM;
+
 	for_each_possible_cpu(cpu) {
 		for (i = page_start; i < page_end; i++) {
 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
@@ -262,6 +265,7 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * @chunk: chunk of interest
  * @page_start: the start page
  * @page_end: the end page
+ * @gfp: allocation flags passed to the underlying memory allocator
  *
  * For each cpu, populate and map pages [@page_start,@page_end) into
  * @chunk.
@@ -270,7 +274,7 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * pcpu_alloc_mutex, does GFP_KERNEL allocation.
  */
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
-			       int page_start, int page_end)
+			       int page_start, int page_end, gfp_t gfp)
 {
 	struct page **pages;
 
@@ -278,7 +282,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
 	if (!pages)
 		return -ENOMEM;
 
-	if (pcpu_alloc_pages(chunk, pages, page_start, page_end))
+	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
 		return -ENOMEM;
 
 	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
@@ -325,12 +329,12 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
 	pcpu_free_pages(chunk, pages, page_start, page_end);
 }
 
-static struct pcpu_chunk *pcpu_create_chunk(void)
+static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 {
 	struct pcpu_chunk *chunk;
 	struct vm_struct **vms;
 
-	chunk = pcpu_alloc_chunk();
+	chunk = pcpu_alloc_chunk(gfp);
 	if (!chunk)
 		return NULL;
 