about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--include/linux/vmalloc.h2
-rw-r--r--mm/percpu-vm.c2
-rw-r--r--mm/vmalloc.c21
3 files changed, 11 insertions, 14 deletions
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index cb73c755fac8..c7348b8d0a81 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -117,7 +117,7 @@ extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 #ifdef CONFIG_SMP
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask);
+				     size_t align);
 
 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
 #endif
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 7d9c1d0ebd3f..ea534960a04b 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -421,7 +421,7 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
 		return NULL;
 
 	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
-				pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
+				pcpu_nr_groups, pcpu_atom_size);
 	if (!vms) {
 		pcpu_free_chunk(chunk);
 		return NULL;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 78ec9d8bc57c..f67546636322 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2196,17 +2196,16 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  * @sizes: array containing size of each area
  * @nr_vms: the number of areas to allocate
  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
- * @gfp_mask: allocation mask
  *
  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
  * vm_structs on success, %NULL on failure
  *
  * Percpu allocator wants to use congruent vm areas so that it can
  * maintain the offsets among percpu areas.  This function allocates
- * congruent vmalloc areas for it.  These areas tend to be scattered
- * pretty far, distance between two areas easily going up to
- * gigabytes.  To avoid interacting with regular vmallocs, these areas
- * are allocated from top.
+ * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
+ * be scattered pretty far, distance between two areas easily going up
+ * to gigabytes.  To avoid interacting with regular vmallocs, these
+ * areas are allocated from top.
  *
  * Despite its complicated look, this allocator is rather simple.  It
  * does everything top-down and scans areas from the end looking for
@@ -2217,7 +2216,7 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  */
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask)
+				     size_t align)
 {
 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
@@ -2227,8 +2226,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	unsigned long base, start, end, last_end;
 	bool purged = false;
 
-	gfp_mask &= GFP_RECLAIM_MASK;
-
 	/* verify parameters and allocate data structures */
 	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
 	for (last_area = 0, area = 0; area < nr_vms; area++) {
@@ -2261,14 +2258,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 		return NULL;
 	}
 
-	vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
-	vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+	vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
+	vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
 	if (!vas || !vms)
 		goto err_free;
 
 	for (area = 0; area < nr_vms; area++) {
-		vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
-		vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
 		if (!vas[area] || !vms[area])
 			goto err_free;
 	}