author     David Rientjes <rientjes@google.com>              2011-01-13 18:46:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 20:32:34 -0500
commit     ec3f64fc9c196a304c4b7db3e1ff56d640628509
tree       43de86d9fbb6543b99e1f450b1a3c15a3f151fa0 /mm/vmalloc.c
parent     e5a5623b28198aa91ea71ee5d3846757fc76bc87
mm: remove gfp mask from pcpu_get_vm_areas
pcpu_get_vm_areas() only uses GFP_KERNEL allocations, so remove the
gfp_t formal and use the mask internally.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--    mm/vmalloc.c    21
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 78ec9d8bc57c..f67546636322 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2196,17 +2196,16 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  * @sizes: array containing size of each area
  * @nr_vms: the number of areas to allocate
  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
- * @gfp_mask: allocation mask
  *
  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
  * vm_structs on success, %NULL on failure
  *
  * Percpu allocator wants to use congruent vm areas so that it can
  * maintain the offsets among percpu areas. This function allocates
- * congruent vmalloc areas for it. These areas tend to be scattered
- * pretty far, distance between two areas easily going up to
- * gigabytes. To avoid interacting with regular vmallocs, these areas
- * are allocated from top.
+ * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
+ * be scattered pretty far, distance between two areas easily going up
+ * to gigabytes. To avoid interacting with regular vmallocs, these
+ * areas are allocated from top.
  *
  * Despite its complicated look, this allocator is rather simple. It
  * does everything top-down and scans areas from the end looking for
@@ -2217,7 +2216,7 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  */
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask)
+				     size_t align)
 {
 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
@@ -2227,8 +2226,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	unsigned long base, start, end, last_end;
 	bool purged = false;
 
-	gfp_mask &= GFP_RECLAIM_MASK;
-
 	/* verify parameters and allocate data structures */
 	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
 	for (last_area = 0, area = 0; area < nr_vms; area++) {
@@ -2261,14 +2258,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 		return NULL;
 	}
 
-	vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
-	vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+	vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
+	vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
 	if (!vas || !vms)
 		goto err_free;
 
 	for (area = 0; area < nr_vms; area++) {
-		vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
-		vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
 		if (!vas[area] || !vms[area])
 			goto err_free;
 	}
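
For readers less familiar with the pattern, here is a minimal userspace sketch of the simplification the patch makes: a helper that only ever allocates in one fixed mode gains nothing from a flags parameter, so the parameter is dropped and the mode is applied internally, as pcpu_get_vm_areas() now does with GFP_KERNEL. This is not kernel code; all names below (toy_area, toy_get_areas) are invented for illustration, with calloc() standing in for kzalloc(..., GFP_KERNEL).

/*
 * Illustrative userspace sketch only -- not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_area {
	size_t size;
	void *addr;
};

/* Post-cleanup shape: no gfp-style argument in the signature. */
static struct toy_area **toy_get_areas(const size_t *sizes, int nr)
{
	struct toy_area **areas;
	int i;

	/* calloc() stands in for kzalloc(..., GFP_KERNEL). */
	areas = calloc(nr, sizeof(*areas));
	if (!areas)
		return NULL;

	for (i = 0; i < nr; i++) {
		areas[i] = calloc(1, sizeof(*areas[i]));
		if (!areas[i])
			goto err_free;
		areas[i]->size = sizes[i];
		areas[i]->addr = calloc(1, sizes[i]);
		if (!areas[i]->addr)
			goto err_free;
	}
	return areas;

err_free:
	/* Unallocated slots are NULL (array was zeroed), so free() is safe. */
	for (i = 0; i < nr; i++) {
		if (areas[i]) {
			free(areas[i]->addr);
			free(areas[i]);
		}
	}
	free(areas);
	return NULL;
}

int main(void)
{
	const size_t sizes[] = { 4096, 8192 };
	struct toy_area **areas = toy_get_areas(sizes, 2);

	/* Call sites no longer thread an allocation flag through. */
	if (areas)
		printf("allocated areas of %zu and %zu bytes\n",
		       areas[0]->size, areas[1]->size);
	return 0;
}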