about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/allocpercpu.c  9
-rw-r--r--  mm/mempool.c      3
-rw-r--r--  mm/vmalloc.c      6
3 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index b2486cf887a0..00b02623f008 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -53,12 +53,9 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
 	int node = cpu_to_node(cpu);
 
 	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node)) {
-		/* FIXME: kzalloc_node(size, gfp, node) */
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
-		if (pdata->ptrs[cpu])
-			memset(pdata->ptrs[cpu], 0, size);
-	} else
+	if (node_online(node))
+		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
+	else
 		pdata->ptrs[cpu] = kzalloc(size, gfp);
 	return pdata->ptrs[cpu];
 }
diff --git a/mm/mempool.c b/mm/mempool.c
index 3e8f1fed0e1f..02d5ec3feabc 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -62,10 +62,9 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 			mempool_free_t *free_fn, void *pool_data, int node_id)
 {
 	mempool_t *pool;
-	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
-	memset(pool, 0, sizeof(*pool));
 	pool->elements = kmalloc_node(min_nr * sizeof(void *),
 					GFP_KERNEL, node_id);
 	if (!pool->elements) {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ddf87145cc49..8e05a11155c9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -432,11 +432,12 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK),
+				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
@@ -445,7 +446,6 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		kfree(area);
 		return NULL;
 	}
-	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
 		if (node < 0)