-rw-r--r--  block/as-iosched.c       |  3
-rw-r--r--  block/cfq-iosched.c      | 18
-rw-r--r--  block/deadline-iosched.c |  3
-rw-r--r--  block/elevator.c         |  3
-rw-r--r--  block/genhd.c            |  8
-rw-r--r--  block/ll_rw_blk.c        |  4
-rw-r--r--  drivers/ide/ide-probe.c  |  4
-rw-r--r--  kernel/timer.c           |  4
-rw-r--r--  lib/genalloc.c           |  3
-rw-r--r--  mm/allocpercpu.c         |  9
-rw-r--r--  mm/mempool.c             |  3
-rw-r--r--  mm/vmalloc.c             |  6
12 files changed, 30 insertions(+), 38 deletions(-)
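
Every hunk below applies the same transformation: an allocation (kmalloc_node(), kmem_cache_alloc_node(), __vmalloc_node()) followed by an explicit memset(..., 0, ...) becomes a single call with __GFP_ZERO OR-ed into the gfp mask, so the allocator hands back already-zeroed memory. A minimal userspace sketch of the idea — demo_alloc() and DEMO_GFP_ZERO are hypothetical stand-ins for the kernel allocator and flag, not real APIs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* DEMO_GFP_ZERO plays the role of __GFP_ZERO: a request that the
 * allocator itself return zeroed memory. */
#define DEMO_GFP_ZERO 0x1u

static void *demo_alloc(size_t size, unsigned int flags)
{
	void *p = malloc(size);

	/* Zeroing happens in one place, inside the allocator, instead
	 * of at every call site. */
	if (p && (flags & DEMO_GFP_ZERO))
		memset(p, 0, size);
	return p;
}

int main(void)
{
	/* Old style: allocate, then zero by hand at the call site. */
	unsigned char *a = demo_alloc(32, 0);
	if (!a)
		return 1;
	memset(a, 0, 32);

	/* New style: the flag replaces the explicit memset(). */
	unsigned char *b = demo_alloc(32, DEMO_GFP_ZERO);
	if (!b) {
		free(a);
		return 1;
	}

	printf("a[0]=%u b[0]=%u\n", a[0], b[0]);	/* both 0 */
	free(a);
	free(b);
	return 0;
}

Centralizing the zeroing also keeps call sites from forgetting the memset() and lets allocator implementations decide when zeroing is actually needed.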
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 109e91b91ffa..3e316dd72529 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1322,10 +1322,9 @@ static void *as_init_queue(request_queue_t *q)
 {
 	struct as_data *ad;
 
-	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
+	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!ad)
 		return NULL;
-	memset(ad, 0, sizeof(*ad));
 
 	ad->q = q;	/* Identify what queue the data belongs to */
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e0aa4dad6742..9755a3cfad26 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1251,9 +1251,9 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
 	struct cfq_io_context *cic;
 
-	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+							cfqd->queue->node);
 	if (cic) {
-		memset(cic, 0, sizeof(*cic));
 		cic->last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->queue_list);
 		cic->dtor = cfq_free_io_context;
@@ -1376,17 +1376,19 @@ retry:
 			 * free memory.
 			 */
 			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+			new_cfqq = kmem_cache_alloc_node(cfq_pool,
+					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+					cfqd->queue->node);
 			spin_lock_irq(cfqd->queue->queue_lock);
 			goto retry;
 		} else {
-			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+			cfqq = kmem_cache_alloc_node(cfq_pool,
+					gfp_mask | __GFP_ZERO,
+					cfqd->queue->node);
 			if (!cfqq)
 				goto out;
 		}
 
-		memset(cfqq, 0, sizeof(*cfqq));
-
 		RB_CLEAR_NODE(&cfqq->rb_node);
 		INIT_LIST_HEAD(&cfqq->fifo);
 
@@ -2079,12 +2081,10 @@ static void *cfq_init_queue(request_queue_t *q)
 {
 	struct cfq_data *cfqd;
 
-	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
 		return NULL;
 
-	memset(cfqd, 0, sizeof(*cfqd));
-
 	cfqd->service_tree = CFQ_RB_ROOT;
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 6d673e938d3e..87ca02ac84cb 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -360,10 +360,9 @@ static void *deadline_init_queue(request_queue_t *q)
 {
 	struct deadline_data *dd;
 
-	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!dd)
 		return NULL;
-	memset(dd, 0, sizeof(*dd));
 
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
diff --git a/block/elevator.c b/block/elevator.c
index 4769a25d7037..d265963d1ed3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -177,11 +177,10 @@ static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
 	elevator_t *eq;
 	int i;
 
-	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
+	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (unlikely(!eq))
 		goto err;
 
-	memset(eq, 0, sizeof(*eq));
 	eq->ops = &e->ops;
 	eq->elevator_type = e;
 	kobject_init(&eq->kobj);
diff --git a/block/genhd.c b/block/genhd.c
index 863a8c0623ed..b321cadd6e65 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -726,21 +726,21 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 {
 	struct gendisk *disk;
 
-	disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+	disk = kmalloc_node(sizeof(struct gendisk),
+				GFP_KERNEL | __GFP_ZERO, node_id);
 	if (disk) {
-		memset(disk, 0, sizeof(struct gendisk));
 		if (!init_disk_stats(disk)) {
 			kfree(disk);
 			return NULL;
 		}
 		if (minors > 1) {
 			int size = (minors - 1) * sizeof(struct hd_struct *);
-			disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
+			disk->part = kmalloc_node(size,
+				GFP_KERNEL | __GFP_ZERO, node_id);
 			if (!disk->part) {
 				kfree(disk);
 				return NULL;
 			}
-			memset(disk->part, 0, size);
 		}
 		disk->minors = minors;
 		kobj_set_kset_s(disk,block_subsys);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 11e4235d0b0c..d7cadf304168 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1829,11 +1829,11 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	request_queue_t *q;
 
-	q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+	q = kmem_cache_alloc_node(requestq_cachep,
+				gfp_mask | __GFP_ZERO, node_id);
 	if (!q)
 		return NULL;
 
-	memset(q, 0, sizeof(*q));
 	init_timer(&q->unplug_timer);
 
 	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index cc5801399467..5a4c5ea12f89 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1073,14 +1073,14 @@ static int init_irq (ide_hwif_t *hwif)
 		hwgroup->hwif->next = hwif;
 		spin_unlock_irq(&ide_lock);
 	} else {
-		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
+		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
+					GFP_KERNEL | __GFP_ZERO,
 					hwif_to_node(hwif->drives[0].hwif));
 		if (!hwgroup)
 			goto out_up;
 
 		hwif->hwgroup = hwgroup;
 
-		memset(hwgroup, 0, sizeof(ide_hwgroup_t));
 		hwgroup->hwif     = hwif->next = hwif;
 		hwgroup->rq       = NULL;
 		hwgroup->handler  = NULL;
diff --git a/kernel/timer.c b/kernel/timer.c
index 1258371e0d2b..b7792fb03387 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1221,7 +1221,8 @@ static int __devinit init_timers_cpu(int cpu)
 		/*
 		 * The APs use this path later in boot
 		 */
-		base = kmalloc_node(sizeof(*base), GFP_KERNEL,
+		base = kmalloc_node(sizeof(*base),
+					GFP_KERNEL | __GFP_ZERO,
 					cpu_to_node(cpu));
 		if (!base)
 			return -ENOMEM;
@@ -1232,7 +1233,6 @@ static int __devinit init_timers_cpu(int cpu)
 			kfree(base);
 			return -ENOMEM;
 		}
-		memset(base, 0, sizeof(*base));
 		per_cpu(tvec_bases, cpu) = base;
 	} else {
 		/*
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eb7c2bab9ebf..f6d276db2d58 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -54,11 +54,10 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
 	int nbytes = sizeof(struct gen_pool_chunk) +
 		     (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
 
-	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
 	if (unlikely(chunk == NULL))
 		return -1;
 
-	memset(chunk, 0, nbytes);
 	spin_lock_init(&chunk->lock);
 	chunk->start_addr = addr;
 	chunk->end_addr = addr + size;
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index b2486cf887a0..00b02623f008 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -53,12 +53,9 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
 	int node = cpu_to_node(cpu);
 
 	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node)) {
-		/* FIXME: kzalloc_node(size, gfp, node) */
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
-		if (pdata->ptrs[cpu])
-			memset(pdata->ptrs[cpu], 0, size);
-	} else
+	if (node_online(node))
+		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
+	else
 		pdata->ptrs[cpu] = kzalloc(size, gfp);
 	return pdata->ptrs[cpu];
 }
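
The deleted FIXME in percpu_populate() asked for a kzalloc_node(); once the slab allocators honour __GFP_ZERO, that helper reduces to a one-line wrapper. A sketch of what it could look like (later kernels ship an equivalent inline in <linux/slab.h>):

/* Sketch only: zeroing is delegated to the allocator via __GFP_ZERO. */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}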
diff --git a/mm/mempool.c b/mm/mempool.c
index 3e8f1fed0e1f..02d5ec3feabc 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -62,10 +62,9 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 			mempool_free_t *free_fn, void *pool_data, int node_id)
 {
 	mempool_t *pool;
-	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
-	memset(pool, 0, sizeof(*pool));
 	pool->elements = kmalloc_node(min_nr * sizeof(void *),
 				      GFP_KERNEL, node_id);
 	if (!pool->elements) {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ddf87145cc49..8e05a11155c9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -432,11 +432,12 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK),
+				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
@@ -445,7 +446,6 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		kfree(area);
 		return NULL;
 	}
-	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
 		if (node < 0)