diff options
author | Christoph Lameter <clameter@sgi.com> | 2007-07-17 07:03:29 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-17 13:23:02 -0400 |
commit | 94f6030ca792c57422f04a73e7a872d8325946d3 (patch) | |
tree | 0197f24d82b1706f1b0521f2cf68feeff64123df /block/cfq-iosched.c | |
parent | 81cda6626178cd55297831296ba8ecedbfd8b52d (diff) |
Slab allocators: Replace explicit zeroing with __GFP_ZERO
kmalloc_node() and kmem_cache_alloc_node() were not available in a zeroing
variant in the past. But with __GFP_ZERO it is possible now to do zeroing
while allocating.
Use __GFP_ZERO to remove the explicit clearing of memory via memset wherever
we can.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 18 |
1 file changed, 9 insertions, 9 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e0aa4dad6742..9755a3cfad26 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -1251,9 +1251,9 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1251 | { | 1251 | { |
1252 | struct cfq_io_context *cic; | 1252 | struct cfq_io_context *cic; |
1253 | 1253 | ||
1254 | cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node); | 1254 | cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO, |
1255 | cfqd->queue->node); | ||
1255 | if (cic) { | 1256 | if (cic) { |
1256 | memset(cic, 0, sizeof(*cic)); | ||
1257 | cic->last_end_request = jiffies; | 1257 | cic->last_end_request = jiffies; |
1258 | INIT_LIST_HEAD(&cic->queue_list); | 1258 | INIT_LIST_HEAD(&cic->queue_list); |
1259 | cic->dtor = cfq_free_io_context; | 1259 | cic->dtor = cfq_free_io_context; |
@@ -1376,17 +1376,19 @@ retry: | |||
1376 | * free memory. | 1376 | * free memory. |
1377 | */ | 1377 | */ |
1378 | spin_unlock_irq(cfqd->queue->queue_lock); | 1378 | spin_unlock_irq(cfqd->queue->queue_lock); |
1379 | new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node); | 1379 | new_cfqq = kmem_cache_alloc_node(cfq_pool, |
1380 | gfp_mask | __GFP_NOFAIL | __GFP_ZERO, | ||
1381 | cfqd->queue->node); | ||
1380 | spin_lock_irq(cfqd->queue->queue_lock); | 1382 | spin_lock_irq(cfqd->queue->queue_lock); |
1381 | goto retry; | 1383 | goto retry; |
1382 | } else { | 1384 | } else { |
1383 | cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node); | 1385 | cfqq = kmem_cache_alloc_node(cfq_pool, |
1386 | gfp_mask | __GFP_ZERO, | ||
1387 | cfqd->queue->node); | ||
1384 | if (!cfqq) | 1388 | if (!cfqq) |
1385 | goto out; | 1389 | goto out; |
1386 | } | 1390 | } |
1387 | 1391 | ||
1388 | memset(cfqq, 0, sizeof(*cfqq)); | ||
1389 | |||
1390 | RB_CLEAR_NODE(&cfqq->rb_node); | 1392 | RB_CLEAR_NODE(&cfqq->rb_node); |
1391 | INIT_LIST_HEAD(&cfqq->fifo); | 1393 | INIT_LIST_HEAD(&cfqq->fifo); |
1392 | 1394 | ||
@@ -2079,12 +2081,10 @@ static void *cfq_init_queue(request_queue_t *q) | |||
2079 | { | 2081 | { |
2080 | struct cfq_data *cfqd; | 2082 | struct cfq_data *cfqd; |
2081 | 2083 | ||
2082 | cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node); | 2084 | cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); |
2083 | if (!cfqd) | 2085 | if (!cfqd) |
2084 | return NULL; | 2086 | return NULL; |
2085 | 2087 | ||
2086 | memset(cfqd, 0, sizeof(*cfqd)); | ||
2087 | |||
2088 | cfqd->service_tree = CFQ_RB_ROOT; | 2088 | cfqd->service_tree = CFQ_RB_ROOT; |
2089 | INIT_LIST_HEAD(&cfqd->cic_list); | 2089 | INIT_LIST_HEAD(&cfqd->cic_list); |
2090 | 2090 | ||