author		Jens Axboe <axboe@suse.de>		2006-07-19 17:39:40 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:29:39 -0400
commit		b5deef901282628d88c784f4c9d2f0583ec3b355
tree		1d3be92f18c9afd9426b06739c8f76931acbf03f /block/ll_rw_blk.c
parent		a3b05e8f58c95dfccbf2c824d0c68e5990571f24
[PATCH] Make sure all block/io scheduler setups are node aware
Some of these allocations were already kmalloc_node(), while others were
still plain kmalloc(). Change them all to kmalloc_node() so the memory is
allocated on the queue's home node.
Signed-off-by: Jens Axboe <axboe@suse.de>
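
For readers unfamiliar with the pattern, here is a minimal sketch of the
conversion this patch applies. The struct and function names below are
illustrative only and do not appear in the patch; q->node is the queue's
home NUMA node, as used in the diff:

#include <linux/slab.h>

/* Illustrative per-queue private data; not a structure from the patch. */
struct example_data {
	int nr_items;
};

static struct example_data *example_alloc(request_queue_t *q, gfp_t gfp)
{
	/*
	 * Before: kmalloc() could place the memory on any NUMA node.
	 *
	 *	return kmalloc(sizeof(struct example_data), gfp);
	 *
	 * After: kmalloc_node() allocates on the queue's home node,
	 * keeping the data close to the device that will use it.
	 */
	return kmalloc_node(sizeof(struct example_data), gfp, q->node);
}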
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4b7b4461e8d6..c6dfa889206c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -39,6 +39,7 @@ static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(request_queue_t *q, struct bio *bio);
+static struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
 /*
  * For the allocated request tables
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[rw]+1 >= q->nr_requests) {
-			ioc = current_io_context(GFP_ATOMIC);
+			ioc = current_io_context(GFP_ATOMIC, q->node);
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
@@ -2234,7 +2235,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 	 * up to a big batch of them for a small period time.
 	 * See ioc_batching, ioc_set_batching
 	 */
-	ioc = current_io_context(GFP_NOIO);
+	ioc = current_io_context(GFP_NOIO, q->node);
 	ioc_set_batching(q, ioc);
 
 	spin_lock_irq(q->queue_lock);
@@ -3641,7 +3642,7 @@ void exit_io_context(void)
  * but since the current task itself holds a reference, the context can be
  * used in general code, so long as it stays within `current` context.
  */
-struct io_context *current_io_context(gfp_t gfp_flags)
+static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
 	struct task_struct *tsk = current;
 	struct io_context *ret;
@@ -3650,7 +3651,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 	if (likely(ret))
 		return ret;
 
-	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
 		atomic_set(&ret->refcount, 1);
 		ret->task = current;
@@ -3674,10 +3675,10 @@ EXPORT_SYMBOL(current_io_context);
  *
  * This is always called in the context of the task which submitted the I/O.
  */
-struct io_context *get_io_context(gfp_t gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
 	struct io_context *ret;
-	ret = current_io_context(gfp_flags);
+	ret = current_io_context(gfp_flags, node);
 	if (likely(ret))
 		atomic_inc(&ret->refcount);
 	return ret;
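
As a usage sketch (not part of the patch): callers of get_io_context() now
pass a node as well, typically the queue's home node. Assuming
put_io_context() to drop the reference, as defined elsewhere in ll_rw_blk.c:

struct io_context *ioc;

ioc = get_io_context(GFP_NOIO, q->node);	/* takes a reference */
if (ioc) {
	/* ... inspect or update the task's io_context ... */
	put_io_context(ioc);			/* drop the reference */
}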