author    Jens Axboe <axboe@suse.de>                2006-07-19 17:39:40 -0400
committer Jens Axboe <axboe@nelson.home.kernel.dk>  2006-09-30 14:29:39 -0400
commit    b5deef901282628d88c784f4c9d2f0583ec3b355 (patch)
tree      1d3be92f18c9afd9426b06739c8f76931acbf03f
parent    a3b05e8f58c95dfccbf2c824d0c68e5990571f24 (diff)
[PATCH] Make sure all block/io scheduler setups are node aware
Some were kmalloc_node(), some were still kmalloc(). Change them all to
kmalloc_node().
Signed-off-by: Jens Axboe <axboe@suse.de>
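
The conversion pattern is identical at every call site. As a minimal sketch (illustrative, not part of the patch): kmalloc_node() takes the same arguments as kmalloc() plus a NUMA node id, so per-queue data is placed on the queue's home node rather than wherever the allocator happens to pick. Using the noop scheduler's allocation from the diff below:

	/* Before: the allocation may land on any NUMA node. */
	nd = kmalloc(sizeof(*nd), GFP_KERNEL);

	/* After: the allocation is placed on the queue's home node. */
	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);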
 block/as-iosched.c     |  8 ++++----
 block/cfq-iosched.c    | 13 +++++++------
 block/elevator.c       | 11 ++++++-----
 block/ll_rw_blk.c      | 13 +++++++------
 block/noop-iosched.c   |  2 +-
 include/linux/blkdev.h |  3 +--
 6 files changed, 26 insertions(+), 24 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 8e1fef1eafc9..f6dc95489316 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -210,9 +210,9 @@ static struct as_io_context *alloc_as_io_context(void)
  * If the current task has no AS IO context then create one and initialise it.
  * Then take a ref on the task's io context and return it.
  */
-static struct io_context *as_get_io_context(void)
+static struct io_context *as_get_io_context(int node)
 {
-	struct io_context *ioc = get_io_context(GFP_ATOMIC);
+	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
 	if (ioc && !ioc->aic) {
 		ioc->aic = alloc_as_io_context();
 		if (!ioc->aic) {
@@ -1148,7 +1148,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 
 	data_dir = rq_is_sync(rq);
 
-	rq->elevator_private = as_get_io_context();
+	rq->elevator_private = as_get_io_context(q->node);
 
 	if (RQ_IOC(rq)) {
 		as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
@@ -1292,7 +1292,7 @@ static int as_may_queue(request_queue_t *q, int rw)
 	struct io_context *ioc;
 	if (ad->antic_status == ANTIC_WAIT_REQ ||
 	    ad->antic_status == ANTIC_WAIT_NEXT) {
-		ioc = as_get_io_context();
+		ioc = as_get_io_context(q->node);
 		if (ad->io_context == ioc)
 			ret = ELV_MQUEUE_MUST;
 		put_io_context(ioc);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 85f1d87e86d4..0452108a932e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1148,8 +1148,9 @@ static void cfq_exit_io_context(struct io_context *ioc)
 static struct cfq_io_context *
 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
-	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
+	struct cfq_io_context *cic;
 
+	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
 	if (cic) {
 		memset(cic, 0, sizeof(*cic));
 		cic->last_end_request = jiffies;
@@ -1277,11 +1278,11 @@ retry:
 		 * free memory.
 		 */
 		spin_unlock_irq(cfqd->queue->queue_lock);
-		new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask|__GFP_NOFAIL);
+		new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
 		spin_lock_irq(cfqd->queue->queue_lock);
 		goto retry;
 	} else {
-		cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+		cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
 		if (!cfqq)
 			goto out;
 	}
@@ -1407,7 +1408,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
-	ioc = get_io_context(gfp_mask);
+	ioc = get_io_context(gfp_mask, cfqd->queue->node);
 	if (!ioc)
 		return NULL;
 
@@ -1955,7 +1956,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	struct cfq_data *cfqd;
 	int i;
 
-	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
 	if (!cfqd)
 		return NULL;
 
@@ -1970,7 +1971,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&cfqd->empty_list);
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
-	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
 	if (!cfqd->cfq_hash)
 		goto out_free;
 
diff --git a/block/elevator.c b/block/elevator.c
index 788d2d81994c..e643291793a4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -161,12 +161,12 @@ __setup("elevator=", elevator_setup);
 
 static struct kobj_type elv_ktype;
 
-static elevator_t *elevator_alloc(struct elevator_type *e)
+static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
 {
 	elevator_t *eq;
 	int i;
 
-	eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
 	if (unlikely(!eq))
 		goto err;
 
@@ -178,7 +178,8 @@ static elevator_t *elevator_alloc(struct elevator_type *e)
 	eq->kobj.ktype = &elv_ktype;
 	mutex_init(&eq->sysfs_lock);
 
-	eq->hash = kmalloc(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL);
+	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+					GFP_KERNEL, q->node);
 	if (!eq->hash)
 		goto err;
 
@@ -224,7 +225,7 @@ int elevator_init(request_queue_t *q, char *name)
 		e = elevator_get("noop");
 	}
 
-	eq = elevator_alloc(e);
+	eq = elevator_alloc(q, e);
 	if (!eq)
 		return -ENOMEM;
 
@@ -987,7 +988,7 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	/*
 	 * Allocate new elevator
 	 */
-	e = elevator_alloc(new_e);
+	e = elevator_alloc(q, new_e);
 	if (!e)
 		return 0;
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4b7b4461e8d6..c6dfa889206c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -39,6 +39,7 @@ static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(request_queue_t *q, struct bio *bio);
+static struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
 /*
  * For the allocated request tables
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[rw]+1 >= q->nr_requests) {
-			ioc = current_io_context(GFP_ATOMIC);
+			ioc = current_io_context(GFP_ATOMIC, q->node);
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
@@ -2234,7 +2235,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		 * up to a big batch of them for a small period time.
 		 * See ioc_batching, ioc_set_batching
 		 */
-		ioc = current_io_context(GFP_NOIO);
+		ioc = current_io_context(GFP_NOIO, q->node);
 		ioc_set_batching(q, ioc);
 
 		spin_lock_irq(q->queue_lock);
@@ -3641,7 +3642,7 @@ void exit_io_context(void)
  * but since the current task itself holds a reference, the context can be
  * used in general code, so long as it stays within `current` context.
  */
-struct io_context *current_io_context(gfp_t gfp_flags)
+static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
 	struct task_struct *tsk = current;
 	struct io_context *ret;
@@ -3650,7 +3651,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 	if (likely(ret))
 		return ret;
 
-	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
 		atomic_set(&ret->refcount, 1);
 		ret->task = current;
@@ -3674,10 +3675,10 @@ EXPORT_SYMBOL(current_io_context);
  *
  * This is always called in the context of the task which submitted the I/O.
  */
-struct io_context *get_io_context(gfp_t gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
 	struct io_context *ret;
-	ret = current_io_context(gfp_flags);
+	ret = current_io_context(gfp_flags, node);
 	if (likely(ret))
 		atomic_inc(&ret->refcount);
 	return ret;
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 56a7c620574f..79af43179421 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -69,7 +69,7 @@ static void *noop_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct noop_data *nd;
 
-	nd = kmalloc(sizeof(*nd), GFP_KERNEL);
+	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
 	if (!nd)
 		return NULL;
 	INIT_LIST_HEAD(&nd->queue);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 593386162f47..6609371c303e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -104,8 +104,7 @@ struct io_context {
 
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
-struct io_context *current_io_context(gfp_t gfp_flags);
-struct io_context *get_io_context(gfp_t gfp_flags);
+struct io_context *get_io_context(gfp_t gfp_flags, int node);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
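
With the prototype change in blkdev.h, get_io_context() callers now supply a node as well, while current_io_context() becomes private to ll_rw_blk.c. A minimal caller sketch under the same assumptions as the hunks above (a request_queue_t *q in scope; error handling elided):

	/* Find or allocate the current task's io_context on q's home node. */
	struct io_context *ioc = get_io_context(GFP_ATOMIC, q->node);
	if (ioc) {
		/* ... use ioc ... */
		put_io_context(ioc);	/* drop the reference taken above */
	}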