author     Tejun Heo <tj@kernel.org>       2011-12-13 18:33:37 -0500
committer  Jens Axboe <axboe@kernel.dk>    2011-12-13 18:33:37 -0500
commit     a73f730d013ff2788389fd0c46ad3e5510f124e6 (patch)
tree       773987c8cbec56745d46f46382ad268ed91adf98 /block
parent     8ba61435d73f2274e12d4d823fde06735e8f6a54 (diff)
block, cfq: move cfqd->cic_index to q->id
cfq allocates per-queue id using ida and uses it to index cic radix
tree from io_context. Move it to q->id and allocate on queue init and
free on queue release. This simplifies cfq a bit and will allow for
further improvements of io context life-cycle management.
This patch doesn't introduce any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
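
For context, the allocation style this patch standardizes on is the ida_simple_get()/ida_simple_remove() pair, which handles preloading and locking internally, as opposed to the open-coded ida_pre_get()/ida_get_new() retry loop that cfq_alloc_cic_index() used. The snippet below is a minimal sketch of that allocate/release pairing, not code from the patch itself; the my_ida/my_obj names are illustrative only.

    #include <linux/idr.h>
    #include <linux/slab.h>

    /* A single global IDA hands out small, recycled integer ids. */
    static DEFINE_IDA(my_ida);

    struct my_obj {
            int id;
    };

    static struct my_obj *my_obj_alloc(void)
    {
            struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return NULL;

            /* Any free id >= 0; a negative errno is returned on failure. */
            obj->id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
            if (obj->id < 0) {
                    kfree(obj);
                    return NULL;
            }
            return obj;
    }

    static void my_obj_free(struct my_obj *obj)
    {
            /* Return the id before freeing the object, as blk_release_queue() does below. */
            ida_simple_remove(&my_ida, obj->id);
            kfree(obj);
    }

With the id owned by the request_queue, cfq can key its per-io_context radix tree on q->id and drop cic_index_ida, cic_index_lock, and the retry loop entirely.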
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c     | 24
-rw-r--r--  block/blk-sysfs.c    |  2
-rw-r--r--  block/blk.h          |  3
-rw-r--r--  block/cfq-iosched.c  | 52
4 files changed, 26 insertions, 55 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 30add45a87ef..af7301581172 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -39,6 +39,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
+DEFINE_IDA(blk_queue_ida);
+
 /*
  * For the allocated request tables
  */
@@ -474,6 +476,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
+	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+	if (q->id < 0)
+		goto fail_q;
+
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
@@ -481,15 +487,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	q->backing_dev_info.name = "block";
 
 	err = bdi_init(&q->backing_dev_info);
-	if (err) {
-		kmem_cache_free(blk_requestq_cachep, q);
-		return NULL;
-	}
+	if (err)
+		goto fail_id;
 
-	if (blk_throtl_init(q)) {
-		kmem_cache_free(blk_requestq_cachep, q);
-		return NULL;
-	}
+	if (blk_throtl_init(q))
+		goto fail_id;
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
@@ -512,6 +514,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	q->queue_lock = &q->__queue_lock;
 
 	return q;
+
+fail_id:
+	ida_simple_remove(&blk_queue_ida, q->id);
+fail_q:
+	kmem_cache_free(blk_requestq_cachep, q);
+	return NULL;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index f0b2ca8f66d0..5b4b4ab5e785 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -494,6 +494,8 @@ static void blk_release_queue(struct kobject *kobj)
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
+
+	ida_simple_remove(&blk_queue_ida, q->id);
 	kmem_cache_free(blk_requestq_cachep, q);
 }
 
diff --git a/block/blk.h b/block/blk.h
index e38691dbb329..aae4d88fc523 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -1,6 +1,8 @@
 #ifndef BLK_INTERNAL_H
 #define BLK_INTERNAL_H
 
+#include <linux/idr.h>
+
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME	(HZ/50UL)
 
@@ -9,6 +11,7 @@
 
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
+extern struct ida blk_queue_ida;
 
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 16ace89613bc..ec3f5e8ba564 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -65,9 +65,6 @@ static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
-static DEFINE_SPINLOCK(cic_index_lock);
-static DEFINE_IDA(cic_index_ida);
-
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -290,7 +287,6 @@ struct cfq_data {
 	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
 
-	unsigned int cic_index;
 	struct list_head cic_list;
 
 	/*
@@ -484,7 +480,7 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
 
 static inline void *cfqd_dead_key(struct cfq_data *cfqd)
 {
-	return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
+	return (void *)(cfqd->queue->id << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
 }
 
 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
@@ -3105,7 +3101,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 	BUG_ON(rcu_dereference_check(ioc->ioc_data,
 				     lockdep_is_held(&ioc->lock)) == cic);
 
-	radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
+	radix_tree_delete(&ioc->radix_root, cfqd->queue->id);
 	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -3133,7 +3129,7 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 	}
 
 	do {
-		cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
+		cic = radix_tree_lookup(&ioc->radix_root, cfqd->queue->id);
 		rcu_read_unlock();
 		if (!cic)
 			break;
@@ -3169,8 +3165,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 	cic->key = cfqd;
 
 	spin_lock_irqsave(&ioc->lock, flags);
-	ret = radix_tree_insert(&ioc->radix_root,
-				cfqd->cic_index, cic);
+	ret = radix_tree_insert(&ioc->radix_root, cfqd->queue->id, cic);
 	if (!ret)
 		hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
@@ -3944,10 +3939,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
-	spin_lock(&cic_index_lock);
-	ida_remove(&cic_index_ida, cfqd->cic_index);
-	spin_unlock(&cic_index_lock);
-
 	/*
 	 * Wait for cfqg->blkg->key accessors to exit their grace periods.
 	 * Do this wait only if there are other unlinked groups out
@@ -3969,24 +3960,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
 	kfree(cfqd);
 }
 
-static int cfq_alloc_cic_index(void)
-{
-	int index, error;
-
-	do {
-		if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
-			return -ENOMEM;
-
-		spin_lock(&cic_index_lock);
-		error = ida_get_new(&cic_index_ida, &index);
-		spin_unlock(&cic_index_lock);
-		if (error && error != -EAGAIN)
-			return error;
-	} while (error);
-
-	return index;
-}
-
 static void *cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
@@ -3994,23 +3967,9 @@ static void *cfq_init_queue(struct request_queue *q)
 	struct cfq_group *cfqg;
 	struct cfq_rb_root *st;
 
-	i = cfq_alloc_cic_index();
-	if (i < 0)
-		return NULL;
-
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
-	if (!cfqd) {
-		spin_lock(&cic_index_lock);
-		ida_remove(&cic_index_ida, i);
-		spin_unlock(&cic_index_lock);
+	if (!cfqd)
 		return NULL;
-	}
-
-	/*
-	 * Don't need take queue_lock in the routine, since we are
-	 * initializing the ioscheduler, and nobody is using cfqd
-	 */
-	cfqd->cic_index = i;
 
 	/* Init root service tree */
 	cfqd->grp_service_tree = CFQ_RB_ROOT;
@@ -4294,7 +4253,6 @@
 	 */
 	if (elv_ioc_count_read(cfq_ioc_count))
 		wait_for_completion(&all_gone);
-	ida_destroy(&cic_index_ida);
 	cfq_slab_kill();
 }
 