author     Tejun Heo <tj@kernel.org>        2011-12-13 18:33:39 -0500
committer  Jens Axboe <axboe@kernel.dk>     2011-12-13 18:33:39 -0500
commit     b50b636bce6293fa858cc7ff6c3ffe4920d90006
tree       65a4c509ff491aa89bf4d1a5f3e26d600eeea49f
parent     b9a1920837bc53430d339380e393a6e4c372939f
block, cfq: kill ioc_gone
Now that cic's are immediately unlinked under both locks, there's no
need to count and drain cic's before module unload.  RCU callback
completion is waited with rcu_barrier().  While at it, remove residual
RCU operations on cic_list.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
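The exit-path change relies on a generic kernel rule: a module that frees objects through call_rcu() must call rcu_barrier() on unload, so every already-queued callback has finished before the module's slab cache and code disappear. Below is a minimal sketch of that pattern; the names (struct my_obj, my_cache, and the my_* functions) are hypothetical, illustrating the generic technique rather than the patch's own code.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical object freed via RCU, mirroring cfq_io_context's rcu_head. */
struct my_obj {
	struct rcu_head rcu_head;
	int payload;
};

static struct kmem_cache *my_cache;

static void my_obj_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(my_cache, container_of(head, struct my_obj, rcu_head));
}

static void my_obj_release(struct my_obj *obj)
{
	/* Defer the free until all pre-existing RCU readers are done. */
	call_rcu(&obj->rcu_head, my_obj_free_rcu);
}

static int __init my_init(void)
{
	my_cache = KMEM_CACHE(my_obj, 0);
	return my_cache ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
	/*
	 * Wait for every already-queued call_rcu() callback to run;
	 * otherwise a late my_obj_free_rcu() could touch a destroyed
	 * cache or unloaded module text.
	 */
	rcu_barrier();
	kmem_cache_destroy(my_cache);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Note that synchronize_rcu() would not be sufficient here: it only waits for readers to leave their critical sections, whereas rcu_barrier() waits for previously queued callbacks themselves to complete.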
Diffstat (limited to 'block')
-rw-r--r--  block/cfq-iosched.c  43
1 file changed, 5 insertions(+), 38 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ff44435fad50..ae7791a8ded9 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -62,10 +62,6 @@ static const int cfq_hist_divisor = 4;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
-static struct completion *ioc_gone;
-static DEFINE_SPINLOCK(ioc_gone_lock);
-
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -2671,26 +2667,8 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 static void cfq_cic_free_rcu(struct rcu_head *head)
 {
-	struct cfq_io_context *cic;
-
-	cic = container_of(head, struct cfq_io_context, rcu_head);
-
-	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(cfq_ioc_count);
-
-	if (ioc_gone) {
-		/*
-		 * CFQ scheduler is exiting, grab exit lock and check
-		 * the pending io context count.  If it hits zero,
-		 * complete ioc_gone and set it back to NULL
-		 */
-		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
-			complete(ioc_gone);
-			ioc_gone = NULL;
-		}
-		spin_unlock(&ioc_gone_lock);
-	}
+	kmem_cache_free(cfq_ioc_pool,
+			container_of(head, struct cfq_io_context, rcu_head));
 }
 
 static void cfq_cic_free(struct cfq_io_context *cic)
@@ -2705,7 +2683,7 @@ static void cfq_release_cic(struct cfq_io_context *cic)
 
 	BUG_ON(!(dead_key & CIC_DEAD_KEY));
 	radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
-	hlist_del_rcu(&cic->cic_list);
+	hlist_del(&cic->cic_list);
 	cfq_cic_free(cic);
 }
 
@@ -2782,7 +2760,6 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->exit = cfq_exit_cic;
 		cic->release = cfq_release_cic;
-		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -3072,7 +3049,7 @@ static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
 
 	ret = radix_tree_insert(&ioc->radix_root, q->id, cic);
 	if (likely(!ret)) {
-		hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
+		hlist_add_head(&cic->cic_list, &ioc->cic_list);
 		list_add(&cic->queue_list, &cfqd->cic_list);
 		cic = NULL;
 	} else if (ret == -EEXIST) {
@@ -4156,19 +4133,9 @@ static int __init cfq_init(void)
 
 static void __exit cfq_exit(void)
 {
-	DECLARE_COMPLETION_ONSTACK(all_gone);
 	blkio_policy_unregister(&blkio_policy_cfq);
 	elv_unregister(&iosched_cfq);
-	ioc_gone = &all_gone;
-	/* ioc_gone's update must be visible before reading ioc_count */
-	smp_wmb();
-
-	/*
-	 * this also protects us from entering cfq_slab_kill() with
-	 * pending RCU callbacks
-	 */
-	if (elv_ioc_count_read(cfq_ioc_count))
-		wait_for_completion(&all_gone);
+	rcu_barrier();	/* make sure all cic RCU frees are complete */
 	cfq_slab_kill();
 }
 
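The hlist_add_head_rcu()/hlist_del_rcu() to plain hlist_add_head()/hlist_del() conversions above follow the same reasoning as the commit message's last point: once ioc->cic_list is only ever walked with the relevant locks held, the writer no longer needs RCU's publish/retire ordering. A hedged sketch of that distinction, again with hypothetical names (struct bucket, struct item) rather than the patch's structures:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical container; not from the patch. */
struct bucket {
	spinlock_t lock;
	struct hlist_head items;	/* walked only with ->lock held */
};

struct item {
	struct hlist_node node;
};

/* All readers and writers hold ->lock, so plain hlist ops suffice. */
static void bucket_add(struct bucket *b, struct item *it)
{
	spin_lock(&b->lock);
	hlist_add_head(&it->node, &b->items);
	spin_unlock(&b->lock);
}

static void bucket_del(struct bucket *b, struct item *it)
{
	spin_lock(&b->lock);
	hlist_del(&it->node);
	spin_unlock(&b->lock);
}

/*
 * If readers instead traversed 'items' under rcu_read_lock(), the
 * writer would have to use hlist_add_head_rcu()/hlist_del_rcu() and
 * defer freeing 'it' with call_rcu() -- as cfq still does for the
 * cic object itself via cfq_cic_free_rcu().
 */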