about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
author    Tejun Heo <tj@kernel.org>  2011-12-13 18:33:39 -0500
committer Jens Axboe <axboe@kernel.dk>  2011-12-13 18:33:39 -0500
commitb9a1920837bc53430d339380e393a6e4c372939f (patch)
treee3f7b8389fe58ebf88381ad404ad408cef9792c2 /block
parentb2efa05265d62bc29f3a64400fad4b44340eedb8 (diff)
block, cfq: remove delayed unlink
Now that all cic's are immediately unlinked from both ioc and queue, lazy dropping from the lookup path and trimming on elevator unregister are unnecessary. Kill them and remove the now-unused elevator_ops->trim(). This also leaves call_for_each_cic() without any user; removed. Signed-off-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--  block/cfq-iosched.c  92
-rw-r--r--  block/elevator.c     16
2 files changed, 10 insertions, 98 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6cc606560402..ff44435fad50 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2669,24 +2669,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
2669 cfq_put_cfqg(cfqg); 2669 cfq_put_cfqg(cfqg);
2670} 2670}
2671 2671
2672/*
2673 * Call func for each cic attached to this ioc.
2674 */
2675static void
2676call_for_each_cic(struct io_context *ioc,
2677 void (*func)(struct io_context *, struct cfq_io_context *))
2678{
2679 struct cfq_io_context *cic;
2680 struct hlist_node *n;
2681
2682 rcu_read_lock();
2683
2684 hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2685 func(ioc, cic);
2686
2687 rcu_read_unlock();
2688}
2689
2690static void cfq_cic_free_rcu(struct rcu_head *head) 2672static void cfq_cic_free_rcu(struct rcu_head *head)
2691{ 2673{
2692 struct cfq_io_context *cic; 2674 struct cfq_io_context *cic;
@@ -2727,31 +2709,6 @@ static void cfq_release_cic(struct cfq_io_context *cic)
2727 cfq_cic_free(cic); 2709 cfq_cic_free(cic);
2728} 2710}
2729 2711
2730static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2731{
2732 unsigned long flags;
2733
2734 spin_lock_irqsave(&ioc->lock, flags);
2735 cfq_release_cic(cic);
2736 spin_unlock_irqrestore(&ioc->lock, flags);
2737}
2738
2739/*
2740 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2741 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
2742 * and ->trim() which is called with the task lock held
2743 */
2744static void cfq_free_io_context(struct io_context *ioc)
2745{
2746 /*
2747 * ioc->refcount is zero here, or we are called from elv_unregister(),
2748 * so no more cic's are allowed to be linked into this ioc. So it
2749 * should be ok to iterate over the known list, we will see all cic's
2750 * since no new ones are added.
2751 */
2752 call_for_each_cic(ioc, cic_free_func);
2753}
2754
2755static void cfq_put_cooperator(struct cfq_queue *cfqq) 2712static void cfq_put_cooperator(struct cfq_queue *cfqq)
2756{ 2713{
2757 struct cfq_queue *__cfqq, *next; 2714 struct cfq_queue *__cfqq, *next;
@@ -3037,30 +2994,6 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
3037 return cfqq; 2994 return cfqq;
3038} 2995}
3039 2996
3040/*
3041 * We drop cfq io contexts lazily, so we may find a dead one.
3042 */
3043static void
3044cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
3045 struct cfq_io_context *cic)
3046{
3047 unsigned long flags;
3048
3049 WARN_ON(!list_empty(&cic->queue_list));
3050 BUG_ON(cic->key != cfqd_dead_key(cfqd));
3051
3052 spin_lock_irqsave(&ioc->lock, flags);
3053
3054 BUG_ON(rcu_dereference_check(ioc->ioc_data,
3055 lockdep_is_held(&ioc->lock)) == cic);
3056
3057 radix_tree_delete(&ioc->radix_root, cfqd->queue->id);
3058 hlist_del_rcu(&cic->cic_list);
3059 spin_unlock_irqrestore(&ioc->lock, flags);
3060
3061 cfq_cic_free(cic);
3062}
3063
3064/** 2997/**
3065 * cfq_cic_lookup - lookup cfq_io_context 2998 * cfq_cic_lookup - lookup cfq_io_context
3066 * @cfqd: the associated cfq_data 2999 * @cfqd: the associated cfq_data
@@ -3078,26 +3011,22 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3078 if (unlikely(!ioc)) 3011 if (unlikely(!ioc))
3079 return NULL; 3012 return NULL;
3080 3013
3081 rcu_read_lock();
3082
3083 /* 3014 /*
3084 * we maintain a last-hit cache, to avoid browsing over the tree 3015 * cic's are indexed from @ioc using radix tree and hint pointer,
3016 * both of which are protected with RCU. All removals are done
3017 * holding both q and ioc locks, and we're holding q lock - if we
3018 * find a cic which points to us, it's guaranteed to be valid.
3085 */ 3019 */
3020 rcu_read_lock();
3086 cic = rcu_dereference(ioc->ioc_data); 3021 cic = rcu_dereference(ioc->ioc_data);
3087 if (cic && cic->key == cfqd) 3022 if (cic && cic->key == cfqd)
3088 goto out; 3023 goto out;
3089 3024
3090 do { 3025 cic = radix_tree_lookup(&ioc->radix_root, cfqd->queue->id);
3091 cic = radix_tree_lookup(&ioc->radix_root, cfqd->queue->id); 3026 if (cic && cic->key == cfqd)
3092 if (!cic) 3027 rcu_assign_pointer(ioc->ioc_data, cic); /* allowed to race */
3093 break; 3028 else
3094 if (likely(cic->key == cfqd)) { 3029 cic = NULL;
3095 /* hint assignment itself can race safely */
3096 rcu_assign_pointer(ioc->ioc_data, cic);
3097 break;
3098 }
3099 cfq_drop_dead_cic(cfqd, ioc, cic);
3100 } while (1);
3101out: 3030out:
3102 rcu_read_unlock(); 3031 rcu_read_unlock();
3103 return cic; 3032 return cic;
@@ -4182,7 +4111,6 @@ static struct elevator_type iosched_cfq = {
4182 .elevator_may_queue_fn = cfq_may_queue, 4111 .elevator_may_queue_fn = cfq_may_queue,
4183 .elevator_init_fn = cfq_init_queue, 4112 .elevator_init_fn = cfq_init_queue,
4184 .elevator_exit_fn = cfq_exit_queue, 4113 .elevator_exit_fn = cfq_exit_queue,
4185 .trim = cfq_free_io_context,
4186 }, 4114 },
4187 .elevator_attrs = cfq_attrs, 4115 .elevator_attrs = cfq_attrs,
4188 .elevator_name = "cfq", 4116 .elevator_name = "cfq",
diff --git a/block/elevator.c b/block/elevator.c
index 66343d6917d0..6a343e8f8319 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -913,22 +913,6 @@ EXPORT_SYMBOL_GPL(elv_register);
913 913
914void elv_unregister(struct elevator_type *e) 914void elv_unregister(struct elevator_type *e)
915{ 915{
916 struct task_struct *g, *p;
917
918 /*
919 * Iterate every thread in the process to remove the io contexts.
920 */
921 if (e->ops.trim) {
922 read_lock(&tasklist_lock);
923 do_each_thread(g, p) {
924 task_lock(p);
925 if (p->io_context)
926 e->ops.trim(p->io_context);
927 task_unlock(p);
928 } while_each_thread(g, p);
929 read_unlock(&tasklist_lock);
930 }
931
932 spin_lock(&elv_list_lock); 916 spin_lock(&elv_list_lock);
933 list_del_init(&e->list); 917 list_del_init(&e->list);
934 spin_unlock(&elv_list_lock); 918 spin_unlock(&elv_list_lock);