about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
Diffstat (limited to 'block')
-rw-r--r-- | block/cfq-iosched.c | 67
1 file changed, 35 insertions(+), 32 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 181a63d36691..e617b088c59b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1682,12 +1682,19 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1682 return false; 1682 return false;
1683 1683
1684 /* 1684 /*
1685 * Lookup the cfqq that this bio will be queued with. Allow 1685 * Lookup the cfqq that this bio will be queued with and allow
1686 * merge only if rq is queued there. 1686 * merge only if rq is queued there. This function can be called
1687 * from plug merge without queue_lock. In such cases, ioc of @rq
1688 * and %current are guaranteed to be equal. Avoid lookup which
1689 * requires queue_lock by using @rq's cic.
1687 */ 1690 */
1688 cic = cfq_cic_lookup(cfqd, current->io_context); 1691 if (current->io_context == RQ_CIC(rq)->ioc) {
1689 if (!cic) 1692 cic = RQ_CIC(rq);
1690 return false; 1693 } else {
1694 cic = cfq_cic_lookup(cfqd, current->io_context);
1695 if (!cic)
1696 return false;
1697 }
1691 1698
1692 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 1699 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1693 return cfqq == RQ_CFQQ(rq); 1700 return cfqq == RQ_CFQQ(rq);
@@ -2784,21 +2791,15 @@ static void cfq_exit_cic(struct cfq_io_context *cic)
2784 struct io_context *ioc = cic->ioc; 2791 struct io_context *ioc = cic->ioc;
2785 2792
2786 list_del_init(&cic->queue_list); 2793 list_del_init(&cic->queue_list);
2794 cic->key = cfqd_dead_key(cfqd);
2787 2795
2788 /* 2796 /*
2789 * Make sure dead mark is seen for dead queues 2797 * Both setting lookup hint to and clearing it from @cic are done
2798 * under queue_lock. If it's not pointing to @cic now, it never
2799 * will. Hint assignment itself can race safely.
2790 */ 2800 */
2791 smp_wmb(); 2801 if (rcu_dereference_raw(ioc->ioc_data) == cic)
2792 cic->key = cfqd_dead_key(cfqd);
2793
2794 rcu_read_lock();
2795 if (rcu_dereference(ioc->ioc_data) == cic) {
2796 rcu_read_unlock();
2797 spin_lock(&ioc->lock);
2798 rcu_assign_pointer(ioc->ioc_data, NULL); 2802 rcu_assign_pointer(ioc->ioc_data, NULL);
2799 spin_unlock(&ioc->lock);
2800 } else
2801 rcu_read_unlock();
2802 2803
2803 if (cic->cfqq[BLK_RW_ASYNC]) { 2804 if (cic->cfqq[BLK_RW_ASYNC]) {
2804 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); 2805 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
@@ -3092,12 +3093,20 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
3092 cfq_cic_free(cic); 3093 cfq_cic_free(cic);
3093} 3094}
3094 3095
3096/**
3097 * cfq_cic_lookup - lookup cfq_io_context
3098 * @cfqd: the associated cfq_data
3099 * @ioc: the associated io_context
3100 *
3101 * Look up cfq_io_context associated with @cfqd - @ioc pair. Must be
3102 * called with queue_lock held.
3103 */
3095static struct cfq_io_context * 3104static struct cfq_io_context *
3096cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) 3105cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3097{ 3106{
3098 struct cfq_io_context *cic; 3107 struct cfq_io_context *cic;
3099 unsigned long flags;
3100 3108
3109 lockdep_assert_held(cfqd->queue->queue_lock);
3101 if (unlikely(!ioc)) 3110 if (unlikely(!ioc))
3102 return NULL; 3111 return NULL;
3103 3112
@@ -3107,28 +3116,22 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3107 * we maintain a last-hit cache, to avoid browsing over the tree 3116 * we maintain a last-hit cache, to avoid browsing over the tree
3108 */ 3117 */
3109 cic = rcu_dereference(ioc->ioc_data); 3118 cic = rcu_dereference(ioc->ioc_data);
3110 if (cic && cic->key == cfqd) { 3119 if (cic && cic->key == cfqd)
3111 rcu_read_unlock(); 3120 goto out;
3112 return cic;
3113 }
3114 3121
3115 do { 3122 do {
3116 cic = radix_tree_lookup(&ioc->radix_root, cfqd->queue->id); 3123 cic = radix_tree_lookup(&ioc->radix_root, cfqd->queue->id);
3117 rcu_read_unlock();
3118 if (!cic) 3124 if (!cic)
3119 break; 3125 break;
3120 if (unlikely(cic->key != cfqd)) { 3126 if (likely(cic->key == cfqd)) {
3121 cfq_drop_dead_cic(cfqd, ioc, cic); 3127 /* hint assignment itself can race safely */
3122 rcu_read_lock(); 3128 rcu_assign_pointer(ioc->ioc_data, cic);
3123 continue; 3129 break;
3124 } 3130 }
3125 3131 cfq_drop_dead_cic(cfqd, ioc, cic);
3126 spin_lock_irqsave(&ioc->lock, flags);
3127 rcu_assign_pointer(ioc->ioc_data, cic);
3128 spin_unlock_irqrestore(&ioc->lock, flags);
3129 break;
3130 } while (1); 3132 } while (1);
3131 3133out:
3134 rcu_read_unlock();
3132 return cic; 3135 return cic;
3133} 3136}
3134 3137