author    Jens Axboe <jens.axboe@oracle.com>  2008-05-28 08:46:59 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2008-05-28 08:49:28 -0400
commit    d6de8be711b28049a5cb93c954722c311c7d3f7f (patch)
tree      001dfc1147a64bd05798e29b15ce5d995486a6fa /block/cfq-iosched.c
parent    64565911cdb57c2f512a9715b985b5617402cc67 (diff)
cfq-iosched: fix RCU problem in cfq_cic_lookup()
cfq_cic_lookup() needs to properly protect ioc->ioc_data before dereferencing
it and also exclude updaters of ioc->ioc_data as well.

Also add a number of comments documenting why the existing RCU usage is OK.

Thanks a lot to "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> for review
and comments!

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
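[Editor's note] The fix follows the standard RCU pattern for a pointer that is
read locklessly but updated in place: readers must cover rcu_dereference()
with rcu_read_lock(), and updaters must serialize rcu_assign_pointer() behind
a lock. The following is a minimal sketch of that pattern only; struct item,
struct hot_cache, cache_lookup() and cache_update() are invented names for
illustration and do not appear in the kernel source:

/*
 * Illustrative sketch of the RCU last-hit-cache idiom the patch enforces.
 * All names here are invented; this is not the cfq code itself.
 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item {
	void *key;
	/* payload ... */
};

struct hot_cache {
	spinlock_t lock;	/* excludes concurrent updaters */
	struct item *hot;	/* last-hit entry, RCU-protected */
};

/*
 * Reader side: rcu_read_lock() must cover both the rcu_dereference()
 * and the inspection of the pointed-to data.
 */
static struct item *cache_lookup(struct hot_cache *c, void *key)
{
	struct item *it;

	rcu_read_lock();
	it = rcu_dereference(c->hot);
	if (it && it->key != key)
		it = NULL;
	rcu_read_unlock();
	/*
	 * As in the patch, returning the pointer after the unlock is only
	 * safe because the caller has its own lifetime guarantee for it.
	 */
	return it;
}

/*
 * Updater side: publish the new entry under the lock, mirroring the
 * spin_lock_irqsave() the patch adds around ioc->ioc_data. Without it,
 * two CPUs could race on the single cache slot.
 */
static void cache_update(struct hot_cache *c, struct item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	rcu_assign_pointer(c->hot, it);
	spin_unlock_irqrestore(&c->lock, flags);
}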
Diffstat (limited to 'block/cfq-iosched.c')
 block/cfq-iosched.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4df3f0522435..d01b411c72f0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1142,6 +1142,9 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
+/*
+ * Must always be called with the rcu_read_lock() held
+ */
 static void
 __call_for_each_cic(struct io_context *ioc,
 		    void (*func)(struct io_context *, struct cfq_io_context *))
@@ -1197,6 +1200,11 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 	cfq_cic_free(cic);
 }
 
+/*
+ * Must be called with rcu_read_lock() held or preemption otherwise disabled.
+ * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
+ * and ->trim() which is called with the task lock held
+ */
 static void cfq_free_io_context(struct io_context *ioc)
 {
 	/*
@@ -1502,20 +1510,24 @@ static struct cfq_io_context *
 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
 	struct cfq_io_context *cic;
+	unsigned long flags;
 	void *k;
 
 	if (unlikely(!ioc))
 		return NULL;
 
+	rcu_read_lock();
+
 	/*
 	 * we maintain a last-hit cache, to avoid browsing over the tree
 	 */
 	cic = rcu_dereference(ioc->ioc_data);
-	if (cic && cic->key == cfqd)
+	if (cic && cic->key == cfqd) {
+		rcu_read_unlock();
 		return cic;
+	}
 
 	do {
-		rcu_read_lock();
 		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
 		rcu_read_unlock();
 		if (!cic)
@@ -1524,10 +1536,13 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 		k = cic->key;
 		if (unlikely(!k)) {
 			cfq_drop_dead_cic(cfqd, ioc, cic);
+			rcu_read_lock();
 			continue;
 		}
 
+		spin_lock_irqsave(&ioc->lock, flags);
 		rcu_assign_pointer(ioc->ioc_data, cic);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 		break;
 	} while (1);
 
@@ -2134,6 +2149,10 @@ static void *cfq_init_queue(struct request_queue *q)
 
 static void cfq_slab_kill(void)
 {
+	/*
+	 * Caller already ensured that pending RCU callbacks are completed,
+	 * so we should have no busy allocations at this point.
+	 */
 	if (cfq_pool)
 		kmem_cache_destroy(cfq_pool);
 	if (cfq_ioc_pool)
@@ -2292,6 +2311,11 @@ static void __exit cfq_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
+
+	/*
+	 * this also protects us from entering cfq_slab_kill() with
+	 * pending RCU callbacks
+	 */
 	if (elv_ioc_count_read(ioc_count))
 		wait_for_completion(ioc_gone);
 	cfq_slab_kill();
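[Editor's note] For context on the last two hunks, the shutdown ordering works
roughly as follows: the exit path publishes a completion, and the RCU free
path fires it when the last object is gone, so the slab caches are destroyed
only once no callback can still touch them. The sketch below is a
simplification with invented names; obj_count, obj_free_rcu() and exit_path()
stand in for cfq's ioc_count, its RCU free path and cfq_exit(), and a plain
atomic_t replaces the per-cpu elv_ioc_count counters the real code uses:

/*
 * Simplified sketch of the exit ordering; not the actual cfq code.
 */
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

static atomic_t obj_count = ATOMIC_INIT(0);	/* live objects */
static struct completion *all_gone;		/* NULL until exit runs */

/* RCU callback freeing one object */
static void obj_free_rcu(struct rcu_head *head)
{
	/* kmem_cache_free() of the object would go here */
	if (atomic_dec_and_test(&obj_count) && all_gone)
		complete(all_gone);
}

static void exit_path(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	all_gone = &done;
	/* all_gone's update must be visible before reading obj_count */
	smp_wmb();
	if (atomic_read(&obj_count))
		wait_for_completion(&done);
	/* no RCU callback can still reference the caches past this point,
	 * so the kmem_cache_destroy() calls are safe */
}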