about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-02-15 03:45:52 -0500
committerJens Axboe <axboe@kernel.dk>2012-02-15 03:45:52 -0500
commit2274b029f640cd652ab59c363e5beebf5f50e609 (patch)
tree652a2e774c537b83cd70481a936f5c7485436491 /block
parentd705ae6b133f9f6a8beee617b1224b6a5c99c5da (diff)
block: simplify ioc_release_fn()
The reverse double-lock dancing in ioc_release_fn() can be simplified by just using trylock on the queue_lock and backing out of the ioc lock on trylock failure. Simplify it.

Signed-off-by: Tejun Heo <tj@kernel.org>
Tested-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-ioc.c  |  46
1 file changed, 10 insertions(+), 36 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 811879c752e4..f53c80ecaf07 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -79,7 +79,6 @@ static void ioc_release_fn(struct work_struct *work)
 {
 	struct io_context *ioc = container_of(work, struct io_context,
 					      release_work);
-	struct request_queue *last_q = NULL;
 	unsigned long flags;
 
 	/*
@@ -93,44 +92,19 @@ static void ioc_release_fn(struct work_struct *work)
 	while (!hlist_empty(&ioc->icq_list)) {
 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
 						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
+		struct request_queue *q = icq->q;
 
-		if (this_q != last_q) {
-			/*
-			 * Need to switch to @this_q. Once we release
-			 * @ioc->lock, it can go away along with @cic.
-			 * Hold on to it.
-			 */
-			__blk_get_queue(this_q);
-
-			/*
-			 * blk_put_queue() might sleep thanks to kobject
-			 * idiocy. Always release both locks, put and
-			 * restart.
-			 */
-			if (last_q) {
-				spin_unlock(last_q->queue_lock);
-				spin_unlock_irqrestore(&ioc->lock, flags);
-				blk_put_queue(last_q);
-			} else {
-				spin_unlock_irqrestore(&ioc->lock, flags);
-			}
-
-			last_q = this_q;
-			spin_lock_irqsave(this_q->queue_lock, flags);
-			spin_lock_nested(&ioc->lock, 1);
-			continue;
+		if (spin_trylock(q->queue_lock)) {
+			ioc_exit_icq(icq);
+			spin_unlock(q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 		}
-		ioc_exit_icq(icq);
 	}
 
-	if (last_q) {
-		spin_unlock(last_q->queue_lock);
-		spin_unlock_irqrestore(&ioc->lock, flags);
-		blk_put_queue(last_q);
-	} else {
-		spin_unlock_irqrestore(&ioc->lock, flags);
-	}
-
+	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	kmem_cache_free(iocontext_cachep, ioc);
 }