Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--  block/blk-ioc.c | 58
1 file changed, 28 insertions, 30 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index e23c797b4685..dc5e69d335a0 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(get_io_context);
 
 /*
  * Slow path for ioc release in put_io_context(). Performs double-lock
- * dancing to unlink all cic's and then frees ioc.
+ * dancing to unlink all icq's and then frees ioc.
  */
 static void ioc_release_fn(struct work_struct *work)
 {
@@ -56,11 +56,10 @@ static void ioc_release_fn(struct work_struct *work)
 
 	spin_lock_irq(&ioc->lock);
 
-	while (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
-							 struct cfq_io_context,
-							 cic_list);
-		struct request_queue *this_q = cic->q;
+	while (!hlist_empty(&ioc->icq_list)) {
+		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
+						struct io_cq, ioc_node);
+		struct request_queue *this_q = icq->q;
 
 		if (this_q != last_q) {
 			/*
@@ -89,8 +88,8 @@ static void ioc_release_fn(struct work_struct *work)
 			continue;
 		}
 		ioc_release_depth_inc(this_q);
-		cic->exit(cic);
-		cic->release(cic);
+		icq->exit(icq);
+		icq->release(icq);
 		ioc_release_depth_dec(this_q);
 	}
 
@@ -131,10 +130,10 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 		return;
 
 	/*
-	 * Destroy @ioc. This is a bit messy because cic's are chained
+	 * Destroy @ioc. This is a bit messy because icq's are chained
 	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
-	 * The inner ioc->lock should be held to walk our cic_list and then
-	 * for each cic the outer matching queue_lock should be grabbed.
+	 * The inner ioc->lock should be held to walk our icq_list and then
+	 * for each icq the outer matching queue_lock should be grabbed.
 	 * ie. We need to do reverse-order double lock dancing.
 	 *
 	 * Another twist is that we are often called with one of the
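
For readers following the locking comment in the hunk above, here is a minimal, hypothetical sketch of the nesting rule it describes; it is not code from this commit, and both functions are made-up stand-ins. queue_lock is the outer lock and ioc->lock nests inside it, so the ioc-side release path can only take a queue_lock opportunistically and must back off and retry, which is the "reverse-order double lock dancing".

/* Illustrative only: the nesting rule described above, not the commit's code. */
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/spinlock.h>

/* Normal direction: queue_lock first, ioc->lock nested inside it. */
static void forward_path(struct request_queue *q, struct io_context *ioc)
{
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);
	/* ... work on icq's reachable from both @q and @ioc ... */
	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Reverse direction: release starts from the ioc side with ioc->lock
 * already held, so queue_lock may only be trylock'ed; on failure the
 * caller drops ioc->lock and retries, or punts to release_work.
 */
static bool reverse_path(struct io_context *ioc, struct request_queue *q)
{
	lockdep_assert_held(&ioc->lock);
	if (!spin_trylock(q->queue_lock))
		return false;		/* caller must back off and retry */
	/* ... unlink the icq belonging to @q ... */
	spin_unlock(q->queue_lock);
	return true;
}
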
@@ -153,11 +152,10 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 	spin_lock_irqsave_nested(&ioc->lock, flags,
 				 ioc_release_depth(locked_q));
 
-	while (!hlist_empty(&ioc->cic_list)) {
-		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
-							 struct cfq_io_context,
-							 cic_list);
-		struct request_queue *this_q = cic->q;
+	while (!hlist_empty(&ioc->icq_list)) {
+		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
+						struct io_cq, ioc_node);
+		struct request_queue *this_q = icq->q;
 
 		if (this_q != last_q) {
 			if (last_q && last_q != locked_q)
@@ -170,8 +168,8 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 			continue;
 		}
 		ioc_release_depth_inc(this_q);
-		cic->exit(cic);
-		cic->release(cic);
+		icq->exit(icq);
+		icq->release(icq);
 		ioc_release_depth_dec(this_q);
 	}
 
@@ -180,8 +178,8 @@ void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
 
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
-	/* if no cic's left, we're done; otherwise, kick release_work */
-	if (hlist_empty(&ioc->cic_list))
+	/* if no icq is left, we're done; otherwise, kick release_work */
+	if (hlist_empty(&ioc->icq_list))
 		kmem_cache_free(iocontext_cachep, ioc);
 	else
 		schedule_work(&ioc->release_work);
@@ -219,8 +217,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 	atomic_long_set(&ioc->refcount, 1);
 	atomic_set(&ioc->nr_tasks, 1);
 	spin_lock_init(&ioc->lock);
-	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
-	INIT_HLIST_HEAD(&ioc->cic_list);
+	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
+	INIT_HLIST_HEAD(&ioc->icq_list);
 	INIT_WORK(&ioc->release_work, ioc_release_fn);
 
 	/* try to install, somebody might already have beaten us to it */
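
The hunk above pairs a radix tree (icq_tree) for per-queue lookup with an hlist (icq_list) for ioc-wide walks. As a rough, hypothetical sketch of how such a pair is typically used together, not code from this commit, with icq_queue_key() as a made-up helper standing in for whatever per-queue key the real lookup uses:

#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/radix-tree.h>

/* Made-up key helper; the real code's choice of key is not shown here. */
static unsigned long icq_queue_key(struct request_queue *q)
{
	return (unsigned long)q;
}

/* Fast path: find the icq this ioc already has for @q. */
static struct io_cq *icq_lookup(struct io_context *ioc, struct request_queue *q)
{
	lockdep_assert_held(&ioc->lock);
	return radix_tree_lookup(&ioc->icq_tree, icq_queue_key(q));
}

/* Link a new icq into both containers so lookups and ioc-wide walks see it. */
static int icq_link(struct io_context *ioc, struct io_cq *icq,
		    struct request_queue *q)
{
	int ret;

	lockdep_assert_held(&ioc->lock);
	ret = radix_tree_insert(&ioc->icq_tree, icq_queue_key(q), icq);
	if (!ret)
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
	return ret;
}
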
@@ -270,11 +268,11 @@ EXPORT_SYMBOL(get_task_io_context);
 
 void ioc_set_changed(struct io_context *ioc, int which)
 {
-	struct cfq_io_context *cic;
+	struct io_cq *icq;
 	struct hlist_node *n;
 
-	hlist_for_each_entry(cic, n, &ioc->cic_list, cic_list)
-		set_bit(which, &cic->changed);
+	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
+		set_bit(which, &icq->changed);
 }
 
 /**
@@ -282,8 +280,8 @@ void ioc_set_changed(struct io_context *ioc, int which)
  * @ioc: io_context of interest
  * @ioprio: new ioprio
  *
- * @ioc's ioprio has changed to @ioprio. Set %CIC_IOPRIO_CHANGED for all
- * cic's. iosched is responsible for checking the bit and applying it on
+ * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
+ * icq's. iosched is responsible for checking the bit and applying it on
  * request issue path.
  */
 void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
@@ -292,7 +290,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
 
 	spin_lock_irqsave(&ioc->lock, flags);
 	ioc->ioprio = ioprio;
-	ioc_set_changed(ioc, CIC_IOPRIO_CHANGED);
+	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
 
@@ -300,7 +298,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
  * ioc_cgroup_changed - notify cgroup change
  * @ioc: io_context of interest
  *
- * @ioc's cgroup has changed. Set %CIC_CGROUP_CHANGED for all cic's.
+ * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
  * iosched is responsible for checking the bit and applying it on request
  * issue path.
  */
@@ -309,7 +307,7 @@ void ioc_cgroup_changed(struct io_context *ioc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ioc->lock, flags);
-	ioc_set_changed(ioc, CIC_CGROUP_CHANGED);
+	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
 
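
To round out the ICQ_*_CHANGED protocol that the two helpers above feed, here is a hypothetical sketch of the consumer side the comments place on the request issue path in the iosched; it is not code from this commit, and apply_new_ioprio()/apply_new_cgroup() are invented placeholders for the elevator's own handlers.

#include <linux/bitops.h>
#include <linux/iocontext.h>

/* Invented placeholders for the iosched's reaction to the new settings. */
static void apply_new_ioprio(struct io_cq *icq) { /* re-read icq->ioc->ioprio */ }
static void apply_new_cgroup(struct io_cq *icq) { /* re-resolve the blkcg */ }

/* Called by the elevator when issuing a request that uses @icq. */
static void icq_check_changed(struct io_cq *icq)
{
	if (likely(!icq->changed))
		return;

	if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &icq->changed))
		apply_new_ioprio(icq);
	if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &icq->changed))
		apply_new_cgroup(icq);
}
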