Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--	block/blk-ioc.c	126
1 file changed, 35 insertions(+), 91 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fb95dd2f889a..1e2d53b04858 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -155,20 +155,20 @@ void put_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(put_io_context);
 
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active(). If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
 {
-	struct io_context *ioc;
-	struct io_cq *icq;
 	struct hlist_node *n;
 	unsigned long flags;
+	struct io_cq *icq;
 
-	task_lock(task);
-	ioc = task->io_context;
-	task->io_context = NULL;
-	task_unlock(task);
-
-	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (!atomic_dec_and_test(&ioc->active_ref)) {
 		put_io_context(ioc);
 		return;
 	}
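The hunk above turns the old task-exit teardown into put_io_context_active(): an "active" reference count layered over the plain refcount, so that "may still issue IO" and "memory may be freed" become separate questions. A minimal userspace sketch of that two-level pattern, using C11 atomics in place of the kernel's atomic_t/atomic_long_t (all names and the puts() stand-in for the iosched notification are illustrative, not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ioc_sketch {
            atomic_long refcount;   /* pins the memory */
            atomic_int active_ref;  /* pins the "may still issue IO" state */
    };

    void put_ref(struct ioc_sketch *ioc)
    {
            if (atomic_fetch_sub(&ioc->refcount, 1) == 1)
                    free(ioc);      /* last plain reference gone */
    }

    void put_active_ref(struct ioc_sketch *ioc)
    {
            if (atomic_fetch_sub(&ioc->active_ref, 1) == 1)
                    puts("last active ref gone: no further IO, notify ioscheds");
            put_ref(ioc);           /* every active ref also holds a plain ref */
    }

    int main(void)
    {
            struct ioc_sketch *ioc = malloc(sizeof(*ioc));

            if (!ioc)
                    return 1;
            atomic_init(&ioc->refcount, 1);    /* ~ atomic_long_set(..., 1) */
            atomic_init(&ioc->active_ref, 1);  /* ~ atomic_set(&...active_ref, 1) */
            put_active_ref(ioc);               /* exit path drops both counts */
            return 0;
    }

Dropping the last active reference is the point of no return for issuing IO; the plain refcount alone decides when the memory goes away.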
@@ -197,6 +197,20 @@ retry:
 	put_io_context(ioc);
 }
 
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+	struct io_context *ioc;
+
+	task_lock(task);
+	ioc = task->io_context;
+	task->io_context = NULL;
+	task_unlock(task);
+
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context_active(ioc);
+}
+
 /**
  * ioc_clear_queue - break any ioc association with the specified queue
  * @q: request_queue being cleared
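exit_io_context() now only detaches the ioc from the exiting task and drops its counts; the icq walk lives in put_io_context_active(). A sketch of that detach-then-drop ordering, with a pthread mutex standing in for task_lock() and reusing put_active_ref() from the sketch above (illustrative only; the nr_tasks bookkeeping of the real function is omitted):

    #include <pthread.h>
    #include <stddef.h>

    struct ioc_sketch;                              /* from the sketch above */
    void put_active_ref(struct ioc_sketch *ioc);    /* from the sketch above */

    struct task_sketch {
            pthread_mutex_t lock;
            struct ioc_sketch *io_context;
    };

    void exit_io_context_sketch(struct task_sketch *task)
    {
            struct ioc_sketch *ioc;

            pthread_mutex_lock(&task->lock);
            ioc = task->io_context;         /* take the pointer... */
            task->io_context = NULL;        /* ...and unpublish it */
            pthread_mutex_unlock(&task->lock);

            /* counts are dropped only after the pointer is unreachable,
             * so a concurrent lookup sees either a valid ioc or NULL,
             * never one that is mid-teardown */
            put_active_ref(ioc);
    }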
@@ -218,19 +232,18 @@ void ioc_clear_queue(struct request_queue *q)
 	}
 }
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
-				int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;
 
 	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
 				    node);
 	if (unlikely(!ioc))
-		return;
+		return -ENOMEM;
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
-	atomic_set(&ioc->nr_tasks, 1);
+	atomic_set(&ioc->active_ref, 1);
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->icq_list);
@@ -250,6 +263,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 	else
 		kmem_cache_free(iocontext_cachep, ioc);
 	task_unlock(task);
+
+	return 0;
 }
 
 /**
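Together these two hunks turn the void create_io_context_slowpath() into create_task_io_context(), which reports failure as a negative errno and success as 0, even when it loses the installation race and frees its own allocation. A hedged userspace sketch of the same convention (names illustrative):

    #include <errno.h>
    #include <stdlib.h>

    struct ctx_sk { int refs; };
    struct task_sk { struct ctx_sk *ctx; };

    int create_task_ctx(struct task_sk *t)
    {
            struct ctx_sk *c = calloc(1, sizeof(*c));       /* ~ __GFP_ZERO */

            if (!c)
                    return -ENOMEM;         /* precise failure for the caller */
            c->refs = 1;
            if (t->ctx)
                    free(c);                /* lost the race: one already exists */
            else
                    t->ctx = c;             /* publish ours */
            return 0;                       /* "a ctx now exists", either way */
    }

Returning 0 on success is what lets the lookup loop in the next hunk invert its condition.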
@@ -281,7 +296,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 			return ioc;
 		}
 		task_unlock(task);
-	} while (create_io_context(task, gfp_flags, node));
+	} while (!create_task_io_context(task, gfp_flags, node));
 
 	return NULL;
 }
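Because the creator now returns 0 on success rather than a pointer, the retry condition in get_task_io_context() flips from while (ptr) to while (!err). A simplified, lock-free analogue using the types from the previous sketch (illustrative; the real loop re-takes task_lock() each pass):

    /* uses struct task_sk / create_task_ctx() from the sketch above */
    struct ctx_sk *get_task_ctx(struct task_sk *t)
    {
            struct ctx_sk *c;

            do {
                    c = t->ctx;             /* fast path: already created */
                    if (c) {
                            c->refs++;      /* ~ get_io_context() */
                            return c;
                    }
            } while (!create_task_ctx(t));  /* 0 == success, so look again */

            return NULL;                    /* creation failed (-ENOMEM) */
    }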
@@ -325,26 +340,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
 
 /**
  * ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
  * @q: request_queue of interest
  * @gfp_mask: allocation mask
  *
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they
+ * will be created using @gfp_mask.
 *
  * The caller is responsible for ensuring @ioc won't go away and @q is
  * alive and will stay alive until this function returns.
  */
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask)
 {
 	struct elevator_type *et = q->elevator->type;
-	struct io_context *ioc;
 	struct io_cq *icq;
 
 	/* allocate stuff */
-	ioc = create_io_context(current, gfp_mask, q->node);
-	if (!ioc)
-		return NULL;
-
 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
 				    q->node);
 	if (!icq)
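ioc_create_icq() no longer conjures current->io_context on demand; the caller resolves the ioc first and passes it in, and per the updated comment the caller also owns the lifetime guarantee. A small userspace sketch of the moved responsibility (all types illustrative):

    #include <stdlib.h>

    struct ioc_sk { int id; };
    struct queue_sk { int node; };
    struct icq_sk { struct ioc_sk *ioc; struct queue_sk *q; };

    /* The helper only links; it no longer creates the context itself. */
    struct icq_sk *icq_create(struct ioc_sk *ioc, struct queue_sk *q)
    {
            struct icq_sk *icq = calloc(1, sizeof(*icq));

            if (!icq)
                    return NULL;
            icq->ioc = ioc;         /* caller keeps ioc alive until we return */
            icq->q = q;
            return icq;
    }

Passing the context down explicitly makes the "caller ensures @ioc won't go away" rule checkable at every call site instead of being implicit in current.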
@@ -382,74 +394,6 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
 	return icq;
 }
 
-void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
-{
-	struct io_cq *icq;
-	struct hlist_node *n;
-
-	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
-		icq->flags |= flags;
-}
-
-/**
- * ioc_ioprio_changed - notify ioprio change
- * @ioc: io_context of interest
- * @ioprio: new ioprio
- *
- * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
- * icq's. iosched is responsible for checking the bit and applying it on
- * request issue path.
- */
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioc->lock, flags);
-	ioc->ioprio = ioprio;
-	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
-	spin_unlock_irqrestore(&ioc->lock, flags);
-}
-
-/**
- * ioc_cgroup_changed - notify cgroup change
- * @ioc: io_context of interest
- *
- * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
- * iosched is responsible for checking the bit and applying it on request
- * issue path.
- */
-void ioc_cgroup_changed(struct io_context *ioc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioc->lock, flags);
-	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
-	spin_unlock_irqrestore(&ioc->lock, flags);
-}
-EXPORT_SYMBOL(ioc_cgroup_changed);
-
-/**
- * icq_get_changed - fetch and clear icq changed mask
- * @icq: icq of interest
- *
- * Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
- * @icq->ioc->lock.
- */
-unsigned icq_get_changed(struct io_cq *icq)
-{
-	unsigned int changed = 0;
-	unsigned long flags;
-
-	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
-		spin_lock_irqsave(&icq->ioc->lock, flags);
-		changed = icq->flags & ICQ_CHANGED_MASK;
-		icq->flags &= ~ICQ_CHANGED_MASK;
-		spin_unlock_irqrestore(&icq->ioc->lock, flags);
-	}
-	return changed;
-}
-EXPORT_SYMBOL(icq_get_changed);
-
 static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
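For reference, the icq_get_changed() API deleted above followed a lock-protected fetch-and-clear pattern: peek at the flags cheaply, and take the lock only when there is something to consume. A minimal pthread analogue of that removed pattern (names illustrative; a strict C program would make the unlocked peek an atomic load):

    #include <pthread.h>

    #define CHANGED_MASK 0x3    /* two "changed" bits, as in the removed code */

    struct flagged {
            pthread_mutex_t lock;
            unsigned int flags;
    };

    unsigned int get_changed(struct flagged *f)
    {
            unsigned int changed = 0;

            if (f->flags & CHANGED_MASK) {          /* cheap unlocked peek */
                    pthread_mutex_lock(&f->lock);
                    changed = f->flags & CHANGED_MASK;
                    f->flags &= ~CHANGED_MASK;      /* consume the bits */
                    pthread_mutex_unlock(&f->lock);
            }
            return changed;
    }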