path: root/block/blk-ioc.c
author     Tejun Heo <tj@kernel.org>        2011-12-13 18:33:42 -0500
committer  Jens Axboe <axboe@kernel.dk>     2011-12-13 18:33:42 -0500
commit     f1f8cc94651738b418ba54c039df536303b91704 (patch)
tree       eb8bc5a33dec104ab32a935a5bb1e1da2e7cdd34 /block/blk-ioc.c
parent     9b84cacd013996f244d85b3d873287c2a8f88658 (diff)
block, cfq: move icq creation and rq->elv.icq association to block core
Now block layer knows everything necessary to create and associate
icq's with requests.  Move ioc_create_icq() to blk-ioc.c and update
get_request() such that, if elevator_type->icq_size is set, requests
are automatically associated with their matching icq's before
elv_set_request().  io_context reference is also managed by block core
on request alloc/free.

* Only ioprio/cgroup changed handling remains from cfq_get_cic().
  Collapsed into cfq_set_request().

* This removes queue kicking on icq allocation failure (for now).  As
  icq allocation failure is rare and the only effect queue kicking
  achieved was possibly accelerating queue processing, this change
  shouldn't be noticeable.

  There is a larger underlying problem.  Unlike request allocation,
  icq allocation is not guaranteed to succeed eventually after
  retries.  The number of icq is unbound and thus mempool can't be
  the solution either.  This effectively adds an allocation dependency
  on the memory free path and thus the possibility of deadlock.

  This usually wouldn't happen because icq allocation is not a hot
  path and, even when the condition triggers, it's highly unlikely
  that none of the writeback workers already has an icq.

  However, this is still possible, especially if the elevator is being
  switched under high memory pressure, so we better get it fixed.
  Probably the only solution is bypassing the elevator and appending
  to the dispatch queue on any elevator allocation failure.

* Comment added to explain how icq's are managed and synchronized.

This completes cleanup of the io_context interface.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
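For illustration only, here is a minimal, hypothetical sketch of the request-allocation side described in the first paragraph above. The helper name blk_rq_set_icq_sketch and its error handling are invented; the real logic lives in get_request() in block/blk-core.c, which this diff does not cover:

static int blk_rq_set_icq_sketch(struct request_queue *q, struct request *rq,
				 gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;

	/* only elevators that declare an icq (icq_size set) get one attached */
	if (et->icq_size) {
		struct io_cq *icq = ioc_create_icq(q, gfp_mask);

		if (!icq)		/* hypothetical error handling */
			return -ENOMEM;
		rq->elv.icq = icq;	/* association now done by block core */
	}

	/* elv_set_request() runs after this, seeing rq->elv.icq already set */
	return 0;
}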
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r--  block/blk-ioc.c  60
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 0910a5584d38..c04d16b02225 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -289,7 +289,6 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 		kmem_cache_free(iocontext_cachep, ioc);
 	task_unlock(task);
 }
-EXPORT_SYMBOL(create_io_context_slowpath);
 
 /**
  * get_task_io_context - get io_context of a task
@@ -362,6 +361,65 @@ out:
 }
 EXPORT_SYMBOL(ioc_lookup_icq);
 
+/**
+ * ioc_create_icq - create and link io_cq
+ * @q: request_queue of interest
+ * @gfp_mask: allocation mask
+ *
+ * Make sure io_cq linking %current->io_context and @q exists.  If either
+ * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ *
+ * The caller is responsible for ensuring @ioc won't go away and @q is
+ * alive and will stay alive until this function returns.
+ */
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+{
+	struct elevator_type *et = q->elevator->type;
+	struct io_context *ioc;
+	struct io_cq *icq;
+
+	/* allocate stuff */
+	ioc = create_io_context(current, gfp_mask, q->node);
+	if (!ioc)
+		return NULL;
+
+	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
+				    q->node);
+	if (!icq)
+		return NULL;
+
+	if (radix_tree_preload(gfp_mask) < 0) {
+		kmem_cache_free(et->icq_cache, icq);
+		return NULL;
+	}
+
+	icq->ioc = ioc;
+	icq->q = q;
+	INIT_LIST_HEAD(&icq->q_node);
+	INIT_HLIST_NODE(&icq->ioc_node);
+
+	/* lock both q and ioc and try to link @icq */
+	spin_lock_irq(q->queue_lock);
+	spin_lock(&ioc->lock);
+
+	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
+		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
+		list_add(&icq->q_node, &q->icq_list);
+		if (et->ops.elevator_init_icq_fn)
+			et->ops.elevator_init_icq_fn(icq);
+	} else {
+		kmem_cache_free(et->icq_cache, icq);
+		icq = ioc_lookup_icq(ioc, q);
+		if (!icq)
+			printk(KERN_ERR "cfq: icq link failed!\n");
+	}
+
+	spin_unlock(&ioc->lock);
+	spin_unlock_irq(q->queue_lock);
+	radix_tree_preload_end();
+	return icq;
+}
+
 void ioc_set_changed(struct io_context *ioc, int which)
 {
 	struct io_cq *icq;
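
As a usage note (hypothetical, not part of this patch): a caller that may already have an icq would normally look it up before paying for the allocation path in ioc_create_icq(). A rough sketch of that pattern, with an invented helper name and assuming ioc_lookup_icq() is called under q->queue_lock as it is inside ioc_create_icq() above:

static struct io_cq *icq_get_sketch(struct request_queue *q, gfp_t gfp_mask)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq = NULL;

	if (ioc) {
		/* fast path: reuse an icq linked on an earlier request */
		spin_lock_irq(q->queue_lock);
		icq = ioc_lookup_icq(ioc, q);
		spin_unlock_irq(q->queue_lock);
	}

	if (!icq)
		/* slow path: creates io_context and icq as needed */
		icq = ioc_create_icq(q, gfp_mask);

	return icq;
}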