author	Tejun Heo <tj@kernel.org>	2011-12-13 18:33:42 -0500
committer	Jens Axboe <axboe@kernel.dk>	2011-12-13 18:33:42 -0500
commit	7e5a8794492e43e9eebb68a98a23be055888ccd0 (patch)
tree	cc049a23b2c994f910d3101860bc1c2ecb7aa35f /block/cfq-iosched.c
parent	3d3c2379feb177a5fd55bb0ed76776dc9d4f3243 (diff)
block, cfq: move io_cq exit/release to blk-ioc.c
With kmem_cache managed by blk-ioc, io_cq exit/release can be moved to blk-ioc too. The odd ->io_cq->exit/release() callbacks are replaced with elevator_ops->elevator_exit_icq_fn(); unlinking from both ioc and q, and freeing, are handled automatically by blk-ioc. The elevator operation only needs to perform the exit work specific to the elevator - in cfq's case, exiting the cfqq's.

Also, clearing of io_cq's on q detach is moved to block core and performed automatically on elevator switch and q release.

Because the q an io_cq points to might be freed before the RCU callback for the io_cq runs, blk-ioc code needs to remember which cache the io_cq should be freed to when the io_cq is released. A new field, io_cq->__rcu_icq_cache, is added for this purpose. As both the new field and rcu_head are used only after the io_cq is released, while the q/ioc_node fields aren't, they are put into unions.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
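As a rough illustration of the union layout and deferred free described above - a sketch only, assuming the layout the message implies; the callback name icq_free_icq_rcu and the comments are illustrative, not lifted from this patch:

struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/* q_node/ioc_node are dead once the icq has been released, so the
	 * RCU-free bookkeeping can share their storage. */
	union {
		struct list_head	q_node;			/* q->icq_list link */
		struct kmem_cache	*__rcu_icq_cache;	/* cache to free back to */
	};
	union {
		struct hlist_node	ioc_node;		/* ioc-side link */
		struct rcu_head		rcu_head;		/* deferred free */
	};
};

/* Assumed blk-ioc side: by the time the RCU callback runs, icq->q (and the
 * elevator that owns the kmem_cache) may already be gone, so free to the
 * cache remembered at release time instead of reaching through q. */
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

On release, blk-ioc would stash the owning elevator's icq cache into __rcu_icq_cache before the call_rcu(), which is why the field only needs to be valid once the q_node linkage is no longer used.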
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	47
1 file changed, 2 insertions, 45 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 06e59abcb57f..f6d315551496 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2674,26 +2674,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	cfq_put_cfqg(cfqg);
 }
 
-static void cfq_icq_free_rcu(struct rcu_head *head)
-{
-	kmem_cache_free(cfq_icq_pool,
-			icq_to_cic(container_of(head, struct io_cq, rcu_head)));
-}
-
-static void cfq_icq_free(struct io_cq *icq)
-{
-	call_rcu(&icq->rcu_head, cfq_icq_free_rcu);
-}
-
-static void cfq_release_icq(struct io_cq *icq)
-{
-	struct io_context *ioc = icq->ioc;
-
-	radix_tree_delete(&ioc->icq_tree, icq->q->id);
-	hlist_del(&icq->ioc_node);
-	cfq_icq_free(icq);
-}
-
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
 {
 	struct cfq_queue *__cfqq, *next;
@@ -2731,17 +2711,6 @@ static void cfq_exit_icq(struct io_cq *icq)
 {
 	struct cfq_io_cq *cic = icq_to_cic(icq);
 	struct cfq_data *cfqd = cic_to_cfqd(cic);
-	struct io_context *ioc = icq->ioc;
-
-	list_del_init(&icq->q_node);
-
-	/*
-	 * Both setting lookup hint to and clearing it from @icq are done
-	 * under queue_lock.  If it's not pointing to @icq now, it never
-	 * will.  Hint assignment itself can race safely.
-	 */
-	if (rcu_dereference_raw(ioc->icq_hint) == icq)
-		rcu_assign_pointer(ioc->icq_hint, NULL);
 
 	if (cic->cfqq[BLK_RW_ASYNC]) {
 		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
@@ -2764,8 +2733,6 @@ static struct cfq_io_cq *cfq_alloc_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
 		cic->ttime.last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->icq.q_node);
 		INIT_HLIST_NODE(&cic->icq.ioc_node);
-		cic->icq.exit = cfq_exit_icq;
-		cic->icq.release = cfq_release_icq;
 	}
 
 	return cic;
@@ -3034,7 +3001,7 @@ out:
 	if (ret)
 		printk(KERN_ERR "cfq: icq link failed!\n");
 	if (icq)
-		cfq_icq_free(icq);
+		kmem_cache_free(cfq_icq_pool, icq);
 	return ret;
 }
 
@@ -3774,17 +3741,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
 	if (cfqd->active_queue)
 		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
 
-	while (!list_empty(&q->icq_list)) {
-		struct io_cq *icq = list_entry(q->icq_list.next,
-					       struct io_cq, q_node);
-		struct io_context *ioc = icq->ioc;
-
-		spin_lock(&ioc->lock);
-		cfq_exit_icq(icq);
-		cfq_release_icq(icq);
-		spin_unlock(&ioc->lock);
-	}
-
 	cfq_put_async_queues(cfqd);
 	cfq_release_cfq_groups(cfqd);
 
@@ -4019,6 +3975,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_completed_req_fn = cfq_completed_request,
 		.elevator_former_req_fn = elv_rb_former_request,
 		.elevator_latter_req_fn = elv_rb_latter_request,
+		.elevator_exit_icq_fn = cfq_exit_icq,
 		.elevator_set_req_fn = cfq_set_request,
 		.elevator_put_req_fn = cfq_put_request,
 		.elevator_may_queue_fn = cfq_may_queue,