author		Tejun Heo <tj@kernel.org>	2011-12-13 18:33:42 -0500
committer	Jens Axboe <axboe@kernel.dk>	2011-12-13 18:33:42 -0500
commit		9b84cacd013996f244d85b3d873287c2a8f88658 (patch)
tree		a11b850f9c8b182a7a8141276244a831aaae5291 /block
parent		7e5a8794492e43e9eebb68a98a23be055888ccd0 (diff)
block, cfq: restructure io_cq creation path for io_context interface cleanup
Add elevator_ops->elevator_init_icq_fn() and restructure
cfq_create_cic() and rename it to ioc_create_icq().
The new function expects its caller to pass in an io_context, uses
elevator_type->icq_cache, handles the generic initialization, calls the
new elevator operation for elevator-specific initialization, and returns
a pointer to the created or looked-up icq. This leaves the cfq_icq_pool
variable without any user, so it is removed.
This prepares for io_context interface cleanup and doesn't introduce
any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
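
The split this patch introduces (generic icq creation in common code,
elevator-specific setup behind an optional callback) can be modeled in a
few lines of standalone C. The sketch below is illustrative only; the
struct and function names are stand-ins, not the kernel API.

/*
 * Minimal userspace model of the pattern: a generic creation helper
 * owns allocation from the per-type cache and the common field setup,
 * then defers to an optional per-elevator hook.  Names are stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct icq_model {
	long last_end_request;		/* field the cfq hook would set */
};

struct elevator_type_model {
	const char *name;
	void (*init_icq)(struct icq_model *icq);  /* like elevator_init_icq_fn */
};

/* generic part: zeroed allocation plus common setup, then the type hook */
static struct icq_model *create_icq(const struct elevator_type_model *et)
{
	struct icq_model *icq = calloc(1, sizeof(*icq));  /* ~ __GFP_ZERO */

	if (!icq)
		return NULL;
	if (et->init_icq)		/* elevator-specific initialization */
		et->init_icq(icq);
	return icq;
}

static void cfq_style_init(struct icq_model *icq)
{
	icq->last_end_request = 1000;	/* cfq stores jiffies here */
}

int main(void)
{
	struct elevator_type_model et = { "cfq", cfq_style_init };
	struct icq_model *icq = create_icq(&et);

	if (icq)
		printf("%s icq: last_end_request=%ld\n", et.name,
		       icq->last_end_request);
	free(icq);
	return 0;
}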
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	94
1 file changed, 41 insertions(+), 53 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f6d315551496..11f49d036845 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -59,7 +59,6 @@ static const int cfq_hist_divisor = 4;
 #define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])
 
 static struct kmem_cache *cfq_pool;
-static struct kmem_cache *cfq_icq_pool;
 
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
@@ -2707,6 +2706,13 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	cfq_put_queue(cfqq);
 }
 
+static void cfq_init_icq(struct io_cq *icq)
+{
+	struct cfq_io_cq *cic = icq_to_cic(icq);
+
+	cic->ttime.last_end_request = jiffies;
+}
+
 static void cfq_exit_icq(struct io_cq *icq)
 {
 	struct cfq_io_cq *cic = icq_to_cic(icq);
@@ -2723,21 +2729,6 @@ static void cfq_exit_icq(struct io_cq *icq)
 	}
 }
 
-static struct cfq_io_cq *cfq_alloc_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
-{
-	struct cfq_io_cq *cic;
-
-	cic = kmem_cache_alloc_node(cfq_icq_pool, gfp_mask | __GFP_ZERO,
-				    cfqd->queue->node);
-	if (cic) {
-		cic->ttime.last_end_request = jiffies;
-		INIT_LIST_HEAD(&cic->icq.q_node);
-		INIT_HLIST_NODE(&cic->icq.ioc_node);
-	}
-
-	return cic;
-}
-
 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 {
 	struct task_struct *tsk = current;
@@ -2945,64 +2936,62 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 }
 
 /**
- * cfq_create_cic - create and link a cfq_io_cq
- * @cfqd: cfqd of interest
+ * ioc_create_icq - create and link io_cq
+ * @q: request_queue of interest
  * @gfp_mask: allocation mask
  *
- * Make sure cfq_io_cq linking %current->io_context and @cfqd exists. If
- * ioc and/or cic doesn't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking %current->io_context and @q exists. If either
+ * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ *
+ * The caller is responsible for ensuring @ioc won't go away and @q is
+ * alive and will stay alive until this function returns.
  */
-static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
+static struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
 {
-	struct request_queue *q = cfqd->queue;
-	struct io_cq *icq = NULL;
-	struct cfq_io_cq *cic;
+	struct elevator_type *et = q->elevator->type;
 	struct io_context *ioc;
-	int ret = -ENOMEM;
-
-	might_sleep_if(gfp_mask & __GFP_WAIT);
+	struct io_cq *icq;
 
 	/* allocate stuff */
 	ioc = create_io_context(current, gfp_mask, q->node);
 	if (!ioc)
-		goto out;
+		return NULL;
 
-	cic = cfq_alloc_cic(cfqd, gfp_mask);
-	if (!cic)
-		goto out;
-	icq = &cic->icq;
+	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
+				    q->node);
+	if (!icq)
+		return NULL;
 
-	ret = radix_tree_preload(gfp_mask);
-	if (ret)
-		goto out;
+	if (radix_tree_preload(gfp_mask) < 0) {
+		kmem_cache_free(et->icq_cache, icq);
+		return NULL;
+	}
 
 	icq->ioc = ioc;
-	icq->q = cfqd->queue;
+	icq->q = q;
+	INIT_LIST_HEAD(&icq->q_node);
+	INIT_HLIST_NODE(&icq->ioc_node);
 
 	/* lock both q and ioc and try to link @icq */
 	spin_lock_irq(q->queue_lock);
 	spin_lock(&ioc->lock);
 
-	ret = radix_tree_insert(&ioc->icq_tree, q->id, icq);
-	if (likely(!ret)) {
+	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
 		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
 		list_add(&icq->q_node, &q->icq_list);
-		icq = NULL;
-	} else if (ret == -EEXIST) {
-		/* someone else already did it */
-		ret = 0;
+		if (et->ops.elevator_init_icq_fn)
+			et->ops.elevator_init_icq_fn(icq);
+	} else {
+		kmem_cache_free(et->icq_cache, icq);
+		icq = ioc_lookup_icq(ioc, q);
+		if (!icq)
+			printk(KERN_ERR "cfq: icq link failed!\n");
 	}
 
 	spin_unlock(&ioc->lock);
 	spin_unlock_irq(q->queue_lock);
-
 	radix_tree_preload_end();
-out:
-	if (ret)
-		printk(KERN_ERR "cfq: icq link failed!\n");
-	if (icq)
-		kmem_cache_free(cfq_icq_pool, icq);
-	return ret;
+	return icq;
 }
 
 /**
@@ -3022,7 +3011,6 @@ static struct cfq_io_cq *cfq_get_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
 	struct request_queue *q = cfqd->queue;
 	struct cfq_io_cq *cic = NULL;
 	struct io_context *ioc;
-	int err;
 
 	lockdep_assert_held(q->queue_lock);
 
@@ -3037,9 +3025,9 @@ static struct cfq_io_cq *cfq_get_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
 
 		/* slow path - unlock, create missing ones and retry */
 		spin_unlock_irq(q->queue_lock);
-		err = cfq_create_cic(cfqd, gfp_mask);
+		cic = icq_to_cic(ioc_create_icq(q, gfp_mask));
 		spin_lock_irq(q->queue_lock);
-		if (err)
+		if (!cic)
 			return NULL;
 	}
 
@@ -3975,6 +3963,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_completed_req_fn =	cfq_completed_request,
 		.elevator_former_req_fn =	elv_rb_former_request,
 		.elevator_latter_req_fn =	elv_rb_latter_request,
+		.elevator_init_icq_fn =		cfq_init_icq,
 		.elevator_exit_icq_fn =		cfq_exit_icq,
 		.elevator_set_req_fn =		cfq_set_request,
 		.elevator_put_req_fn =		cfq_put_request,
@@ -4028,7 +4017,6 @@ static int __init cfq_init(void)
 		kmem_cache_destroy(cfq_pool);
 		return ret;
 	}
-	cfq_icq_pool = iosched_cfq.icq_cache;
 
 	blkio_policy_register(&blkio_policy_cfq);
 
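
Worth noting in ioc_create_icq() above is the insert-or-lookup idiom: the
new icq is linked optimistically under both locks, and on collision the
fresh allocation is freed and the entry some other task already linked is
looked up instead. The standalone C sketch below models just that control
flow; an array stands in for the radix tree, no locking is modeled, and
all names are illustrative.

/*
 * Toy model of ioc_create_icq()'s insert-or-lookup control flow.
 * A plain array stands in for ioc->icq_tree keyed by queue id.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 8

struct icq_model { int q_id; };

static struct icq_model *icq_tree[MAX_QUEUES];	/* ~ ioc->icq_tree */

static struct icq_model *get_or_create_icq(int q_id)
{
	struct icq_model *icq = calloc(1, sizeof(*icq));

	if (!icq)
		return NULL;
	icq->q_id = q_id;

	if (!icq_tree[q_id]) {
		icq_tree[q_id] = icq;	/* ~ radix_tree_insert() succeeds */
	} else {
		free(icq);		/* ~ kmem_cache_free() on collision */
		icq = icq_tree[q_id];	/* ~ ioc_lookup_icq() */
	}
	return icq;
}

int main(void)
{
	struct icq_model *a = get_or_create_icq(3);
	struct icq_model *b = get_or_create_icq(3);	/* finds the first */

	printf("second call returned the existing icq: %s\n",
	       a == b ? "yes" : "no");
	free(icq_tree[3]);
	return 0;
}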