| author | Shaohua Li <shaohua.li@intel.com> | 2011-01-07 02:48:28 -0500 |
|---|---|---|
| committer | Jens Axboe <jaxboe@fusionio.com> | 2011-01-07 02:48:28 -0500 |
| commit | 329a67815b596d23daf0caa588ae0800e925320f (patch) | |
| tree | 5fcf8a7c4a4d43b2cc7dc220d6bb25bdf27d153f /block | |
| parent | 30d7b9448f03f2c82d0fd44738674cc156a8ce0a (diff) | |
block cfq: don't use atomic_t for cfq_group
cfq_group->ref is used with queue_lock held; the only exception is
cfq_set_request, which looks like a bug to me. So ref doesn't need to be
an atomic_t, and atomic operations are slower.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
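For context on the reasoning above: a plain `int` reference count is safe as long as every increment and decrement happens with the same lock held (here, `queue_lock`), because the lock already serializes the read-modify-write; the atomic instructions only add cost on top of the exclusion the lock provides. Below is a minimal userspace sketch of that pattern, assuming a pthread mutex as a stand-in for `queue_lock`. The names (`struct group`, `group_get`, `group_put`) are illustrative only, loosely mirroring `cfq_ref_get_cfqg()`/`cfq_put_cfqg()`, and are not taken from the kernel tree.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Plays the role of queue_lock: one lock guarding all refcount updates. */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for struct cfq_group. */
struct group {
        int ref;        /* plain int: only touched with queue_lock held */
};

static struct group *group_alloc(void)
{
        struct group *g = calloc(1, sizeof(*g));
        g->ref = 1;     /* initial reference, as in cfq_find_alloc_cfqg() */
        return g;
}

/* Caller must hold queue_lock, mirroring cfq_ref_get_cfqg(). */
static struct group *group_get(struct group *g)
{
        g->ref++;
        return g;
}

/* Caller must hold queue_lock, mirroring cfq_put_cfqg(). */
static void group_put(struct group *g)
{
        g->ref--;
        if (g->ref)
                return;
        free(g);        /* last reference dropped */
}

int main(void)
{
        struct group *g = group_alloc();

        pthread_mutex_lock(&queue_lock);
        group_get(g);   /* ref: 1 -> 2 */
        group_put(g);   /* ref: 2 -> 1 */
        group_put(g);   /* ref: 1 -> 0, frees g */
        pthread_mutex_unlock(&queue_lock);

        printf("all references dropped under the lock\n");
        return 0;
}
```

Build with `cc -pthread`; the point is simply that the lock, not the counter type, provides the mutual exclusion.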
Diffstat (limited to 'block')
-rw-r--r-- | block/cfq-iosched.c | 23 |
1 file changed, 12 insertions, 11 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4cb4cf73ac00..f083bda30546 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -207,7 +207,7 @@ struct cfq_group {
         struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
         struct hlist_node cfqd_node;
-        atomic_t ref;
+        int ref;
 #endif
         /* number of requests that are on the dispatch list or inside driver */
         int dispatched;
@@ -1014,7 +1014,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
          * elevator which will be dropped by either elevator exit
          * or cgroup deletion path depending on who is exiting first.
          */
-        atomic_set(&cfqg->ref, 1);
+        cfqg->ref = 1;
 
         /*
          * Add group onto cgroup list. It might happen that bdi->dev is
@@ -1059,7 +1059,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-        atomic_inc(&cfqg->ref);
+        cfqg->ref++;
         return cfqg;
 }
 
@@ -1071,7 +1071,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
         cfqq->cfqg = cfqg;
         /* cfqq reference on cfqg */
-        atomic_inc(&cfqq->cfqg->ref);
+        cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
@@ -1079,8 +1079,9 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
         struct cfq_rb_root *st;
         int i, j;
 
-        BUG_ON(atomic_read(&cfqg->ref) <= 0);
-        if (!atomic_dec_and_test(&cfqg->ref))
+        BUG_ON(cfqg->ref <= 0);
+        cfqg->ref--;
+        if (cfqg->ref)
                 return;
         for_each_cfqg_st(cfqg, i, j, st)
                 BUG_ON(!RB_EMPTY_ROOT(&st->rb));
@@ -1188,7 +1189,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 cfq_group_service_tree_del(cfqd, cfqq->cfqg);
                 cfqq->orig_cfqg = cfqq->cfqg;
                 cfqq->cfqg = &cfqd->root_group;
-                atomic_inc(&cfqd->root_group.ref);
+                cfqd->root_group.ref++;
                 group_changed = 1;
         } else if (!cfqd->cfq_group_isolation
             && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -3681,12 +3682,12 @@ new_queue:
 
         cfqq->allocated[rw]++;
         cfqq->ref++;
-
-        spin_unlock_irqrestore(q->queue_lock, flags);
-
         rq->elevator_private = cic;
         rq->elevator_private2 = cfqq;
         rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+
+        spin_unlock_irqrestore(q->queue_lock, flags);
+
         return 0;
 
 queue_fail:
@@ -3884,7 +3885,7 @@ static void *cfq_init_queue(struct request_queue *q)
          * Take a reference to root group which we never drop. This is just
          * to make sure that cfq_put_cfqg() does not try to kfree root group
          */
-        atomic_set(&cfqg->ref, 1);
+        cfqg->ref = 1;
         rcu_read_lock();
         cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
                                         (void *)cfqd, 0);
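Note on the `new_queue:` hunk in cfq_set_request above: besides dropping the atomic operations, the patch moves `spin_unlock_irqrestore(q->queue_lock, flags)` below `rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg)`, so the group reference is now taken while `queue_lock` is still held. That closes the unlocked-reference exception the commit message calls out, and it is what makes the plain-int `->ref` manipulation safe everywhere.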