| author | Jens Axboe <axboe@suse.de> | 2006-07-18 23:10:01 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@nelson.home.kernel.dk> | 2006-09-30 14:29:37 -0400 |
| commit | e4313dd423148fa729571b50c06cbc0bedf5c494 (patch) | |
| tree | 7524020d6d822b06aa433c1252a4926b91a8b328 /block/as-iosched.c | |
| parent | 4a893e837bb470867d74c05d6c6b97bba5a96185 (diff) | |
[PATCH] as-iosched: use new io context counting mechanism
It's ok if the read path is a lot more costly, as long as inc/dec is
really cheap. The inc/dec will happen for each created/freed io context,
while the reading only happens when a disk queue exits.
Signed-off-by: Jens Axboe <axboe@suse.de>
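For context on the trade-off described in the commit message, below is a minimal sketch of how a per-CPU counter with cheap inc/dec and a costly summing read can be built. The real elv_ioc_count_* helpers come from the parent elevator commit; the macro names and bodies here (prefixed example_) are an illustrative assumption modeled on that API, not a quote of the kernel header.

```c
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>

/*
 * Hypothetical per-CPU counter helpers, sketched in the spirit of the
 * elv_ioc_count_* API used in the diff below.
 *
 * inc/dec only touch the local CPU's slot, so the hot io-context
 * alloc/free path never bounces a shared cacheline the way a global
 * atomic_t does.
 */
#define example_ioc_count_mod(name, __val)			\
	do {							\
		preempt_disable();				\
		__get_cpu_var(name) += (__val);			\
		preempt_enable();				\
	} while (0)

#define example_ioc_count_inc(name)	example_ioc_count_mod(name, 1)
#define example_ioc_count_dec(name)	example_ioc_count_mod(name, -1)

/*
 * The read side pays for the cheap updates: it walks every possible
 * CPU and sums the per-CPU slots.  That is acceptable here because it
 * only runs when the elevator/queue exits.
 */
#define example_ioc_count_read(name)				\
({								\
	unsigned long __sum = 0;				\
	int __cpu;						\
	for_each_possible_cpu(__cpu)				\
		__sum += per_cpu(name, __cpu);			\
	__sum;							\
})
```

A counter used with such macros would be declared exactly as in the patch below, i.e. static DEFINE_PER_CPU(unsigned long, ioc_count);, replacing the previous global atomic_t.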
Diffstat (limited to 'block/as-iosched.c')
 -rw-r--r--  block/as-iosched.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
```diff
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 66015bc79e6f..8e1fef1eafc9 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -149,7 +149,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
-static atomic_t ioc_count = ATOMIC_INIT(0);
+static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
 
 static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
@@ -163,7 +163,8 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
-	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
+	elv_ioc_count_dec(ioc_count);
+	if (ioc_gone && !elv_ioc_count_read(ioc_count))
 		complete(ioc_gone);
 }
 
@@ -199,7 +200,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
-		atomic_inc(&ioc_count);
+		elv_ioc_count_inc(ioc_count);
 	}
 
 	return ret;
@@ -1484,7 +1485,7 @@ static void __exit as_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (atomic_read(&ioc_count))
+	if (elv_ioc_count_read(ioc_count))
 		wait_for_completion(ioc_gone);
 	synchronize_rcu();
 }
```