author     Jens Axboe <axboe@suse.de>                2006-07-18 23:07:12 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>  2006-09-30 14:29:37 -0400
commit     4050cf1674c632c73801a561689543d4887df2ef
tree       f7a3e55857e917bb129d16d27458c2ff897a1864  /block/cfq-iosched.c
parent     e4313dd423148fa729571b50c06cbc0bedf5c494
[PATCH] cfq-iosched: use new io context counting mechanism
It's ok if the read path is a lot more costly, as long as inc/dec is
really cheap. The inc/dec will happen for each created/freed io context,
while the reading only happens when a disk queue exits.
Signed-off-by: Jens Axboe <axboe@suse.de>
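
The trade-off described in the message is the classic per-CPU counter
pattern: each update touches only the local CPU's slot, while a read has
to sum every slot. As a rough sketch, the elv_ioc_count_*() helpers used
in the diff below reduce to something like this (the real definitions
live in include/linux/elevator.h; the bodies here illustrate the pattern
and are not the kernel's exact code):

#include <linux/percpu.h>

/* Fast path: bump only this CPU's slot, no shared cacheline bouncing. */
#define elv_ioc_count_mod(name, __val)                  \
do {                                                    \
        preempt_disable();                              \
        __get_cpu_var(name) += (__val);                 \
        preempt_enable();                               \
} while (0)

#define elv_ioc_count_inc(name) elv_ioc_count_mod(name, 1)
#define elv_ioc_count_dec(name) elv_ioc_count_mod(name, -1)

/*
 * Slow path: sum every CPU's slot. O(nr_cpus) per read, but that is
 * fine here since it only runs when a disk queue exits.
 */
#define elv_ioc_count_read(name)                        \
({                                                      \
        unsigned long __val = 0;                        \
        int __cpu;                                      \
        for_each_possible_cpu(__cpu)                    \
                __val += per_cpu(name, __cpu);          \
        __val;                                          \
})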
Diffstat (limited to 'block/cfq-iosched.c')
 block/cfq-iosched.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 33e0b0c5e31d..c988aa75dd55 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -46,7 +46,7 @@ static int cfq_slice_idle = HZ / 125;
 static kmem_cache_t *cfq_pool;
 static kmem_cache_t *cfq_ioc_pool;
 
-static atomic_t ioc_count = ATOMIC_INIT(0);
+static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
 
 #define CFQ_PRIO_LISTS	IOPRIO_BE_NR
@@ -1078,7 +1078,9 @@ static void cfq_free_io_context(struct io_context *ioc)
 		freed++;
 	}
 
-	if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
+	elv_ioc_count_mod(ioc_count, -freed);
+
+	if (ioc_gone && !elv_ioc_count_read(ioc_count))
 		complete(ioc_gone);
 }
 
@@ -1154,7 +1156,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_LIST_HEAD(&cic->queue_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		atomic_inc(&ioc_count);
+		elv_ioc_count_inc(ioc_count);
 	}
 
 	return cic;
@@ -1319,7 +1321,7 @@ cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
 	WARN_ON(!list_empty(&cic->queue_list));
 	rb_erase(&cic->rb_node, &ioc->cic_root);
 	kmem_cache_free(cfq_ioc_pool, cic);
-	atomic_dec(&ioc_count);
+	elv_ioc_count_dec(ioc_count);
 }
 
 static struct cfq_io_context *
@@ -2165,7 +2167,7 @@ static void __exit cfq_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (atomic_read(&ioc_count))
+	if (elv_ioc_count_read(ioc_count))
 		wait_for_completion(ioc_gone);
 	synchronize_rcu();
 	cfq_slab_kill();
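
Taken together, the cfq_free_io_context() and cfq_exit() hunks form a
small teardown handshake: the exit path publishes ioc_gone and waits
until the per-CPU sum drains to zero, while each free drops the count
and completes ioc_gone once teardown has begun and the count reaches
zero. Condensed to its moving parts (a sketch reusing the helper names
from the diff, not the full cfq code):

#include <linux/completion.h>

static DEFINE_PER_CPU(unsigned long, ioc_count);
static DECLARE_COMPLETION(all_gone);
static struct completion *ioc_gone;

/*
 * Free side: drop the count, then wake the exit path if teardown has
 * begun and the last io context is now gone.
 */
static void drop_io_contexts(int freed)
{
        elv_ioc_count_mod(ioc_count, -freed);

        if (ioc_gone && !elv_ioc_count_read(ioc_count))
                complete(ioc_gone);
}

/*
 * Exit side: publish ioc_gone, then sample the count; the barrier
 * orders the ioc_gone store before the reads of the per-cpu slots.
 */
static void wait_for_io_contexts(void)
{
        ioc_gone = &all_gone;
        smp_wmb();
        if (elv_ioc_count_read(ioc_count))
                wait_for_completion(ioc_gone);
}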