author     Jens Axboe <jens.axboe@oracle.com>    2008-05-29 03:35:22 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2008-07-03 07:21:12 -0400
commit     863fddcb4b0caee4c2d5bd6e3b28779920516db3 (patch)
tree       9170df5fb0e26d77d45017069b7756f7c80afde5
parent     9a11b4ed0e7c44bca7c939aa544c3c47aae40c12 (diff)
as-iosched: properly protect ioc_gone and ioc count
If multiple tasks are freeing io contexts while as-iosched is being
unloaded, we could complete() ioc_gone twice. Fix that by protecting
the complete() and clearing of ioc_gone with a spinlock dedicated to
just that purpose. This doesn't matter from a performance perspective,
since the path is only entered when ioc_gone != NULL (i.e. while
as-iosched is being rmmod'ed).
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
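
The fix is the classic check-lock-recheck guard around a one-shot
completion. Below is a minimal user-space sketch of the same shape (not
the kernel code: pthreads and C11 atomics stand in for the kernel
completion and per-cpu counter, and all names in it, such as nr_live,
exit_waiter, exit_lock and put_ref, are invented for illustration).
Without the lock, two tasks can both observe the count at zero and both
signal; with it, the signal fires exactly once and the pointer is
cleared so nobody can signal again.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_WORKERS 4

static atomic_int nr_live = NR_WORKERS;  /* analogue of ioc_count      */
static _Atomic(bool *) exit_waiter;      /* analogue of ioc_gone       */
static pthread_mutex_t exit_lock =       /* analogue of ioc_gone_lock  */
	PTHREAD_MUTEX_INITIALIZER;

/* Drop one reference; signal the exiter exactly once when all are gone. */
static void put_ref(void)
{
	atomic_fetch_sub(&nr_live, 1);
	if (atomic_load(&exit_waiter)) {
		/*
		 * The "module" is exiting: take the lock, re-check the
		 * count, signal at most once, and clear the pointer so
		 * no other thread can signal again (the same shape as
		 * the patch's free_as_io_context() change).
		 */
		pthread_mutex_lock(&exit_lock);
		bool *done = atomic_load(&exit_waiter);
		if (done && atomic_load(&nr_live) == 0) {
			*done = true;                     /* complete(ioc_gone) */
			atomic_store(&exit_waiter, NULL); /* ioc_gone = NULL    */
		}
		pthread_mutex_unlock(&exit_lock);
	}
}

static void *worker(void *arg)
{
	(void)arg;
	put_ref();
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];
	bool all_gone = false;

	atomic_store(&exit_waiter, &all_gone);  /* ioc_gone = &all_gone */
	for (int i = 0; i < NR_WORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(tid[i], NULL);

	printf("signalled exactly once: %s\n", all_gone ? "yes" : "no");
	return 0;
}

The unlocked test of the pointer before taking the lock is the fast
path the commit message refers to: during normal operation the pointer
is NULL, so freeing a context never touches the lock at all.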
 block/as-iosched.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 743f33a01a07..9735acb5b4f5 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -151,6 +151,7 @@ enum arq_state {
 
 static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
+static DEFINE_SPINLOCK(ioc_gone_lock);
 
 static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
 static void as_antic_stop(struct as_data *ad);
@@ -164,8 +165,19 @@ static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
 	elv_ioc_count_dec(ioc_count);
-	if (ioc_gone && !elv_ioc_count_read(ioc_count))
-		complete(ioc_gone);
+	if (ioc_gone) {
+		/*
+		 * AS scheduler is exiting, grab exit lock and check
+		 * the pending io context count. If it hits zero,
+		 * complete ioc_gone and set it back to NULL.
+		 */
+		spin_lock(&ioc_gone_lock);
+		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+			complete(ioc_gone);
+			ioc_gone = NULL;
+		}
+		spin_unlock(&ioc_gone_lock);
+	}
 }
 
 static void as_trim(struct io_context *ioc)
@@ -1493,7 +1505,7 @@ static void __exit as_exit(void)
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
 	if (elv_ioc_count_read(ioc_count))
-		wait_for_completion(ioc_gone);
+		wait_for_completion(&all_gone);
 	synchronize_rcu();
 }
 
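
A note on the last hunk: because free_as_io_context() may now clear
ioc_gone to NULL after completing it, as_exit() can no longer
dereference the pointer and must wait on the completion object itself.
The surrounding context is not part of this diff; presumably as_exit()
looks roughly like the sketch below, with all_gone being the on-stack
completion that ioc_gone is pointed at. Every line other than the ones
shown in the hunk is an assumption reconstructed from the pattern.

static void __exit as_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);	/* assumed declaration */

	elv_unregister(&iosched_as);		/* assumed unregister step */
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(&all_gone);	/* not ioc_gone: it may be NULL by now */
	synchronize_rcu();
}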