| author    | Jens Axboe <jens.axboe@oracle.com> | 2008-05-07 03:17:12 -0400 |
|-----------|------------------------------------|---------------------------|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2008-05-07 03:28:57 -0400 |
| commit    | 07416d29bcf608257f1e5280642dcbe0021518a3 (patch) | |
| tree      | 6b88b2b043cac10b34234320c68e06848c00127c | |
| parent    | aa94b5371f6f898558d9fa5690cc6e4bf917a572 (diff) | |
cfq-iosched: fix RCU race in the cfq io_context destructor handling
put_io_context() drops the RCU read lock before calling into cfq_dtor();
however, we need to hold off freeing until after we have grabbed and
dereferenced the first object on the list.

So extend the rcu_read_lock() scope to cover the call into cfq_dtor(),
and optimize cfq_free_io_context() to use a new variant of
call_for_each_cic() that assumes the RCU read lock is already held.
Hit in the wild by Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
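
For readers outside the kernel, the shape of the bug is worth a standalone
illustration. Below is a minimal userspace analogy, not the kernel code: a
pthread rwlock stands in for the RCU read-side critical section, and the
names (io_ctx, ctx_dtor, put_ctx_*) are invented for this sketch. It shows
the point the patch makes: the read-side guard must stay held across the
destructor that still dereferences shared objects.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the RCU read-side critical section. */
static pthread_rwlock_t guard = PTHREAD_RWLOCK_INITIALIZER;

struct io_ctx {
	int nr_items;			/* stand-in for the cic list */
};

/* Stand-in for cfq_dtor(): still dereferences the context's list. */
static void ctx_dtor(struct io_ctx *ctx)
{
	printf("dtor walks %d item(s)\n", ctx->nr_items);
}

/* Broken ordering: the guard is dropped before the destructor runs,
 * so a concurrent reclaimer is free to tear down what ctx_dtor()
 * is about to dereference. */
static void put_ctx_buggy(struct io_ctx *ctx)
{
	pthread_rwlock_rdlock(&guard);
	/* ... per-subsystem teardown under the guard ... */
	pthread_rwlock_unlock(&guard);
	ctx_dtor(ctx);			/* BUG: runs outside the guard */
	free(ctx);
}

/* Fixed ordering, mirroring the patch: the guard now covers the
 * destructor, and only then is the container itself freed. */
static void put_ctx_fixed(struct io_ctx *ctx)
{
	pthread_rwlock_rdlock(&guard);
	/* ... per-subsystem teardown under the guard ... */
	ctx_dtor(ctx);			/* safe: guard still held */
	pthread_rwlock_unlock(&guard);
	free(ctx);
}

int main(void)
{
	struct io_ctx *a = calloc(1, sizeof(*a));
	struct io_ctx *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->nr_items = b->nr_items = 1;
	put_ctx_fixed(a);
	put_ctx_buggy(b);	/* harmless single-threaded; racy with writers */
	return 0;
}

In the buggy ordering there is a window between the unlock and the
destructor call in which a concurrent reclaimer can free the objects the
destructor dereferences; extending the guard, as the patch does with
rcu_read_lock(), closes that window.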
 block/blk-ioc.c     |  2 +-
 block/cfq-iosched.c | 19 +++++++++++++------
 2 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index e34df7c9fc3..012f065ac8e 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -41,8 +41,8 @@ int put_io_context(struct io_context *ioc)
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		rcu_read_unlock();
 		cfq_dtor(ioc);
+		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
 		return 1;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f4e1006c253..7f909d2f488 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1142,6 +1142,17 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
+static void
+__call_for_each_cic(struct io_context *ioc,
+		    void (*func)(struct io_context *, struct cfq_io_context *))
+{
+	struct cfq_io_context *cic;
+	struct hlist_node *n;
+
+	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
+		func(ioc, cic);
+}
+
 /*
  * Call func for each cic attached to this ioc.
  */
@@ -1149,12 +1160,8 @@ static void
 call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
-	struct cfq_io_context *cic;
-	struct hlist_node *n;
-
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
-		func(ioc, cic);
+	__call_for_each_cic(ioc, func);
 	rcu_read_unlock();
 }
 
@@ -1198,7 +1205,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	call_for_each_cic(ioc, cic_free_func);
+	__call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
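
A side note on the __call_for_each_cic() naming: the patch follows the
common kernel convention where foo() enters the critical section itself and
__foo() assumes the caller already did. Since RCU read-side critical
sections nest, calling the plain variant from cfq_free_io_context() would
merely be redundant rather than wrong; the __ variant simply skips the
extra read-lock, which is the optimization the commit message refers to.
A hedged generic sketch of the convention follows (invented names, a plain
mutex instead of RCU):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_count;

/* __bump_count(): assumes `lock` is already held by the caller, the
 * way __call_for_each_cic() assumes the RCU read lock is held. */
static void __bump_count(void)
{
	shared_count++;
}

/* bump_count(): self-locking wrapper for callers that are not yet
 * inside the critical section, like call_for_each_cic(). */
static void bump_count(void)
{
	pthread_mutex_lock(&lock);
	__bump_count();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	bump_count();			/* takes the lock itself */

	pthread_mutex_lock(&lock);	/* already in the critical section... */
	__bump_count();			/* ...so use the __ variant directly */
	pthread_mutex_unlock(&lock);

	printf("count=%d\n", shared_count);
	return 0;
}

One difference from the RCU case: a non-recursive mutex would deadlock if
the self-locking wrapper were called with the lock held, so with a mutex
the __ variant is a correctness requirement, whereas with nestable
rcu_read_lock() it is only an optimization.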
