Diffstat (limited to 'block/cfq-iosched.c')

-rw-r--r--  block/cfq-iosched.c | 90
1 file changed, 38 insertions(+), 52 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ca198e61fa65..f4e1006c253d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1143,43 +1143,37 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Call func for each cic attached to this ioc. Returns number of cic's seen.
+ * Call func for each cic attached to this ioc.
  */
-#define CIC_GANG_NR     16
-static unsigned int
+static void
 call_for_each_cic(struct io_context *ioc,
                   void (*func)(struct io_context *, struct cfq_io_context *))
 {
-        struct cfq_io_context *cics[CIC_GANG_NR];
-        unsigned long index = 0;
-        unsigned int called = 0;
-        int nr;
+        struct cfq_io_context *cic;
+        struct hlist_node *n;
 
         rcu_read_lock();
+        hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
+                func(ioc, cic);
+        rcu_read_unlock();
+}
 
-        do {
-                int i;
-
-                /*
-                 * Perhaps there's a better way - this just gang lookups from
-                 * 0 to the end, restarting after each CIC_GANG_NR from the
-                 * last key + 1.
-                 */
-                nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics,
-                                                index, CIC_GANG_NR);
-                if (!nr)
-                        break;
+static void cfq_cic_free_rcu(struct rcu_head *head)
+{
+        struct cfq_io_context *cic;
 
-                called += nr;
-                index = 1 + (unsigned long) cics[nr - 1]->key;
+        cic = container_of(head, struct cfq_io_context, rcu_head);
 
-                for (i = 0; i < nr; i++)
-                        func(ioc, cics[i]);
-        } while (nr == CIC_GANG_NR);
+        kmem_cache_free(cfq_ioc_pool, cic);
+        elv_ioc_count_dec(ioc_count);
 
-        rcu_read_unlock();
+        if (ioc_gone && !elv_ioc_count_read(ioc_count))
+                complete(ioc_gone);
+}
 
-        return called;
+static void cfq_cic_free(struct cfq_io_context *cic)
+{
+        call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
 }
 
 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
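
The rewritten call_for_each_cic() above is the classic RCU reader pattern: the
hlist is walked under rcu_read_lock() alone, with no need to take ioc->lock,
while writers (later hunks) manipulate the list with the _rcu primitives. A
minimal kernel-style sketch of that reader side, with hypothetical names
(struct foo, for_each_foo) rather than anything from this patch:

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    /* Hypothetical list element, playing the role of the cic above. */
    struct foo {
            int data;
            struct hlist_node link;
    };

    /* Walk an RCU-protected hlist without taking the writer's lock. */
    static void for_each_foo(struct hlist_head *head, void (*fn)(struct foo *))
    {
            struct foo *f;
            struct hlist_node *n;

            rcu_read_lock();
            hlist_for_each_entry_rcu(f, n, head, link)
                    fn(f);  /* f cannot be freed while the read lock is held */
            rcu_read_unlock();
    }
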
@@ -1190,26 +1184,21 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 
         spin_lock_irqsave(&ioc->lock, flags);
         radix_tree_delete(&ioc->radix_root, cic->dead_key);
+        hlist_del_rcu(&cic->cic_list);
         spin_unlock_irqrestore(&ioc->lock, flags);
 
-        kmem_cache_free(cfq_ioc_pool, cic);
+        cfq_cic_free(cic);
 }
 
 static void cfq_free_io_context(struct io_context *ioc)
 {
-        int freed;
-
         /*
-         * ioc->refcount is zero here, so no more cic's are allowed to be
-         * linked into this ioc. So it should be ok to iterate over the known
-         * list, we will see all cic's since no new ones are added.
+         * ioc->refcount is zero here, or we are called from elv_unregister(),
+         * so no more cic's are allowed to be linked into this ioc. So it
+         * should be ok to iterate over the known list, we will see all cic's
+         * since no new ones are added.
         */
-        freed = call_for_each_cic(ioc, cic_free_func);
-
-        elv_ioc_count_mod(ioc_count, -freed);
-
-        if (ioc_gone && !elv_ioc_count_read(ioc_count))
-                complete(ioc_gone);
+        call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
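
cic_free_func() now ends in cfq_cic_free(), which defers the real
kmem_cache_free() to an RCU callback (added in the previous hunk); that is also
why the cic accounting and the ioc_gone completion moved out of
cfq_free_io_context() and into cfq_cic_free_rcu(). The deferral pattern itself,
sketched with the hypothetical struct foo from above, now carrying an embedded
rcu_head:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int data;
            struct hlist_node link;     /* as in the walking sketch above */
            struct rcu_head rcu;        /* storage for the deferred free */
    };

    /* Runs only after all pre-existing RCU readers have finished. */
    static void foo_free_rcu(struct rcu_head *head)
    {
            struct foo *f = container_of(head, struct foo, rcu);

            kfree(f);
    }

    static void foo_free(struct foo *f)
    {
            call_rcu(&f->rcu, foo_free_rcu);
    }
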
@@ -1225,6 +1214,8 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
                                          struct cfq_io_context *cic)
 {
+        struct io_context *ioc = cic->ioc;
+
         list_del_init(&cic->queue_list);
 
         /*
@@ -1234,6 +1225,9 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
         cic->dead_key = (unsigned long) cic->key;
         cic->key = NULL;
 
+        if (ioc->ioc_data == cic)
+                rcu_assign_pointer(ioc->ioc_data, NULL);
+
         if (cic->cfqq[ASYNC]) {
                 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
                 cic->cfqq[ASYNC] = NULL;
@@ -1266,7 +1260,6 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
  */
 static void cfq_exit_io_context(struct io_context *ioc)
 {
-        rcu_assign_pointer(ioc->ioc_data, NULL);
         call_for_each_cic(ioc, cfq_exit_single_io_context);
 }
 
@@ -1280,6 +1273,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
         if (cic) {
                 cic->last_end_request = jiffies;
                 INIT_LIST_HEAD(&cic->queue_list);
+                INIT_HLIST_NODE(&cic->cic_list);
                 cic->dtor = cfq_free_io_context;
                 cic->exit = cfq_exit_io_context;
                 elv_ioc_count_inc(ioc_count);
@@ -1475,15 +1469,6 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
         return cfqq;
 }
 
-static void cfq_cic_free(struct cfq_io_context *cic)
-{
-        kmem_cache_free(cfq_ioc_pool, cic);
-        elv_ioc_count_dec(ioc_count);
-
-        if (ioc_gone && !elv_ioc_count_read(ioc_count))
-                complete(ioc_gone);
-}
-
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
@@ -1497,10 +1482,10 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 
         spin_lock_irqsave(&ioc->lock, flags);
 
-        if (ioc->ioc_data == cic)
-                rcu_assign_pointer(ioc->ioc_data, NULL);
+        BUG_ON(ioc->ioc_data == cic);
 
         radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+        hlist_del_rcu(&cic->cic_list);
         spin_unlock_irqrestore(&ioc->lock, flags);
 
         cfq_cic_free(cic);
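
cfq_drop_dead_cic() above is the writer's removal side: unlink from both the
radix tree and the RCU hlist while holding ioc->lock, then hand the object to
cfq_cic_free() so the actual free waits out the grace period. Sketched
generically, reusing the hypothetical struct foo and foo_free() from the
earlier examples plus a hypothetical two-index container:

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>

    /* Hypothetical container with both indexes, like struct io_context. */
    struct foo_table {
            spinlock_t lock;
            struct radix_tree_root root;
            struct hlist_head list;
    };

    static void foo_unlink(struct foo_table *t, unsigned long key, struct foo *f)
    {
            unsigned long flags;

            spin_lock_irqsave(&t->lock, flags);
            radix_tree_delete(&t->root, key);
            hlist_del_rcu(&f->link);        /* readers mid-walk stay safe */
            spin_unlock_irqrestore(&t->lock, flags);

            foo_free(f);                    /* call_rcu() deferral, as above */
    }
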
@@ -1561,6 +1546,8 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
                 spin_lock_irqsave(&ioc->lock, flags);
                 ret = radix_tree_insert(&ioc->radix_root,
                                                 (unsigned long) cfqd, cic);
+                if (!ret)
+                        hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
                 spin_unlock_irqrestore(&ioc->lock, flags);
 
                 radix_tree_preload_end();
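
cfq_cic_link() is the matching insertion side: the radix tree insert and
hlist_add_head_rcu() happen together under ioc->lock, and hlist_add_head_rcu()
supplies the memory barrier that makes the new entry safely visible to the
lockless walkers. With the hypothetical foo_table from the previous sketch:

    static int foo_link(struct foo_table *t, unsigned long key, struct foo *f)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&t->lock, flags);
            ret = radix_tree_insert(&t->root, key, f);
            if (!ret)
                    /* publishes f; concurrent readers see old or new list */
                    hlist_add_head_rcu(&f->link, &t->list);
            spin_unlock_irqrestore(&t->lock, flags);

            return ret;
    }
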
@@ -2152,7 +2139,7 @@ static int __init cfq_slab_setup(void)
         if (!cfq_pool)
                 goto fail;
 
-        cfq_ioc_pool = KMEM_CACHE(cfq_io_context, SLAB_DESTROY_BY_RCU);
+        cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
         if (!cfq_ioc_pool)
                 goto fail;
 
@@ -2300,7 +2287,6 @@ static void __exit cfq_exit(void)
         smp_wmb();
         if (elv_ioc_count_read(ioc_count))
                 wait_for_completion(ioc_gone);
-        synchronize_rcu();
         cfq_slab_kill();
 }
 
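
The last two hunks are consequences of moving to call_rcu():
SLAB_DESTROY_BY_RCU only delays returning whole slab pages to the page
allocator; an individual object may still be reused immediately after
kmem_cache_free(), so it never protected readers dereferencing a cic's fields.
With per-object RCU deferral the cache needs no special flags, and the trailing
synchronize_rcu() in cfq_exit() becomes redundant, because ioc_gone is
completed from cfq_cic_free_rcu() only after the last cic has really been
freed. A sketch of that teardown handshake, hypothetical names again:

    #include <linux/completion.h>

    static struct completion *foo_gone;     /* set only during module exit */
    static atomic_t foo_count = ATOMIC_INIT(0);

    /* Variant of foo_free_rcu() above that also wakes a waiting exit path. */
    static void foo_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct foo, rcu));

            if (atomic_dec_and_test(&foo_count) && foo_gone)
                    complete(foo_gone);
    }

    static void foo_module_exit(void)
    {
            DECLARE_COMPLETION_ONSTACK(all_gone);

            foo_gone = &all_gone;
            smp_wmb();      /* publish foo_gone before sampling the count */
            if (atomic_read(&foo_count))
                    wait_for_completion(&all_gone);
            /* every RCU callback has run; safe to destroy the slab cache */
    }
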