about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--block/cfq-iosched.c57
-rw-r--r--include/linux/iocontext.h3
2 files changed, 30 insertions, 30 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 0f962ecae91f..f26da2bfcc15 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1143,24 +1143,37 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Call func for each cic attached to this ioc. Returns number of cic's seen.
+ * Call func for each cic attached to this ioc.
  */
-static unsigned int
+static void
 call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
 	struct cfq_io_context *cic;
 	struct hlist_node *n;
-	int called = 0;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) {
+	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
 		func(ioc, cic);
-		called++;
-	}
 	rcu_read_unlock();
+}
+
+static void cfq_cic_free_rcu(struct rcu_head *head)
+{
+	struct cfq_io_context *cic;
+
+	cic = container_of(head, struct cfq_io_context, rcu_head);
+
+	kmem_cache_free(cfq_ioc_pool, cic);
+	elv_ioc_count_dec(ioc_count);
+
+	if (ioc_gone && !elv_ioc_count_read(ioc_count))
+		complete(ioc_gone);
+}
 
-	return called;
+static void cfq_cic_free(struct cfq_io_context *cic)
+{
+	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
 }
 
 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
@@ -1174,24 +1187,18 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
-	kmem_cache_free(cfq_ioc_pool, cic);
+	cfq_cic_free(cic);
 }
 
 static void cfq_free_io_context(struct io_context *ioc)
 {
-	int freed;
-
 	/*
-	 * ioc->refcount is zero here, so no more cic's are allowed to be
-	 * linked into this ioc. So it should be ok to iterate over the known
-	 * list, we will see all cic's since no new ones are added.
+	 * ioc->refcount is zero here, or we are called from elv_unregister(),
+	 * so no more cic's are allowed to be linked into this ioc. So it
+	 * should be ok to iterate over the known list, we will see all cic's
+	 * since no new ones are added.
 	 */
-	freed = call_for_each_cic(ioc, cic_free_func);
-
-	elv_ioc_count_mod(ioc_count, -freed);
-
-	if (ioc_gone && !elv_ioc_count_read(ioc_count))
-		complete(ioc_gone);
+	call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -1458,15 +1465,6 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
 	return cfqq;
 }
 
-static void cfq_cic_free(struct cfq_io_context *cic)
-{
-	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(ioc_count);
-
-	if (ioc_gone && !elv_ioc_count_read(ioc_count))
-		complete(ioc_gone);
-}
-
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
@@ -2138,7 +2136,7 @@ static int __init cfq_slab_setup(void)
 	if (!cfq_pool)
 		goto fail;
 
-	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, SLAB_DESTROY_BY_RCU);
+	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
 	if (!cfq_ioc_pool)
 		goto fail;
 
@@ -2286,7 +2284,6 @@ static void __exit cfq_exit(void)
 	smp_wmb();
 	if (elv_ioc_count_read(ioc_count))
 		wait_for_completion(ioc_gone);
-	synchronize_rcu();
 	cfq_slab_kill();
 }
 
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 1b4ccf25b4d2..cac4b364cd40 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -2,6 +2,7 @@
 #define IOCONTEXT_H
 
 #include <linux/radix-tree.h>
+#include <linux/rcupdate.h>
 
 /*
  * This is the per-process anticipatory I/O scheduler state.
@@ -54,6 +55,8 @@ struct cfq_io_context {
 
 	void (*dtor)(struct io_context *);	/* destructor */
 	void (*exit)(struct io_context *);	/* called on task exit */
+
+	struct rcu_head rcu_head;
 };
 
 /*