author		Jens Axboe <axboe@suse.de>	2006-08-29 03:05:44 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:29:36 -0400
commit		fc46379daf90dce57bf765c81d3b39f55150aac2 (patch)
tree		43aa28f0db6c73f137550b0b2e2ec29b9f9a1fc6 /block
parent		89850f7ee905410c89f9295e89dc4c33502a34ac (diff)
[PATCH] cfq-iosched: kill cfq_exit_lock
cfq_exit_lock is protecting two things now:

- The per-ioc rbtree of cfq_io_contexts

- The per-cfqd linked list of cfq_io_contexts

The per-cfqd linked list can be protected by the queue lock, as it is
(by definition) per cfqd, just as the queue lock is.

The per-ioc rbtree is mainly used and updated by the process itself;
the only outside use is the io priority changing. If we move the
priority change to not browsing the rbtree, we can remove all locking
from the rbtree updates and lookups. So let the sys_ioprio_set()
syscall just mark the process as having its io priority changed, and
lazily update the private cfq io contexts the next time io is queued;
then we can remove this locking as well.

Signed-off-by: Jens Axboe <axboe@suse.de>
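For illustration, here is a minimal user-space sketch of the lazy-update
scheme described above: the setter side only records the new priority and
raises a flag, and the owning task folds the change into its private io
contexts the next time it queues io. The names and types below are
simplified stand-ins for the kernel's, not the real API.

#include <stdatomic.h>
#include <stdio.h>

struct io_context {
	atomic_int ioprio_changed;	/* raised by the priority setter */
	int ioprio;			/* most recently requested priority */
};

/*
 * sys_ioprio_set() side (sketch): record the new value and raise the
 * flag. No walk of the task's context tree, hence no shared lock.
 */
static void set_ioprio(struct io_context *ioc, int ioprio)
{
	ioc->ioprio = ioprio;
	atomic_store(&ioc->ioprio_changed, 1);
}

/*
 * io submission side (sketch): the owning task notices the flag the
 * next time it queues io and re-initializes its own contexts.
 */
static void get_io_context(struct io_context *ioc)
{
	if (atomic_exchange(&ioc->ioprio_changed, 0))
		printf("re-init private contexts to prio %d\n", ioc->ioprio);
}

int main(void)
{
	struct io_context ioc = { .ioprio = 4 };

	set_ioprio(&ioc, 2);	/* another task changes our io priority */
	get_io_context(&ioc);	/* we pick it up lazily on the next io */
	return 0;
}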
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	54
-rw-r--r--	block/ll_rw_blk.c	2
2 files changed, 18 insertions(+), 38 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ec24284e9d39..33e0b0c5e31d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -31,8 +31,6 @@ static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_KEY_ASYNC		(0)
 
-static DEFINE_SPINLOCK(cfq_exit_lock);
-
 /*
  * for the hash of cfqq inside the cfqd
  */
@@ -1084,12 +1082,6 @@ static void cfq_free_io_context(struct io_context *ioc)
 		complete(ioc_gone);
 }
 
-static void cfq_trim(struct io_context *ioc)
-{
-	ioc->set_ioprio = NULL;
-	cfq_free_io_context(ioc);
-}
-
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	if (unlikely(cfqq == cfqd->active_queue))
@@ -1101,6 +1093,10 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 					 struct cfq_io_context *cic)
 {
+	list_del_init(&cic->queue_list);
+	smp_wmb();
+	cic->key = NULL;
+
 	if (cic->cfqq[ASYNC]) {
 		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
 		cic->cfqq[ASYNC] = NULL;
@@ -1110,9 +1106,6 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
 		cic->cfqq[SYNC] = NULL;
 	}
-
-	cic->key = NULL;
-	list_del_init(&cic->queue_list);
 }
 
 
@@ -1123,27 +1116,23 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
 
-	WARN_ON(!irqs_disabled());
-
 	if (cfqd) {
 		request_queue_t *q = cfqd->queue;
 
-		spin_lock(q->queue_lock);
+		spin_lock_irq(q->queue_lock);
 		__cfq_exit_single_io_context(cfqd, cic);
-		spin_unlock(q->queue_lock);
+		spin_unlock_irq(q->queue_lock);
 	}
 }
 
 static void cfq_exit_io_context(struct io_context *ioc)
 {
 	struct cfq_io_context *__cic;
-	unsigned long flags;
 	struct rb_node *n;
 
 	/*
 	 * put the reference this task is holding to the various queues
 	 */
-	spin_lock_irqsave(&cfq_exit_lock, flags);
 
 	n = rb_first(&ioc->cic_root);
 	while (n != NULL) {
@@ -1152,8 +1141,6 @@ static void cfq_exit_io_context(struct io_context *ioc)
 		cfq_exit_single_io_context(__cic);
 		n = rb_next(n);
 	}
-
-	spin_unlock_irqrestore(&cfq_exit_lock, flags);
 }
 
 static struct cfq_io_context *
@@ -1248,15 +1235,12 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 		spin_unlock(cfqd->queue->queue_lock);
 }
 
-/*
- * callback from sys_ioprio_set, irqs are disabled
- */
-static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
+static void cfq_ioc_set_ioprio(struct io_context *ioc)
 {
 	struct cfq_io_context *cic;
 	struct rb_node *n;
 
-	spin_lock(&cfq_exit_lock);
+	ioc->ioprio_changed = 0;
 
 	n = rb_first(&ioc->cic_root);
 	while (n != NULL) {
@@ -1265,10 +1249,6 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
 		changed_ioprio(cic);
 		n = rb_next(n);
 	}
-
-	spin_unlock(&cfq_exit_lock);
-
-	return 0;
 }
 
 static struct cfq_queue *
@@ -1336,10 +1316,8 @@ out:
 static void
 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
 {
-	spin_lock(&cfq_exit_lock);
+	WARN_ON(!list_empty(&cic->queue_list));
 	rb_erase(&cic->rb_node, &ioc->cic_root);
-	list_del_init(&cic->queue_list);
-	spin_unlock(&cfq_exit_lock);
 	kmem_cache_free(cfq_ioc_pool, cic);
 	atomic_dec(&ioc_count);
 }
@@ -1385,7 +1363,6 @@ cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 	cic->ioc = ioc;
 	cic->key = cfqd;
 
-	ioc->set_ioprio = cfq_ioc_set_ioprio;
 restart:
 	parent = NULL;
 	p = &ioc->cic_root.rb_node;
@@ -1407,11 +1384,12 @@ restart:
 		BUG();
 	}
 
-	spin_lock(&cfq_exit_lock);
 	rb_link_node(&cic->rb_node, parent, p);
 	rb_insert_color(&cic->rb_node, &ioc->cic_root);
+
+	spin_lock_irq(cfqd->queue->queue_lock);
 	list_add(&cic->queue_list, &cfqd->cic_list);
-	spin_unlock(&cfq_exit_lock);
+	spin_unlock_irq(cfqd->queue->queue_lock);
 }
 
 /*
@@ -1441,6 +1419,10 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 
 	cfq_cic_link(cfqd, ioc, cic);
 out:
+	smp_read_barrier_depends();
+	if (unlikely(ioc->ioprio_changed))
+		cfq_ioc_set_ioprio(ioc);
+
 	return cic;
 err:
 	put_io_context(ioc);
@@ -1945,7 +1927,6 @@ static void cfq_exit_queue(elevator_t *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
-	spin_lock(&cfq_exit_lock);
 	spin_lock_irq(q->queue_lock);
 
 	if (cfqd->active_queue)
@@ -1960,7 +1941,6 @@ static void cfq_exit_queue(elevator_t *e)
 	}
 
 	spin_unlock_irq(q->queue_lock);
-	spin_unlock(&cfq_exit_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2149,7 +2129,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_may_queue_fn =	cfq_may_queue,
 		.elevator_init_fn =		cfq_init_queue,
 		.elevator_exit_fn =		cfq_exit_queue,
-		.trim =				cfq_trim,
+		.trim =				cfq_free_io_context,
 	},
 	.elevator_attrs =	cfq_attrs,
 	.elevator_name =	"cfq",
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e25b4cd2dcd1..508548b834f1 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3654,7 +3654,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 	if (ret) {
 		atomic_set(&ret->refcount, 1);
 		ret->task = current;
-		ret->set_ioprio = NULL;
+		ret->ioprio_changed = 0;
 		ret->last_waited = jiffies; /* doesn't matter... */
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
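A note on the ordering above (my reading, not from the changelog): with
cfq_exit_lock gone, the exit path unlinks the cic from the per-cfqd list,
issues smp_wmb(), and only then clears cic->key, so any path that later
observes key == NULL (e.g. cfq_drop_dead_cic(), with its new WARN_ON())
can assume the list unlink is already visible. Below is a standalone C11
model of that pairing, under the assumption that release/acquire ordering
captures the intent of the kernel barriers here; the struct is a stripped
stand-in, not the real cfq_io_context.

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

struct cfq_data;			/* opaque stand-in */

struct cic {
	_Atomic(struct cfq_data *) key;	/* NULL once the elevator is gone */
	int linked;			/* stand-in for queue_list membership */
};

/* exit path: unlink first, then publish "dead" by clearing key */
static void exit_single(struct cic *cic)
{
	cic->linked = 0;			/* list_del_init() */
	atomic_store_explicit(&cic->key, NULL,	/* smp_wmb(); key = NULL */
			      memory_order_release);
}

/* drop path: once key reads as NULL, the unlink must be visible */
static void drop_dead(struct cic *cic)
{
	if (atomic_load_explicit(&cic->key, memory_order_acquire) == NULL)
		assert(!cic->linked);		/* models the new WARN_ON() */
}

int main(void)
{
	struct cfq_data *fake = (struct cfq_data *)0x1;	/* never dereferenced */
	struct cic cic = { .linked = 1 };

	atomic_store(&cic.key, fake);	/* cic_link: publish a live key */
	exit_single(&cic);		/* queue teardown */
	drop_dead(&cic);		/* a later lookup drops the dead cic */
	return 0;
}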