aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorJeff Moyer <jmoyer@redhat.com>2009-10-23 17:14:52 -0400
committerJens Axboe <jens.axboe@oracle.com>2009-10-26 09:34:47 -0400
commite6c5bc737ab71e4af6025ef7d150f5a26ae5f146 (patch)
tree01127225a83bdcae30b261b9f21bb89faa7db8ce /block
parentb3b6d0408c953524f979468562e7e210d8634150 (diff)
cfq: break apart merged cfqqs if they stop cooperating
cfq_queues are merged if they are issuing requests within the mean seek distance of one another. This patch detects when the cooperating stops and breaks the queues back up. Signed-off-by: Jeff Moyer <jmoyer@redhat.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--block/cfq-iosched.c79
1 file changed, 76 insertions, 3 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5e01a0a92c02..47d6aaca0c51 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -38,6 +38,12 @@ static int cfq_slice_idle = HZ / 125;
38 */ 38 */
39#define CFQ_MIN_TT (2) 39#define CFQ_MIN_TT (2)
40 40
41/*
42 * Allow merged cfqqs to perform this amount of seeky I/O before
43 * deciding to break the queues up again.
44 */
45#define CFQQ_COOP_TOUT (HZ)
46
41#define CFQ_SLICE_SCALE (5) 47#define CFQ_SLICE_SCALE (5)
42#define CFQ_HW_QUEUE_MIN (5) 48#define CFQ_HW_QUEUE_MIN (5)
43 49
@@ -116,6 +122,7 @@ struct cfq_queue {
116 u64 seek_total; 122 u64 seek_total;
117 sector_t seek_mean; 123 sector_t seek_mean;
118 sector_t last_request_pos; 124 sector_t last_request_pos;
125 unsigned long seeky_start;
119 126
120 pid_t pid; 127 pid_t pid;
121 128
@@ -1036,6 +1043,11 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1036{ 1043{
1037 struct cfq_queue *cfqq; 1044 struct cfq_queue *cfqq;
1038 1045
1046 if (!cfq_cfqq_sync(cur_cfqq))
1047 return NULL;
1048 if (CFQQ_SEEKY(cur_cfqq))
1049 return NULL;
1050
1039 /* 1051 /*
1040 * We should notice if some of the queues are cooperating, eg 1052 * We should notice if some of the queues are cooperating, eg
1041 * working closely on the same area of the disk. In that case, 1053 * working closely on the same area of the disk. In that case,
@@ -1050,6 +1062,8 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1050 */ 1062 */
1051 if (!cfq_cfqq_sync(cfqq)) 1063 if (!cfq_cfqq_sync(cfqq))
1052 return NULL; 1064 return NULL;
1065 if (CFQQ_SEEKY(cfqq))
1066 return NULL;
1053 1067
1054 return cfqq; 1068 return cfqq;
1055} 1069}
@@ -1181,7 +1195,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq)
1181 1195
1182static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) 1196static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1183{ 1197{
1184 int process_refs; 1198 int process_refs, new_process_refs;
1185 struct cfq_queue *__cfqq; 1199 struct cfq_queue *__cfqq;
1186 1200
1187 /* Avoid a circular list and skip interim queue merges */ 1201 /* Avoid a circular list and skip interim queue merges */
@@ -1199,8 +1213,17 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1199 if (process_refs == 0) 1213 if (process_refs == 0)
1200 return; 1214 return;
1201 1215
1202 cfqq->new_cfqq = new_cfqq; 1216 /*
1203 atomic_add(process_refs, &new_cfqq->ref); 1217 * Merge in the direction of the lesser amount of work.
1218 */
1219 new_process_refs = cfqq_process_refs(new_cfqq);
1220 if (new_process_refs >= process_refs) {
1221 cfqq->new_cfqq = new_cfqq;
1222 atomic_add(process_refs, &new_cfqq->ref);
1223 } else {
1224 new_cfqq->new_cfqq = cfqq;
1225 atomic_add(new_process_refs, &cfqq->ref);
1226 }
1204} 1227}
1205 1228
1206/* 1229/*
@@ -2029,6 +2052,19 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2029 total = cfqq->seek_total + (cfqq->seek_samples/2); 2052 total = cfqq->seek_total + (cfqq->seek_samples/2);
2030 do_div(total, cfqq->seek_samples); 2053 do_div(total, cfqq->seek_samples);
2031 cfqq->seek_mean = (sector_t)total; 2054 cfqq->seek_mean = (sector_t)total;
2055
2056 /*
2057 * If this cfqq is shared between multiple processes, check to
2058 * make sure that those processes are still issuing I/Os within
2059 * the mean seek distance. If not, it may be time to break the
2060 * queues apart again.
2061 */
2062 if (cfq_cfqq_coop(cfqq)) {
2063 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
2064 cfqq->seeky_start = jiffies;
2065 else if (!CFQQ_SEEKY(cfqq))
2066 cfqq->seeky_start = 0;
2067 }
2032} 2068}
2033 2069
2034/* 2070/*
@@ -2391,6 +2427,32 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
2391 return cic_to_cfqq(cic, 1); 2427 return cic_to_cfqq(cic, 1);
2392} 2428}
2393 2429
2430static int should_split_cfqq(struct cfq_queue *cfqq)
2431{
2432 if (cfqq->seeky_start &&
2433 time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
2434 return 1;
2435 return 0;
2436}
2437
2438/*
2439 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
2440 * was the last process referring to said cfqq.
2441 */
2442static struct cfq_queue *
2443split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
2444{
2445 if (cfqq_process_refs(cfqq) == 1) {
2446 cfqq->seeky_start = 0;
2447 cfqq->pid = current->pid;
2448 cfq_clear_cfqq_coop(cfqq);
2449 return cfqq;
2450 }
2451
2452 cic_set_cfqq(cic, NULL, 1);
2453 cfq_put_queue(cfqq);
2454 return NULL;
2455}
2394/* 2456/*
2395 * Allocate cfq data structures associated with this request. 2457 * Allocate cfq data structures associated with this request.
2396 */ 2458 */
@@ -2413,12 +2475,23 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
2413 if (!cic) 2475 if (!cic)
2414 goto queue_fail; 2476 goto queue_fail;
2415 2477
2478new_queue:
2416 cfqq = cic_to_cfqq(cic, is_sync); 2479 cfqq = cic_to_cfqq(cic, is_sync);
2417 if (!cfqq || cfqq == &cfqd->oom_cfqq) { 2480 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2418 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); 2481 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
2419 cic_set_cfqq(cic, cfqq, is_sync); 2482 cic_set_cfqq(cic, cfqq, is_sync);
2420 } else { 2483 } else {
2421 /* 2484 /*
2485 * If the queue was seeky for too long, break it apart.
2486 */
2487 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
2488 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
2489 cfqq = split_cfqq(cic, cfqq);
2490 if (!cfqq)
2491 goto new_queue;
2492 }
2493
2494 /*
2422 * Check to see if this queue is scheduled to merge with 2495 * Check to see if this queue is scheduled to merge with
2423 * another, closely cooperating queue. The merging of 2496 * another, closely cooperating queue. The merging of
2424 * queues happens here as it must be done in process context. 2497 * queues happens here as it must be done in process context.