author     Vasily Tarasov <vtaras@openvz.org>     2007-07-20 04:06:38 -0400
committer  Jens Axboe <jens.axboe@oracle.com>     2007-07-20 04:06:38 -0400
commit     c2dea2d1fdbce86942dba0a968c523d8b7858bb5
tree       c876b93785ee3b25c341be0dd5080a5176f27736 /block/cfq-iosched.c
parent     9a79b2274186fade17134929d4f85b70d59a3840
cfq: async queue allocation per priority
If we have two processes with different ioprio_class values but the same ioprio data (priority level), their async requests will fall into the same queue. Such behavior is not expected: it is not right to put real-time requests and best-effort requests in the same queue.

The attached patch fixes the problem by introducing additional *cfqq fields on cfqd, pointing to per-(class, priority) async queues.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
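To illustrate the indexing scheme the patch introduces, here is a minimal standalone sketch (ordinary userspace C, not kernel code; everything around the lookup is invented scaffolding for illustration). Async queues are kept per (class, priority) pair: row 0 of the table holds the RT queues, row 1 the BE queues, and a single separate pointer serves the idle class, so an RT and a BE process at the same priority level no longer share a queue.

	/*
	 * Sketch of per-(class, priority) async queue slots, modeled on
	 * the lookup added by this patch. Hypothetical demo code only.
	 */
	#include <stdio.h>

	#define IOPRIO_BE_NR	8	/* priority levels per class */

	enum { IOPRIO_CLASS_RT = 1, IOPRIO_CLASS_BE = 2, IOPRIO_CLASS_IDLE = 3 };

	struct queue { const char *tag; };

	static struct queue *async_cfqq[2][IOPRIO_BE_NR];	/* [0] = RT, [1] = BE */
	static struct queue *async_idle_cfqq;			/* one shared idle queue */

	/* Return the slot for this (class, priority), as cfq_async_queue_prio does. */
	static struct queue **async_queue_slot(int ioprio_class, int ioprio)
	{
		switch (ioprio_class) {
		case IOPRIO_CLASS_RT:
			return &async_cfqq[0][ioprio];
		case IOPRIO_CLASS_BE:
			return &async_cfqq[1][ioprio];
		default:
			return &async_idle_cfqq;	/* idle ignores the level */
		}
	}

	int main(void)
	{
		static struct queue rt4 = { "rt-4" }, be4 = { "be-4" };

		/* Before the patch, both of these landed in the same slot [4]. */
		*async_queue_slot(IOPRIO_CLASS_RT, 4) = &rt4;
		*async_queue_slot(IOPRIO_CLASS_BE, 4) = &be4;

		printf("RT prio 4 -> %s, BE prio 4 -> %s\n",
		       (*async_queue_slot(IOPRIO_CLASS_RT, 4))->tag,
		       (*async_queue_slot(IOPRIO_CLASS_BE, 4))->tag);
		return 0;
	}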
Diffstat (limited to 'block/cfq-iosched.c')
 block/cfq-iosched.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 44 insertions(+), 12 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9755a3cfad26..bc7190eed10d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,7 +92,11 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
-	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+	/*
+	 * async queue for each priority case
+	 */
+	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+	struct cfq_queue *async_idle_cfqq;
 
 	struct timer_list idle_class_timer;
 
@@ -1414,24 +1418,44 @@ out:
 	return cfqq;
 }
 
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+	switch (ioprio_class) {
+	case IOPRIO_CLASS_RT:
+		return &cfqd->async_cfqq[0][ioprio];
+	case IOPRIO_CLASS_BE:
+		return &cfqd->async_cfqq[1][ioprio];
+	case IOPRIO_CLASS_IDLE:
+		return &cfqd->async_idle_cfqq;
+	default:
+		BUG();
+	}
+}
+
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(tsk);
+	const int ioprio_class = task_ioprio_class(tsk);
+	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
 
-	if (!is_sync)
-		cfqq = cfqd->async_cfqq[ioprio];
+	if (!is_sync) {
+		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+		cfqq = *async_cfqq;
+	}
+
 	if (!cfqq)
 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
-	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+	if (!is_sync && !(*async_cfqq)) {
 		atomic_inc(&cfqq->ref);
-		cfqd->async_cfqq[ioprio] = cfqq;
+		*async_cfqq = cfqq;
 	}
 
 	atomic_inc(&cfqq->ref);
@@ -2042,11 +2066,24 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 	blk_sync_queue(cfqd->queue);
 }
 
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+	int i;
+
+	for (i = 0; i < IOPRIO_BE_NR; i++) {
+		if (cfqd->async_cfqq[0][i])
+			cfq_put_queue(cfqd->async_cfqq[0][i]);
+		if (cfqd->async_cfqq[1][i])
+			cfq_put_queue(cfqd->async_cfqq[1][i]);
+		if (cfqd->async_idle_cfqq)
+			cfq_put_queue(cfqd->async_idle_cfqq);
+	}
+}
+
 static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
-	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2063,12 +2100,7 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
-	/*
-	 * Put the async queues
-	 */
-	for (i = 0; i < IOPRIO_BE_NR; i++)
-		if (cfqd->async_cfqq[i])
-			cfq_put_queue(cfqd->async_cfqq[i]);
+	cfq_put_async_queues(cfqd);
 
 	spin_unlock_irq(q->queue_lock);
 