Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 67
1 file changed, 44 insertions(+), 23 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9755a3cfad26..d148ccbc36d1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,7 +92,11 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
-	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+	/*
+	 * async queue for each priority case
+	 */
+	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+	struct cfq_queue *async_idle_cfqq;
 
 	struct timer_list idle_class_timer;
 
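
The hunk above replaces the single flat array of async queues with a small table keyed by I/O scheduling class. As a hedged sketch of the resulting layout (an illustrative excerpt named cfq_data_excerpt, not the full struct; IOPRIO_BE_NR is 8 in the kernel headers of this era, and the row mapping follows cfq_async_queue_prio() below):

	#include <linux/ioprio.h>	/* IOPRIO_BE_NR, IOPRIO_CLASS_* */

	struct cfq_queue;

	struct cfq_data_excerpt {
		/*
		 * async queue for each priority case:
		 * row 0 holds IOPRIO_CLASS_RT, row 1 holds IOPRIO_CLASS_BE
		 */
		struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
		/* IOPRIO_CLASS_IDLE has no priority levels, so one slot suffices */
		struct cfq_queue *async_idle_cfqq;
	};
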
@@ -111,9 +115,6 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;
 
 	struct list_head cic_list;
-
-	sector_t new_seek_mean;
-	u64 new_seek_total;
 };
 
 /*
@@ -153,8 +154,6 @@ struct cfq_queue {
 
 	/* various state flags, see below */
 	unsigned int flags;
-
-	sector_t last_request_pos;
 };
 
 enum cfqq_state_flags {
@@ -1414,24 +1413,44 @@ out:
 	return cfqq;
 }
 
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+	switch (ioprio_class) {
+	case IOPRIO_CLASS_RT:
+		return &cfqd->async_cfqq[0][ioprio];
+	case IOPRIO_CLASS_BE:
+		return &cfqd->async_cfqq[1][ioprio];
+	case IOPRIO_CLASS_IDLE:
+		return &cfqd->async_idle_cfqq;
+	default:
+		BUG();
+	}
+}
+
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(tsk);
+	const int ioprio_class = task_ioprio_class(tsk);
+	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
 
-	if (!is_sync)
-		cfqq = cfqd->async_cfqq[ioprio];
+	if (!is_sync) {
+		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+		cfqq = *async_cfqq;
+	}
+
 	if (!cfqq)
 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
-	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+	if (!is_sync && !(*async_cfqq)) {
 		atomic_inc(&cfqq->ref);
-		cfqd->async_cfqq[ioprio] = cfqq;
+		*async_cfqq = cfqq;
 	}
 
 	atomic_inc(&cfqq->ref);
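
Taken together, cfq_async_queue_prio() and the reworked cfq_get_queue() implement a resolve-then-pin pattern: map the (class, priority) pair to a slot, reuse the queue if the slot is populated, otherwise allocate one and take an extra reference so the slot keeps the queue alive until the scheduler exits. A minimal userspace model of that pattern (every name here is illustrative, not kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-ins for the kernel's ioprio classes and levels. */
	enum { CLASS_RT, CLASS_BE, CLASS_IDLE };
	#define PRIO_NR 8

	struct queue { int refs; };

	static struct queue *async_slots[2][PRIO_NR];	/* [0]: RT, [1]: BE */
	static struct queue *idle_slot;

	/* Map (class, priority) to its slot, as cfq_async_queue_prio() does. */
	static struct queue **slot_for(int ioprio_class, int ioprio)
	{
		switch (ioprio_class) {
		case CLASS_RT:
			return &async_slots[0][ioprio];
		case CLASS_BE:
			return &async_slots[1][ioprio];
		case CLASS_IDLE:
			return &idle_slot;
		default:
			abort();
		}
	}

	/* Reuse a cached queue, or allocate one and pin it in the table. */
	static struct queue *get_async_queue(int ioprio_class, int ioprio)
	{
		struct queue **slot = slot_for(ioprio_class, ioprio);

		if (!*slot) {
			*slot = calloc(1, sizeof(**slot));
			(*slot)->refs++;	/* the table's pinning reference */
		}
		(*slot)->refs++;		/* the caller's reference */
		return *slot;
	}

	int main(void)
	{
		struct queue *a = get_async_queue(CLASS_BE, 4);
		struct queue *b = get_async_queue(CLASS_BE, 4);

		/* Both lookups share one queue: prints "shared=1 refs=3". */
		printf("shared=%d refs=%d\n", a == b, a->refs);
		return 0;
	}
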
@@ -1597,11 +1616,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	else
 		sdist = cic->last_request_pos - rq->sector;
 
-	if (!cic->seek_samples) {
-		cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
-		cfqd->new_seek_mean = cfqd->new_seek_total / 256;
-	}
-
 	/*
 	 * Don't allow the seek distance to get too large from the
 	 * odd fragment, pagein, etc
@@ -1737,7 +1751,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
 	cic->last_request_pos = rq->sector + rq->nr_sectors;
-	cfqq->last_request_pos = cic->last_request_pos;
 
 	if (cfqq == cfqd->active_queue) {
 		/*
@@ -2042,11 +2055,24 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 	blk_sync_queue(cfqd->queue);
 }
 
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+	int i;
+
+	for (i = 0; i < IOPRIO_BE_NR; i++) {
+		if (cfqd->async_cfqq[0][i])
+			cfq_put_queue(cfqd->async_cfqq[0][i]);
+		if (cfqd->async_cfqq[1][i])
+			cfq_put_queue(cfqd->async_cfqq[1][i]);
+		if (cfqd->async_idle_cfqq)
+			cfq_put_queue(cfqd->async_idle_cfqq);
+	}
+}
+
 static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
-	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2063,12 +2089,7 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
-	/*
-	 * Put the async queues
-	 */
-	for (i = 0; i < IOPRIO_BE_NR; i++)
-		if (cfqd->async_cfqq[i])
-			cfq_put_queue(cfqd->async_cfqq[i]);
+	cfq_put_async_queues(cfqd);
 
 	spin_unlock_irq(q->queue_lock);
 
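
The teardown side walks the table and drops each pinning reference taken in cfq_get_queue(). A self-contained sketch of the same idea, continuing the illustrative names from the model above (note that the single IDLE slot only needs one put, so this sketch releases it outside the per-priority loop):

	#include <stdlib.h>

	#define PRIO_NR 8

	struct queue { int refs; };

	static struct queue *async_slots[2][PRIO_NR];	/* [0]: RT, [1]: BE */
	static struct queue *idle_slot;

	static void put_queue(struct queue *q)
	{
		if (--q->refs == 0)
			free(q);
	}

	/* Drop every pinning reference held by the table; the diff's
	 * cfq_put_async_queues() plays this role at elevator exit. */
	static void put_async_queues(void)
	{
		int i;

		for (i = 0; i < PRIO_NR; i++) {
			if (async_slots[0][i])
				put_queue(async_slots[0][i]);
			if (async_slots[1][i])
				put_queue(async_slots[1][i]);
		}
		if (idle_slot)
			put_queue(idle_slot);	/* one slot, one put */
	}
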