author    Jeff Garzik <jeff@garzik.org>    2006-06-08 15:24:46 -0400
committer Jeff Garzik <jeff@garzik.org>    2006-06-08 15:24:46 -0400
commit    d15a88fc21ef225768ce31be16edfc9c6e2e02e3
tree      d4cb0a1bc97973bb947e2667ae56bc4bc2256e9d /block/cfq-iosched.c
parent    b53471711f21ba0e151075f0e1d6d531eb50f1b1
parent    1def630a6a49dda5bc89dfbd86656293640456f0
Merge branch 'master' into upstream
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	77
1 file changed, 54 insertions(+), 23 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2540dfaa3e38..8e9d84825e1c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -33,7 +33,7 @@ static int cfq_slice_idle = HZ / 70;
 
 #define CFQ_KEY_ASYNC		(0)
 
-static DEFINE_RWLOCK(cfq_exit_lock);
+static DEFINE_SPINLOCK(cfq_exit_lock);
 
 /*
  * for the hash of cfqq inside the cfqd
@@ -133,6 +133,7 @@ struct cfq_data {
 	mempool_t *crq_pool;
 
 	int rq_in_driver;
+	int hw_tag;
 
 	/*
 	 * schedule slice state info
@@ -500,10 +501,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 
 	/*
 	 * if queue was preempted, just add to front to be fair. busy_rr
-	 * isn't sorted.
+	 * isn't sorted, but insert at the back for fairness.
 	 */
 	if (preempted || list == &cfqd->busy_rr) {
-		list_add(&cfqq->cfq_list, list);
+		if (preempted)
+			list = list->prev;
+
+		list_add_tail(&cfqq->cfq_list, list);
 		return;
 	}
 
@@ -664,6 +668,15 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
 	cfqd->rq_in_driver++;
+
+	/*
+	 * If the depth is larger 1, it really could be queueing. But lets
+	 * make the mark a little higher - idling could still be good for
+	 * low queueing, and a low queueing number could also just indicate
+	 * a SCSI mid layer like behaviour where limit+1 is often seen.
+	 */
+	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
+		cfqd->hw_tag = 1;
 }
 
 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
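
The hunk above adds a simple depth-based heuristic: once more than four requests have been observed in flight to the driver at the same time, the device is assumed to do its own command queueing and hw_tag is latched, which later disables per-queue idling. A minimal standalone sketch of the same idea (hypothetical names, not part of the patch):

	/* Illustrative only: latch a "tagged queueing" flag from observed depth. */
	struct depth_detect {
		int in_driver;	/* requests currently issued to the driver */
		int hw_tag;	/* set once the depth exceeds the threshold */
	};

	static void depth_detect_issue(struct depth_detect *d)
	{
		d->in_driver++;
		/*
		 * > 4 rather than > 1, so the shallow "limit+1" pattern a
		 * SCSI-midlayer-style driver can produce is not mistaken
		 * for real tagged queueing.
		 */
		if (!d->hw_tag && d->in_driver > 4)
			d->hw_tag = 1;
	}
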
@@ -879,6 +892,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 	cfqq = list_entry_cfqq(cfqd->cur_rr.next);
 
 	/*
+	 * If no new queues are available, check if the busy list has some
+	 * before falling back to idle io.
+	 */
+	if (!cfqq && !list_empty(&cfqd->busy_rr))
+		cfqq = list_entry_cfqq(cfqd->busy_rr.next);
+
+	/*
 	 * if we have idle queues and no rt or be queues had pending
 	 * requests, either allow immediate service if the grace period
 	 * has passed or arm the idle grace timer
@@ -1284,7 +1304,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
 	/*
 	 * put the reference this task is holding to the various queues
 	 */
-	read_lock_irqsave(&cfq_exit_lock, flags);
+	spin_lock_irqsave(&cfq_exit_lock, flags);
 
 	n = rb_first(&ioc->cic_root);
 	while (n != NULL) {
@@ -1294,7 +1314,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
 		n = rb_next(n);
 	}
 
-	read_unlock_irqrestore(&cfq_exit_lock, flags);
+	spin_unlock_irqrestore(&cfq_exit_lock, flags);
 }
 
 static struct cfq_io_context *
@@ -1400,17 +1420,17 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
 	struct cfq_io_context *cic;
 	struct rb_node *n;
 
-	write_lock(&cfq_exit_lock);
+	spin_lock(&cfq_exit_lock);
 
 	n = rb_first(&ioc->cic_root);
 	while (n != NULL) {
 		cic = rb_entry(n, struct cfq_io_context, rb_node);
 
 		changed_ioprio(cic);
 		n = rb_next(n);
 	}
 
-	write_unlock(&cfq_exit_lock);
+	spin_unlock(&cfq_exit_lock);
 
 	return 0;
 }
@@ -1458,7 +1478,8 @@ retry:
 		 * set ->slice_left to allow preemption for a new process
 		 */
 		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
-		cfq_mark_cfqq_idle_window(cfqq);
+		if (!cfqd->hw_tag)
+			cfq_mark_cfqq_idle_window(cfqq);
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_init_prio_data(cfqq);
 	}
@@ -1475,9 +1496,10 @@ out:
 static void
 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
 {
-	read_lock(&cfq_exit_lock);
+	spin_lock(&cfq_exit_lock);
 	rb_erase(&cic->rb_node, &ioc->cic_root);
-	read_unlock(&cfq_exit_lock);
+	list_del_init(&cic->queue_list);
+	spin_unlock(&cfq_exit_lock);
 	kmem_cache_free(cfq_ioc_pool, cic);
 	atomic_dec(&ioc_count);
 }
@@ -1545,11 +1567,11 @@ restart:
 		BUG();
 	}
 
-	read_lock(&cfq_exit_lock);
+	spin_lock(&cfq_exit_lock);
 	rb_link_node(&cic->rb_node, parent, p);
 	rb_insert_color(&cic->rb_node, &ioc->cic_root);
 	list_add(&cic->queue_list, &cfqd->cic_list);
-	read_unlock(&cfq_exit_lock);
+	spin_unlock(&cfq_exit_lock);
 }
 
 /*
@@ -1648,7 +1670,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	int enable_idle = cfq_cfqq_idle_window(cfqq);
 
-	if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+	if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1739,14 +1761,24 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
 
+	cic = crq->io_context;
+
 	/*
 	 * we never wait for an async request and we don't allow preemption
 	 * of an async request. so just return early
 	 */
-	if (!cfq_crq_is_sync(crq))
+	if (!cfq_crq_is_sync(crq)) {
+		/*
+		 * sync process issued an async request, if it's waiting
+		 * then expire it and kick rq handling.
+		 */
+		if (cic == cfqd->active_cic &&
+		    del_timer(&cfqd->idle_slice_timer)) {
+			cfq_slice_expired(cfqd, 0);
+			cfq_start_queueing(cfqd, cfqq);
+		}
 		return;
-
-	cic = crq->io_context;
+	}
 
 	cfq_update_io_thinktime(cfqd, cic);
 	cfq_update_io_seektime(cfqd, cic, crq);
@@ -2164,10 +2196,9 @@ static void cfq_idle_class_timer(unsigned long data)
 	 * race with a non-idle queue, reset timer
 	 */
 	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-	if (!time_after_eq(jiffies, end)) {
-		cfqd->idle_class_timer.expires = end;
-		add_timer(&cfqd->idle_class_timer);
-	} else
+	if (!time_after_eq(jiffies, end))
+		mod_timer(&cfqd->idle_class_timer, end);
+	else
 		cfq_schedule_dispatch(cfqd);
 
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
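
The timer hunk above replaces the open-coded expires/add_timer pair with mod_timer(), which updates the expiry and (re)arms the timer in one call and is safe whether or not the timer is already pending. The two patterns, shown as a sketch with a hypothetical my_timer:

	/* before: set the field, then arm; only valid if the timer is not pending */
	my_timer.expires = end;
	add_timer(&my_timer);

	/* after: idiomatic, works whether or not the timer is currently pending */
	mod_timer(&my_timer, end);
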
@@ -2187,7 +2218,7 @@ static void cfq_exit_queue(elevator_t *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
-	write_lock(&cfq_exit_lock);
+	spin_lock(&cfq_exit_lock);
 	spin_lock_irq(q->queue_lock);
 
 	if (cfqd->active_queue)
@@ -2210,7 +2241,7 @@ static void cfq_exit_queue(elevator_t *e)
 	}
 
 	spin_unlock_irq(q->queue_lock);
-	write_unlock(&cfq_exit_lock);
+	spin_unlock(&cfq_exit_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
 
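
Taken together, the locking hunks convert cfq_exit_lock from a reader/writer lock into a plain spinlock, so every acquisition is now exclusive, and cfq_drop_dead_cic() additionally unlinks the cic from the per-device queue_list while holding it. The conversion follows the usual kernel pattern; a minimal sketch with a hypothetical my_lock (not the patch itself):

	static DEFINE_SPINLOCK(my_lock);	/* was: static DEFINE_RWLOCK(my_lock); */

	static void touch_shared_state(void)
	{
		unsigned long flags;

		/* was: read_lock_irqsave()/write_lock() on the rwlock */
		spin_lock_irqsave(&my_lock, flags);
		/* ... modify the shared rbtree and list under the lock ... */
		spin_unlock_irqrestore(&my_lock, flags);
	}
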