author    Aaron Carroll <aaronc@gelato.unsw.edu.au>    2008-08-26 09:52:36 -0400
committer Jens Axboe <jens.axboe@oracle.com>    2008-10-09 02:56:09 -0400
commit    45333d5a31296d0af886d94f1d08f128231cab8e
tree      ea5b1afb70fb17935128bac6df588e6b5ae764b4 /block/cfq-iosched.c
parent    605401618ce4409045bc4db86e88d4b38f2ad585
cfq-iosched: fix queue depth detection
CFQ's detection of queueing devices assumes a non-queuing device and detects if the queue depth reaches a certain threshold. Under some workloads (e.g. synchronous reads), CFQ effectively forces a unit queue depth, thus defeating the detection logic. This leads to poor performance on queuing hardware, since the idle window remains enabled.

This patch inverts the sense of the logic: assume a queuing-capable device, and detect if the depth does not exceed the threshold.

Signed-off-by: Aaron Carroll <aaronc@gelato.unsw.edu.au>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
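To see why the inversion matters, here is a toy model of the two detection schemes (a minimal userspace C sketch, not kernel code; CFQ_HW_QUEUE_MIN, hw_tag, and the 50-sample window are taken from the patch below, everything else is illustrative). The old one-shot check can never flip hw_tag on when the workload pins the depth at one, while the new scheme starts from hw_tag = 1 and only clears it after a full window of samples taken under sufficient load shows a low peak depth:

#include <stdio.h>

#define CFQ_HW_QUEUE_MIN	5

static int old_hw_tag;		/* old scheme: assume non-queuing */
static int new_hw_tag = 1;	/* new scheme: assume queuing */
static int samples, peak;

/* Old detection: one-shot, latches on only if depth ever exceeds 4. */
static void old_check(int depth)
{
	if (!old_hw_tag && depth > 4)
		old_hw_tag = 1;
}

/* New detection: re-evaluate the peak depth every 50 samples, counting
 * only samples taken under sufficient load. */
static void new_check(int depth, int queued)
{
	if (depth > peak)
		peak = depth;
	if (queued <= CFQ_HW_QUEUE_MIN && depth <= CFQ_HW_QUEUE_MIN)
		return;		/* too little load to judge */
	if (++samples < 50)
		return;
	new_hw_tag = peak >= CFQ_HW_QUEUE_MIN;
	samples = peak = 0;
}

int main(void)
{
	int i;

	/* Synchronous reads on a queuing device: depth pinned at 1.
	 * The old check never fires; the new one keeps its assumption. */
	for (i = 0; i < 100; i++) {
		old_check(1);
		new_check(1, 1);
	}
	printf("sync reads: old hw_tag=%d, new hw_tag=%d\n",
	       old_hw_tag, new_hw_tag);

	/* Genuinely shallow hardware under heavy load: many requests
	 * queued but depth never above 2, so the new scheme clears
	 * hw_tag after one full sampling window. */
	for (i = 0; i < 60; i++)
		new_check(2, 32);
	printf("shallow hw: new hw_tag=%d\n", new_hw_tag);
	return 0;
}

The windowed re-evaluation is the key design choice: hw_tag is re-derived every 50 qualifying samples instead of being latched once, so a phase of unit-depth I/O can no longer disable queuing detection for the lifetime of the queue.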
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--    block/cfq-iosched.c    47
1 file changed, 38 insertions(+), 9 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5f6fd287c18..494b6fdcb18 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -39,6 +39,7 @@ static int cfq_slice_idle = HZ / 125;
 #define CFQ_MIN_TT		(2)
 
 #define CFQ_SLICE_SCALE		(5)
+#define CFQ_HW_QUEUE_MIN	(5)
 
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
@@ -86,7 +87,14 @@ struct cfq_data {
 
 	int rq_in_driver;
 	int sync_flight;
+
+	/*
+	 * queue-depth detection
+	 */
+	int rq_queued;
 	int hw_tag;
+	int hw_tag_samples;
+	int rq_in_driver_peak;
 
 	/*
 	 * idle window management
@@ -654,15 +662,6 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
 						cfqd->rq_in_driver);
 
-	/*
-	 * If the depth is larger 1, it really could be queueing. But lets
-	 * make the mark a little higher - idling could still be good for
-	 * low queueing, and a low queueing number could also just indicate
-	 * a SCSI mid layer like behaviour where limit+1 is often seen.
-	 */
-	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
-		cfqd->hw_tag = 1;
-
 	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
 }
 
@@ -686,6 +685,7 @@ static void cfq_remove_request(struct request *rq)
 	list_del_init(&rq->queuelist);
 	cfq_del_rq_rb(rq);
 
+	cfqq->cfqd->rq_queued--;
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -1833,6 +1833,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
+	cfqd->rq_queued++;
 	if (rq_is_meta(rq))
 		cfqq->meta_pending++;
 
@@ -1880,6 +1881,31 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
+/*
+ * Update hw_tag based on peak queue depth over 50 samples under
+ * sufficient load.
+ */
+static void cfq_update_hw_tag(struct cfq_data *cfqd)
+{
+	if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
+		cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
+
+	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
+	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+		return;
+
+	if (cfqd->hw_tag_samples++ < 50)
+		return;
+
+	if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
+		cfqd->hw_tag = 1;
+	else
+		cfqd->hw_tag = 0;
+
+	cfqd->hw_tag_samples = 0;
+	cfqd->rq_in_driver_peak = 0;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1890,6 +1916,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	now = jiffies;
 	cfq_log_cfqq(cfqd, cfqq, "complete");
 
+	cfq_update_hw_tag(cfqd);
+
 	WARN_ON(!cfqd->rq_in_driver);
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
@@ -2200,6 +2228,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->hw_tag = 1;
 
 	return cfqd;
 }