author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-10 13:52:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-10 13:52:45 -0400
commit		e26feff647ef34423b048b940540a0059001ddb0 (patch)
tree		acafe68602ee2f6f1a438c113073ffcc0040e949 /block/cfq-iosched.c
parent		d403a6484f0341bf0624d17ece46f24f741b6a92 (diff)
parent		b911e473d24633c19414b54b82b9ff0b1a2419d7 (diff)
Merge branch 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block: (132 commits)
doc/cdrom: Trivial documentation error, file not present
block_dev: fix kernel-doc in new functions
block: add some comments around the bio read-write flags
block: mark bio_split_pool static
block: Find bio sector offset given idx and offset
block: gendisk integrity wrapper
block: Switch blk_integrity_compare from bdev to gendisk
block: Fix double put in blk_integrity_unregister
block: Introduce integrity data ownership flag
block: revert part of d7533ad0e132f92e75c1b2eb7c26387b25a583c1
bio.h: Remove unused conditional code
block: remove end_{queued|dequeued}_request()
block: change elevator to use __blk_end_request()
gdrom: change to use __blk_end_request()
memstick: change to use __blk_end_request()
virtio_blk: change to use __blk_end_request()
blktrace: use BLKTRACE_BDEV_SIZE as the name size for setup structure
block: add lld busy state exporting interface
block: Fix blk_start_queueing() to not kick a stopped queue
include blktrace_api.h in headers_install
...
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	57
1 file changed, 47 insertions(+), 10 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1e2aff812ee2..6a062eebbd15 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -39,6 +39,7 @@ static int cfq_slice_idle = HZ / 125;
 #define CFQ_MIN_TT		(2)
 
 #define CFQ_SLICE_SCALE		(5)
+#define CFQ_HW_QUEUE_MIN	(5)
 
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
@@ -86,7 +87,14 @@ struct cfq_data {
 
 	int rq_in_driver;
 	int sync_flight;
+
+	/*
+	 * queue-depth detection
+	 */
+	int rq_queued;
 	int hw_tag;
+	int hw_tag_samples;
+	int rq_in_driver_peak;
 
 	/*
 	 * idle window management
@@ -244,7 +252,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_work(&cfqd->unplug_work);
+		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 	}
 }
 
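Note the changed call above: kblockd_schedule_work() now takes the request
queue as its first argument, so kblockd can associate the unplug work with a
specific queue. As a rough sketch (an assumption about the surrounding
2.6.28-era cfq-iosched.c, not part of this diff), the work item being
scheduled is wired up roughly like this:

	/* Hypothetical excerpt modeled on cfq_init_queue(); the handler
	 * name cfq_kick_queue is assumed, it is not shown in this diff. */
	static void cfq_kick_queue(struct work_struct *work)
	{
		struct cfq_data *cfqd =
			container_of(work, struct cfq_data, unplug_work);

		blk_start_queueing(cfqd->queue);	/* restart dispatching */
	}

	/* at queue init time: */
	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);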
@@ -654,15 +662,6 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
 						cfqd->rq_in_driver);
 
-	/*
-	 * If the depth is larger 1, it really could be queueing. But lets
-	 * make the mark a little higher - idling could still be good for
-	 * low queueing, and a low queueing number could also just indicate
-	 * a SCSI mid layer like behaviour where limit+1 is often seen.
-	 */
-	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
-		cfqd->hw_tag = 1;
-
 	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
 }
 
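The heuristic removed here was one-way: once rq_in_driver exceeded 4 it set
hw_tag and never revisited the decision. The cfq_update_hw_tag() function
added later in this diff replaces it with a sampled estimate that can also
clear hw_tag again when the observed peak queue depth stays low.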
@@ -686,6 +685,7 @@ static void cfq_remove_request(struct request *rq)
 	list_del_init(&rq->queuelist);
 	cfq_del_rq_rb(rq);
 
+	cfqq->cfqd->rq_queued--;
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -878,6 +878,14 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	struct cfq_io_context *cic;
 	unsigned long sl;
 
+	/*
+	 * SSD device without seek penalty, disable idling. But only do so
+	 * for devices that support queuing, otherwise we still have a problem
+	 * with sync vs async workloads.
+	 */
+	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+		return;
+
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
 	WARN_ON(cfq_cfqq_slice_new(cfqq));
 
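blk_queue_nonrot() tests a queue flag that low-level drivers are expected to
set for flash-based devices. A minimal sketch of the driver side, assuming the
QUEUE_FLAG_NONROT flag and the queue_flag_set_unlocked() helper from this
kernel series (the function and its caller are illustrative, not from the diff):

	#include <linux/blkdev.h>

	/* Hypothetical LLD setup: mark the queue non-rotational so CFQ
	 * skips idle windows on it (provided hw_tag is also set, per the
	 * hunk above). */
	static void mydrv_init_queue(struct request_queue *q, int is_ssd)
	{
		if (is_ssd)
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	}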
@@ -1833,6 +1841,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
+	cfqd->rq_queued++;
 	if (rq_is_meta(rq))
 		cfqq->meta_pending++;
 
@@ -1880,6 +1889,31 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
+/*
+ * Update hw_tag based on peak queue depth over 50 samples under
+ * sufficient load.
+ */
+static void cfq_update_hw_tag(struct cfq_data *cfqd)
+{
+	if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
+		cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
+
+	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
+	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+		return;
+
+	if (cfqd->hw_tag_samples++ < 50)
+		return;
+
+	if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
+		cfqd->hw_tag = 1;
+	else
+		cfqd->hw_tag = 0;
+
+	cfqd->hw_tag_samples = 0;
+	cfqd->rq_in_driver_peak = 0;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
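To see when the new heuristic flips hw_tag, here is a small userspace model of
cfq_update_hw_tag() (a sketch for illustration only; the constants and checks
mirror the hunk above, while the struct and main() are scaffolding):

	#include <stdio.h>

	#define CFQ_HW_QUEUE_MIN	5

	struct model {
		int rq_queued, rq_in_driver;
		int hw_tag, hw_tag_samples, rq_in_driver_peak;
	};

	/* Track peak driver depth; only count a sample when there is
	 * enough load; every 50 samples, decide and reset the window. */
	static void update_hw_tag(struct model *m)
	{
		if (m->rq_in_driver > m->rq_in_driver_peak)
			m->rq_in_driver_peak = m->rq_in_driver;

		if (m->rq_queued <= CFQ_HW_QUEUE_MIN &&
		    m->rq_in_driver <= CFQ_HW_QUEUE_MIN)
			return;

		if (m->hw_tag_samples++ < 50)
			return;

		m->hw_tag = m->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN;
		m->hw_tag_samples = 0;
		m->rq_in_driver_peak = 0;
	}

	int main(void)
	{
		struct model m = { .hw_tag = 1 };	/* matches the new default */
		int i;

		/* Busy device (8 requests queued) that never keeps more
		 * than 2 in flight: once the 50-sample window closes,
		 * hw_tag drops to 0 and idling stays enabled. */
		m.rq_queued = 8;
		m.rq_in_driver = 2;
		for (i = 0; i < 60; i++)
			update_hw_tag(&m);
		printf("hw_tag = %d\n", m.hw_tag);	/* prints 0 */
		return 0;
	}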
@@ -1890,6 +1924,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	now = jiffies;
 	cfq_log_cfqq(cfqd, cfqq, "complete");
 
+	cfq_update_hw_tag(cfqd);
+
 	WARN_ON(!cfqd->rq_in_driver);
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
@@ -2200,6 +2236,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->hw_tag = 1;
 
 	return cfqd;
 }
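Design note on the cfqd->hw_tag = 1 default: the scheduler now starts out
assuming the device does command queuing, so the non-rotational check in
cfq_arm_slice_timer() can take effect from the first request; the first
50-sample window in cfq_update_hw_tag() then corrects that assumption if the
observed peak depth stays below CFQ_HW_QUEUE_MIN.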