aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFengguang Wu <wfg@mail.ustc.edu.cn>2007-10-16 04:24:36 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-16 12:42:53 -0400
commitf2e189827a914b66e435e68b1c9e37775cb995ed (patch)
treefa5748fa0ecdef500c616c28d2885d2385b17e4d
parent535443f51543df61111bbd234300ae549d220448 (diff)
readahead: remove the limit max_sectors_kb imposed on max_readahead_kb
Remove the size limit max_sectors_kb imposed on max_readahead_kb.

The size restriction is unreasonable, especially when max_sectors_kb cannot grow larger than max_hw_sectors_kb, which can be rather small for some disk drives.

Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--block/ll_rw_blk.c9
1 file changed, 0 insertions, 9 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d875673e76cd..a83823fcd74f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3928,7 +3928,6 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 		max_hw_sectors_kb = q->max_hw_sectors >> 1,
 		page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
-	int ra_kb;
 
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
@@ -3937,14 +3936,6 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	 * values synchronously:
 	 */
 	spin_lock_irq(q->queue_lock);
-	/*
-	 * Trim readahead window as well, if necessary:
-	 */
-	ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
-	if (ra_kb > max_sectors_kb)
-		q->backing_dev_info.ra_pages =
-			max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
-
 	q->max_sectors = max_sectors_kb << 1;
 	spin_unlock_irq(q->queue_lock);
 