about summary refs log tree commit diff stats
path: root/block/ll_rw_blk.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@suse.de>2006-07-21 14:30:28 -0400
committerJens Axboe <axboe@nelson.home.kernel.dk>2006-09-30 14:29:41 -0400
commitda20a20f3b5c175648fa797c899dd577e4dacb51 (patch)
tree690ba6f8f4f62a9deaa2b6d5d3cf6bd3220dac1b /block/ll_rw_blk.c
parentbf57225670bcbeb357182d800736b4782cde7295 (diff)
[PATCH] ll_rw_blk: allow more flexibility for read_ahead_kb store
It can make sense to set read-ahead larger than a single request. We should not be enforcing such policy on the user. Additionally, using the BLKRASET ioctl doesn't impose such a restriction, so we now expose identical behaviour through the two interfaces. Issue also reported by Anton <cbou@mail.ru> Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--block/ll_rw_blk.c3
1 file changed, 0 insertions, 3 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 346be9ae31f6..e3980ec747c1 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3806,9 +3806,6 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 3806 	ssize_t ret = queue_var_store(&ra_kb, page, count);
 3807 
 3808 	spin_lock_irq(q->queue_lock);
-3809 	if (ra_kb > (q->max_sectors >> 1))
-3810 		ra_kb = (q->max_sectors >> 1);
-3811 
 3812 	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
 3813 	spin_unlock_irq(q->queue_lock);
 3814 