aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-sysfs.c
diff options
context:
space:
mode:
author	Wu Fengguang <fengguang.wu@intel.com>	2008-11-25 03:08:39 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2008-12-29 02:28:43 -0500
commit	7c239517d9f18427fc2e7ed259fb3b866595f5af (patch)
tree	cd2149cc2f2eb0faa83d38fe64e1228f7c703a85 /block/blk-sysfs.c
parent	42364690992e592c05f85c76fda4055820b48c1b (diff)
block: don't take lock on changing ra_pages
There's no need to take queue_lock or kernel_lock when modifying
bdi->ra_pages. So remove them. Also remove out of date comment for
queue_max_sectors_store().

Signed-off-by: Wu Fengguang <wfg@linux.intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--	block/blk-sysfs.c	7
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 21e275d7eed9..a29cb788e408 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -88,9 +88,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	unsigned long ra_kb;
 	ssize_t ret = queue_var_store(&ra_kb, page, count);
 
-	spin_lock_irq(q->queue_lock);
 	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
-	spin_unlock_irq(q->queue_lock);
 
 	return ret;
 }
@@ -117,10 +115,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
-	/*
-	 * Take the queue lock to update the readahead and max_sectors
-	 * values synchronously:
-	 */
+
 	spin_lock_irq(q->queue_lock);
 	q->max_sectors = max_sectors_kb << 1;
 	spin_unlock_irq(q->queue_lock);