about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
authorDave Reisner <dreisner@archlinux.org>2012-09-08 11:55:45 -0400
committerJens Axboe <axboe@kernel.dk>2012-09-09 04:39:18 -0400
commitb1f3b64d76cf88cc250e5cdd1de783ba9737078e (patch)
treec7ec8496a2b9a8af239fa78090f65178f30063c7 /block
parentbf800ef1816b4283a885e55ad38068aec9711e4d (diff)
block: reject invalid queue attribute values
Instead of using simple_strtoul which "converts" invalid numbers to 0, use strict_strtoul and perform error checking to ensure that userspace passes us a valid unsigned long. This addresses problems with functions such as writev, which might want to write a trailing newline -- the newline should rightfully be rejected, but the value preceding it should be preserved. Fixes BZ#46981. Signed-off-by: Dave Reisner <dreisner@archlinux.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--block/blk-sysfs.c25
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9628b291f960..ea51d827a0bb 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -26,9 +26,15 @@ queue_var_show(unsigned long var, char *page)
26static ssize_t 26static ssize_t
27queue_var_store(unsigned long *var, const char *page, size_t count) 27queue_var_store(unsigned long *var, const char *page, size_t count)
28{ 28{
29 char *p = (char *) page; 29 int err;
30 unsigned long v;
31
32 err = strict_strtoul(page, 10, &v);
33 if (err || v > UINT_MAX)
34 return -EINVAL;
35
36 *var = v;
30 37
31 *var = simple_strtoul(p, &p, 10);
32 return count; 38 return count;
33} 39}
34 40
@@ -48,6 +54,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
48 return -EINVAL; 54 return -EINVAL;
49 55
50 ret = queue_var_store(&nr, page, count); 56 ret = queue_var_store(&nr, page, count);
57 if (ret < 0)
58 return ret;
59
51 if (nr < BLKDEV_MIN_RQ) 60 if (nr < BLKDEV_MIN_RQ)
52 nr = BLKDEV_MIN_RQ; 61 nr = BLKDEV_MIN_RQ;
53 62
@@ -102,6 +111,9 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
102 unsigned long ra_kb; 111 unsigned long ra_kb;
103 ssize_t ret = queue_var_store(&ra_kb, page, count); 112 ssize_t ret = queue_var_store(&ra_kb, page, count);
104 113
114 if (ret < 0)
115 return ret;
116
105 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); 117 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
106 118
107 return ret; 119 return ret;
@@ -176,6 +188,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
176 page_kb = 1 << (PAGE_CACHE_SHIFT - 10); 188 page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
177 ssize_t ret = queue_var_store(&max_sectors_kb, page, count); 189 ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
178 190
191 if (ret < 0)
192 return ret;
193
179 if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) 194 if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
180 return -EINVAL; 195 return -EINVAL;
181 196
@@ -236,6 +251,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
236 unsigned long nm; 251 unsigned long nm;
237 ssize_t ret = queue_var_store(&nm, page, count); 252 ssize_t ret = queue_var_store(&nm, page, count);
238 253
254 if (ret < 0)
255 return ret;
256
239 spin_lock_irq(q->queue_lock); 257 spin_lock_irq(q->queue_lock);
240 queue_flag_clear(QUEUE_FLAG_NOMERGES, q); 258 queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
241 queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); 259 queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
@@ -264,6 +282,9 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
264 unsigned long val; 282 unsigned long val;
265 283
266 ret = queue_var_store(&val, page, count); 284 ret = queue_var_store(&val, page, count);
285 if (ret < 0)
286 return ret;
287
267 spin_lock_irq(q->queue_lock); 288 spin_lock_irq(q->queue_lock);
268 if (val == 2) { 289 if (val == 2) {
269 queue_flag_set(QUEUE_FLAG_SAME_COMP, q); 290 queue_flag_set(QUEUE_FLAG_SAME_COMP, q);