author     Jens Axboe <jaxboe@fusionio.com>    2010-08-07 12:13:50 -0400
committer  Jens Axboe <jaxboe@fusionio.com>    2010-08-07 12:13:50 -0400
commit     956bcb7c1a9a73c6d5db66e83f32c785d06dc8fc (patch)
tree       dca3c08c3bff512c9d47a78c6762b006e165cc5d /block
parent     e2e1a148bc45855816ae6b4692ce29d0020fa22e (diff)
block: add helpers for the trivial queue flag sysfs show/store entries
The code for nonrot, random, and io stats is completely identical.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-sysfs.c  104
1 file changed, 36 insertions(+), 68 deletions(-)
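
For reference, this is roughly what the nonrot instantiation of the new macro expands to after preprocessing (a hand-written sketch, not part of the patch; it relies on the queue_var_show()/queue_var_store() helpers already in block/blk-sysfs.c, and the constant neg == 1 branches are folded out for readability, since the sysfs file is "rotational" while the flag it controls is NONROT):

static ssize_t
queue_show_nonrot(struct request_queue *q, char *page)
{
	/* flag is set for non-rotational devices, but the file reports "rotational" */
	int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);

	return queue_var_show(!bit, page);	/* neg ? !bit : bit, with neg == 1 */
}

static ssize_t
queue_store_nonrot(struct request_queue *q, const char *page, size_t count)
{
	unsigned long val;
	ssize_t ret = queue_var_store(&val, page, count);

	val = !val;	/* invert: writing 1 (rotational) must clear NONROT */

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_clear(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);
	return ret;
}

Writing to /sys/block/<dev>/queue/rotational therefore toggles QUEUE_FLAG_NONROT with the inverted sense, while add_random and iostats (neg == 0) map to QUEUE_FLAG_ADD_RANDOM and QUEUE_FLAG_IO_STAT directly.
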
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 58b53c354c2c..001ab18078f5 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -180,26 +180,36 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
-static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(!blk_queue_nonrot(q), page);
-}
-
-static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
-				  size_t count)
-{
-	unsigned long nm;
-	ssize_t ret = queue_var_store(&nm, page, count);
-
-	spin_lock_irq(q->queue_lock);
-	if (nm)
-		queue_flag_clear(QUEUE_FLAG_NONROT, q);
-	else
-		queue_flag_set(QUEUE_FLAG_NONROT, q);
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
+#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
+static ssize_t \
+queue_show_##name(struct request_queue *q, char *page) \
+{ \
+	int bit; \
+	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
+	return queue_var_show(neg ? !bit : bit, page); \
+} \
+static ssize_t \
+queue_store_##name(struct request_queue *q, const char *page, size_t count) \
+{ \
+	unsigned long val; \
+	ssize_t ret; \
+	ret = queue_var_store(&val, page, count); \
+	if (neg) \
+		val = !val; \
+	\
+	spin_lock_irq(q->queue_lock); \
+	if (val) \
+		queue_flag_set(QUEUE_FLAG_##flag, q); \
+	else \
+		queue_flag_clear(QUEUE_FLAG_##flag, q); \
+	spin_unlock_irq(q->queue_lock); \
+	return ret; \
+}
+
+QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
+QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
+QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
+#undef QUEUE_SYSFS_BIT_FNS
 
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 {
@@ -250,48 +260,6 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	return ret;
 }
 
-static ssize_t queue_random_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(blk_queue_add_random(q), page);
-}
-
-static ssize_t queue_random_store(struct request_queue *q, const char *page,
-				  size_t count)
-{
-	unsigned long val;
-	ssize_t ret = queue_var_store(&val, page, count);
-
-	spin_lock_irq(q->queue_lock);
-	if (val)
-		queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
-
-static ssize_t queue_iostats_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(blk_queue_io_stat(q), page);
-}
-
-static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
-				   size_t count)
-{
-	unsigned long stats;
-	ssize_t ret = queue_var_store(&stats, page, count);
-
-	spin_lock_irq(q->queue_lock);
-	if (stats)
-		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
-
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -373,8 +341,8 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
 
 static struct queue_sysfs_entry queue_nonrot_entry = {
 	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_nonrot_show,
-	.store = queue_nonrot_store,
+	.show = queue_show_nonrot,
+	.store = queue_store_nonrot,
 };
 
 static struct queue_sysfs_entry queue_nomerges_entry = {
@@ -391,14 +359,14 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
 
 static struct queue_sysfs_entry queue_iostats_entry = {
 	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_iostats_show,
-	.store = queue_iostats_store,
+	.show = queue_show_iostats,
+	.store = queue_store_iostats,
 };
 
 static struct queue_sysfs_entry queue_random_entry = {
 	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_random_show,
-	.store = queue_random_store,
+	.show = queue_show_random,
+	.store = queue_store_random,
 };
 
 static struct attribute *default_attrs[] = {