diff options
author | Jens Axboe <jaxboe@fusionio.com> | 2010-06-09 04:42:09 -0400 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2010-08-07 12:13:00 -0400 |
commit | e2e1a148bc45855816ae6b4692ce29d0020fa22e (patch) | |
tree | fd5ec8a580d4333b471acfe50f6f92b4cc880087 | |
parent | 841fdffdd382722d33579a6aa1487e8a4e526dbd (diff) |
block: add sysfs knob for turning off disk entropy contributions
There are two reasons for doing this:
- On SSD disks, the completion times aren't as random as they
are for rotational drives. So it's questionable whether they
should contribute to the random pool in the first place.
- Calling add_disk_randomness() has a lot of overhead.
This adds /sys/block/<dev>/queue/add_random, which allows you to
switch the entropy contribution off on a per-device basis. The default
setting is on, so there should be no functional change from this patch.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
-rw-r--r-- | block/blk-core.c | 3 | ||||
-rw-r--r-- | block/blk-sysfs.c | 28 | ||||
-rw-r--r-- | include/linux/blkdev.h | 5 |
3 files changed, 34 insertions, 2 deletions
diff --git a/block/blk-core.c b/block/blk-core.c index f0640d7f800f..b4131d29148c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -2111,7 +2111,8 @@ static bool blk_update_bidi_request(struct request *rq, int error, | |||
2111 | blk_update_request(rq->next_rq, error, bidi_bytes)) | 2111 | blk_update_request(rq->next_rq, error, bidi_bytes)) |
2112 | return true; | 2112 | return true; |
2113 | 2113 | ||
2114 | add_disk_randomness(rq->rq_disk); | 2114 | if (blk_queue_add_random(rq->q)) |
2115 | add_disk_randomness(rq->rq_disk); | ||
2115 | 2116 | ||
2116 | return false; | 2117 | return false; |
2117 | } | 2118 | } |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 306759bbdf1b..58b53c354c2c 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -250,6 +250,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) | |||
250 | return ret; | 250 | return ret; |
251 | } | 251 | } |
252 | 252 | ||
/*
 * sysfs "show" handler for /sys/block/<dev>/queue/add_random:
 * reports 1 if request completions on this queue contribute to the
 * random pool (QUEUE_FLAG_ADD_RANDOM set), 0 otherwise.
 */
static ssize_t queue_random_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_add_random(q), page);
}
257 | |||
258 | static ssize_t queue_random_store(struct request_queue *q, const char *page, | ||
259 | size_t count) | ||
260 | { | ||
261 | unsigned long val; | ||
262 | ssize_t ret = queue_var_store(&val, page, count); | ||
263 | |||
264 | spin_lock_irq(q->queue_lock); | ||
265 | if (val) | ||
266 | queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
267 | else | ||
268 | queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); | ||
269 | spin_unlock_irq(q->queue_lock); | ||
270 | |||
271 | return ret; | ||
272 | } | ||
273 | |||
253 | static ssize_t queue_iostats_show(struct request_queue *q, char *page) | 274 | static ssize_t queue_iostats_show(struct request_queue *q, char *page) |
254 | { | 275 | { |
255 | return queue_var_show(blk_queue_io_stat(q), page); | 276 | return queue_var_show(blk_queue_io_stat(q), page); |
@@ -374,6 +395,12 @@ static struct queue_sysfs_entry queue_iostats_entry = { | |||
374 | .store = queue_iostats_store, | 395 | .store = queue_iostats_store, |
375 | }; | 396 | }; |
376 | 397 | ||
/*
 * Sysfs attribute backing /sys/block/<dev>/queue/add_random:
 * readable by everyone, writable by root only (S_IRUGO | S_IWUSR).
 */
static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_random_show,
	.store = queue_random_store,
};
403 | |||
377 | static struct attribute *default_attrs[] = { | 404 | static struct attribute *default_attrs[] = { |
378 | &queue_requests_entry.attr, | 405 | &queue_requests_entry.attr, |
379 | &queue_ra_entry.attr, | 406 | &queue_ra_entry.attr, |
@@ -394,6 +421,7 @@ static struct attribute *default_attrs[] = { | |||
394 | &queue_nomerges_entry.attr, | 421 | &queue_nomerges_entry.attr, |
395 | &queue_rq_affinity_entry.attr, | 422 | &queue_rq_affinity_entry.attr, |
396 | &queue_iostats_entry.attr, | 423 | &queue_iostats_entry.attr, |
424 | &queue_random_entry.attr, | ||
397 | NULL, | 425 | NULL, |
398 | }; | 426 | }; |
399 | 427 | ||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 09a840264d6f..b8224ea4a5de 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -467,11 +467,13 @@ struct request_queue | |||
467 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | 467 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ |
468 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ | 468 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ |
469 | #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ | 469 | #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ |
470 | #define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */ | ||
470 | 471 | ||
471 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 472 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
472 | (1 << QUEUE_FLAG_CLUSTER) | \ | 473 | (1 << QUEUE_FLAG_CLUSTER) | \ |
473 | (1 << QUEUE_FLAG_STACKABLE) | \ | 474 | (1 << QUEUE_FLAG_STACKABLE) | \ |
474 | (1 << QUEUE_FLAG_SAME_COMP)) | 475 | (1 << QUEUE_FLAG_SAME_COMP) | \ |
476 | (1 << QUEUE_FLAG_ADD_RANDOM)) | ||
475 | 477 | ||
476 | static inline int queue_is_locked(struct request_queue *q) | 478 | static inline int queue_is_locked(struct request_queue *q) |
477 | { | 479 | { |
@@ -596,6 +598,7 @@ enum { | |||
596 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) | 598 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) |
597 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) | 599 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) |
598 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) | 600 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) |
601 | #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) | ||
599 | #define blk_queue_flushing(q) ((q)->ordseq) | 602 | #define blk_queue_flushing(q) ((q)->ordseq) |
600 | #define blk_queue_stackable(q) \ | 603 | #define blk_queue_stackable(q) \ |
601 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) | 604 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) |