author     Jens Axboe <jaxboe@fusionio.com>   2010-06-09 04:42:09 -0400
committer  Jens Axboe <jaxboe@fusionio.com>   2010-08-07 12:13:00 -0400
commit     e2e1a148bc45855816ae6b4692ce29d0020fa22e
tree       fd5ec8a580d4333b471acfe50f6f92b4cc880087 /include
parent     841fdffdd382722d33579a6aa1487e8a4e526dbd
block: add sysfs knob for turning off disk entropy contributions
There are two reasons for doing this:
- On SSD disks, the completion times aren't as random as they
are for rotational drives. So it's questionable whether they
should contribute to the random pool in the first place.
- Calling add_disk_randomness() has a lot of overhead.
This adds /sys/block/<dev>/queue/add_random, which allows you to
switch the entropy contribution off on a per-device basis. The default
setting is on, so this patch introduces no functional change.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
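
As a usage illustration (not part of the patch), here is a minimal C
sketch that turns the knob off for one device; the device name "sda" is
an assumption, substitute your own:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* "sda" is an example device, not something the patch names. */
	const char *path = "/sys/block/sda/queue/add_random";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* Write "0" to stop this disk feeding the entropy pool;
	 * "1" (the default) turns it back on. */
	fputs("0\n", f);
	fclose(f);
	return EXIT_SUCCESS;
}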
Diffstat (limited to 'include')
-rw-r--r--   include/linux/blkdev.h   5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 09a840264d6f..b8224ea4a5de 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -467,11 +467,13 @@ struct request_queue
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  18	/* Contributes to random pool */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE) |		\
-				 (1 << QUEUE_FLAG_SAME_COMP))
+				 (1 << QUEUE_FLAG_SAME_COMP) |		\
+				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -596,6 +598,7 @@ enum {
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
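
The consumer of the new blk_queue_add_random() test lives in
block/blk-core.c and is outside this include/-limited diffstat. As a
rough sketch of the gating it enables (the function name below is
illustrative, not the actual patched hunk):

/* Sketch only: the real call site is in block/blk-core.c and is not
 * shown in this include/-limited diff. */
static void example_finish_request(struct request *req)
{
	/* Skip the entropy accounting (and its per-completion
	 * overhead) when the sysfs knob has cleared
	 * QUEUE_FLAG_ADD_RANDOM for this queue. */
	if (blk_queue_add_random(req->q))
		add_disk_randomness(req->rq_disk);
}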