diff options
author | Dan Williams <dan.j.williams@intel.com> | 2011-07-23 14:44:25 -0400 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2011-07-23 14:44:25 -0400 |
commit | 5757a6d76cdf6dda2a492c09b985c015e86779b1 (patch) | |
tree | 6356a6353639eb473dd917a1b2062f9e7e20de22 /include/linux/blkdev.h | |
parent | ef3230880abd36553ab442363d3c9a0661f00769 (diff) |
block: strict rq_affinity
Some systems benefit from completions always being steered to the strict
requester cpu rather than the looser "per-socket" steering that
blk_cpu_to_group() attempts by default. This is because the first
CPU in the group mask ends up being completely overloaded with work,
while the others (including the original submitter) have power left
to spare.
Allow the strict mode to be set by writing '2' to the sysfs control
file. This is identical to the scheme used for the nomerges file,
where '2' is a more aggressive setting than just being turned on.
echo 2 > /sys/block/<bdev>/queue/rq_affinity
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Roland Dreier <roland@purestorage.com>
Tested-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 3 |
1 files changed, 2 insertions, 1 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c0cd9a2f22ef..0e67c45b3bc9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -392,7 +392,7 @@ struct request_queue { | |||
392 | #define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */ | 392 | #define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */ |
393 | #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ | 393 | #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ |
394 | #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ | 394 | #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ |
395 | #define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */ | 395 | #define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ |
396 | #define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ | 396 | #define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ |
397 | #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ | 397 | #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ |
398 | #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ | 398 | #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ |
@@ -402,6 +402,7 @@ struct request_queue { | |||
402 | #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ | 402 | #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ |
403 | #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ | 403 | #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ |
404 | #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ | 404 | #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ |
405 | #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ | ||
405 | 406 | ||
406 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 407 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
407 | (1 << QUEUE_FLAG_STACKABLE) | \ | 408 | (1 << QUEUE_FLAG_STACKABLE) | \ |