Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig		11
-rw-r--r--	block/blk-settings.c	77
2 files changed, 53 insertions, 35 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 95a86adc33a1..9be0b56eaee1 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -48,9 +48,9 @@ config LBDAF
 	  If unsure, say Y.
 
 config BLK_DEV_BSG
-	bool "Block layer SG support v4 (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
-	---help---
+	bool "Block layer SG support v4"
+	default y
+	help
 	  Saying Y here will enable generic SG (SCSI generic) v4 support
 	  for any block device.
 
@@ -60,7 +60,10 @@ config BLK_DEV_BSG
 	  protocols (e.g. Task Management Functions and SMP in Serial
 	  Attached SCSI).
 
-	  If unsure, say N.
+	  This option is required by recent UDEV versions to properly
+	  access device serial numbers, etc.
+
+	  If unsure, say Y.
 
 config BLK_DEV_INTEGRITY
 	bool "Block layer data integrity support"
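
Since BLK_DEV_BSG is now default y but still optional, code that uses bsg typically compiles out cleanly when the option is disabled. A minimal sketch of that usual Kconfig stub pattern, assuming a hypothetical my_bsg_attach() helper (not the real bsg API):

	#ifdef CONFIG_BLK_DEV_BSG
	int my_bsg_attach(struct request_queue *q);	/* real work */
	#else
	static inline int my_bsg_attach(struct request_queue *q)
	{
		return 0;	/* bsg disabled: succeed as a no-op */
	}
	#endif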
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8a3ea3bba10d..476d87065073 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,6 +7,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
+#include <linux/gcd.h>
 
 #include "blk.h"
 
@@ -384,8 +385,8 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
 EXPORT_SYMBOL(blk_queue_alignment_offset);
 
 /**
- * blk_queue_io_min - set minimum request size for the queue
- * @q:	the request queue for the device
+ * blk_limits_io_min - set minimum request size for a device
+ * @limits: the queue limits
  * @min:  smallest I/O size in bytes
  *
  * Description:
@@ -394,15 +395,35 @@ EXPORT_SYMBOL(blk_queue_alignment_offset);
  *   smallest I/O the device can perform without incurring a performance
  *   penalty.
  */
-void blk_queue_io_min(struct request_queue *q, unsigned int min)
+void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
 {
-	q->limits.io_min = min;
+	limits->io_min = min;
 
-	if (q->limits.io_min < q->limits.logical_block_size)
-		q->limits.io_min = q->limits.logical_block_size;
+	if (limits->io_min < limits->logical_block_size)
+		limits->io_min = limits->logical_block_size;
 
-	if (q->limits.io_min < q->limits.physical_block_size)
-		q->limits.io_min = q->limits.physical_block_size;
+	if (limits->io_min < limits->physical_block_size)
+		limits->io_min = limits->physical_block_size;
+}
+EXPORT_SYMBOL(blk_limits_io_min);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q:	the request queue for the device
+ * @min:  smallest I/O size in bytes
+ *
+ * Description:
+ *   Storage devices may report a granularity or preferred minimum I/O
+ *   size which is the smallest request the device can perform without
+ *   incurring a performance penalty.  For disk drives this is often the
+ *   physical block size.  For RAID arrays it is often the stripe chunk
+ *   size.  A properly aligned multiple of minimum_io_size is the
+ *   preferred request size for workloads where a high number of I/O
+ *   operations is desired.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+	blk_limits_io_min(&q->limits, min);
 }
 EXPORT_SYMBOL(blk_queue_io_min);
 
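
The split lets a driver that assembles a struct queue_limits before it has a request_queue (a stacking driver such as device mapper) apply the same rounding. A minimal sketch with made-up values:

	#include <linux/blkdev.h>

	static void example_limits(struct queue_limits *lim)
	{
		lim->logical_block_size  = 512;
		lim->physical_block_size = 4096;

		/* Requested 512 is below the physical block size,
		 * so io_min is bumped up to 4096. */
		blk_limits_io_min(lim, 512);
	}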
@@ -412,8 +433,12 @@ EXPORT_SYMBOL(blk_queue_io_min);
  * @opt:  optimal request size in bytes
  *
  * Description:
- *   Drivers can call this function to set the preferred I/O request
- *   size for devices that report such a value.
+ *   Storage devices may report an optimal I/O size, which is the
+ *   device's preferred unit for sustained I/O.  This is rarely reported
+ *   for disk drives.  For RAID arrays it is usually the stripe width or
+ *   the internal track size.  A properly aligned multiple of
+ *   optimal_io_size is the preferred request size for workloads where
+ *   sustained throughput is desired.
  */
 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 {
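
Taken together, io_min and io_opt let a striped device advertise its geometry. A sketch of a hypothetical RAID-style driver's queue setup (chunk_size and nr_stripes are illustrative variables, not real fields):

	/* One chunk is the smallest efficient request; a full
	 * stripe is the preferred unit for sustained throughput. */
	blk_queue_io_min(q, chunk_size);
	blk_queue_io_opt(q, chunk_size * nr_stripes);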
@@ -433,27 +458,7 @@ EXPORT_SYMBOL(blk_queue_io_opt);
  **/
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
-	/* zero is "infinity" */
-	t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
-					     queue_max_sectors(b));
-
-	t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
-						queue_max_hw_sectors(b));
-
-	t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
-						   queue_segment_boundary(b));
-
-	t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
-						   queue_max_phys_segments(b));
-
-	t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
-						 queue_max_hw_segments(b));
-
-	t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
-						  queue_max_segment_size(b));
-
-	t->limits.logical_block_size = max(queue_logical_block_size(t),
-					   queue_logical_block_size(b));
+	blk_stack_limits(&t->limits, &b->limits, 0);
 
 	if (!t->queue_lock)
 		WARN_ON_ONCE(1);
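
Stacking drivers that hold bare queue_limits can also call blk_stack_limits() directly; a negative return signals that the top and bottom limits are misaligned or otherwise incompatible. A hedged sketch (name and offset are illustrative variables):

	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0)
		printk(KERN_NOTICE "%s: device is misaligned\n", name);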
@@ -523,6 +528,16 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		return -1;
 	}
 
+	/* Find lcm() of optimal I/O size */
+	if (t->io_opt && b->io_opt)
+		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
+	else if (b->io_opt)
+		t->io_opt = b->io_opt;
+
+	/* Verify that optimal I/O size is a multiple of io_min */
+	if (t->io_min && t->io_opt % t->io_min)
+		return -1;
+
 	return 0;
 }
 EXPORT_SYMBOL(blk_stack_limits);
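
The lcm-via-gcd arithmetic above can be sanity-checked in isolation. A standalone userspace sketch with made-up stripe sizes (a 64KiB stripe stacked on a 48KiB stripe):

	#include <stdio.h>

	static unsigned int gcd(unsigned int a, unsigned int b)
	{
		while (b) {
			unsigned int r = a % b;
			a = b;
			b = r;
		}
		return a;
	}

	int main(void)
	{
		unsigned int top = 65536, bottom = 49152;

		/* gcd = 16384, so lcm = 65536 * 49152 / 16384 */
		printf("stacked io_opt = %u\n",
		       (top / gcd(top, bottom)) * bottom);	/* 196608 */
		return 0;
	}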