Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--  block/blk-settings.c  84
1 file changed, 53 insertions(+), 31 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bd582a7f5310..476d87065073 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,6 +7,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
+#include <linux/gcd.h>
 
 #include "blk.h"
 
@@ -165,6 +166,13 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_set_default_limits(&q->limits);
 
 	/*
+	 * If the caller didn't supply a lock, fall back to our embedded
+	 * per-queue locks
+	 */
+	if (!q->queue_lock)
+		q->queue_lock = &q->__queue_lock;
+
+	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
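
(Note: with the hunk above, a driver no longer has to hand blk_queue_make_request() a spinlock of its own; a queue whose queue_lock is still NULL falls back to its embedded __queue_lock. A minimal sketch of a bio-based driver relying on that fallback follows. It is illustrative only: example_make_request() and example_init() are hypothetical names, and the calls match the APIs of this kernel generation, i.e. blk_alloc_queue() and the two-argument bio_endio().)

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/init.h>

	/* Hypothetical make_request function -- not part of the patch */
	static int example_make_request(struct request_queue *q, struct bio *bio)
	{
		/* ... service the bio ... */
		bio_endio(bio, 0);
		return 0;
	}

	static int __init example_init(void)
	{
		struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

		if (!q)
			return -ENOMEM;

		/*
		 * q->queue_lock is still NULL here; with this patch
		 * blk_queue_make_request() substitutes &q->__queue_lock
		 * instead of leaving the queue without a lock.
		 */
		blk_queue_make_request(q, example_make_request);
		return 0;
	}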
@@ -377,8 +385,8 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
 EXPORT_SYMBOL(blk_queue_alignment_offset);
 
 /**
- * blk_queue_io_min - set minimum request size for the queue
- * @q:	the request queue for the device
+ * blk_limits_io_min - set minimum request size for a device
+ * @limits: the queue limits
  * @min:  smallest I/O size in bytes
  *
  * Description:
@@ -387,15 +395,35 @@ EXPORT_SYMBOL(blk_queue_alignment_offset);
  *   smallest I/O the device can perform without incurring a performance
  *   penalty.
  */
-void blk_queue_io_min(struct request_queue *q, unsigned int min)
+void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
 {
-	q->limits.io_min = min;
+	limits->io_min = min;
 
-	if (q->limits.io_min < q->limits.logical_block_size)
-		q->limits.io_min = q->limits.logical_block_size;
+	if (limits->io_min < limits->logical_block_size)
+		limits->io_min = limits->logical_block_size;
 
-	if (q->limits.io_min < q->limits.physical_block_size)
-		q->limits.io_min = q->limits.physical_block_size;
+	if (limits->io_min < limits->physical_block_size)
+		limits->io_min = limits->physical_block_size;
+}
+EXPORT_SYMBOL(blk_limits_io_min);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q:	the request queue for the device
+ * @min:  smallest I/O size in bytes
+ *
+ * Description:
+ *   Storage devices may report a granularity or preferred minimum I/O
+ *   size which is the smallest request the device can perform without
+ *   incurring a performance penalty.  For disk drives this is often the
+ *   physical block size.  For RAID arrays it is often the stripe chunk
+ *   size.  A properly aligned multiple of minimum_io_size is the
+ *   preferred request size for workloads where a high number of I/O
+ *   operations is desired.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+	blk_limits_io_min(&q->limits, min);
 }
 EXPORT_SYMBOL(blk_queue_io_min);
 
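(Note: the split above exists because stacking drivers build up a bare struct queue_limits before any request_queue is allocated. A hedged sketch of the two call styles, wrapped in a hypothetical helper with invented values:)

	#include <linux/blkdev.h>

	/* example_set_io_min() is hypothetical; the numbers are invented */
	static void example_set_io_min(struct request_queue *q)
	{
		struct queue_limits lim = {
			.logical_block_size  = 512,
			.physical_block_size = 4096,
		};

		/* Regular driver path: queue in hand, 4 KiB physical blocks */
		blk_queue_io_min(q, 4096);

		/* Stacking path: no queue yet, only bare limits */
		blk_limits_io_min(&lim, 512);
		/* The two clamps bump lim.io_min to 4096, the physical block size */
	}
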
@@ -405,8 +433,12 @@ EXPORT_SYMBOL(blk_queue_io_min);
  * @opt:  optimal request size in bytes
  *
  * Description:
- *   Drivers can call this function to set the preferred I/O request
- *   size for devices that report such a value.
+ *   Storage devices may report an optimal I/O size, which is the
+ *   device's preferred unit for sustained I/O.  This is rarely reported
+ *   for disk drives.  For RAID arrays it is usually the stripe width or
+ *   the internal track size.  A properly aligned multiple of
+ *   optimal_io_size is the preferred request size for workloads where
+ *   sustained throughput is desired.
  */
 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 {
@@ -426,27 +458,7 @@ EXPORT_SYMBOL(blk_queue_io_opt);
  **/
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
-	/* zero is "infinity" */
-	t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
-					     queue_max_sectors(b));
-
-	t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
-						queue_max_hw_sectors(b));
-
-	t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
-						   queue_segment_boundary(b));
-
-	t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
-						   queue_max_phys_segments(b));
-
-	t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
-						 queue_max_hw_segments(b));
-
-	t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
-						  queue_max_segment_size(b));
-
-	t->limits.logical_block_size = max(queue_logical_block_size(t),
-					   queue_logical_block_size(b));
+	blk_stack_limits(&t->limits, &b->limits, 0);
 
 	if (!t->queue_lock)
 		WARN_ON_ONCE(1);
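
(Note: the removed min_not_zero() aggregation is not lost; it lives on in blk_stack_limits(), which the wrapper now calls with an alignment offset of 0. The 'zero is "infinity"' comment captures the convention that an unset limit must never win the min(). A sketch of that convention under a hypothetical name:)

	#include <linux/kernel.h>

	/* stack_min() is a hypothetical stand-in for the min_not_zero() macro */
	static inline unsigned int stack_min(unsigned int t, unsigned int b)
	{
		if (!t)			/* top limit unset: bottom wins */
			return b;
		if (!b)			/* bottom limit unset: top wins */
			return t;
		return min(t, b);	/* both set: tightest limit wins */
	}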
@@ -516,6 +528,16 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		return -1;
 	}
 
+	/* Find lcm() of optimal I/O size */
+	if (t->io_opt && b->io_opt)
+		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
+	else if (b->io_opt)
+		t->io_opt = b->io_opt;
+
+	/* Verify that optimal I/O size is a multiple of io_min */
+	if (t->io_min && t->io_opt % t->io_min)
+		return -1;
+
 	return 0;
 }
 EXPORT_SYMBOL(blk_stack_limits);
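
(Note: a worked example of the io_opt stacking rule, with invented values. Stacking a layer reporting io_opt = 192 KiB over one reporting 256 KiB gives gcd(196608, 262144) = 65536, hence lcm = 196608 * 262144 / 65536 = 786432, i.e. 768 KiB, the smallest size optimal for both layers; the final check then rejects the stack if that lcm is not a multiple of io_min. The same arithmetic as a standalone userspace sketch -- gcd() is re-implemented because this runs outside the kernel, and it divides before multiplying to dodge overflow, a liberty the hunk above does not take:)

	#include <stdio.h>

	static unsigned long gcd(unsigned long a, unsigned long b)
	{
		while (b) {
			unsigned long r = a % b;
			a = b;
			b = r;
		}
		return a;
	}

	int main(void)
	{
		unsigned long t_opt = 192 * 1024;	/* top layer io_opt    */
		unsigned long b_opt = 256 * 1024;	/* bottom layer io_opt */
		unsigned long lcm = t_opt / gcd(t_opt, b_opt) * b_opt;

		printf("stacked io_opt = %lu\n", lcm);	/* prints 786432 */
		return 0;
	}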