diff options
author | Martin K. Petersen <martin.petersen@oracle.com> | 2009-11-10 05:50:21 -0500 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-11-10 05:50:21 -0500 |
commit | 86b37281411cf1e9bc0a6b5406c45edb7bd9ea5d (patch) | |
tree | 729db57dd52054af1bc16b4afb131093dfc9d255 /block/blk-settings.c | |
parent | cf7c25cf91f632a3528669fc0876e1fc8355ff9b (diff) |
block: Expose discard granularity
While SSDs track block usage on a per-sector basis, RAID arrays often
have allocation blocks that are bigger. Allow the discard granularity
and alignment to be set and teach the topology stacking logic how to
handle them.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r-- | block/blk-settings.c | 46 |
1 file changed, 36 insertions, 10 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c index 66d4aa8799b7..7f986cafacd5 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
@@ -96,7 +96,10 @@ void blk_set_default_limits(struct queue_limits *lim) | |||
96 | lim->max_segment_size = MAX_SEGMENT_SIZE; | 96 | lim->max_segment_size = MAX_SEGMENT_SIZE; |
97 | lim->max_sectors = BLK_DEF_MAX_SECTORS; | 97 | lim->max_sectors = BLK_DEF_MAX_SECTORS; |
98 | lim->max_hw_sectors = INT_MAX; | 98 | lim->max_hw_sectors = INT_MAX; |
99 | lim->max_discard_sectors = SAFE_MAX_SECTORS; | 99 | lim->max_discard_sectors = 0; |
100 | lim->discard_granularity = 0; | ||
101 | lim->discard_alignment = 0; | ||
102 | lim->discard_misaligned = 0; | ||
100 | lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; | 103 | lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; |
101 | lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); | 104 | lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); |
102 | lim->alignment_offset = 0; | 105 | lim->alignment_offset = 0; |
@@ -488,6 +491,16 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | |||
488 | } | 491 | } |
489 | EXPORT_SYMBOL(blk_queue_stack_limits); | 492 | EXPORT_SYMBOL(blk_queue_stack_limits); |
490 | 493 | ||
494 | static unsigned int lcm(unsigned int a, unsigned int b) | ||
495 | { | ||
496 | if (a && b) | ||
497 | return (a * b) / gcd(a, b); | ||
498 | else if (b) | ||
499 | return b; | ||
500 | |||
501 | return a; | ||
502 | } | ||
503 | |||
491 | /** | 504 | /** |
492 | * blk_stack_limits - adjust queue_limits for stacked devices | 505 | * blk_stack_limits - adjust queue_limits for stacked devices |
493 | * @t: the stacking driver limits (top) | 506 | * @t: the stacking driver limits (top) |
@@ -502,6 +515,10 @@ EXPORT_SYMBOL(blk_queue_stack_limits); | |||
502 | int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | 515 | int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, |
503 | sector_t offset) | 516 | sector_t offset) |
504 | { | 517 | { |
518 | int ret; | ||
519 | |||
520 | ret = 0; | ||
521 | |||
505 | t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); | 522 | t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); |
506 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); | 523 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); |
507 | t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); | 524 | t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); |
@@ -531,7 +548,13 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
531 | if (offset && | 548 | if (offset && |
532 | (offset & (b->physical_block_size - 1)) != b->alignment_offset) { | 549 | (offset & (b->physical_block_size - 1)) != b->alignment_offset) { |
533 | t->misaligned = 1; | 550 | t->misaligned = 1; |
534 | return -1; | 551 | ret = -1; |
552 | } | ||
553 | |||
554 | if (offset && | ||
555 | (offset & (b->discard_granularity - 1)) != b->discard_alignment) { | ||
556 | t->discard_misaligned = 1; | ||
557 | ret = -1; | ||
535 | } | 558 | } |
536 | 559 | ||
537 | /* If top has no alignment offset, inherit from bottom */ | 560 | /* If top has no alignment offset, inherit from bottom */ |
@@ -539,23 +562,26 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
539 | t->alignment_offset = | 562 | t->alignment_offset = |
540 | b->alignment_offset & (b->physical_block_size - 1); | 563 | b->alignment_offset & (b->physical_block_size - 1); |
541 | 564 | ||
565 | if (!t->discard_alignment) | ||
566 | t->discard_alignment = | ||
567 | b->discard_alignment & (b->discard_granularity - 1); | ||
568 | |||
542 | /* Top device aligned on logical block boundary? */ | 569 | /* Top device aligned on logical block boundary? */ |
543 | if (t->alignment_offset & (t->logical_block_size - 1)) { | 570 | if (t->alignment_offset & (t->logical_block_size - 1)) { |
544 | t->misaligned = 1; | 571 | t->misaligned = 1; |
545 | return -1; | 572 | ret = -1; |
546 | } | 573 | } |
547 | 574 | ||
548 | /* Find lcm() of optimal I/O size */ | 575 | /* Find lcm() of optimal I/O size and granularity */ |
549 | if (t->io_opt && b->io_opt) | 576 | t->io_opt = lcm(t->io_opt, b->io_opt); |
550 | t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt); | 577 | t->discard_granularity = lcm(t->discard_granularity, |
551 | else if (b->io_opt) | 578 | b->discard_granularity); |
552 | t->io_opt = b->io_opt; | ||
553 | 579 | ||
554 | /* Verify that optimal I/O size is a multiple of io_min */ | 580 | /* Verify that optimal I/O size is a multiple of io_min */ |
555 | if (t->io_min && t->io_opt % t->io_min) | 581 | if (t->io_min && t->io_opt % t->io_min) |
556 | return -1; | 582 | ret = -1; |
557 | 583 | ||
558 | return 0; | 584 | return ret; |
559 | } | 585 | } |
560 | EXPORT_SYMBOL(blk_stack_limits); | 586 | EXPORT_SYMBOL(blk_stack_limits); |
561 | 587 | ||