author		Shaohua Li <shli@kernel.org>	2012-12-13 22:15:36 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-12-14 14:46:04 -0500
commit		8dd2cb7e880d2f77fba53b523c99133ad5054cfd (patch)
tree		ea51e89f8c8bf9ca8e888d68ecf6732a52e8e99d /block
parent		75274551c81796b636c5acb0c2597dec7ec2e6c4 (diff)
block: discard granularity might not be power of 2
In the MD RAID case, the discard granularity might not be a power of 2; for
example, a 4-disk raid5 has a discard granularity of 3 * chunk_size. Correct
the calculation for such cases.
Reported-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
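
The root of the bug: the old code rounded with bit masks, and
x & (granularity - 1) equals x % granularity only when granularity is a power
of 2. A minimal userspace sketch (hypothetical numbers: a 4-disk raid5 with
512-sector chunks would give granularity = 3 * 512 = 1536 sectors) shows how
far the two forms diverge:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical: 4-disk raid5, 512-sector chunks ->
         * discard granularity = 3 * 512 = 1536 sectors */
        unsigned long long granularity = 1536, sector = 2000;

        /* mask arithmetic is only valid for power-of-2 granularity */
        printf("mask: %llu\n", sector & (granularity - 1)); /* 1488 */

        /* modulo arithmetic is valid for any granularity */
        printf("mod:  %llu\n", sector % granularity);       /* 464 */
        return 0;
    }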
Diffstat (limited to 'block')
 block/blk-lib.c      | 23 +++++++++++++----------
 block/blk-settings.c |  6 +++---
 2 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9373b58dfab1..5677fd33d7d2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,8 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors;
-	unsigned int granularity, alignment, mask;
+	sector_t max_discard_sectors;
+	sector_t granularity, alignment;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
@@ -57,15 +57,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	mask = granularity - 1;
-	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+	alignment = bdev_discard_alignment(bdev) >> 9;
+	alignment = sector_div(alignment, granularity);
 
 	/*
 	 * Ensure that max_discard_sectors is of the proper
 	 * granularity, so that requests stay aligned after a split.
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors = round_down(max_discard_sectors, granularity);
+	sector_div(max_discard_sectors, granularity);
+	max_discard_sectors *= granularity;
 	if (unlikely(!max_discard_sectors)) {
 		/* Avoid infinite loop below. Being cautious never hurts. */
 		return -EOPNOTSUPP;
@@ -83,7 +84,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	while (nr_sects) {
 		unsigned int req_sects;
-		sector_t end_sect;
+		sector_t end_sect, tmp;
 
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
@@ -98,10 +99,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 * misaligned, stop the discard at the previous aligned sector.
 		 */
 		end_sect = sector + req_sects;
-		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
-			end_sect =
-				round_down(end_sect - alignment, granularity)
-				+ alignment;
+		tmp = end_sect;
+		if (req_sects < nr_sects &&
+		    sector_div(tmp, granularity) != alignment) {
+			end_sect = end_sect - alignment;
+			sector_div(end_sect, granularity);
+			end_sect = end_sect * granularity + alignment;
 			req_sects = end_sect - sector;
 		}
 
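
The hunks above rely on the kernel's sector_div(), which divides a sector_t
in place and returns the remainder, so it handles any divisor, not just
powers of 2. A rough userspace model of the end-of-range trimming in the last
hunk (the helper below is an illustrative stand-in; the real sector_div() is
a macro that takes its first argument as an lvalue, not a pointer):

    #include <stdio.h>

    /* illustrative stand-in for the kernel's sector_div():
     * divides in place, returns the remainder */
    static unsigned long long sector_div_model(unsigned long long *n,
                                               unsigned int base)
    {
        unsigned long long rem = *n % base;
        *n /= base;
        return rem;
    }

    /* trim end_sect down to the previous sector satisfying
     * end_sect % granularity == alignment, as in the hunk above */
    static unsigned long long trim_end(unsigned long long end_sect,
                                       unsigned int granularity,
                                       unsigned int alignment)
    {
        end_sect -= alignment;
        sector_div_model(&end_sect, granularity);
        return end_sect * granularity + alignment;
    }

    int main(void)
    {
        /* hypothetical values: granularity 1536 sectors, alignment 0 */
        printf("%llu\n", trim_end(5000, 1536, 0)); /* 4608 = 3 * 1536 */
        return 0;
    }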
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 779bb7646bcd..c50ecf0ea3b1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -611,7 +611,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		bottom = b->discard_granularity + alignment;
 
 		/* Verify that top and bottom intervals line up */
-		if (max(top, bottom) & (min(top, bottom) - 1))
+		if ((max(top, bottom) % min(top, bottom)) != 0)
 			t->discard_misaligned = 1;
 	}
 
@@ -619,8 +619,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					      b->max_discard_sectors);
 		t->discard_granularity = max(t->discard_granularity,
 					     b->discard_granularity);
-		t->discard_alignment = lcm(t->discard_alignment, alignment) &
-			(t->discard_granularity - 1);
+		t->discard_alignment = lcm(t->discard_alignment, alignment) %
+			t->discard_granularity;
 	}
 
 	return ret;
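
As a concrete check of the blk-settings.c change, take hypothetical stacked
granularities of 3072 and 1536: they stack cleanly (one is a multiple of the
other), yet the old power-of-2 test falsely flagged them as misaligned:

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))
    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        /* hypothetical top/bottom discard granularities */
        unsigned int top = 3072, bottom = 1536;

        /* old test: 3072 & 1535 = 1024 -> falsely marked misaligned */
        printf("mask test: %u\n", max(top, bottom) & (min(top, bottom) - 1));

        /* new test: 3072 % 1536 = 0 -> intervals line up */
        printf("mod test:  %u\n", max(top, bottom) % min(top, bottom));
        return 0;
    }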