diff options
author | Shaohua Li <shli@fusionio.com> | 2012-10-10 22:25:44 -0400 |
---|---|---|
committer | NeilBrown <neilb@suse.de> | 2012-10-10 22:25:44 -0400 |
commit | c83057a1f4f987327c49448b046d9625c612ed8e (patch) | |
tree | c929eb135da2161461150bee82b1fa39305801a8 /drivers/md | |
parent | f1cad2b68ed12c0f82d3f56e150691f62b6f5edf (diff) |
md: raid 0 supports TRIM
This makes md raid 0 support TRIM.
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/raid0.c | 19 |
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a9e4fa95dfaa..24b359717a7e 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -88,6 +88,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | |||
88 | char b[BDEVNAME_SIZE]; | 88 | char b[BDEVNAME_SIZE]; |
89 | char b2[BDEVNAME_SIZE]; | 89 | char b2[BDEVNAME_SIZE]; |
90 | struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); | 90 | struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); |
91 | bool discard_supported = false; | ||
91 | 92 | ||
92 | if (!conf) | 93 | if (!conf) |
93 | return -ENOMEM; | 94 | return -ENOMEM; |
@@ -195,6 +196,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | |||
195 | if (!smallest || (rdev1->sectors < smallest->sectors)) | 196 | if (!smallest || (rdev1->sectors < smallest->sectors)) |
196 | smallest = rdev1; | 197 | smallest = rdev1; |
197 | cnt++; | 198 | cnt++; |
199 | |||
200 | if (blk_queue_discard(bdev_get_queue(rdev1->bdev))) | ||
201 | discard_supported = true; | ||
198 | } | 202 | } |
199 | if (cnt != mddev->raid_disks) { | 203 | if (cnt != mddev->raid_disks) { |
200 | printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - " | 204 | printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - " |
@@ -272,6 +276,11 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | |||
272 | blk_queue_io_opt(mddev->queue, | 276 | blk_queue_io_opt(mddev->queue, |
273 | (mddev->chunk_sectors << 9) * mddev->raid_disks); | 277 | (mddev->chunk_sectors << 9) * mddev->raid_disks); |
274 | 278 | ||
279 | if (!discard_supported) | ||
280 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
281 | else | ||
282 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | ||
283 | |||
275 | pr_debug("md/raid0:%s: done.\n", mdname(mddev)); | 284 | pr_debug("md/raid0:%s: done.\n", mdname(mddev)); |
276 | *private_conf = conf; | 285 | *private_conf = conf; |
277 | 286 | ||
@@ -423,6 +432,7 @@ static int raid0_run(struct mddev *mddev) | |||
423 | return -EINVAL; | 432 | return -EINVAL; |
424 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); | 433 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); |
425 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); | 434 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); |
435 | blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); | ||
426 | 436 | ||
427 | /* if private is not null, we are here after takeover */ | 437 | /* if private is not null, we are here after takeover */ |
428 | if (mddev->private == NULL) { | 438 | if (mddev->private == NULL) { |
@@ -510,7 +520,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
510 | sector_t sector = bio->bi_sector; | 520 | sector_t sector = bio->bi_sector; |
511 | struct bio_pair *bp; | 521 | struct bio_pair *bp; |
512 | /* Sanity check -- queue functions should prevent this happening */ | 522 | /* Sanity check -- queue functions should prevent this happening */ |
513 | if (bio->bi_vcnt != 1 || | 523 | if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) || |
514 | bio->bi_idx != 0) | 524 | bio->bi_idx != 0) |
515 | goto bad_map; | 525 | goto bad_map; |
516 | /* This is a one page bio that upper layers | 526 | /* This is a one page bio that upper layers |
@@ -536,6 +546,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
536 | bio->bi_sector = sector_offset + zone->dev_start + | 546 | bio->bi_sector = sector_offset + zone->dev_start + |
537 | tmp_dev->data_offset; | 547 | tmp_dev->data_offset; |
538 | 548 | ||
549 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | ||
550 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { | ||
551 | /* Just ignore it */ | ||
552 | bio_endio(bio, 0); | ||
553 | return; | ||
554 | } | ||
555 | |||
539 | generic_make_request(bio); | 556 | generic_make_request(bio); |
540 | return; | 557 | return; |
541 | 558 | ||