about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/md/raid10.c | 29
1 files changed, 25 insertions, 4 deletions
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1c2eb38f3c51..f92e0ed59be0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -911,7 +911,12 @@ static void flush_pending_writes(struct r10conf *conf)
911 while (bio) { /* submit pending writes */ 911 while (bio) { /* submit pending writes */
912 struct bio *next = bio->bi_next; 912 struct bio *next = bio->bi_next;
913 bio->bi_next = NULL; 913 bio->bi_next = NULL;
914 generic_make_request(bio); 914 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
915 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
916 /* Just ignore it */
917 bio_endio(bio, 0);
918 else
919 generic_make_request(bio);
915 bio = next; 920 bio = next;
916 } 921 }
917 } else 922 } else
@@ -1061,6 +1066,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1061 const int rw = bio_data_dir(bio); 1066 const int rw = bio_data_dir(bio);
1062 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 1067 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1063 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); 1068 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1069 const unsigned long do_discard = (bio->bi_rw
1070 & (REQ_DISCARD | REQ_SECURE));
1064 unsigned long flags; 1071 unsigned long flags;
1065 struct md_rdev *blocked_rdev; 1072 struct md_rdev *blocked_rdev;
1066 int sectors_handled; 1073 int sectors_handled;
@@ -1081,7 +1088,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1081 || conf->prev.near_copies < conf->prev.raid_disks))) { 1088 || conf->prev.near_copies < conf->prev.raid_disks))) {
1082 struct bio_pair *bp; 1089 struct bio_pair *bp;
1083 /* Sanity check -- queue functions should prevent this happening */ 1090 /* Sanity check -- queue functions should prevent this happening */
1084 if (bio->bi_vcnt != 1 || 1091 if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
1085 bio->bi_idx != 0) 1092 bio->bi_idx != 0)
1086 goto bad_map; 1093 goto bad_map;
1087 /* This is a one page bio that upper layers 1094 /* This is a one page bio that upper layers
@@ -1410,7 +1417,7 @@ retry_write:
1410 conf->mirrors[d].rdev)); 1417 conf->mirrors[d].rdev));
1411 mbio->bi_bdev = conf->mirrors[d].rdev->bdev; 1418 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1412 mbio->bi_end_io = raid10_end_write_request; 1419 mbio->bi_end_io = raid10_end_write_request;
1413 mbio->bi_rw = WRITE | do_sync | do_fua; 1420 mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
1414 mbio->bi_private = r10_bio; 1421 mbio->bi_private = r10_bio;
1415 1422
1416 atomic_inc(&r10_bio->remaining); 1423 atomic_inc(&r10_bio->remaining);
@@ -1439,7 +1446,7 @@ retry_write:
1439 conf->mirrors[d].replacement)); 1446 conf->mirrors[d].replacement));
1440 mbio->bi_bdev = conf->mirrors[d].replacement->bdev; 1447 mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
1441 mbio->bi_end_io = raid10_end_write_request; 1448 mbio->bi_end_io = raid10_end_write_request;
1442 mbio->bi_rw = WRITE | do_sync | do_fua; 1449 mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
1443 mbio->bi_private = r10_bio; 1450 mbio->bi_private = r10_bio;
1444 1451
1445 atomic_inc(&r10_bio->remaining); 1452 atomic_inc(&r10_bio->remaining);
@@ -1723,6 +1730,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1723 clear_bit(Unmerged, &rdev->flags); 1730 clear_bit(Unmerged, &rdev->flags);
1724 } 1731 }
1725 md_integrity_add_rdev(rdev, mddev); 1732 md_integrity_add_rdev(rdev, mddev);
1733 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
1734 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1735
1726 print_conf(conf); 1736 print_conf(conf);
1727 return err; 1737 return err;
1728} 1738}
@@ -3480,6 +3490,7 @@ static int run(struct mddev *mddev)
3480 sector_t size; 3490 sector_t size;
3481 sector_t min_offset_diff = 0; 3491 sector_t min_offset_diff = 0;
3482 int first = 1; 3492 int first = 1;
3493 bool discard_supported = false;
3483 3494
3484 if (mddev->private == NULL) { 3495 if (mddev->private == NULL) {
3485 conf = setup_conf(mddev); 3496 conf = setup_conf(mddev);
@@ -3496,6 +3507,8 @@ static int run(struct mddev *mddev)
3496 3507
3497 chunk_size = mddev->chunk_sectors << 9; 3508 chunk_size = mddev->chunk_sectors << 9;
3498 if (mddev->queue) { 3509 if (mddev->queue) {
3510 blk_queue_max_discard_sectors(mddev->queue,
3511 mddev->chunk_sectors);
3499 blk_queue_io_min(mddev->queue, chunk_size); 3512 blk_queue_io_min(mddev->queue, chunk_size);
3500 if (conf->geo.raid_disks % conf->geo.near_copies) 3513 if (conf->geo.raid_disks % conf->geo.near_copies)
3501 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3514 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
@@ -3541,8 +3554,16 @@ static int run(struct mddev *mddev)
3541 rdev->data_offset << 9); 3554 rdev->data_offset << 9);
3542 3555
3543 disk->head_position = 0; 3556 disk->head_position = 0;
3557
3558 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3559 discard_supported = true;
3544 } 3560 }
3545 3561
3562 if (discard_supported)
3563 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
3564 else
3565 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
3566
3546 /* need to check that every block has at least one working mirror */ 3567 /* need to check that every block has at least one working mirror */
3547 if (!enough(conf, -1)) { 3568 if (!enough(conf, -1)) {
3548 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", 3569 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",