about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author	Joe Lawrence <Joe.Lawrence@stratus.com>	2013-02-20 21:28:09 -0500
committer	NeilBrown <neilb@suse.de>	2013-02-25 19:55:21 -0500
commitc8dc9c654794a765ca61baed07f84ed8aaa7ca8c (patch)
tree81dea728ef7bcaa01fb7e06c1e731d76dd258dff
parentbbfa57c0f2243a7c31fd248d22e9861a2802cad5 (diff)
md: raid1,10: Handle REQ_WRITE_SAME flag in write bios
Set mddev queue's max_write_same_sectors to its chunk_sector value (before
disk_stack_limits merges the underlying disk limits.)  With that in place,
be sure to handle writes coming down from the block layer that have the
REQ_WRITE_SAME flag set.  That flag needs to be copied into any newly cloned
write bio.

Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
Acked-by: "Martin K. Petersen" <martin.petersen@oracle.com>
Signed-off-by: NeilBrown <neilb@suse.de>
-rw-r--r--drivers/md/raid1.c7
-rw-r--r--drivers/md/raid10.c9
2 files changed, 13 insertions, 3 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d5bddfc4010e..6e5d5a5f9cb4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1000,6 +1000,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 	const unsigned long do_discard = (bio->bi_rw
 						& (REQ_DISCARD | REQ_SECURE));
+	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
 	struct raid1_plug_cb *plug = NULL;
@@ -1301,7 +1302,8 @@ read_again:
 				   conf->mirrors[i].rdev->data_offset);
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
+		mbio->bi_rw =
+			WRITE | do_flush_fua | do_sync | do_discard | do_same;
 		mbio->bi_private = r1_bio;

 		atomic_inc(&r1_bio->remaining);
@@ -2818,6 +2820,9 @@ static int run(struct mddev *mddev)
 	if (IS_ERR(conf))
 		return PTR_ERR(conf);

+	if (mddev->queue)
+		blk_queue_max_write_same_sectors(mddev->queue,
+			mddev->chunk_sectors);
 	rdev_for_each(rdev, mddev) {
 		if (!mddev->gendisk)
 			continue;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 64d48249c03b..1a74c12f0a6e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1105,6 +1105,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
 	const unsigned long do_discard = (bio->bi_rw
 						& (REQ_DISCARD | REQ_SECURE));
+	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
@@ -1460,7 +1461,8 @@ retry_write:
 							 rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io	= raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+			mbio->bi_rw =
+				WRITE | do_sync | do_fua | do_discard | do_same;
 			mbio->bi_private = r10_bio;

 			atomic_inc(&r10_bio->remaining);
@@ -1502,7 +1504,8 @@ retry_write:
 						 r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io	= raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+			mbio->bi_rw =
+				WRITE | do_sync | do_fua | do_discard | do_same;
 			mbio->bi_private = r10_bio;

 			atomic_inc(&r10_bio->remaining);
@@ -3569,6 +3572,8 @@ static int run(struct mddev *mddev)
 	if (mddev->queue) {
 		blk_queue_max_discard_sectors(mddev->queue,
 					      mddev->chunk_sectors);
+		blk_queue_max_write_same_sectors(mddev->queue,
+						 mddev->chunk_sectors);
 		blk_queue_io_min(mddev->queue, chunk_size);
 		if (conf->geo.raid_disks % conf->geo.near_copies)
 			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);