Diffstat (limited to 'drivers/md/raid10.c')
 -rw-r--r--  drivers/md/raid10.c  57
 1 file changed, 54 insertions(+), 3 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f92e0ed59be0..05dc96a950d5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1055,6 +1055,44 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
         return rdev->new_data_offset;
 }
 
+struct raid10_plug_cb {
+        struct blk_plug_cb      cb;
+        struct bio_list         pending;
+        int                     pending_cnt;
+};
+
+static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+        struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
+                                                   cb);
+        struct mddev *mddev = plug->cb.data;
+        struct r10conf *conf = mddev->private;
+        struct bio *bio;
+
+        if (from_schedule) {
+                spin_lock_irq(&conf->device_lock);
+                bio_list_merge(&conf->pending_bio_list, &plug->pending);
+                conf->pending_count += plug->pending_cnt;
+                spin_unlock_irq(&conf->device_lock);
+                md_wakeup_thread(mddev->thread);
+                kfree(plug);
+                return;
+        }
+
+        /* we aren't scheduling, so we can do the write-out directly. */
+        bio = bio_list_get(&plug->pending);
+        bitmap_unplug(mddev->bitmap);
+        wake_up(&conf->wait_barrier);
+
+        while (bio) { /* submit pending writes */
+                struct bio *next = bio->bi_next;
+                bio->bi_next = NULL;
+                generic_make_request(bio);
+                bio = next;
+        }
+        kfree(plug);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
         struct r10conf *conf = mddev->private;
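
Not part of the patch, for context only: raid10_unplug() above runs when the block layer flushes the submitting task's plug, with from_schedule == true when the flush happens because the task is about to sleep in the scheduler (the bios are then handed back to the md thread) and false on an explicit blk_finish_plug(). A minimal caller-side sketch, assuming only the standard blk_start_plug()/blk_finish_plug() API; submit_batched_writes() and its arguments are invented for illustration:

#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Illustrative only: a task batching several writes under one block-layer
 * plug.  While the plug is active, raid10's blk_check_plugged() call (see
 * the last hunk below) finds it and queues the mirrored bios on the
 * per-plug list; blk_finish_plug() then runs raid10_unplug() with
 * from_schedule == false in this task's context, while a flush triggered
 * from the scheduler runs it with from_schedule == true.
 */
static void submit_batched_writes(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                generic_make_request(bios[i]);  /* may reach raid10's make_request() */
        blk_finish_plug(&plug);                 /* flushes the plug callbacks */
}
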
@@ -1070,6 +1108,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                                           & (REQ_DISCARD | REQ_SECURE));
         unsigned long flags;
         struct md_rdev *blocked_rdev;
+        struct blk_plug_cb *cb;
+        struct raid10_plug_cb *plug = NULL;
         int sectors_handled;
         int max_sectors;
         int sectors;
@@ -1421,11 +1461,22 @@ retry_write:
                 mbio->bi_private = r10_bio;
 
                 atomic_inc(&r10_bio->remaining);
+
+                cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
+                if (cb)
+                        plug = container_of(cb, struct raid10_plug_cb, cb);
+                else
+                        plug = NULL;
                 spin_lock_irqsave(&conf->device_lock, flags);
-                bio_list_add(&conf->pending_bio_list, mbio);
-                conf->pending_count++;
+                if (plug) {
+                        bio_list_add(&plug->pending, mbio);
+                        plug->pending_cnt++;
+                } else {
+                        bio_list_add(&conf->pending_bio_list, mbio);
+                        conf->pending_count++;
+                }
                 spin_unlock_irqrestore(&conf->device_lock, flags);
-                if (!mddev_check_plugged(mddev))
+                if (!plug)
                         md_wakeup_thread(mddev->thread);
 
                 if (!r10_bio->devs[i].repl_bio)
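
Taken together, the three hunks collect write bios issued under a plug on a per-task raid10_plug_cb and submit them in one batch from the originating process, falling back to conf->pending_bio_list and the md thread only when there is no plug or the flush comes from the scheduler. Below is a minimal sketch of the same blk_check_plugged() pattern with the md-specific locking and thread handoff stripped out; the mydrv_* names and struct mydrv are invented, and only the blk_*, bio_list_* and container_of() calls are the real kernel API:

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

/* Hypothetical per-device object; used only as the (callback, data) key. */
struct mydrv;

/* Per-plug state, embedding blk_plug_cb exactly like raid10_plug_cb does. */
struct mydrv_plug_cb {
        struct blk_plug_cb cb;          /* embedded so container_of() can recover us */
        struct bio_list pending;        /* never initialised explicitly, like raid10's */
};

/* Runs when the submitter's plug is flushed; the callback owns freeing cb. */
static void mydrv_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct mydrv_plug_cb *plug = container_of(cb, struct mydrv_plug_cb, cb);
        struct bio *bio = bio_list_get(&plug->pending);

        while (bio) {
                struct bio *next = bio->bi_next;

                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = next;
        }
        kfree(plug);
}

static void mydrv_queue_write(struct mydrv *dev, struct bio *bio)
{
        struct blk_plug_cb *cb;

        /*
         * Returns the callback already registered for (mydrv_unplug, dev)
         * on the current plug, a freshly allocated one of the requested
         * size, or NULL when the caller is not plugged.
         */
        cb = blk_check_plugged(mydrv_unplug, dev, sizeof(struct mydrv_plug_cb));
        if (cb) {
                struct mydrv_plug_cb *plug =
                        container_of(cb, struct mydrv_plug_cb, cb);

                bio_list_add(&plug->pending, bio);      /* batched until unplug */
        } else {
                generic_make_request(bio);              /* no plug: submit now */
        }
}

The raid10 version additionally serialises its list updates under conf->device_lock and, when from_schedule is set, hands the pending bios back to the md thread rather than issuing them from the unplug callback.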