author      Linus Torvalds <torvalds@linux-foundation.org>   2012-08-02 14:34:40 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2012-08-02 14:34:40 -0400
commit      25aa6a7ae46c6a041c46a2d314b9ab7c4f2baa41
tree        b99c627c269e38450d5d0f9713862d2ed06d6e5e  /drivers/md/raid1.c
parent      c8924234bd9c06fe86bae648c472d56cb10640a5
parent      d9f691c365a83ce2530f0e46b947365c2db44ea0
Merge tag 'md-3.6' of git://neil.brown.name/md
Pull additional md update from NeilBrown:
"This contains a few patches that depend on plugging changes in the
block layer so needed to wait for those.
It also contains a Kconfig fix for the new RAID10 support in dm-raid."
* tag 'md-3.6' of git://neil.brown.name/md:
md/dm-raid: DM_RAID should select MD_RAID10
md/raid1: submit IO from originating thread instead of md thread.
raid5: raid5d handle stripe in batch way
raid5: make_request use batch stripe release
Diffstat (limited to 'drivers/md/raid1.c')
 drivers/md/raid1.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 54 insertions(+), 3 deletions(-)
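
For context on the change below: these raid1 hunks build on the block-layer plugging callbacks that went into the block layer earlier in this window (blk_check_plugged() and struct blk_plug_cb). Each write bio is queued on a per-plug list and submitted by raid1_unplug() from the thread that issued it, falling back to the md thread only when the unplug happens from the scheduler. What follows is a minimal sketch of that consumer pattern, assuming the 3.6-era block-layer API; the my_* names are invented for illustration and this is not the raid1 code itself.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

/*
 * Per-plug state; the blk_plug_cb must be the first member, because
 * blk_check_plugged() kzalloc()s 'size' bytes and returns a pointer to
 * the callback embedded at offset 0.
 */
struct my_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
};

/*
 * Invoked by the block layer when the current plug is flushed; the
 * callback owns the allocation and must free it.  (A real driver, like
 * raid1 here, defers to a worker thread when from_schedule is set
 * instead of issuing IO from the scheduler path.)
 */
static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *plug = container_of(cb, struct my_plug_cb, cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&plug->pending)) != NULL)
		generic_make_request(bio);	/* runs in the plugging thread */
	kfree(plug);
}

static void my_queue_write(void *dev_cookie, struct bio *bio)
{
	/*
	 * Finds (or kzalloc()s) a callback on current->plug, keyed by
	 * (my_unplug, dev_cookie); returns NULL if there is no plug or
	 * the GFP_ATOMIC allocation fails.
	 */
	struct blk_plug_cb *cb = blk_check_plugged(my_unplug, dev_cookie,
						   sizeof(struct my_plug_cb));

	if (cb) {
		struct my_plug_cb *plug = container_of(cb, struct my_plug_cb, cb);

		bio_list_add(&plug->pending, bio);	/* defer until unplug */
	} else {
		generic_make_request(bio);		/* submit immediately */
	}
}

The design point mirrored in make_request() below is that blk_check_plugged() can return NULL, so the old path (conf->pending_bio_list plus a wakeup of the md thread) is kept as the fallback, and raid1_unplug() itself reverts to that path when called with from_schedule set.
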
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 9f7f8bee8442..611b5f797618 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -944,6 +944,44 @@ do_sync_io:
 	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
 
+struct raid1_plug_cb {
+	struct blk_plug_cb	cb;
+	struct bio_list		pending;
+	int			pending_cnt;
+};
+
+static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
+						  cb);
+	struct mddev *mddev = plug->cb.data;
+	struct r1conf *conf = mddev->private;
+	struct bio *bio;
+
+	if (from_schedule) {
+		spin_lock_irq(&conf->device_lock);
+		bio_list_merge(&conf->pending_bio_list, &plug->pending);
+		conf->pending_count += plug->pending_cnt;
+		spin_unlock_irq(&conf->device_lock);
+		md_wakeup_thread(mddev->thread);
+		kfree(plug);
+		return;
+	}
+
+	/* we aren't scheduling, so we can do the write-out directly. */
+	bio = bio_list_get(&plug->pending);
+	bitmap_unplug(mddev->bitmap);
+	wake_up(&conf->wait_barrier);
+
+	while (bio) { /* submit pending writes */
+		struct bio *next = bio->bi_next;
+		bio->bi_next = NULL;
+		generic_make_request(bio);
+		bio = next;
+	}
+	kfree(plug);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct r1conf *conf = mddev->private;
@@ -957,6 +995,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 	struct md_rdev *blocked_rdev;
+	struct blk_plug_cb *cb;
+	struct raid1_plug_cb *plug = NULL;
 	int first_clone;
 	int sectors_handled;
 	int max_sectors;
@@ -1259,11 +1299,22 @@ read_again:
 		mbio->bi_private = r1_bio;
 
 		atomic_inc(&r1_bio->remaining);
+
+		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
+		if (cb)
+			plug = container_of(cb, struct raid1_plug_cb, cb);
+		else
+			plug = NULL;
 		spin_lock_irqsave(&conf->device_lock, flags);
-		bio_list_add(&conf->pending_bio_list, mbio);
-		conf->pending_count++;
+		if (plug) {
+			bio_list_add(&plug->pending, mbio);
+			plug->pending_cnt++;
+		} else {
+			bio_list_add(&conf->pending_bio_list, mbio);
+			conf->pending_count++;
+		}
 		spin_unlock_irqrestore(&conf->device_lock, flags);
-		if (!mddev_check_plugged(mddev))
+		if (!plug)
 			md_wakeup_thread(mddev->thread);
 	}
 	/* Mustn't call r1_bio_write_done before this next test,