author		Mikulas Patocka <mpatocka@redhat.com>	2009-12-10 18:51:59 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2009-12-10 18:51:59 -0500
commit		4184153f9e483f9bb63339ed316e059962fe9794 (patch)
tree		054ae52af1a464d49bded004de64cdf342f40e68
parent		f1e539874655ae9e74c1644fd54133b19f1b14e2 (diff)
dm raid1: support flush
Flush support for dm-raid1.
When it receives an empty barrier, submit it to all the devices via dm-io.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
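
For orientation, here is a condensed sketch of what the patched do_write() looks like once the hunks below are applied. It is abridged from drivers/md/dm-raid1.c, where the mirror_set/mirror types and the map_region(), write_callback() and dm_io() helpers live, so treat it as a reading aid rather than a compilable excerpt:

/*
 * Condensed sketch of do_write() after this patch.  An empty barrier
 * carries no data, but the WRITE_BARRIER bit is now propagated so
 * dm-io issues a barrier to every mirror leg.
 */
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		/* new: preserve the caller's barrier flag */
		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	/*
	 * One dm_io_region per mirror leg.  map_sector() now returns
	 * sector 0 for a zero-size bio, so an empty barrier maps to a
	 * well-defined (empty) region on every leg.
	 */
	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/* submit to all legs at once; completion via write_callback() */
	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

As the later hunks show, do_writes() steers empty barriers onto the sync list so they bypass the region hash, and mirror_end_io() correspondingly skips dm_rh_dec() for them, since no region was ever marked dirty on their behalf.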
-rw-r--r--	drivers/md/dm-raid1.c		13
-rw-r--r--	drivers/md/dm-region-hash.c	25
2 files changed, 34 insertions, 4 deletions
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index cc9dc79b0784..752a29e1855b 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -396,6 +396,8 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
  */
 static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
+	if (unlikely(!bio->bi_size))
+		return 0;
 	return m->offset + (bio->bi_sector - m->ms->ti->begin);
 }
 
@@ -562,7 +564,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct dm_io_region io[ms->nr_mirrors], *dest = io;
 	struct mirror *m;
 	struct dm_io_request io_req = {
-		.bi_rw = WRITE,
+		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
 		.mem.type = DM_IO_BVEC,
 		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
 		.notify.fn = write_callback,
@@ -603,6 +605,11 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	bio_list_init(&requeue);
 
 	while ((bio = bio_list_pop(writes))) {
+		if (unlikely(bio_empty_barrier(bio))) {
+			bio_list_add(&sync, bio);
+			continue;
+		}
+
 		region = dm_rh_bio_to_region(ms->rh, bio);
 
 		if (log->type->is_remote_recovering &&
@@ -995,6 +1002,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	ti->private = ms;
 	ti->split_io = dm_rh_get_region_size(ms->rh);
+	ti->num_flush_requests = 1;
 
 	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
 	if (!ms->kmirrord_wq) {
@@ -1122,7 +1130,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	 * We need to dec pending if this was a write.
 	 */
 	if (rw == WRITE) {
-		dm_rh_dec(ms->rh, map_context->ll);
+		if (likely(!bio_empty_barrier(bio)))
+			dm_rh_dec(ms->rh, map_context->ll);
 		return error;
 	}
 
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 36dbe29f2fd6..00806b760ccd 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -79,6 +79,11 @@ struct dm_region_hash {
 	struct list_head recovered_regions;
 	struct list_head failed_recovered_regions;
 
+	/*
+	 * If there was a barrier failure no regions can be marked clean.
+	 */
+	int barrier_failure;
+
 	void *context;
 	sector_t target_begin;
 
@@ -211,6 +216,7 @@ struct dm_region_hash *dm_region_hash_create(
 	INIT_LIST_HEAD(&rh->quiesced_regions);
 	INIT_LIST_HEAD(&rh->recovered_regions);
 	INIT_LIST_HEAD(&rh->failed_recovered_regions);
+	rh->barrier_failure = 0;
 
 	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
 						      sizeof(struct dm_region));
@@ -395,6 +401,11 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
 	region_t region = dm_rh_bio_to_region(rh, bio);
 	int recovering = 0;
 
+	if (bio_empty_barrier(bio)) {
+		rh->barrier_failure = 1;
+		return;
+	}
+
 	/* We must inform the log that the sync count has changed. */
 	log->type->set_region_sync(log, region, 0);
 
@@ -515,8 +526,11 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
 {
 	struct bio *bio;
 
-	for (bio = bios->head; bio; bio = bio->bi_next)
+	for (bio = bios->head; bio; bio = bio->bi_next) {
+		if (bio_empty_barrier(bio))
+			continue;
 		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+	}
 }
 EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
 
@@ -544,7 +558,14 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region)
 	 */
 
 	/* do nothing for DM_RH_NOSYNC */
-	if (reg->state == DM_RH_RECOVERING) {
+	if (unlikely(rh->barrier_failure)) {
+		/*
+		 * If a write barrier failed some time ago, we
+		 * don't know whether or not this write made it
+		 * to the disk, so we must resync the device.
+		 */
+		reg->state = DM_RH_NOSYNC;
+	} else if (reg->state == DM_RH_RECOVERING) {
 		list_add_tail(&reg->list, &rh->quiesced_regions);
 	} else if (reg->state == DM_RH_DIRTY) {
 		reg->state = DM_RH_CLEAN;
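
The dm_rh_dec() change above is the heart of the error handling: once rh->barrier_failure is set by dm_rh_mark_nosync(), no region can transition to DM_RH_CLEAN again, so the dirty log keeps every region marked for resync. A minimal user-space model of that transition, with invented names standing in for the DM_RH_* state machine:

#include <assert.h>

enum rh_state { RH_CLEAN, RH_DIRTY, RH_NOSYNC, RH_RECOVERING };

/* What happens to a region's state when its last pending write drops. */
static enum rh_state last_write_done(enum rh_state cur, int barrier_failure)
{
	if (barrier_failure)
		return RH_NOSYNC;	/* unknown durability: force resync */
	if (cur == RH_DIRTY)
		return RH_CLEAN;	/* all writes known to be on media */
	return cur;			/* RECOVERING is queued elsewhere */
}

int main(void)
{
	/* after a failed barrier, a dirty region must stay out of sync */
	assert(last_write_done(RH_DIRTY, 1) == RH_NOSYNC);
	/* on the normal path it may be marked clean in the dirty log */
	assert(last_write_done(RH_DIRTY, 0) == RH_CLEAN);
	return 0;
}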