author     Mikulas Patocka <mpatocka@redhat.com>    2009-12-10 18:51:59 -0500
committer  Alasdair G Kergon <agk@redhat.com>       2009-12-10 18:51:59 -0500
commit     4184153f9e483f9bb63339ed316e059962fe9794
tree       054ae52af1a464d49bded004de64cdf342f40e68 /drivers/md/dm-region-hash.c
parent     f1e539874655ae9e74c1644fd54133b19f1b14e2
dm raid1: support flush
Flush support for dm-raid1. When it receives an empty barrier, submit it to
all the devices via dm-io.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
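The diff below covers only the region-hash bookkeeping; the companion dm-raid1.c
change that actually submits the empty barrier to every mirror device via dm-io
belongs to the same commit but is not included here. The following is a rough
sketch of that idea, not the commit's actual dm-raid1.c hunk: the mirror_set
fields and the map_region()/write_callback() helpers are assumed from the
existing dm-raid1 code, and it is assumed that dm_io() accepts the empty
DM_IO_KMEM descriptor for a zero-sector barrier.

/*
 * Sketch only: fan one write bio out to every mirror leg with dm-io.
 * An empty barrier carries no payload, so the memory descriptor is left
 * empty while the barrier flag is propagated to each device.
 */
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & (1 << BIO_RW_BARRIER)),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,	/* completes the original bio */
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_empty_barrier(bio)) {
		/* No data to map; send the bare barrier to each leg. */
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	/* One dm_io_region (bdev + offset) per mirror leg. */
	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

The region-hash changes below then keep such barriers out of the per-region
accounting: dm_rh_inc_pending() skips empty barriers, dm_rh_mark_nosync() on a
failed empty barrier only sets rh->barrier_failure, and a set barrier_failure
makes the next dm_rh_dec() mark the region DM_RH_NOSYNC so it is resynced
rather than marked clean.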
Diffstat (limited to 'drivers/md/dm-region-hash.c')
-rw-r--r--   drivers/md/dm-region-hash.c   25
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 36dbe29f2fd6..00806b760ccd 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -79,6 +79,11 @@ struct dm_region_hash {
 	struct list_head recovered_regions;
 	struct list_head failed_recovered_regions;
 
+	/*
+	 * If there was a barrier failure no regions can be marked clean.
+	 */
+	int barrier_failure;
+
 	void *context;
 	sector_t target_begin;
 
@@ -211,6 +216,7 @@ struct dm_region_hash *dm_region_hash_create(
 	INIT_LIST_HEAD(&rh->quiesced_regions);
 	INIT_LIST_HEAD(&rh->recovered_regions);
 	INIT_LIST_HEAD(&rh->failed_recovered_regions);
+	rh->barrier_failure = 0;
 
 	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
 					       sizeof(struct dm_region));
@@ -395,6 +401,11 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
 	region_t region = dm_rh_bio_to_region(rh, bio);
 	int recovering = 0;
 
+	if (bio_empty_barrier(bio)) {
+		rh->barrier_failure = 1;
+		return;
+	}
+
 	/* We must inform the log that the sync count has changed. */
 	log->type->set_region_sync(log, region, 0);
 
@@ -515,8 +526,11 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
 {
 	struct bio *bio;
 
-	for (bio = bios->head; bio; bio = bio->bi_next)
+	for (bio = bios->head; bio; bio = bio->bi_next) {
+		if (bio_empty_barrier(bio))
+			continue;
 		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+	}
 }
 EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
 
@@ -544,7 +558,14 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region)
 	 */
 
 	/* do nothing for DM_RH_NOSYNC */
-	if (reg->state == DM_RH_RECOVERING) {
+	if (unlikely(rh->barrier_failure)) {
+		/*
+		 * If a write barrier failed some time ago, we
+		 * don't know whether or not this write made it
+		 * to the disk, so we must resync the device.
+		 */
+		reg->state = DM_RH_NOSYNC;
+	} else if (reg->state == DM_RH_RECOVERING) {
 		list_add_tail(&reg->list, &rh->quiesced_regions);
 	} else if (reg->state == DM_RH_DIRTY) {
 		reg->state = DM_RH_CLEAN;