Diffstat (limited to 'drivers/md/dm-region-hash.c')
 drivers/md/dm-region-hash.c | 37 +++++++++++++++++++++++++++----------
 1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 36dbe29f2fd6..bd5c58b28868 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -11,6 +11,7 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include "dm.h"
@@ -79,6 +80,11 @@ struct dm_region_hash {
 	struct list_head recovered_regions;
 	struct list_head failed_recovered_regions;
 
+	/*
+	 * If there was a barrier failure, no regions can be marked clean.
+	 */
+	int barrier_failure;
+
 	void *context;
 	sector_t target_begin;
 
@@ -211,6 +217,7 @@ struct dm_region_hash *dm_region_hash_create(
 	INIT_LIST_HEAD(&rh->quiesced_regions);
 	INIT_LIST_HEAD(&rh->recovered_regions);
 	INIT_LIST_HEAD(&rh->failed_recovered_regions);
+	rh->barrier_failure = 0;
 
 	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
 						      sizeof(struct dm_region));
@@ -377,8 +384,6 @@ static void complete_resync_work(struct dm_region *reg, int success)
 /* dm_rh_mark_nosync
  * @ms
  * @bio
- * @done
- * @error
  *
  * The bio was written on some mirror(s) but failed on other mirror(s).
  * We can successfully endio the bio but should avoid the region being
@@ -386,8 +391,7 @@ static void complete_resync_work(struct dm_region *reg, int success)
  *
  * This function is _not_ safe in interrupt context!
  */
-void dm_rh_mark_nosync(struct dm_region_hash *rh,
-		       struct bio *bio, unsigned done, int error)
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
 {
 	unsigned long flags;
 	struct dm_dirty_log *log = rh->log;
@@ -395,6 +399,11 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
 	region_t region = dm_rh_bio_to_region(rh, bio);
 	int recovering = 0;
 
+	if (bio_empty_barrier(bio)) {
+		rh->barrier_failure = 1;
+		return;
+	}
+
 	/* We must inform the log that the sync count has changed. */
 	log->type->set_region_sync(log, region, 0);
 
@@ -419,7 +428,6 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
 	BUG_ON(!list_empty(&reg->list));
 	spin_unlock_irqrestore(&rh->region_lock, flags);
 
-	bio_endio(bio, error);
 	if (recovering)
 		complete_resync_work(reg, 0);
 }
@@ -515,8 +523,11 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
 {
 	struct bio *bio;
 
-	for (bio = bios->head; bio; bio = bio->bi_next)
+	for (bio = bios->head; bio; bio = bio->bi_next) {
+		if (bio_empty_barrier(bio))
+			continue;
 		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+	}
 }
 EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
 
@@ -544,7 +555,14 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region)
 	 */
 
 	/* do nothing for DM_RH_NOSYNC */
-	if (reg->state == DM_RH_RECOVERING) {
+	if (unlikely(rh->barrier_failure)) {
+		/*
+		 * If a write barrier failed some time ago, we
+		 * don't know whether or not this write made it
+		 * to the disk, so we must resync the device.
+		 */
+		reg->state = DM_RH_NOSYNC;
+	} else if (reg->state == DM_RH_RECOVERING) {
 		list_add_tail(&reg->list, &rh->quiesced_regions);
 	} else if (reg->state == DM_RH_DIRTY) {
 		reg->state = DM_RH_CLEAN;
@@ -643,10 +661,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
 	spin_lock_irq(&rh->region_lock);
 	if (success)
 		list_add(&reg->list, &reg->rh->recovered_regions);
-	else {
-		reg->state = DM_RH_NOSYNC;
+	else
 		list_add(&reg->list, &reg->rh->failed_recovered_regions);
-	}
+
 	spin_unlock_irq(&rh->region_lock);
 
 	rh->wakeup_workers(rh->context);
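
With this change dm_rh_mark_nosync() only marks the region out of sync
(or, for an empty barrier, records the failure in rh->barrier_failure);
it no longer completes the bio, so the bio_endio() call moves to the
caller. A minimal caller sketch under that assumption — the function
name fail_mirror_write() is hypothetical and not part of this patch:

	#include <linux/bio.h>
	#include <linux/dm-region-hash.h>

	/*
	 * Hypothetical caller: mark the region out of sync first, then
	 * complete the bio ourselves, since dm_rh_mark_nosync() no
	 * longer calls bio_endio(). Passing 0 reflects that the write
	 * succeeded on at least one mirror.
	 */
	static void fail_mirror_write(struct dm_region_hash *rh,
				      struct bio *bio)
	{
		dm_rh_mark_nosync(rh, bio);
		bio_endio(bio, 0);
	}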
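
The effect of rh->barrier_failure on dm_rh_dec() can be summarized as:
once any barrier has failed, no region is allowed to return to
DM_RH_CLEAN, which forces a resync. A condensed sketch of that state
decision, assuming the DM_RH_* state constants from dm-region-hash.h;
the helper next_region_state() is illustrative only:

	#include <linux/dm-region-hash.h>

	/*
	 * Illustrative helper mirroring the patched dm_rh_dec() logic:
	 * after a failed barrier we cannot know whether an in-flight
	 * write reached the disk, so the region is degraded to
	 * DM_RH_NOSYNC instead of being marked clean.
	 */
	static int next_region_state(int barrier_failure, int cur_state)
	{
		if (barrier_failure)
			return DM_RH_NOSYNC;	/* must resync this region */
		if (cur_state == DM_RH_DIRTY)
			return DM_RH_CLEAN;	/* last write done, safe to clean */
		return cur_state;		/* e.g. DM_RH_RECOVERING: leave as is */
	}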