diff options
author | Tejun Heo <tj@kernel.org> | 2010-09-03 05:56:19 -0400 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2010-09-10 06:35:38 -0400 |
commit | d87f4c14f27dc82d215108d8392a7d26687148a1 (patch) | |
tree | 55f2a81f3df5d70fd85c4428089f6fe28540bcf4 /drivers/md/dm-region-hash.c | |
parent | 3a2edd0d6ddbd5fa3b389ea6db811285415ce6c8 (diff) |
dm: implement REQ_FLUSH/FUA support for bio-based dm
This patch converts bio-based dm to support REQ_FLUSH/FUA instead of
now deprecated REQ_HARDBARRIER.
* -EOPNOTSUPP handling logic dropped.
* Preflush is handled as before but postflush is dropped and replaced
with passing down REQ_FUA to member request_queues. This replaces
one array wide cache flush w/ member specific FUA writes.
* __split_and_process_bio() now calls __clone_and_map_flush() directly
for flushes and guarantees all FLUSH bio's going to targets are zero
length.
* It's now guaranteed that all FLUSH bio's which are passed onto dm
targets are zero length. bio_empty_barrier() tests are replaced
with REQ_FLUSH tests.
* Empty WRITE_BARRIERs are replaced with WRITE_FLUSHes.
* Dropped unlikely() around REQ_FLUSH tests. Flushes are not unlikely
enough to be marked with unlikely().
* Block layer now filters out REQ_FLUSH/FUA bio's if the request_queue
doesn't support cache flushing. Advertise REQ_FLUSH | REQ_FUA
capability.
* Request based dm isn't converted yet. dm_init_request_based_queue()
resets flush support to 0 for now. To avoid disturbing request
based dm code, dm->flush_error is added for bio based dm while
request based dm continues to use dm->barrier_error.
Lightly tested linear, stripe, raid1, snap and crypt targets. Please
proceed with caution as I'm not familiar with the code base.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: dm-devel@redhat.com
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'drivers/md/dm-region-hash.c')
-rw-r--r-- | drivers/md/dm-region-hash.c | 16 |
1 files changed, 8 insertions, 8 deletions
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index bd5c58b28868..dad011aed0c9 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c | |||
@@ -81,9 +81,9 @@ struct dm_region_hash { | |||
81 | struct list_head failed_recovered_regions; | 81 | struct list_head failed_recovered_regions; |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * If there was a barrier failure no regions can be marked clean. | 84 | * If there was a flush failure no regions can be marked clean. |
85 | */ | 85 | */ |
86 | int barrier_failure; | 86 | int flush_failure; |
87 | 87 | ||
88 | void *context; | 88 | void *context; |
89 | sector_t target_begin; | 89 | sector_t target_begin; |
@@ -217,7 +217,7 @@ struct dm_region_hash *dm_region_hash_create( | |||
217 | INIT_LIST_HEAD(&rh->quiesced_regions); | 217 | INIT_LIST_HEAD(&rh->quiesced_regions); |
218 | INIT_LIST_HEAD(&rh->recovered_regions); | 218 | INIT_LIST_HEAD(&rh->recovered_regions); |
219 | INIT_LIST_HEAD(&rh->failed_recovered_regions); | 219 | INIT_LIST_HEAD(&rh->failed_recovered_regions); |
220 | rh->barrier_failure = 0; | 220 | rh->flush_failure = 0; |
221 | 221 | ||
222 | rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, | 222 | rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, |
223 | sizeof(struct dm_region)); | 223 | sizeof(struct dm_region)); |
@@ -399,8 +399,8 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) | |||
399 | region_t region = dm_rh_bio_to_region(rh, bio); | 399 | region_t region = dm_rh_bio_to_region(rh, bio); |
400 | int recovering = 0; | 400 | int recovering = 0; |
401 | 401 | ||
402 | if (bio_empty_barrier(bio)) { | 402 | if (bio->bi_rw & REQ_FLUSH) { |
403 | rh->barrier_failure = 1; | 403 | rh->flush_failure = 1; |
404 | return; | 404 | return; |
405 | } | 405 | } |
406 | 406 | ||
@@ -524,7 +524,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) | |||
524 | struct bio *bio; | 524 | struct bio *bio; |
525 | 525 | ||
526 | for (bio = bios->head; bio; bio = bio->bi_next) { | 526 | for (bio = bios->head; bio; bio = bio->bi_next) { |
527 | if (bio_empty_barrier(bio)) | 527 | if (bio->bi_rw & REQ_FLUSH) |
528 | continue; | 528 | continue; |
529 | rh_inc(rh, dm_rh_bio_to_region(rh, bio)); | 529 | rh_inc(rh, dm_rh_bio_to_region(rh, bio)); |
530 | } | 530 | } |
@@ -555,9 +555,9 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region) | |||
555 | */ | 555 | */ |
556 | 556 | ||
557 | /* do nothing for DM_RH_NOSYNC */ | 557 | /* do nothing for DM_RH_NOSYNC */ |
558 | if (unlikely(rh->barrier_failure)) { | 558 | if (unlikely(rh->flush_failure)) { |
559 | /* | 559 | /* |
560 | * If a write barrier failed some time ago, we | 560 | * If a write flush failed some time ago, we |
561 | * don't know whether or not this write made it | 561 | * don't know whether or not this write made it |
562 | * to the disk, so we must resync the device. | 562 | * to the disk, so we must resync the device. |
563 | */ | 563 | */ |