Diffstat (limited to 'drivers/md/dm-cache-target.c')
 drivers/md/dm-cache-target.c | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index e04c61e0839e..894bc14469c8 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -787,8 +787,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	spin_lock_irqsave(&cache->lock, flags);
-	if (cache->need_tick_bio &&
-	    !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
+	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
 		pb->tick = true;
 		cache->need_tick_bio = false;
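
For reference, op_is_flush() is the block-layer helper from include/linux/blk_types.h that these hunks switch to; it tests the same REQ_PREFLUSH/REQ_FUA bits the target was masking by hand. A minimal user-space model of that equivalence, with the flag bits stubbed to arbitrary values purely for illustration, might look like:

#include <assert.h>
#include <stdbool.h>

/* Stand-in flag bits; the kernel's real values live in blk_types.h. */
#define REQ_PREFLUSH (1u << 0)
#define REQ_FUA      (1u << 1)

/* Modelled on the kernel helper: true if the request carries flush semantics. */
static bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

int main(void)
{
	unsigned int opf;

	/* The new call is equivalent to the open-coded mask it replaces. */
	for (opf = 0; opf < 4; opf++)
		assert(op_is_flush(opf) == !!(opf & (REQ_FUA | REQ_PREFLUSH)));
	return 0;
}
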
@@ -828,11 +827,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 	return to_oblock(block_nr);
 }
 
-static int bio_triggers_commit(struct cache *cache, struct bio *bio)
-{
-	return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
-}
-
 /*
  * You must increment the deferred set whilst the prison cell is held. To
  * encourage this, we ask for 'cell' to be passed in.
@@ -884,7 +878,7 @@ static void issue(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
 
-	if (!bio_triggers_commit(cache, bio)) {
+	if (!op_is_flush(bio->bi_opf)) {
 		accounted_request(cache, bio);
 		return;
 	}
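
The rewritten gate in issue() preserves the existing split: bios without flush semantics are submitted immediately via accounted_request(), while PREFLUSH/FUA bios fall through to the commit path handled by the rest of the function (not shown in this hunk). A hedged sketch of that dispatch shape, with issue_model() and both submission stubs invented here only for illustration, could read:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag bits and helper, mirroring the model above. */
#define REQ_PREFLUSH (1u << 0)
#define REQ_FUA      (1u << 1)

static bool op_is_flush(unsigned int opf)
{
	return opf & (REQ_FUA | REQ_PREFLUSH);
}

/* Hypothetical stand-ins for the two submission paths. */
static void submit_directly(unsigned int opf)
{
	printf("0x%x: issued without waiting for a metadata commit\n", opf);
}

static void defer_for_commit(unsigned int opf)
{
	printf("0x%x: held back so the metadata can be committed first\n", opf);
}

/* Sketch of the gate this hunk touches: only non-flush bios bypass the commit. */
static void issue_model(unsigned int opf)
{
	if (!op_is_flush(opf)) {
		submit_directly(opf);
		return;
	}
	defer_for_commit(opf);
}

int main(void)
{
	issue_model(0);            /* ordinary read/write */
	issue_model(REQ_FUA);      /* FUA write           */
	issue_model(REQ_PREFLUSH); /* empty flush         */
	return 0;
}
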
@@ -1069,8 +1063,7 @@ static void dec_io_migrations(struct cache *cache)
 
 static bool discard_or_flush(struct bio *bio)
 {
-	return bio_op(bio) == REQ_OP_DISCARD ||
-	       bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -2291,7 +2284,7 @@ static void do_waker(struct work_struct *ws)
 static int is_congested(struct dm_dev *dev, int bdi_bits)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
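
The last hunk follows a block-core change in which struct request_queue's backing_dev_info member became a pointer instead of an embedded structure, so the address-of operator disappears at the call site. A reduced illustration of that shift, with both structures trimmed to stand-in versions holding only what the example needs, might be:

#include <stdio.h>

/* Trimmed stand-ins; the real kernel structures carry many more fields. */
struct backing_dev_info {
	int congested_bits;
};

/* Before: the bdi was embedded, so callers passed &q->backing_dev_info. */
struct request_queue_old {
	struct backing_dev_info backing_dev_info;
};

/* After: the bdi is referenced by pointer, so callers pass q->backing_dev_info. */
struct request_queue_new {
	struct backing_dev_info *backing_dev_info;
};

static int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	return bdi->congested_bits & bdi_bits;
}

int main(void)
{
	struct backing_dev_info bdi = { .congested_bits = 0x3 };
	struct request_queue_old old_q = { .backing_dev_info = bdi };
	struct request_queue_new new_q = { .backing_dev_info = &bdi };

	/* The same congestion check, written against each layout. */
	printf("%d %d\n",
	       bdi_congested(&old_q.backing_dev_info, 0x1),
	       bdi_congested(new_q.backing_dev_info, 0x1));
	return 0;
}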