Diffstat (limited to 'drivers/md/dm.c')
 drivers/md/dm.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+), 0 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ef7bf1dd6900..628ba001bb3c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -972,10 +972,61 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 }
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
 
+/*
+ * Flush current->bio_list when the target map method blocks.
+ * This fixes deadlocks in snapshot and possibly in other targets.
+ */
+struct dm_offload {
+	struct blk_plug plug;
+	struct blk_plug_cb cb;
+};
+
+static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
+{
+	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
+	struct bio_list list;
+	struct bio *bio;
+
+	INIT_LIST_HEAD(&o->cb.list);
+
+	if (unlikely(!current->bio_list))
+		return;
+
+	list = *current->bio_list;
+	bio_list_init(current->bio_list);
+
+	while ((bio = bio_list_pop(&list))) {
+		struct bio_set *bs = bio->bi_pool;
+		if (unlikely(!bs) || bs == fs_bio_set) {
+			bio_list_add(current->bio_list, bio);
+			continue;
+		}
+
+		spin_lock(&bs->rescue_lock);
+		bio_list_add(&bs->rescue_list, bio);
+		queue_work(bs->rescue_workqueue, &bs->rescue_work);
+		spin_unlock(&bs->rescue_lock);
+	}
+}
+
+static void dm_offload_start(struct dm_offload *o)
+{
+	blk_start_plug(&o->plug);
+	o->cb.callback = flush_current_bio_list;
+	list_add(&o->cb.list, &current->plug->cb_list);
+}
+
+static void dm_offload_end(struct dm_offload *o)
+{
+	list_del(&o->cb.list);
+	blk_finish_plug(&o->plug);
+}
+
 static void __map_bio(struct dm_target_io *tio)
 {
 	int r;
 	sector_t sector;
+	struct dm_offload o;
 	struct bio *clone = &tio->clone;
 	struct dm_target *ti = tio->ti;
 
@@ -988,7 +1039,11 @@ static void __map_bio(struct dm_target_io *tio)
 	 */
 	atomic_inc(&tio->io->io_count);
 	sector = clone->bi_iter.bi_sector;
+
+	dm_offload_start(&o);
 	r = ti->type->map(ti, clone);
+	dm_offload_end(&o);
+
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
 
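
For readers who want the deadlock spelled out: bios submitted while ->map() is running are parked on current->bio_list and are only dispatched after the map method returns, so a target that blocks inside ->map() waiting for one of those parked bios (as dm-snapshot can while waiting for a pending exception) never gets unblocked. The plug callback added above hands the parked bios to each bio_set's rescue workqueue when the task blocks, so they get submitted from a different context. Below is a minimal, self-contained userspace sketch of the same cycle and of the rescue idea; it is only an analogy, not kernel code, and every name in it (deferred_list, rescuer_main, and so on) is hypothetical.

/*
 * Userspace analogy of the dm_offload rescue, not kernel code: a handler
 * that blocks while an item it depends on is still parked on its own
 * deferred list will deadlock unless another thread drains that list.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int id;
};

static struct item *deferred_list;	/* analogue of current->bio_list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int completed;			/* analogue of the awaited I/O */

/* Rescuer: complete deferred items from a separate context. */
static void *rescuer_main(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (deferred_list) {
		struct item *it = deferred_list;

		deferred_list = it->next;
		printf("rescuer completing item %d\n", it->id);
		free(it);
		completed = 1;		/* the dependency is now satisfied */
		pthread_cond_broadcast(&cond);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t rescuer;
	struct item *it = malloc(sizeof(*it));

	it->id = 1;
	it->next = NULL;
	deferred_list = it;		/* item parked on our own list */

	/*
	 * Without the rescuer, waiting here would deadlock: the code that
	 * completes item 1 only runs after we return, and we never return
	 * because we are waiting for item 1. Handing the list to another
	 * thread (as dm_offload hands bios to the bio_set rescue workqueue)
	 * breaks the cycle.
	 */
	pthread_create(&rescuer, NULL, rescuer_main, NULL);

	pthread_mutex_lock(&lock);
	while (!completed)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(rescuer, NULL);
	printf("handler resumed without deadlock\n");
	return 0;
}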