author		Mike Snitzer <snitzer@redhat.com>	2013-12-05 15:47:24 -0500
committer	Mike Snitzer <snitzer@redhat.com>	2014-01-07 10:14:29 -0500
commit		8c0f0e8c9f07e6554b2281f86f00e769cf805fd9
tree		06e1aac7a4705eb7cb5c494c50cd8852a5415f60
parent		399caddfb16f5fa30c66056a32477cf95c947e2b
dm thin: requeue bios to DM core if no_free_space and in read-only mode
Now that the pool is switched to read-only mode when the data device runs
out of space, active writers get IO errors once we resume after resizing
the data device.
If no_free_space is set, save bios to the 'retry_on_resume_list' and
requeue them on resume (once the data or metadata device may have been
resized).
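
For context, retry_on_resume() itself is unchanged by this patch: it parks
the bio on the pool's retry_on_resume_list under the pool spinlock, and that
list is reissued when the pool is resumed. At the time of this commit it
looked roughly like the following (paraphrased from drivers/md/dm-thin.c,
not part of the diff below):

static void retry_on_resume(struct bio *bio)
{
	/* Recover the thin device this bio was mapped through */
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	struct pool *pool = tc->pool;
	unsigned long flags;

	/* Park the bio until the pool is resumed (e.g. after a resize) */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}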
With this patch the resize_io test passes again (on slower storage):
dmtest run --suite thin-provisioning -n /resize_io/
Later patches fix some subtle races associated with the pool mode
transitions done as part of the pool's -ENOSPC handling. These races
are exposed on fast storage (e.g. PCIe SSD).
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--	drivers/md/dm-thin.c	26
1 file changed, 20 insertions, 6 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 96ce36a1a764..53252d2af249 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -164,7 +164,7 @@ struct pool {
 
 	struct pool_features pf;
 	bool low_water_triggered:1;	/* A dm event has been sent */
-	bool no_free_space:1;		/* A -ENOSPC warning has been issued */
+	bool no_free_space:1;		/* bios will be requeued if set */
 
 	struct dm_bio_prison *prison;
 	struct dm_kcopyd_client *copier;
@@ -982,6 +982,20 @@ static void retry_on_resume(struct bio *bio)
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
+static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+{
+	/*
+	 * When pool is read-only, no cell locking is needed because
+	 * nothing is changing.
+	 */
+	WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
+
+	if (pool->no_free_space)
+		retry_on_resume(bio);
+	else
+		bio_io_error(bio);
+}
+
 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
 	struct bio *bio;
@@ -991,7 +1005,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
 	cell_release(pool, cell, &bios);
 
 	while ((bio = bio_list_pop(&bios)))
-		retry_on_resume(bio);
+		handle_unserviceable_bio(pool, bio);
 }
 
 static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1245,7 +1259,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 	switch (r) {
 	case 0:
 		if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
-			bio_io_error(bio);
+			handle_unserviceable_bio(tc->pool, bio);
 		else {
 			inc_all_io_entry(tc->pool, bio);
 			remap_and_issue(tc, bio, lookup_result.block);
@@ -1254,7 +1268,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 
 	case -ENODATA:
 		if (rw != READ) {
-			bio_io_error(bio);
+			handle_unserviceable_bio(tc->pool, bio);
 			break;
 		}
 
@@ -1565,9 +1579,9 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
 			/*
 			 * This block isn't provisioned, and we have no way
-			 * of doing so. Just error it.
+			 * of doing so.
 			 */
-			bio_io_error(bio);
+			handle_unserviceable_bio(tc->pool, bio);
 			return DM_MAPIO_SUBMITTED;
 		}
 		/* fall through */
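
For completeness, the other half of the mechanism lives in the pool's resume
path, which this diff does not touch: pool_resume() clears no_free_space and
splices retry_on_resume_list back onto the deferred list so the parked bios
are reissued. Roughly (paraphrased from the same file at the time of this
commit):

static void __requeue_bios(struct pool *pool)
{
	/* Caller holds pool->lock: move parked bios back to the deferred list */
	bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
	bio_list_init(&pool->retry_on_resume_list);
}

static void pool_resume(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	pool->low_water_triggered = false;
	pool->no_free_space = false;	/* writes may be serviceable again */
	__requeue_bios(pool);
	spin_unlock_irqrestore(&pool->lock, flags);

	do_waker(&pool->waker.work);	/* kick the worker to process them */
}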