Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--	drivers/md/dm-thin.c	93
1 file changed, 60 insertions(+), 33 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 242ac2ea5f29..fc9c848a60c9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -310,13 +310,18 @@ static void cell_defer_no_holder_no_free(struct thin_c *tc,
 	wake_worker(pool);
 }
 
-static void cell_error(struct pool *pool,
-		       struct dm_bio_prison_cell *cell)
+static void cell_error_with_code(struct pool *pool,
+				 struct dm_bio_prison_cell *cell, int error_code)
 {
-	dm_cell_error(pool->prison, cell);
+	dm_cell_error(pool->prison, cell, error_code);
 	dm_bio_prison_free_cell(pool->prison, cell);
 }
 
+static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
+{
+	cell_error_with_code(pool, cell, -EIO);
+}
+
 /*----------------------------------------------------------------*/
 
 /*
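
The first hunk is a classic parameterize-and-wrap refactor: cell_error() used to hard-code -EIO by way of dm_cell_error(), and splitting out cell_error_with_code() lets the later hunks fail a prison cell with -ENOSPC while every existing cell_error() call site keeps its exact behavior. A minimal userspace sketch of the same shape (struct batch and complete_all() are illustrative stand-ins, not dm-thin code):

    #include <errno.h>
    #include <stdio.h>

    struct batch { const char *name; };

    /* Stand-in for dm_cell_error(): end everything in the batch with one errno. */
    static void complete_all(struct batch *b, int error_code)
    {
    	printf("%s: completed with %d\n", b->name, error_code);
    }

    /* Parameterized core, shaped like cell_error_with_code(). */
    static void batch_error_with_code(struct batch *b, int error_code)
    {
    	complete_all(b, error_code);
    }

    /* Thin wrapper keeping the old hard-coded -EIO, shaped like cell_error(). */
    static void batch_error(struct batch *b)
    {
    	batch_error_with_code(b, -EIO);
    }

    int main(void)
    {
    	struct batch b = { "cell" };
    	batch_error(&b);                    /* legacy call sites: -EIO */
    	batch_error_with_code(&b, -ENOSPC); /* new call sites choose the code */
    	return 0;
    }
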
@@ -1027,7 +1032,7 @@ static void retry_on_resume(struct bio *bio)
 	spin_unlock_irqrestore(&tc->lock, flags);
 }
 
-static bool should_error_unserviceable_bio(struct pool *pool)
+static int should_error_unserviceable_bio(struct pool *pool)
 {
 	enum pool_mode m = get_pool_mode(pool);
 
@@ -1035,25 +1040,27 @@ static bool should_error_unserviceable_bio(struct pool *pool)
 	case PM_WRITE:
 		/* Shouldn't get here */
 		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
-		return true;
+		return -EIO;
 
 	case PM_OUT_OF_DATA_SPACE:
-		return pool->pf.error_if_no_space;
+		return pool->pf.error_if_no_space ? -ENOSPC : 0;
 
 	case PM_READ_ONLY:
 	case PM_FAIL:
-		return true;
+		return -EIO;
 	default:
 		/* Shouldn't get here */
 		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
-		return true;
+		return -EIO;
 	}
 }
 
 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
-	if (should_error_unserviceable_bio(pool))
-		bio_io_error(bio);
+	int error = should_error_unserviceable_bio(pool);
+
+	if (error)
+		bio_endio(bio, error);
 	else
 		retry_on_resume(bio);
 }
@@ -1062,18 +1069,21 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 {
 	struct bio *bio;
 	struct bio_list bios;
+	int error;
 
-	if (should_error_unserviceable_bio(pool)) {
-		cell_error(pool, cell);
+	error = should_error_unserviceable_bio(pool);
+	if (error) {
+		cell_error_with_code(pool, cell, error);
 		return;
 	}
 
 	bio_list_init(&bios);
 	cell_release(pool, cell, &bios);
 
-	if (should_error_unserviceable_bio(pool))
+	error = should_error_unserviceable_bio(pool);
+	if (error)
 		while ((bio = bio_list_pop(&bios)))
-			bio_io_error(bio);
+			bio_endio(bio, error);
 	else
 		while ((bio = bio_list_pop(&bios)))
 			retry_on_resume(bio);
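
The three hunks above are one change: should_error_unserviceable_bio() turns from a bool into a tri-state errno return, where 0 means hold the bio for retry_on_resume() and a negative value says both that the bio must fail and with which code (-ENOSPC when the pool is out of data space and error_if_no_space is set, -EIO otherwise). Callers hand that value straight to bio_endio() or the new cell_error_with_code() instead of the blanket bio_io_error(). (bio_endio() still took an error argument in kernels of this vintage; the parameter was dropped in 4.3.) A compact userspace sketch of the convention, with the pool modes reduced to an enum:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum pool_mode { PM_WRITE, PM_OUT_OF_DATA_SPACE, PM_READ_ONLY, PM_FAIL };

    /* 0: queue the bio for retry on resume; <0: fail it now with this errno. */
    static int should_error(enum pool_mode mode, bool error_if_no_space)
    {
    	switch (mode) {
    	case PM_OUT_OF_DATA_SPACE:
    		return error_if_no_space ? -ENOSPC : 0;
    	case PM_READ_ONLY:
    	case PM_FAIL:
    	default:		/* PM_WRITE should never get here */
    		return -EIO;
    	}
    }

    int main(void)
    {
    	int error = should_error(PM_OUT_OF_DATA_SPACE, true);

    	if (error)
    		printf("fail bio with %d\n", error);	/* bio_endio(bio, error) */
    	else
    		printf("retry on resume\n");		/* retry_on_resume(bio)  */
    	return 0;
    }
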
@@ -1610,47 +1620,63 @@ static void do_no_space_timeout(struct work_struct *ws)
 
 /*----------------------------------------------------------------*/
 
-struct noflush_work {
+struct pool_work {
 	struct work_struct worker;
-	struct thin_c *tc;
+	struct completion complete;
+};
+
+static struct pool_work *to_pool_work(struct work_struct *ws)
+{
+	return container_of(ws, struct pool_work, worker);
+}
 
-	atomic_t complete;
-	wait_queue_head_t wait;
+static void pool_work_complete(struct pool_work *pw)
+{
+	complete(&pw->complete);
+}
+
+static void pool_work_wait(struct pool_work *pw, struct pool *pool,
+			   void (*fn)(struct work_struct *))
+{
+	INIT_WORK_ONSTACK(&pw->worker, fn);
+	init_completion(&pw->complete);
+	queue_work(pool->wq, &pw->worker);
+	wait_for_completion(&pw->complete);
+}
+
+/*----------------------------------------------------------------*/
+
+struct noflush_work {
+	struct pool_work pw;
+	struct thin_c *tc;
 };
 
-static void complete_noflush_work(struct noflush_work *w)
+static struct noflush_work *to_noflush(struct work_struct *ws)
 {
-	atomic_set(&w->complete, 1);
-	wake_up(&w->wait);
+	return container_of(to_pool_work(ws), struct noflush_work, pw);
 }
 
 static void do_noflush_start(struct work_struct *ws)
 {
-	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	struct noflush_work *w = to_noflush(ws);
 	w->tc->requeue_mode = true;
 	requeue_io(w->tc);
-	complete_noflush_work(w);
+	pool_work_complete(&w->pw);
 }
 
 static void do_noflush_stop(struct work_struct *ws)
 {
-	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	struct noflush_work *w = to_noflush(ws);
 	w->tc->requeue_mode = false;
-	complete_noflush_work(w);
+	pool_work_complete(&w->pw);
 }
 
 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
 	struct noflush_work w;
 
-	INIT_WORK_ONSTACK(&w.worker, fn);
 	w.tc = tc;
-	atomic_set(&w.complete, 0);
-	init_waitqueue_head(&w.wait);
-
-	queue_work(tc->pool->wq, &w.worker);
-
-	wait_event(w.wait, atomic_read(&w.complete));
+	pool_work_wait(&w.pw, tc->pool, fn);
 }
 
 /*----------------------------------------------------------------*/
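
This hunk is the core cleanup: the open-coded atomic_t plus wait_queue_head_t handshake becomes a struct completion, which packages the sleep, the wake-up, and the memory ordering between them, and the generic pool_work half can be reused by other synchronous work items later. INIT_WORK_ONSTACK (rather than INIT_WORK) is what makes a stack-allocated work item safe under work-struct object debugging. A self-contained sketch of the idiom with generic names (sync_work_* is illustrative, not the patch's naming; the destroy_work_on_stack() call is good practice the patch itself does not add):

    #include <linux/workqueue.h>
    #include <linux/completion.h>

    struct sync_work {
    	struct work_struct worker;
    	struct completion complete;
    };

    static void sync_work_fn(struct work_struct *ws)
    {
    	struct sync_work *sw = container_of(ws, struct sync_work, worker);

    	/* ... do the actual work in workqueue context ... */

    	complete(&sw->complete);		/* wake the submitter */
    }

    /* Queue sync_work_fn() on wq and sleep until it has run. */
    static void sync_work_run(struct workqueue_struct *wq)
    {
    	struct sync_work sw;			/* lives on our stack */

    	INIT_WORK_ONSTACK(&sw.worker, sync_work_fn);
    	init_completion(&sw.complete);
    	queue_work(wq, &sw.worker);
    	wait_for_completion(&sw.complete);
    	destroy_work_on_stack(&sw.worker);	/* pairs with INIT_WORK_ONSTACK */
    }
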
@@ -3068,7 +3094,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
 	 */
 	if (pt->adjusted_pf.discard_passdown) {
 		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
-		limits->discard_granularity = data_limits->discard_granularity;
+		limits->discard_granularity = max(data_limits->discard_granularity,
+						  pool->sectors_per_block << SECTOR_SHIFT);
 	} else
 		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 }
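
The last hunk fixes the discard_passdown case: dm-thin processes discards in whole pool blocks, so the pool must never advertise a discard_granularity finer than its block size even when the underlying data device does. Worked example with hypothetical numbers: for 128-sector (64 KiB) pool blocks over a device reporting 4 KiB granularity, max(4096, 128 << SECTOR_SHIFT) = max(4096, 65536) = 65536, so the pool block size wins; a device reporting 128 KiB (131072) would win instead, and either way requests stay aligned for both layers.
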