author      Linus Torvalds <torvalds@linux-foundation.org>   2017-06-29 17:23:02 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2017-06-29 17:23:02 -0400
commit      27bc344014289a298a9fad4b828fcf61daa60aff
tree        37b04a28d5afd9e95522889a425b64fae95cd8f2
parent      374bf8831aa99a445d6d26c56a65fb35db747d91
parent      00a0ea33b495ee6149bf5a77ac5807ce87323abb
Merge tag 'for-4.12/dm-fixes-5' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:
- dm thin provisioning fix for a crash that occurs when a metadata device
  failure races with discard passdown to the underlying data device.
- dm raid fix to not access the superblock's >= 1.9.0 'sectors' member
  unconditionally.
* tag 'for-4.12/dm-fixes-5' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm thin: do not queue freed thin mapping for next stage processing
dm raid: fix oops on upgrading to extended superblock format
 drivers/md/dm-raid.c | 17
 drivers/md/dm-thin.c | 26
 2 files changed, 27 insertions(+), 16 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 7d893228c40f..b4b75dad816a 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1927,7 +1927,7 @@ struct dm_raid_superblock {
 	/********************************************************************
 	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
 	 *
-	 * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist
+	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
 	 */
 
 	__le32 flags; /* Flags defining array states for reshaping */
@@ -2092,6 +2092,11 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 	sb->layout = cpu_to_le32(mddev->layout);
 	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
 
+	/********************************************************************
+	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
+	 *
+	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
+	 */
 	sb->new_level = cpu_to_le32(mddev->new_level);
 	sb->new_layout = cpu_to_le32(mddev->new_layout);
 	sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
@@ -2438,8 +2443,14 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
 	mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
 
 	if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
-		/* Retrieve device size stored in superblock to be prepared for shrink */
-		rdev->sectors = le64_to_cpu(sb->sectors);
+		/*
+		 * Retrieve rdev size stored in superblock to be prepared for shrink.
+		 * Check extended superblock members are present otherwise the size
+		 * will not be set!
+		 */
+		if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
+			rdev->sectors = le64_to_cpu(sb->sectors);
+
 		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
 		if (rdev->recovery_offset == MaxSector)
 			set_bit(In_sync, &rdev->flags);
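The raid change gates the read of sb->sectors on FEATURE_FLAG_SUPPORTS_V190 in compat_features: the 'sectors' member only exists in superblocks written with the v1.9.0 extensions, so on an older-format superblock it must be left alone instead of being read unconditionally. Below is a minimal userspace sketch of that gating pattern; the struct layout, flag value, and function names are illustrative stand-ins, not the real dm_raid_superblock definition.

/*
 * Minimal sketch of the gating pattern the dm-raid fix applies:
 * extension fields of an on-disk superblock are only trusted when the
 * matching feature bit in compat_features says they were written.
 * Illustrative layout and flag value only.
 */
#include <stdint.h>
#include <stdio.h>

#define FEATURE_FLAG_SUPPORTS_V190 (1U << 0)	/* illustrative bit */

struct example_sb {
	uint32_t compat_features;	/* base (pre-1.9.0) member */
	uint64_t sectors;		/* v1.9.0 extension: stale on old superblocks */
};

/* Only read the extension member when the superblock advertises it. */
static uint64_t rdev_sectors_from_sb(const struct example_sb *sb,
				     uint64_t current_sectors)
{
	if (sb->compat_features & FEATURE_FLAG_SUPPORTS_V190)
		return sb->sectors;
	return current_sectors;	/* old format: leave the size untouched */
}

int main(void)
{
	struct example_sb old_fmt = { .compat_features = 0, .sectors = 0xdeadbeefULL };
	struct example_sb new_fmt = { .compat_features = FEATURE_FLAG_SUPPORTS_V190,
				      .sectors = 2097152 };

	printf("old-format sb -> %llu sectors\n",
	       (unsigned long long)rdev_sectors_from_sb(&old_fmt, 1048576));
	printf("new-format sb -> %llu sectors\n",
	       (unsigned long long)rdev_sectors_from_sb(&new_fmt, 1048576));
	return 0;
}

When the feature bit is absent the helper keeps the caller's current value, mirroring the fix's behaviour of leaving rdev->sectors unset for pre-1.9.0 superblocks.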
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 17ad50daed08..28808e5ec0fd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1094,6 +1094,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 		return;
 	}
 
+	/*
+	 * Increment the unmapped blocks.  This prevents a race between the
+	 * passdown io and reallocation of freed blocks.
+	 */
+	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+		bio_io_error(m->bio);
+		cell_defer_no_holder(tc, m->cell);
+		mempool_free(m, pool->mapping_pool);
+		return;
+	}
+
 	discard_parent = bio_alloc(GFP_NOIO, 1);
 	if (!discard_parent) {
 		DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
@@ -1114,19 +1127,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 			end_discard(&op, r);
 		}
 	}
-
-	/*
-	 * Increment the unmapped blocks.  This prevents a race between the
-	 * passdown io and reallocation of freed blocks.
-	 */
-	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
-	if (r) {
-		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
-		bio_io_error(m->bio);
-		cell_defer_no_holder(tc, m->cell);
-		mempool_free(m, pool->mapping_pool);
-		return;
-	}
 }
 
 static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
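The thin change moves the dm_pool_inc_data_range() call ahead of the passdown setup. The data blocks are now pinned before any discard bio is issued, which closes the race with reallocation of freed blocks that the comment describes, and if the increment fails (for example because the metadata device has failed) the mapping has not yet been queued for second-stage processing, so erroring the bio and freeing the mapping is safe. Below is a small, self-contained userspace sketch of that ordering; the names and types are illustrative stand-ins for the dm-thin internals, not the driver code itself.

/*
 * Sketch of the ordering the dm-thin fix enforces: take the data-block
 * reference (the step that can fail when the metadata device is dead)
 * before the mapping is handed to the next processing stage.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
	unsigned long data_block;
	unsigned long data_end;
};

/* Stand-in for dm_pool_inc_data_range(); fails when "metadata" is down. */
static int inc_data_range(bool metadata_ok, unsigned long begin, unsigned long end)
{
	(void)begin;
	(void)end;
	return metadata_ok ? 0 : -EIO;
}

/* Stand-in for issuing the passdown discard and queueing the second stage. */
static void queue_for_stage2(struct mapping *m)
{
	printf("stage2 queued for blocks %lu..%lu\n", m->data_block, m->data_end);
	free(m);	/* stage2 now owns (and here frees) the mapping */
}

static void stage1(struct mapping *m, bool metadata_ok)
{
	/*
	 * Fixed ordering: grab the reference first.  On failure nothing has
	 * been queued yet, so dropping the mapping here cannot free memory
	 * that a later stage still expects to process.
	 */
	if (inc_data_range(metadata_ok, m->data_block, m->data_end)) {
		fprintf(stderr, "inc_data_range failed; mapping dropped safely\n");
		free(m);
		return;
	}
	queue_for_stage2(m);
}

int main(void)
{
	struct mapping *m = malloc(sizeof(*m));
	m->data_block = 100;
	m->data_end = 108;
	stage1(m, false);	/* simulate metadata device failure */

	m = malloc(sizeof(*m));
	m->data_block = 200;
	m->data_end = 216;
	stage1(m, true);	/* healthy path */
	return 0;
}

Running the failure path first shows the point of the reordering: the mapping is dropped before anything downstream holds a reference to it, so no later stage can touch freed memory.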