author	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-12 13:16:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-12 13:16:46 -0400
commit	8357422d4bf33bc2c35884d4016c3fc9efbbc1d2 (patch)
tree	5779dfe36de828d3ef2dacfda48b7961cdc44525
parent	1021a645344d4a77333e19e60d37b9343be0d7b7 (diff)
parent	959eb4e5592cc0b0b07db0ca30d2b1efd790020f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm: (33 commits)
dm mpath: support discard
dm stripe: support discards
dm: split discard requests on target boundaries
dm stripe: optimize sector division
dm stripe: move sector translation to a function
dm: error return error for discards
dm delay: support discard
dm: zero silently drop discards
dm: use dm_target_offset macro
dm: factor out max_io_len_target_boundary
dm: use common __issue_target_request for flush and discard support
dm: linear support discard
dm crypt: simplify crypt_ctr
dm crypt: simplify crypt_config destruction logic
dm: allow autoloading of dm mod
dm: rename map_info flush_request to target_request_nr
dm ioctl: refactor dm_table_complete
dm snapshot: implement merge
dm: do not initialise full request queue when bio based
dm ioctl: make bio or request based device type immutable
...
-rw-r--r--	Documentation/devices.txt	|   1
-rw-r--r--	drivers/md/dm-crypt.c	| 342
-rw-r--r--	drivers/md/dm-delay.c	|   6
-rw-r--r--	drivers/md/dm-exception-store.c	|   4
-rw-r--r--	drivers/md/dm-exception-store.h	|   3
-rw-r--r--	drivers/md/dm-ioctl.c	| 207
-rw-r--r--	drivers/md/dm-linear.c	|   3
-rw-r--r--	drivers/md/dm-mpath.c	|  11
-rw-r--r--	drivers/md/dm-raid1.c	|   2
-rw-r--r--	drivers/md/dm-snap-persistent.c	|   6
-rw-r--r--	drivers/md/dm-snap.c	|  62
-rw-r--r--	drivers/md/dm-stripe.c	|  87
-rw-r--r--	drivers/md/dm-table.c	|  99
-rw-r--r--	drivers/md/dm-target.c	|   5
-rw-r--r--	drivers/md/dm-zero.c	|   5
-rw-r--r--	drivers/md/dm.c	| 329
-rw-r--r--	drivers/md/dm.h	|  14
-rw-r--r--	include/linux/device-mapper.h	|  16
-rw-r--r--	include/linux/dm-ioctl.h	|   5
-rw-r--r--	include/linux/miscdevice.h	|   1
20 files changed, 822 insertions(+), 386 deletions(-)
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index f2da781705b2..d0d1df6cb5de 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -445,6 +445,7 @@ Your cooperation is appreciated.
 		233 = /dev/kmview	View-OS A process with a view
 		234 = /dev/btrfs-control	Btrfs control device
 		235 = /dev/autofs	Autofs control device
+		236 = /dev/mapper/control	Device-Mapper control device
 		240-254			Reserved for local use
 		255			Reserved for MISC_DYNAMIC_MINOR
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3bdbb6115702..368e8e98f705 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -107,11 +107,10 @@ struct crypt_config {
 	struct workqueue_struct *io_queue;
 	struct workqueue_struct *crypt_queue;
 
-	/*
-	 * crypto related data
-	 */
+	char *cipher;
+	char *cipher_mode;
+
 	struct crypt_iv_operations *iv_gen_ops;
-	char *iv_mode;
 	union {
 		struct iv_essiv_private essiv;
 		struct iv_benbi_private benbi;
@@ -135,8 +134,6 @@ struct crypt_config {
 	unsigned int dmreq_start;
 	struct ablkcipher_request *req;
 
-	char cipher[CRYPTO_MAX_ALG_NAME];
-	char chainmode[CRYPTO_MAX_ALG_NAME];
 	struct crypto_ablkcipher *tfm;
 	unsigned long flags;
 	unsigned int key_size;
@@ -999,82 +996,135 @@ static int crypt_wipe_key(struct crypt_config *cc)
 	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
 }
 
-/*
- * Construct an encryption mapping:
- * <cipher> <key> <iv_offset> <dev_path> <start>
- */
-static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+static void crypt_dtr(struct dm_target *ti)
 {
-	struct crypt_config *cc;
-	struct crypto_ablkcipher *tfm;
-	char *tmp;
-	char *cipher;
-	char *chainmode;
-	char *ivmode;
-	char *ivopts;
-	unsigned int key_size;
-	unsigned long long tmpll;
+	struct crypt_config *cc = ti->private;
 
-	if (argc != 5) {
-		ti->error = "Not enough arguments";
+	ti->private = NULL;
+
+	if (!cc)
+		return;
+
+	if (cc->io_queue)
+		destroy_workqueue(cc->io_queue);
+	if (cc->crypt_queue)
+		destroy_workqueue(cc->crypt_queue);
+
+	if (cc->bs)
+		bioset_free(cc->bs);
+
+	if (cc->page_pool)
+		mempool_destroy(cc->page_pool);
+	if (cc->req_pool)
+		mempool_destroy(cc->req_pool);
+	if (cc->io_pool)
+		mempool_destroy(cc->io_pool);
+
+	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
+		cc->iv_gen_ops->dtr(cc);
+
+	if (cc->tfm && !IS_ERR(cc->tfm))
+		crypto_free_ablkcipher(cc->tfm);
+
+	if (cc->dev)
+		dm_put_device(ti, cc->dev);
+
+	kzfree(cc->cipher);
+	kzfree(cc->cipher_mode);
+
+	/* Must zero key material before freeing */
+	kzfree(cc);
+}
+
+static int crypt_ctr_cipher(struct dm_target *ti,
+			    char *cipher_in, char *key)
+{
+	struct crypt_config *cc = ti->private;
+	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
+	char *cipher_api = NULL;
+	int ret = -EINVAL;
+
+	/* Convert to crypto api definition? */
+	if (strchr(cipher_in, '(')) {
+		ti->error = "Bad cipher specification";
 		return -EINVAL;
 	}
 
-	tmp = argv[0];
+	/*
+	 * Legacy dm-crypt cipher specification
+	 * cipher-mode-iv:ivopts
+	 */
+	tmp = cipher_in;
 	cipher = strsep(&tmp, "-");
+
+	cc->cipher = kstrdup(cipher, GFP_KERNEL);
+	if (!cc->cipher)
+		goto bad_mem;
+
+	if (tmp) {
+		cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
+		if (!cc->cipher_mode)
+			goto bad_mem;
+	}
+
 	chainmode = strsep(&tmp, "-");
 	ivopts = strsep(&tmp, "-");
 	ivmode = strsep(&ivopts, ":");
 
 	if (tmp)
-		DMWARN("Unexpected additional cipher options");
-
-	key_size = strlen(argv[1]) >> 1;
-
-	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
-	if (cc == NULL) {
-		ti->error =
-			"Cannot allocate transparent encryption context";
-		return -ENOMEM;
-	}
+		DMWARN("Ignoring unexpected additional cipher options");
 
-	/* Compatibility mode for old dm-crypt cipher strings */
-	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
+	/* Compatibility mode for old dm-crypt mappings */
+	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
+		kfree(cc->cipher_mode);
+		cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
 		chainmode = "cbc";
 		ivmode = "plain";
 	}
 
 	if (strcmp(chainmode, "ecb") && !ivmode) {
-		ti->error = "This chaining mode requires an IV mechanism";
-		goto bad_cipher;
+		ti->error = "IV mechanism required";
+		return -EINVAL;
 	}
 
-	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
-		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
-		ti->error = "Chain mode + cipher name is too long";
-		goto bad_cipher;
+	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
+	if (!cipher_api)
+		goto bad_mem;
+
+	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
+		       "%s(%s)", chainmode, cipher);
+	if (ret < 0) {
+		kfree(cipher_api);
+		goto bad_mem;
 	}
 
-	tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
-	if (IS_ERR(tfm)) {
+	/* Allocate cipher */
+	cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
+	if (IS_ERR(cc->tfm)) {
+		ret = PTR_ERR(cc->tfm);
 		ti->error = "Error allocating crypto tfm";
-		goto bad_cipher;
+		goto bad;
 	}
 
-	strcpy(cc->cipher, cipher);
-	strcpy(cc->chainmode, chainmode);
-	cc->tfm = tfm;
-
-	if (crypt_set_key(cc, argv[1]) < 0) {
+	/* Initialize and set key */
+	ret = crypt_set_key(cc, key);
+	if (ret < 0) {
 		ti->error = "Error decoding and setting key";
-		goto bad_ivmode;
+		goto bad;
 	}
 
-	/*
-	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
-	 * See comments at iv code
-	 */
+	/* Initialize IV */
+	cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
+	if (cc->iv_size)
+		/* at least a 64 bit sector number should fit in our buffer */
+		cc->iv_size = max(cc->iv_size,
+				  (unsigned int)(sizeof(u64) / sizeof(u8)));
+	else if (ivmode) {
+		DMWARN("Selected cipher does not support IVs");
+		ivmode = NULL;
+	}
 
+	/* Choose ivmode, see comments at iv code. */
 	if (ivmode == NULL)
 		cc->iv_gen_ops = NULL;
 	else if (strcmp(ivmode, "plain") == 0)
@@ -1088,159 +1138,138 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	else if (strcmp(ivmode, "null") == 0)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
 	else {
+		ret = -EINVAL;
 		ti->error = "Invalid IV mode";
-		goto bad_ivmode;
+		goto bad;
 	}
 
-	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
-	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
-		goto bad_ivmode;
-
-	if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
-	    cc->iv_gen_ops->init(cc) < 0) {
-		ti->error = "Error initialising IV";
-		goto bad_slab_pool;
+	/* Allocate IV */
+	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
+		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
+		if (ret < 0) {
+			ti->error = "Error creating IV";
+			goto bad;
+		}
 	}
 
-	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
-	if (cc->iv_size)
-		/* at least a 64 bit sector number should fit in our buffer */
-		cc->iv_size = max(cc->iv_size,
-				  (unsigned int)(sizeof(u64) / sizeof(u8)));
-	else {
-		if (cc->iv_gen_ops) {
-			DMWARN("Selected cipher does not support IVs");
-			if (cc->iv_gen_ops->dtr)
-				cc->iv_gen_ops->dtr(cc);
-			cc->iv_gen_ops = NULL;
+	/* Initialize IV (set keys for ESSIV etc) */
+	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
+		ret = cc->iv_gen_ops->init(cc);
+		if (ret < 0) {
+			ti->error = "Error initialising IV";
+			goto bad;
 		}
 	}
 
+	ret = 0;
+bad:
+	kfree(cipher_api);
+	return ret;
+
+bad_mem:
+	ti->error = "Cannot allocate cipher strings";
+	return -ENOMEM;
+}
+
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ */
+static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct crypt_config *cc;
+	unsigned int key_size;
+	unsigned long long tmpll;
+	int ret;
+
+	if (argc != 5) {
+		ti->error = "Not enough arguments";
+		return -EINVAL;
+	}
+
+	key_size = strlen(argv[1]) >> 1;
+
+	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
+	if (!cc) {
+		ti->error = "Cannot allocate encryption context";
+		return -ENOMEM;
+	}
+
+	ti->private = cc;
+	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
+	if (ret < 0)
+		goto bad;
+
+	ret = -ENOMEM;
 	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
 	if (!cc->io_pool) {
 		ti->error = "Cannot allocate crypt io mempool";
-		goto bad_slab_pool;
+		goto bad;
 	}
 
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
-	cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
+	cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
 	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-	cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
+	cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
 			   ~(crypto_tfm_ctx_alignment() - 1);
 
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
 			sizeof(struct dm_crypt_request) + cc->iv_size);
 	if (!cc->req_pool) {
 		ti->error = "Cannot allocate crypt request mempool";
-		goto bad_req_pool;
+		goto bad;
 	}
 	cc->req = NULL;
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
-		goto bad_page_pool;
+		goto bad;
 	}
 
 	cc->bs = bioset_create(MIN_IOS, 0);
 	if (!cc->bs) {
 		ti->error = "Cannot allocate crypt bioset";
-		goto bad_bs;
+		goto bad;
 	}
 
+	ret = -EINVAL;
 	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
 		ti->error = "Invalid iv_offset sector";
-		goto bad_device;
+		goto bad;
 	}
 	cc->iv_offset = tmpll;
 
-	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
-		ti->error = "Invalid device sector";
-		goto bad_device;
-	}
-	cc->start = tmpll;
-
 	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
 		ti->error = "Device lookup failed";
-		goto bad_device;
+		goto bad;
 	}
 
-	if (ivmode && cc->iv_gen_ops) {
-		if (ivopts)
-			*(ivopts - 1) = ':';
-		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
-		if (!cc->iv_mode) {
-			ti->error = "Error kmallocing iv_mode string";
-			goto bad_ivmode_string;
-		}
-		strcpy(cc->iv_mode, ivmode);
-	} else
-		cc->iv_mode = NULL;
+	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
+		ti->error = "Invalid device sector";
+		goto bad;
+	}
+	cc->start = tmpll;
 
+	ret = -ENOMEM;
 	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
-		goto bad_io_queue;
+		goto bad;
 	}
 
 	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
-		goto bad_crypt_queue;
+		goto bad;
 	}
 
 	ti->num_flush_requests = 1;
-	ti->private = cc;
 	return 0;
 
-bad_crypt_queue:
-	destroy_workqueue(cc->io_queue);
-bad_io_queue:
-	kfree(cc->iv_mode);
-bad_ivmode_string:
-	dm_put_device(ti, cc->dev);
-bad_device:
-	bioset_free(cc->bs);
-bad_bs:
-	mempool_destroy(cc->page_pool);
-bad_page_pool:
-	mempool_destroy(cc->req_pool);
-bad_req_pool:
-	mempool_destroy(cc->io_pool);
-bad_slab_pool:
-	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
-		cc->iv_gen_ops->dtr(cc);
-bad_ivmode:
-	crypto_free_ablkcipher(tfm);
-bad_cipher:
-	/* Must zero key material before freeing */
-	kzfree(cc);
-	return -EINVAL;
-}
-
-static void crypt_dtr(struct dm_target *ti)
-{
-	struct crypt_config *cc = (struct crypt_config *) ti->private;
-
-	destroy_workqueue(cc->io_queue);
-	destroy_workqueue(cc->crypt_queue);
-
-	if (cc->req)
-		mempool_free(cc->req, cc->req_pool);
-
-	bioset_free(cc->bs);
-	mempool_destroy(cc->page_pool);
-	mempool_destroy(cc->req_pool);
-	mempool_destroy(cc->io_pool);
-
-	kfree(cc->iv_mode);
-	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
-		cc->iv_gen_ops->dtr(cc);
-	crypto_free_ablkcipher(cc->tfm);
-	dm_put_device(ti, cc->dev);
-
-	/* Must zero key material before freeing */
-	kzfree(cc);
+bad:
+	crypt_dtr(ti);
+	return ret;
 }
 
 static int crypt_map(struct dm_target *ti, struct bio *bio,
@@ -1255,7 +1284,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 		return DM_MAPIO_REMAPPED;
 	}
 
-	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
+	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
 
 	if (bio_data_dir(io->base_bio) == READ)
 		kcryptd_queue_io(io);
@@ -1268,7 +1297,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 static int crypt_status(struct dm_target *ti, status_type_t type,
 			char *result, unsigned int maxlen)
 {
-	struct crypt_config *cc = (struct crypt_config *) ti->private;
+	struct crypt_config *cc = ti->private;
 	unsigned int sz = 0;
 
 	switch (type) {
@@ -1277,11 +1306,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
 		break;
 
 	case STATUSTYPE_TABLE:
-		if (cc->iv_mode)
-			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
-			       cc->iv_mode);
+		if (cc->cipher_mode)
+			DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
 		else
-			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);
+			DMEMIT("%s ", cc->cipher);
 
 		if (cc->key_size > 0) {
 			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
@@ -1378,7 +1406,7 @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 		return max_size;
 
 	bvm->bi_bdev = cc->dev->bdev;
-	bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;
+	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
 
 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
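
The structural change in the dm-crypt hunks above is worth spelling out: crypt_ctr() now stores its half-built config in ti->private immediately after allocation, and every failure path funnels through a single bad: label that simply calls crypt_dtr(). That works because the reworked destructor tolerates a partially constructed context (a NULL check on each pool and queue, an IS_ERR() check on the tfm), replacing the old ladder of a dozen bad_* labels. A minimal sketch of the pattern, with illustrative names (my_ctx, my_ctr and my_dtr are not from the patch):

	struct my_ctx {
		struct workqueue_struct *wq;	/* NULL until created */
	};

	static void my_dtr(struct dm_target *ti)
	{
		struct my_ctx *c = ti->private;

		ti->private = NULL;
		if (!c)
			return;
		if (c->wq)			/* only tear down what exists */
			destroy_workqueue(c->wq);
		kfree(c);
	}

	static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		struct my_ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);

		if (!c) {
			ti->error = "Cannot allocate context";
			return -ENOMEM;
		}
		ti->private = c;	/* publish before anything can fail */

		c->wq = create_singlethread_workqueue("my_wq");
		if (!c->wq) {
			ti->error = "Couldn't create workqueue";
			goto bad;
		}
		return 0;

	bad:
		my_dtr(ti);		/* one label; dtr handles partial state */
		return -ENOMEM;
	}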
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 852052880d7a..baa11912cc94 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -198,6 +198,7 @@ out:
 	atomic_set(&dc->may_delay, 1);
 
 	ti->num_flush_requests = 1;
+	ti->num_discard_requests = 1;
 	ti->private = dc;
 	return 0;
 
@@ -281,14 +282,13 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
 		bio->bi_bdev = dc->dev_write->bdev;
 		if (bio_sectors(bio))
 			bio->bi_sector = dc->start_write +
-					 (bio->bi_sector - ti->begin);
+					 dm_target_offset(ti, bio->bi_sector);
 
 		return delay_bio(dc, dc->write_delay, bio);
 	}
 
 	bio->bi_bdev = dc->dev_read->bdev;
-	bio->bi_sector = dc->start_read +
-			 (bio->bi_sector - ti->begin);
+	bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
 
 	return delay_bio(dc, dc->read_delay, bio);
 }
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 2b7907b6dd09..0bdb201c2c2a 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
 
 	/* Validate the chunk size against the device block size */
 	if (chunk_size %
-	    (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
+	    (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
+	    chunk_size %
+	    (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
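
Both divisors here are in 512-byte sectors: bdev_logical_block_size() returns bytes and the >> 9 converts to sectors, the unit chunk_size is kept in. With this change the snapshot chunk size must divide evenly by the logical block size of the origin device as well as the cow device. For example, if either device reports a 4096-byte logical block size, the divisor is 4096 >> 9 = 8, so chunk_size must be a multiple of 8 sectors (4 KiB).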
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index e8dfa06af3ba..0b2536247cf5 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -126,8 +126,9 @@ struct dm_exception_store {
 };
 
 /*
- * Obtain the cow device used by a given snapshot.
+ * Obtain the origin or cow device used by a given snapshot.
  */
+struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
 struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
 
 /*
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index d7500e1c26f2..3e39193e5036 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -249,55 +249,66 @@ static void __hash_remove(struct hash_cell *hc)
 
 static void dm_hash_remove_all(int keep_open_devices)
 {
-	int i, dev_skipped, dev_removed;
+	int i, dev_skipped;
 	struct hash_cell *hc;
-	struct list_head *tmp, *n;
+	struct mapped_device *md;
+
+retry:
+	dev_skipped = 0;
 
 	down_write(&_hash_lock);
 
-retry:
-	dev_skipped = dev_removed = 0;
 	for (i = 0; i < NUM_BUCKETS; i++) {
-		list_for_each_safe (tmp, n, _name_buckets + i) {
-			hc = list_entry(tmp, struct hash_cell, name_list);
+		list_for_each_entry(hc, _name_buckets + i, name_list) {
+			md = hc->md;
+			dm_get(md);
 
-			if (keep_open_devices &&
-			    dm_lock_for_deletion(hc->md)) {
+			if (keep_open_devices && dm_lock_for_deletion(md)) {
+				dm_put(md);
 				dev_skipped++;
 				continue;
 			}
+
 			__hash_remove(hc);
-			dev_removed = 1;
-		}
-	}
 
-	/*
-	 * Some mapped devices may be using other mapped devices, so if any
-	 * still exist, repeat until we make no further progress.
-	 */
-	if (dev_skipped) {
-		if (dev_removed)
-			goto retry;
+			up_write(&_hash_lock);
 
-		DMWARN("remove_all left %d open device(s)", dev_skipped);
+			dm_put(md);
+			if (likely(keep_open_devices))
+				dm_destroy(md);
+			else
+				dm_destroy_immediate(md);
+
+			/*
+			 * Some mapped devices may be using other mapped
+			 * devices, so repeat until we make no further
+			 * progress. If a new mapped device is created
+			 * here it will also get removed.
+			 */
+			goto retry;
+		}
 	}
 
 	up_write(&_hash_lock);
+
+	if (dev_skipped)
+		DMWARN("remove_all left %d open device(s)", dev_skipped);
 }
 
-static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
+static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
 					    const char *new)
 {
 	char *new_name, *old_name;
 	struct hash_cell *hc;
 	struct dm_table *table;
+	struct mapped_device *md;
 
 	/*
 	 * duplicate new.
 	 */
 	new_name = kstrdup(new, GFP_KERNEL);
 	if (!new_name)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	down_write(&_hash_lock);
 
@@ -306,24 +317,24 @@ static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
 	 */
 	hc = __get_name_cell(new);
 	if (hc) {
-		DMWARN("asked to rename to an already existing name %s -> %s",
-		       old, new);
+		DMWARN("asked to rename to an already-existing name %s -> %s",
+		       param->name, new);
 		dm_put(hc->md);
 		up_write(&_hash_lock);
 		kfree(new_name);
-		return -EBUSY;
+		return ERR_PTR(-EBUSY);
 	}
 
 	/*
 	 * Is there such a device as 'old' ?
 	 */
-	hc = __get_name_cell(old);
+	hc = __get_name_cell(param->name);
 	if (!hc) {
-		DMWARN("asked to rename a non existent device %s -> %s",
-		       old, new);
+		DMWARN("asked to rename a non-existent device %s -> %s",
+		       param->name, new);
 		up_write(&_hash_lock);
 		kfree(new_name);
-		return -ENXIO;
+		return ERR_PTR(-ENXIO);
 	}
 
 	/*
@@ -345,13 +356,14 @@ static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
 		dm_table_put(table);
 	}
 
-	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
-		*flags |= DM_UEVENT_GENERATED_FLAG;
+	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
+		param->flags |= DM_UEVENT_GENERATED_FLAG;
 
-	dm_put(hc->md);
+	md = hc->md;
 	up_write(&_hash_lock);
 	kfree(old_name);
-	return 0;
+
+	return md;
 }
 
 /*-----------------------------------------------------------------
@@ -573,7 +585,7 @@ static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
  * Fills in a dm_ioctl structure, ready for sending back to
  * userland.
  */
-static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
+static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
 {
 	struct gendisk *disk = dm_disk(md);
 	struct dm_table *table;
@@ -617,8 +629,6 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
 			dm_table_put(table);
 		}
 	}
-
-	return 0;
 }
 
 static int dev_create(struct dm_ioctl *param, size_t param_size)
@@ -640,15 +650,17 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
 	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
 	if (r) {
 		dm_put(md);
+		dm_destroy(md);
 		return r;
 	}
 
 	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
 
-	r = __dev_status(md, param);
+	__dev_status(md, param);
+
 	dm_put(md);
 
-	return r;
+	return 0;
 }
 
 /*
@@ -742,6 +754,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
 		param->flags |= DM_UEVENT_GENERATED_FLAG;
 
 	dm_put(md);
+	dm_destroy(md);
 	return 0;
 }
 
@@ -762,6 +775,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
 {
 	int r;
 	char *new_name = (char *) param + param->data_start;
+	struct mapped_device *md;
 
 	if (new_name < param->data ||
 	    invalid_str(new_name, (void *) param + param_size) ||
@@ -774,10 +788,14 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
 	if (r)
 		return r;
 
-	param->data_size = 0;
+	md = dm_hash_rename(param, new_name);
+	if (IS_ERR(md))
+		return PTR_ERR(md);
+
+	__dev_status(md, param);
+	dm_put(md);
 
-	return dm_hash_rename(param->event_nr, &param->flags, param->name,
-			      new_name);
+	return 0;
 }
 
 static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
@@ -818,8 +836,6 @@ static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
 	geometry.start = indata[3];
 
 	r = dm_set_geometry(md, &geometry);
-	if (!r)
-		r = __dev_status(md, param);
 
 	param->data_size = 0;
 
@@ -843,13 +859,17 @@ static int do_suspend(struct dm_ioctl *param)
 	if (param->flags & DM_NOFLUSH_FLAG)
 		suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
 
-	if (!dm_suspended_md(md))
+	if (!dm_suspended_md(md)) {
 		r = dm_suspend(md, suspend_flags);
+		if (r)
+			goto out;
+	}
 
-	if (!r)
-		r = __dev_status(md, param);
+	__dev_status(md, param);
 
+out:
 	dm_put(md);
+
 	return r;
 }
 
@@ -911,7 +931,7 @@ static int do_resume(struct dm_ioctl *param)
 		dm_table_destroy(old_map);
 
 	if (!r)
-		r = __dev_status(md, param);
+		__dev_status(md, param);
 
 	dm_put(md);
 	return r;
@@ -935,16 +955,16 @@ static int dev_suspend(struct dm_ioctl *param, size_t param_size)
  */
 static int dev_status(struct dm_ioctl *param, size_t param_size)
 {
-	int r;
 	struct mapped_device *md;
 
 	md = find_device(param);
 	if (!md)
 		return -ENXIO;
 
-	r = __dev_status(md, param);
+	__dev_status(md, param);
 	dm_put(md);
-	return r;
+
+	return 0;
 }
 
 /*
@@ -1019,7 +1039,7 @@ static void retrieve_status(struct dm_table *table,
  */
 static int dev_wait(struct dm_ioctl *param, size_t param_size)
 {
-	int r;
+	int r = 0;
 	struct mapped_device *md;
 	struct dm_table *table;
 
@@ -1040,9 +1060,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
 	 * changed to trigger the event, so we may as well tell
 	 * him and save an ioctl.
 	 */
-	r = __dev_status(md, param);
-	if (r)
-		goto out;
+	__dev_status(md, param);
 
 	table = dm_get_live_or_inactive_table(md, param);
 	if (table) {
@@ -1050,8 +1068,9 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
 		dm_table_put(table);
 	}
 
 out:
 	dm_put(md);
+
 	return r;
 }
 
@@ -1112,28 +1131,9 @@ static int populate_table(struct dm_table *table,
 		next = spec->next;
 	}
 
-	r = dm_table_set_type(table);
-	if (r) {
-		DMWARN("unable to set table type");
-		return r;
-	}
-
 	return dm_table_complete(table);
 }
 
-static int table_prealloc_integrity(struct dm_table *t,
-				    struct mapped_device *md)
-{
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *dd;
-
-	list_for_each_entry(dd, devices, list)
-		if (bdev_get_integrity(dd->dm_dev.bdev))
-			return blk_integrity_register(dm_disk(md), NULL);
-
-	return 0;
-}
-
 static int table_load(struct dm_ioctl *param, size_t param_size)
 {
 	int r;
@@ -1155,21 +1155,30 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 		goto out;
 	}
 
-	r = table_prealloc_integrity(t, md);
-	if (r) {
-		DMERR("%s: could not register integrity profile.",
-		      dm_device_name(md));
+	/* Protect md->type and md->queue against concurrent table loads. */
+	dm_lock_md_type(md);
+	if (dm_get_md_type(md) == DM_TYPE_NONE)
+		/* Initial table load: acquire type of table. */
+		dm_set_md_type(md, dm_table_get_type(t));
+	else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+		DMWARN("can't change device type after initial table load.");
 		dm_table_destroy(t);
+		dm_unlock_md_type(md);
+		r = -EINVAL;
 		goto out;
 	}
 
-	r = dm_table_alloc_md_mempools(t);
+	/* setup md->queue to reflect md's type (may block) */
+	r = dm_setup_md_queue(md);
 	if (r) {
-		DMWARN("unable to allocate mempools for this table");
+		DMWARN("unable to set up device queue for new table.");
 		dm_table_destroy(t);
+		dm_unlock_md_type(md);
 		goto out;
 	}
+	dm_unlock_md_type(md);
 
+	/* stage inactive table */
 	down_write(&_hash_lock);
 	hc = dm_get_mdptr(md);
 	if (!hc || hc->md != md) {
@@ -1186,7 +1195,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 	up_write(&_hash_lock);
 
 	param->flags |= DM_INACTIVE_PRESENT_FLAG;
-	r = __dev_status(md, param);
+	__dev_status(md, param);
 
 out:
 	dm_put(md);
@@ -1196,7 +1205,6 @@ out:
 
 static int table_clear(struct dm_ioctl *param, size_t param_size)
 {
-	int r;
 	struct hash_cell *hc;
 	struct mapped_device *md;
 
@@ -1216,11 +1224,12 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
 
 	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
 
-	r = __dev_status(hc->md, param);
+	__dev_status(hc->md, param);
 	md = hc->md;
 	up_write(&_hash_lock);
 	dm_put(md);
-	return r;
+
+	return 0;
 }
 
 /*
@@ -1265,7 +1274,6 @@ static void retrieve_deps(struct dm_table *table,
 
 static int table_deps(struct dm_ioctl *param, size_t param_size)
 {
-	int r = 0;
 	struct mapped_device *md;
 	struct dm_table *table;
 
@@ -1273,9 +1281,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
 	if (!md)
 		return -ENXIO;
 
-	r = __dev_status(md, param);
-	if (r)
-		goto out;
+	__dev_status(md, param);
 
 	table = dm_get_live_or_inactive_table(md, param);
 	if (table) {
@@ -1283,9 +1289,9 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
 		dm_table_put(table);
 	}
 
-out:
 	dm_put(md);
-	return r;
+
+	return 0;
 }
 
 /*
@@ -1294,7 +1300,6 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
  */
 static int table_status(struct dm_ioctl *param, size_t param_size)
 {
-	int r;
 	struct mapped_device *md;
 	struct dm_table *table;
 
@@ -1302,9 +1307,7 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
 	if (!md)
 		return -ENXIO;
 
-	r = __dev_status(md, param);
-	if (r)
-		goto out;
+	__dev_status(md, param);
 
 	table = dm_get_live_or_inactive_table(md, param);
 	if (table) {
@@ -1312,9 +1315,9 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
 		dm_table_put(table);
 	}
 
-out:
 	dm_put(md);
-	return r;
+
+	return 0;
 }
 
 /*
@@ -1333,10 +1336,6 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 	if (!md)
 		return -ENXIO;
 
-	r = __dev_status(md, param);
-	if (r)
-		goto out;
-
 	if (tmsg < (struct dm_target_msg *) param->data ||
 	    invalid_str(tmsg->message, (void *) param + param_size)) {
 		DMWARN("Invalid target message parameters.");
@@ -1593,18 +1592,22 @@ static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
 #endif
 
 static const struct file_operations _ctl_fops = {
+	.open = nonseekable_open,
 	.unlocked_ioctl	 = dm_ctl_ioctl,
 	.compat_ioctl = dm_compat_ctl_ioctl,
 	.owner	 = THIS_MODULE,
 };
 
 static struct miscdevice _dm_misc = {
-	.minor		= MISC_DYNAMIC_MINOR,
+	.minor		= MAPPER_CTRL_MINOR,
 	.name		= DM_NAME,
-	.nodename	= "mapper/control",
+	.nodename	= DM_DIR "/" DM_CONTROL_NODE,
 	.fops		= &_ctl_fops
 };
 
+MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
+MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);
+
 /*
 * Create misc character device and link to DM_DIR/control.
 */
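
Two threads run through the dm-ioctl changes above: __dev_status() can no longer fail, so it becomes void and every "r = __dev_status(...)" dance disappears, and device teardown moves out from under _hash_lock. The new dm_hash_remove_all() control flow is easier to see consolidated; the following reduced model (illustrative only, with the keep_open_devices handling folded out) shows why the function restarts from retry after each removal:

	static void remove_all_model(void)
	{
		struct hash_cell *hc;
		struct mapped_device *md;
		int i;

	retry:
		down_write(&_hash_lock);
		for (i = 0; i < NUM_BUCKETS; i++) {
			list_for_each_entry(hc, _name_buckets + i, name_list) {
				md = hc->md;
				dm_get(md);		/* pin across the unlock */
				__hash_remove(hc);
				up_write(&_hash_lock);	/* dm_destroy() may sleep */
				dm_put(md);
				dm_destroy(md);
				goto retry;		/* buckets may have changed */
			}
		}
		up_write(&_hash_lock);
	}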
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 9200dbf2391a..3921e3bb43c1 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,6 +53,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	ti->num_flush_requests = 1;
+	ti->num_discard_requests = 1;
 	ti->private = lc;
 	return 0;
 
@@ -73,7 +74,7 @@ static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
 {
 	struct linear_c *lc = ti->private;
 
-	return lc->start + (bi_sector - ti->begin);
+	return lc->start + dm_target_offset(ti, bi_sector);
 }
 
 static void linear_map_bio(struct dm_target *ti, struct bio *bio)
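
The dm_target_offset() helper adopted here (and in the dm-crypt, dm-delay and dm-raid1 hunks) is introduced by this series in include/linux/device-mapper.h, one of the 16 lines changed there per the diffstat. Its definition is just the open-coded expression it replaces:

	/* Sector offset of a bio within the target, relative to ti->begin. */
	#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)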
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 826bce7343b3..487ecda90ad4 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -706,6 +706,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
 	if (as->argc < nr_params) {
 		ti->error = "not enough path parameters";
+		r = -EINVAL;
 		goto bad;
 	}
 
@@ -892,6 +893,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 	}
 
 	ti->num_flush_requests = 1;
+	ti->num_discard_requests = 1;
 
 	return 0;
 
@@ -1271,6 +1273,15 @@ static int do_end_io(struct multipath *m, struct request *clone,
 	if (error == -EOPNOTSUPP)
 		return error;
 
+	if (clone->cmd_flags & REQ_DISCARD)
+		/*
+		 * Pass all discard request failures up.
+		 * FIXME: only fail_path if the discard failed due to a
+		 * transport problem.  This requires precise understanding
+		 * of the underlying failure (e.g. the SCSI sense).
+		 */
+		return error;
+
 	if (mpio->pgpath)
 		fail_path(mpio->pgpath);
 
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 74136262d654..7c081bcbc3cf 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -445,7 +445,7 @@ static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
 	if (unlikely(!bio->bi_size))
 		return 0;
-	return m->offset + (bio->bi_sector - m->ms->ti->begin);
+	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
 }
 
 static void map_bio(struct mirror *m, struct bio *bio)
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index c097d8a4823d..cc2bdb83f9ad 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -266,7 +266,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
  */
 static chunk_t area_location(struct pstore *ps, chunk_t area)
 {
-	return 1 + ((ps->exceptions_per_area + 1) * area);
+	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
 }
 
 /*
@@ -780,8 +780,8 @@ static int persistent_commit_merge(struct dm_exception_store *store,
 	 * ps->current_area does not get reduced by prepare_merge() until
 	 * after commit_merge() has removed the nr_merged previous exceptions.
 	 */
-	ps->next_free = (area_location(ps, ps->current_area) - 1) +
-			(ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
+	ps->next_free = area_location(ps, ps->current_area) +
+			ps->current_committed + 1;
 
 	return 0;
 }
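
These two hunks are a single refactor and cancel out arithmetically. NUM_SNAPSHOT_HDR_CHUNKS is 1 in this file, so folding it into area_location() leaves the computed chunk unchanged, and the persistent_commit_merge() expression simplifies to the same value. Writing epa for ps->exceptions_per_area and cc for ps->current_committed:

	old: next_free = (1 + (epa + 1) * area - 1) + (cc + 1) + NUM_SNAPSHOT_HDR_CHUNKS
	new: next_free = (NUM_SNAPSHOT_HDR_CHUNKS + (epa + 1) * area) + cc + 1

Both reduce to (epa + 1) * area + cc + 1 + NUM_SNAPSHOT_HDR_CHUNKS.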
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 54853773510c..5974d3094d97 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -148,6 +148,12 @@ struct dm_snapshot {
 #define RUNNING_MERGE          0
 #define SHUTDOWN_MERGE         1
 
+struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
+{
+	return s->origin;
+}
+EXPORT_SYMBOL(dm_snap_origin);
+
 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
 {
 	return s->cow;
@@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		origin_mode = FMODE_WRITE;
 	}
 
-	origin_path = argv[0];
-	argv++;
-	argc--;
-
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s) {
 		ti->error = "Cannot allocate snapshot context private "
@@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}
 
+	origin_path = argv[0];
+	argv++;
+	argc--;
+
+	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
+	if (r) {
+		ti->error = "Cannot get origin device";
+		goto bad_origin;
+	}
+
 	cow_path = argv[0];
 	argv++;
 	argc--;
@@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	argv += args_used;
 	argc -= args_used;
 
-	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
-	if (r) {
-		ti->error = "Cannot get origin device";
-		goto bad_origin;
-	}
-
 	s->ti = ti;
 	s->valid = 1;
 	s->active = 0;
@@ -1212,15 +1218,15 @@ bad_kcopyd:
 	dm_exception_table_exit(&s->complete, exception_cache);
 
 bad_hash_tables:
-	dm_put_device(ti, s->origin);
-
-bad_origin:
 	dm_exception_store_destroy(s->store);
 
 bad_store:
 	dm_put_device(ti, s->cow);
 
 bad_cow:
+	dm_put_device(ti, s->origin);
+
+bad_origin:
 	kfree(s);
 
 bad:
@@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_target *ti)
 
 	mempool_destroy(s->pending_pool);
 
-	dm_put_device(ti, s->origin);
-
 	dm_exception_store_destroy(s->store);
 
 	dm_put_device(ti, s->cow);
 
+	dm_put_device(ti, s->origin);
+
 	kfree(s);
 }
 
@@ -1686,7 +1692,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
 	chunk_t chunk;
 
 	if (unlikely(bio_empty_barrier(bio))) {
-		if (!map_context->flush_request)
+		if (!map_context->target_request_nr)
 			bio->bi_bdev = s->origin->bdev;
 		else
 			bio->bi_bdev = s->cow->bdev;
@@ -1899,8 +1905,14 @@ static int snapshot_iterate_devices(struct dm_target *ti,
 				     iterate_devices_callout_fn fn, void *data)
 {
 	struct dm_snapshot *snap = ti->private;
+	int r;
+
+	r = fn(ti, snap->origin, 0, ti->len, data);
+
+	if (!r)
+		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
 
-	return fn(ti, snap->origin, 0, ti->len, data);
+	return r;
 }
 
 
@@ -2159,6 +2171,21 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result, | |||
2159 | return 0; | 2171 | return 0; |
2160 | } | 2172 | } |
2161 | 2173 | ||
2174 | static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm, | ||
2175 | struct bio_vec *biovec, int max_size) | ||
2176 | { | ||
2177 | struct dm_dev *dev = ti->private; | ||
2178 | struct request_queue *q = bdev_get_queue(dev->bdev); | ||
2179 | |||
2180 | if (!q->merge_bvec_fn) | ||
2181 | return max_size; | ||
2182 | |||
2183 | bvm->bi_bdev = dev->bdev; | ||
2184 | bvm->bi_sector = bvm->bi_sector; | ||
2185 | |||
2186 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | ||
2187 | } | ||
2188 | |||
2162 | static int origin_iterate_devices(struct dm_target *ti, | 2189 | static int origin_iterate_devices(struct dm_target *ti, |
2163 | iterate_devices_callout_fn fn, void *data) | 2190 | iterate_devices_callout_fn fn, void *data) |
2164 | { | 2191 | { |
@@ -2176,6 +2203,7 @@ static struct target_type origin_target = { | |||
2176 | .map = origin_map, | 2203 | .map = origin_map, |
2177 | .resume = origin_resume, | 2204 | .resume = origin_resume, |
2178 | .status = origin_status, | 2205 | .status = origin_status, |
2206 | .merge = origin_merge, | ||
2179 | .iterate_devices = origin_iterate_devices, | 2207 | .iterate_devices = origin_iterate_devices, |
2180 | }; | 2208 | }; |
2181 | 2209 | ||
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index d6e28d732b4d..c297f6da91ea 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -25,6 +25,8 @@ struct stripe { | |||
25 | 25 | ||
26 | struct stripe_c { | 26 | struct stripe_c { |
27 | uint32_t stripes; | 27 | uint32_t stripes; |
28 | int stripes_shift; | ||
29 | sector_t stripes_mask; | ||
28 | 30 | ||
29 | /* The size of this target / num. stripes */ | 31 | /* The size of this target / num. stripes */ |
30 | sector_t stripe_width; | 32 | sector_t stripe_width; |
@@ -162,16 +164,22 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
162 | 164 | ||
163 | /* Set pointer to dm target; used in trigger_event */ | 165 | /* Set pointer to dm target; used in trigger_event */ |
164 | sc->ti = ti; | 166 | sc->ti = ti; |
165 | |||
166 | sc->stripes = stripes; | 167 | sc->stripes = stripes; |
167 | sc->stripe_width = width; | 168 | sc->stripe_width = width; |
169 | |||
170 | if (stripes & (stripes - 1)) | ||
171 | sc->stripes_shift = -1; | ||
172 | else { | ||
173 | sc->stripes_shift = ffs(stripes) - 1; | ||
174 | sc->stripes_mask = ((sector_t) stripes) - 1; | ||
175 | } | ||
176 | |||
168 | ti->split_io = chunk_size; | 177 | ti->split_io = chunk_size; |
169 | ti->num_flush_requests = stripes; | 178 | ti->num_flush_requests = stripes; |
179 | ti->num_discard_requests = stripes; | ||
170 | 180 | ||
181 | sc->chunk_shift = ffs(chunk_size) - 1; | ||
171 | sc->chunk_mask = ((sector_t) chunk_size) - 1; | 182 | sc->chunk_mask = ((sector_t) chunk_size) - 1; |
172 | for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) | ||
173 | chunk_size >>= 1; | ||
174 | sc->chunk_shift--; | ||
175 | 183 | ||
176 | /* | 184 | /* |
177 | * Get the stripe destinations. | 185 | * Get the stripe destinations. |
@@ -207,26 +215,79 @@ static void stripe_dtr(struct dm_target *ti) | |||
207 | kfree(sc); | 215 | kfree(sc); |
208 | } | 216 | } |
209 | 217 | ||
218 | static void stripe_map_sector(struct stripe_c *sc, sector_t sector, | ||
219 | uint32_t *stripe, sector_t *result) | ||
220 | { | ||
221 | sector_t offset = dm_target_offset(sc->ti, sector); | ||
222 | sector_t chunk = offset >> sc->chunk_shift; | ||
223 | |||
224 | if (sc->stripes_shift < 0) | ||
225 | *stripe = sector_div(chunk, sc->stripes); | ||
226 | else { | ||
227 | *stripe = chunk & sc->stripes_mask; | ||
228 | chunk >>= sc->stripes_shift; | ||
229 | } | ||
230 | |||
231 | *result = (chunk << sc->chunk_shift) | (offset & sc->chunk_mask); | ||
232 | } | ||
233 | |||
234 | static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector, | ||
235 | uint32_t target_stripe, sector_t *result) | ||
236 | { | ||
237 | uint32_t stripe; | ||
238 | |||
239 | stripe_map_sector(sc, sector, &stripe, result); | ||
240 | if (stripe == target_stripe) | ||
241 | return; | ||
242 | *result &= ~sc->chunk_mask; /* round down */ | ||
243 | if (target_stripe < stripe) | ||
244 | *result += sc->chunk_mask + 1; /* next chunk */ | ||
245 | } | ||
246 | |||
247 | static int stripe_map_discard(struct stripe_c *sc, struct bio *bio, | ||
248 | uint32_t target_stripe) | ||
249 | { | ||
250 | sector_t begin, end; | ||
251 | |||
252 | stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); | ||
253 | stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio), | ||
254 | target_stripe, &end); | ||
255 | if (begin < end) { | ||
256 | bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; | ||
257 | bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; | ||
258 | bio->bi_size = to_bytes(end - begin); | ||
259 | return DM_MAPIO_REMAPPED; | ||
260 | } else { | ||
261 | /* The range doesn't map to the target stripe */ | ||
262 | bio_endio(bio, 0); | ||
263 | return DM_MAPIO_SUBMITTED; | ||
264 | } | ||
265 | } | ||
266 | |||
210 | static int stripe_map(struct dm_target *ti, struct bio *bio, | 267 | static int stripe_map(struct dm_target *ti, struct bio *bio, |
211 | union map_info *map_context) | 268 | union map_info *map_context) |
212 | { | 269 | { |
213 | struct stripe_c *sc = (struct stripe_c *) ti->private; | 270 | struct stripe_c *sc = ti->private; |
214 | sector_t offset, chunk; | ||
215 | uint32_t stripe; | 271 | uint32_t stripe; |
272 | unsigned target_request_nr; | ||
216 | 273 | ||
217 | if (unlikely(bio_empty_barrier(bio))) { | 274 | if (unlikely(bio_empty_barrier(bio))) { |
218 | BUG_ON(map_context->flush_request >= sc->stripes); | 275 | target_request_nr = map_context->target_request_nr; |
219 | bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev; | 276 | BUG_ON(target_request_nr >= sc->stripes); |
277 | bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev; | ||
220 | return DM_MAPIO_REMAPPED; | 278 | return DM_MAPIO_REMAPPED; |
221 | } | 279 | } |
280 | if (unlikely(bio->bi_rw & REQ_DISCARD)) { | ||
281 | target_request_nr = map_context->target_request_nr; | ||
282 | BUG_ON(target_request_nr >= sc->stripes); | ||
283 | return stripe_map_discard(sc, bio, target_request_nr); | ||
284 | } | ||
222 | 285 | ||
223 | offset = bio->bi_sector - ti->begin; | 286 | stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); |
224 | chunk = offset >> sc->chunk_shift; | ||
225 | stripe = sector_div(chunk, sc->stripes); | ||
226 | 287 | ||
288 | bio->bi_sector += sc->stripe[stripe].physical_start; | ||
227 | bio->bi_bdev = sc->stripe[stripe].dev->bdev; | 289 | bio->bi_bdev = sc->stripe[stripe].dev->bdev; |
228 | bio->bi_sector = sc->stripe[stripe].physical_start + | 290 | |
229 | (chunk << sc->chunk_shift) + (offset & sc->chunk_mask); | ||
230 | return DM_MAPIO_REMAPPED; | 291 | return DM_MAPIO_REMAPPED; |
231 | } | 292 | } |
232 | 293 | ||
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 9924ea23032d..f9fc07d7a4b9 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -54,6 +54,8 @@ struct dm_table { | |||
54 | sector_t *highs; | 54 | sector_t *highs; |
55 | struct dm_target *targets; | 55 | struct dm_target *targets; |
56 | 56 | ||
57 | unsigned discards_supported:1; | ||
58 | |||
57 | /* | 59 | /* |
58 | * Indicates the rw permissions for the new logical | 60 | * Indicates the rw permissions for the new logical |
59 | * device. This should be a combination of FMODE_READ | 61 | * device. This should be a combination of FMODE_READ |
@@ -203,6 +205,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode, | |||
203 | 205 | ||
204 | INIT_LIST_HEAD(&t->devices); | 206 | INIT_LIST_HEAD(&t->devices); |
205 | atomic_set(&t->holders, 0); | 207 | atomic_set(&t->holders, 0); |
208 | t->discards_supported = 1; | ||
206 | 209 | ||
207 | if (!num_targets) | 210 | if (!num_targets) |
208 | num_targets = KEYS_PER_NODE; | 211 | num_targets = KEYS_PER_NODE; |
@@ -245,7 +248,7 @@ void dm_table_destroy(struct dm_table *t) | |||
245 | msleep(1); | 248 | msleep(1); |
246 | smp_mb(); | 249 | smp_mb(); |
247 | 250 | ||
248 | /* free the indexes (see dm_table_complete) */ | 251 | /* free the indexes */ |
249 | if (t->depth >= 2) | 252 | if (t->depth >= 2) |
250 | vfree(t->index[t->depth - 2]); | 253 | vfree(t->index[t->depth - 2]); |
251 | 254 | ||
@@ -770,6 +773,9 @@ int dm_table_add_target(struct dm_table *t, const char *type, | |||
770 | 773 | ||
771 | t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; | 774 | t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; |
772 | 775 | ||
776 | if (!tgt->num_discard_requests) | ||
777 | t->discards_supported = 0; | ||
778 | |||
773 | return 0; | 779 | return 0; |
774 | 780 | ||
775 | bad: | 781 | bad: |
@@ -778,7 +784,7 @@ int dm_table_add_target(struct dm_table *t, const char *type, | |||
778 | return r; | 784 | return r; |
779 | } | 785 | } |
780 | 786 | ||
781 | int dm_table_set_type(struct dm_table *t) | 787 | static int dm_table_set_type(struct dm_table *t) |
782 | { | 788 | { |
783 | unsigned i; | 789 | unsigned i; |
784 | unsigned bio_based = 0, request_based = 0; | 790 | unsigned bio_based = 0, request_based = 0; |
@@ -900,7 +906,7 @@ static int setup_indexes(struct dm_table *t) | |||
900 | /* | 906 | /* |
901 | * Builds the btree to index the map. | 907 | * Builds the btree to index the map. |
902 | */ | 908 | */ |
903 | int dm_table_complete(struct dm_table *t) | 909 | static int dm_table_build_index(struct dm_table *t) |
904 | { | 910 | { |
905 | int r = 0; | 911 | int r = 0; |
906 | unsigned int leaf_nodes; | 912 | unsigned int leaf_nodes; |
@@ -919,6 +925,55 @@ int dm_table_complete(struct dm_table *t) | |||
919 | return r; | 925 | return r; |
920 | } | 926 | } |
921 | 927 | ||
928 | /* | ||
929 | * Register the mapped device for blk_integrity support if | ||
930 | * the underlying devices support it. | ||
931 | */ | ||
932 | static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md) | ||
933 | { | ||
934 | struct list_head *devices = dm_table_get_devices(t); | ||
935 | struct dm_dev_internal *dd; | ||
936 | |||
937 | list_for_each_entry(dd, devices, list) | ||
938 | if (bdev_get_integrity(dd->dm_dev.bdev)) | ||
939 | return blk_integrity_register(dm_disk(md), NULL); | ||
940 | |||
941 | return 0; | ||
942 | } | ||
943 | |||
944 | /* | ||
945 | * Prepares the table for use by building the indices, | ||
946 | * setting the type, and allocating mempools. | ||
947 | */ | ||
948 | int dm_table_complete(struct dm_table *t) | ||
949 | { | ||
950 | int r; | ||
951 | |||
952 | r = dm_table_set_type(t); | ||
953 | if (r) { | ||
954 | DMERR("unable to set table type"); | ||
955 | return r; | ||
956 | } | ||
957 | |||
958 | r = dm_table_build_index(t); | ||
959 | if (r) { | ||
960 | DMERR("unable to build btrees"); | ||
961 | return r; | ||
962 | } | ||
963 | |||
964 | r = dm_table_prealloc_integrity(t, t->md); | ||
965 | if (r) { | ||
966 | DMERR("could not register integrity profile."); | ||
967 | return r; | ||
968 | } | ||
969 | |||
970 | r = dm_table_alloc_md_mempools(t); | ||
971 | if (r) | ||
972 | DMERR("unable to allocate mempools"); | ||
973 | |||
974 | return r; | ||
975 | } | ||
976 | |||
922 | static DEFINE_MUTEX(_event_lock); | 977 | static DEFINE_MUTEX(_event_lock); |
923 | void dm_table_event_callback(struct dm_table *t, | 978 | void dm_table_event_callback(struct dm_table *t, |
924 | void (*fn)(void *), void *context) | 979 | void (*fn)(void *), void *context) |
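
After this refactoring, dm_table_complete() is the single entry point that finalizes a table. A hedged sketch of the construction sequence as seen from this file; the "linear" target, its length, and its params are illustrative, and the surrounding ioctl plumbing is elided:

	struct dm_table *t;
	int r;

	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
	if (!r)
		/* start = 0; len and params as the caller parsed them */
		r = dm_table_add_target(t, "linear", 0, len, params);
	if (!r)
		/* sets the type, builds the btree index, registers the
		 * integrity profile, and allocates mempools, in that order */
		r = dm_table_complete(t);
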
@@ -1086,6 +1141,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | |||
1086 | else | 1141 | else |
1087 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); | 1142 | queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); |
1088 | 1143 | ||
1144 | if (!dm_table_supports_discards(t)) | ||
1145 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); | ||
1146 | else | ||
1147 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | ||
1148 | |||
1089 | dm_table_set_integrity(t); | 1149 | dm_table_set_integrity(t); |
1090 | 1150 | ||
1091 | /* | 1151 | /* |
@@ -1232,6 +1292,39 @@ struct mapped_device *dm_table_get_md(struct dm_table *t) | |||
1232 | return t->md; | 1292 | return t->md; |
1233 | } | 1293 | } |
1234 | 1294 | ||
1295 | static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, | ||
1296 | sector_t start, sector_t len, void *data) | ||
1297 | { | ||
1298 | struct request_queue *q = bdev_get_queue(dev->bdev); | ||
1299 | |||
1300 | return q && blk_queue_discard(q); | ||
1301 | } | ||
1302 | |||
1303 | bool dm_table_supports_discards(struct dm_table *t) | ||
1304 | { | ||
1305 | struct dm_target *ti; | ||
1306 | unsigned i = 0; | ||
1307 | |||
1308 | if (!t->discards_supported) | ||
1309 | return 0; | ||
1310 | |||
1311 | /* | ||
1312 | * Ensure that at least one underlying device supports discards. | ||
1313 | * t->devices includes internal dm devices such as mirror logs | ||
1314 | * so we need to use iterate_devices here, which targets | ||
1315 | * supporting discard must provide. | ||
1316 | */ | ||
1317 | while (i < dm_table_get_num_targets(t)) { | ||
1318 | ti = dm_table_get_target(t, i++); | ||
1319 | |||
1320 | if (ti->type->iterate_devices && | ||
1321 | ti->type->iterate_devices(ti, device_discard_capable, NULL)) | ||
1322 | return 1; | ||
1323 | } | ||
1324 | |||
1325 | return 0; | ||
1326 | } | ||
1327 | |||
1235 | EXPORT_SYMBOL(dm_vcalloc); | 1328 | EXPORT_SYMBOL(dm_vcalloc); |
1236 | EXPORT_SYMBOL(dm_get_device); | 1329 | EXPORT_SYMBOL(dm_get_device); |
1237 | EXPORT_SYMBOL(dm_put_device); | 1330 | EXPORT_SYMBOL(dm_put_device); |
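
Two conditions must hold for QUEUE_FLAG_DISCARD to stay set on the mapped device: every target must advertise a nonzero num_discard_requests (dm_table_add_target() clears discards_supported otherwise), and at least one target's iterate_devices() must reach a discard-capable queue via device_discard_capable(). A hedged sketch of the target-side half; the example_* names and context struct are hypothetical:

	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		/* Opt in: a single target left at num_discard_requests == 0
		 * disables discards for the whole table. */
		ti->num_discard_requests = 1;
		return 0;
	}

	static int example_iterate_devices(struct dm_target *ti,
					   iterate_devices_callout_fn fn,
					   void *data)
	{
		struct example_c *ec = ti->private;	/* hypothetical context */

		/* dm_table_supports_discards() probes the underlying queue
		 * through this callback (device_discard_capable above). */
		return fn(ti, ec->dev, 0, ti->len, data);
	}
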
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 11dea11dc0b6..8da366cf381c 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c | |||
@@ -113,6 +113,11 @@ void dm_unregister_target(struct target_type *tt) | |||
113 | */ | 113 | */ |
114 | static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args) | 114 | static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args) |
115 | { | 115 | { |
116 | /* | ||
117 | * Return error for discards instead of -EOPNOTSUPP | ||
118 | */ | ||
119 | tt->num_discard_requests = 1; | ||
120 | |||
116 | return 0; | 121 | return 0; |
117 | } | 122 | } |
118 | 123 | ||
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c index bbc97030c0c2..cc2b3cb81946 100644 --- a/drivers/md/dm-zero.c +++ b/drivers/md/dm-zero.c | |||
@@ -22,6 +22,11 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
22 | return -EINVAL; | 22 | return -EINVAL; |
23 | } | 23 | } |
24 | 24 | ||
25 | /* | ||
26 | * Silently drop discards, avoiding -EOPNOTSUPP. | ||
27 | */ | ||
28 | ti->num_discard_requests = 1; | ||
29 | |||
25 | return 0; | 30 | return 0; |
26 | } | 31 | } |
27 | 32 | ||
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a3f21dc02bd8..ac384b2a6a33 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/idr.h> | 21 | #include <linux/idr.h> |
22 | #include <linux/hdreg.h> | 22 | #include <linux/hdreg.h> |
23 | #include <linux/delay.h> | ||
23 | 24 | ||
24 | #include <trace/events/block.h> | 25 | #include <trace/events/block.h> |
25 | 26 | ||
@@ -124,6 +125,10 @@ struct mapped_device { | |||
124 | unsigned long flags; | 125 | unsigned long flags; |
125 | 126 | ||
126 | struct request_queue *queue; | 127 | struct request_queue *queue; |
128 | unsigned type; | ||
129 | /* Protect queue and type against concurrent access. */ | ||
130 | struct mutex type_lock; | ||
131 | |||
127 | struct gendisk *disk; | 132 | struct gendisk *disk; |
128 | char name[16]; | 133 | char name[16]; |
129 | 134 | ||
@@ -638,8 +643,14 @@ static void dec_pending(struct dm_io *io, int error) | |||
638 | * There can be just one barrier request so we use | 643 | * There can be just one barrier request so we use |
639 | * a per-device variable for error reporting. | 644 | * a per-device variable for error reporting. |
640 | * Note that you can't touch the bio after end_io_acct | 645 | * Note that you can't touch the bio after end_io_acct |
646 | * | ||
647 | * We ignore -EOPNOTSUPP for empty flush reported by | ||
648 | * underlying devices. We assume that if the device | ||
649 | * doesn't support empty barriers, it doesn't need | ||
650 | * cache flushing commands. | ||
641 | */ | 651 | */ |
642 | if (!md->barrier_error && io_error != -EOPNOTSUPP) | 652 | if (!md->barrier_error && |
653 | !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP)) | ||
643 | md->barrier_error = io_error; | 654 | md->barrier_error = io_error; |
644 | end_io_acct(io); | 655 | end_io_acct(io); |
645 | free_io(md, io); | 656 | free_io(md, io); |
@@ -1019,17 +1030,27 @@ static void end_clone_request(struct request *clone, int error) | |||
1019 | dm_complete_request(clone, error); | 1030 | dm_complete_request(clone, error); |
1020 | } | 1031 | } |
1021 | 1032 | ||
1022 | static sector_t max_io_len(struct mapped_device *md, | 1033 | /* |
1023 | sector_t sector, struct dm_target *ti) | 1034 | * Return maximum size of I/O possible at the supplied sector up to the current |
1035 | * target boundary. | ||
1036 | */ | ||
1037 | static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) | ||
1038 | { | ||
1039 | sector_t target_offset = dm_target_offset(ti, sector); | ||
1040 | |||
1041 | return ti->len - target_offset; | ||
1042 | } | ||
1043 | |||
1044 | static sector_t max_io_len(sector_t sector, struct dm_target *ti) | ||
1024 | { | 1045 | { |
1025 | sector_t offset = sector - ti->begin; | 1046 | sector_t len = max_io_len_target_boundary(sector, ti); |
1026 | sector_t len = ti->len - offset; | ||
1027 | 1047 | ||
1028 | /* | 1048 | /* |
1029 | * Does the target need to split even further ? | 1049 | * Does the target need to split even further ? |
1030 | */ | 1050 | */ |
1031 | if (ti->split_io) { | 1051 | if (ti->split_io) { |
1032 | sector_t boundary; | 1052 | sector_t boundary; |
1053 | sector_t offset = dm_target_offset(ti, sector); | ||
1033 | boundary = ((offset + ti->split_io) & ~(ti->split_io - 1)) | 1054 | boundary = ((offset + ti->split_io) & ~(ti->split_io - 1)) |
1034 | - offset; | 1055 | - offset; |
1035 | if (len > boundary) | 1056 | if (len > boundary) |
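
The split_io rounding kept in max_io_len() computes the distance from the current offset to the next split_io-aligned boundary; the mask arithmetic assumes split_io is a power of two. A standalone check of that expression:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t split_io = 8;	/* assumed power-of-two split boundary, in sectors */

		for (uint64_t offset = 0; offset < 1024; offset++) {
			/* Same expression as in max_io_len(): sectors left
			 * before the next split_io boundary. */
			uint64_t boundary = ((offset + split_io) & ~(split_io - 1))
					    - offset;

			assert(boundary >= 1 && boundary <= split_io);
			assert(((offset + boundary) & (split_io - 1)) == 0);
		}

		/* e.g. offset 13 -> boundary 3: sectors 13..15 finish the chunk. */
		printf("offset 13 -> %llu sectors to the next boundary\n",
		       (unsigned long long)(((13 + split_io) & ~(split_io - 1)) - 13));
		return 0;
	}
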
@@ -1171,36 +1192,96 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, | |||
1171 | return tio; | 1192 | return tio; |
1172 | } | 1193 | } |
1173 | 1194 | ||
1174 | static void __flush_target(struct clone_info *ci, struct dm_target *ti, | 1195 | static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, |
1175 | unsigned flush_nr) | 1196 | unsigned request_nr, sector_t len) |
1176 | { | 1197 | { |
1177 | struct dm_target_io *tio = alloc_tio(ci, ti); | 1198 | struct dm_target_io *tio = alloc_tio(ci, ti); |
1178 | struct bio *clone; | 1199 | struct bio *clone; |
1179 | 1200 | ||
1180 | tio->info.flush_request = flush_nr; | 1201 | tio->info.target_request_nr = request_nr; |
1181 | 1202 | ||
1182 | clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); | 1203 | /* |
1204 | * Discard requests require the bio's inline iovecs be initialized. | ||
1205 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush | ||
1206 | * and discard, so no need for concern about wasted bvec allocations. | ||
1207 | */ | ||
1208 | clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs); | ||
1183 | __bio_clone(clone, ci->bio); | 1209 | __bio_clone(clone, ci->bio); |
1184 | clone->bi_destructor = dm_bio_destructor; | 1210 | clone->bi_destructor = dm_bio_destructor; |
1211 | if (len) { | ||
1212 | clone->bi_sector = ci->sector; | ||
1213 | clone->bi_size = to_bytes(len); | ||
1214 | } | ||
1185 | 1215 | ||
1186 | __map_bio(ti, clone, tio); | 1216 | __map_bio(ti, clone, tio); |
1187 | } | 1217 | } |
1188 | 1218 | ||
1219 | static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, | ||
1220 | unsigned num_requests, sector_t len) | ||
1221 | { | ||
1222 | unsigned request_nr; | ||
1223 | |||
1224 | for (request_nr = 0; request_nr < num_requests; request_nr++) | ||
1225 | __issue_target_request(ci, ti, request_nr, len); | ||
1226 | } | ||
1227 | |||
1189 | static int __clone_and_map_empty_barrier(struct clone_info *ci) | 1228 | static int __clone_and_map_empty_barrier(struct clone_info *ci) |
1190 | { | 1229 | { |
1191 | unsigned target_nr = 0, flush_nr; | 1230 | unsigned target_nr = 0; |
1192 | struct dm_target *ti; | 1231 | struct dm_target *ti; |
1193 | 1232 | ||
1194 | while ((ti = dm_table_get_target(ci->map, target_nr++))) | 1233 | while ((ti = dm_table_get_target(ci->map, target_nr++))) |
1195 | for (flush_nr = 0; flush_nr < ti->num_flush_requests; | 1234 | __issue_target_requests(ci, ti, ti->num_flush_requests, 0); |
1196 | flush_nr++) | ||
1197 | __flush_target(ci, ti, flush_nr); | ||
1198 | 1235 | ||
1199 | ci->sector_count = 0; | 1236 | ci->sector_count = 0; |
1200 | 1237 | ||
1201 | return 0; | 1238 | return 0; |
1202 | } | 1239 | } |
1203 | 1240 | ||
1241 | /* | ||
1242 | * Perform all io with a single clone. | ||
1243 | */ | ||
1244 | static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) | ||
1245 | { | ||
1246 | struct bio *clone, *bio = ci->bio; | ||
1247 | struct dm_target_io *tio; | ||
1248 | |||
1249 | tio = alloc_tio(ci, ti); | ||
1250 | clone = clone_bio(bio, ci->sector, ci->idx, | ||
1251 | bio->bi_vcnt - ci->idx, ci->sector_count, | ||
1252 | ci->md->bs); | ||
1253 | __map_bio(ti, clone, tio); | ||
1254 | ci->sector_count = 0; | ||
1255 | } | ||
1256 | |||
1257 | static int __clone_and_map_discard(struct clone_info *ci) | ||
1258 | { | ||
1259 | struct dm_target *ti; | ||
1260 | sector_t len; | ||
1261 | |||
1262 | do { | ||
1263 | ti = dm_table_find_target(ci->map, ci->sector); | ||
1264 | if (!dm_target_is_valid(ti)) | ||
1265 | return -EIO; | ||
1266 | |||
1267 | /* | ||
1268 | * Even though the device advertised discard support, | ||
1269 | * reconfiguration might have changed that since the | ||
1270 | * check was performed. | ||
1271 | */ | ||
1272 | if (!ti->num_discard_requests) | ||
1273 | return -EOPNOTSUPP; | ||
1274 | |||
1275 | len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); | ||
1276 | |||
1277 | __issue_target_requests(ci, ti, ti->num_discard_requests, len); | ||
1278 | |||
1279 | ci->sector += len; | ||
1280 | } while (ci->sector_count -= len); | ||
1281 | |||
1282 | return 0; | ||
1283 | } | ||
1284 | |||
1204 | static int __clone_and_map(struct clone_info *ci) | 1285 | static int __clone_and_map(struct clone_info *ci) |
1205 | { | 1286 | { |
1206 | struct bio *clone, *bio = ci->bio; | 1287 | struct bio *clone, *bio = ci->bio; |
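
__clone_and_map_discard() walks the discard one target at a time, clipping each piece to max_io_len_target_boundary() so no single request crosses a target boundary. A standalone model of that loop over an assumed two-target layout:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical layout: target 0 covers sectors [0,100),
	 * target 1 covers [100,200). */
	struct tgt { uint64_t begin, len; };
	static struct tgt table[] = { { 0, 100 }, { 100, 100 } };

	static struct tgt *find_target(uint64_t sector)
	{
		for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (sector >= table[i].begin &&
			    sector < table[i].begin + table[i].len)
				return &table[i];
		return NULL;
	}

	int main(void)
	{
		/* A 120-sector discard starting at sector 40 crosses the
		 * target boundary at sector 100 and is issued as two pieces. */
		uint64_t sector = 40, count = 120;

		while (count) {
			struct tgt *ti = find_target(sector);
			uint64_t room, len;

			if (!ti)
				return 1;	/* kernel: -EIO for an invalid target */

			/* min(remaining, distance to end of target), mirroring
			 * max_io_len_target_boundary(). */
			room = ti->begin + ti->len - sector;
			len = count < room ? count : room;

			printf("issue discard: sector %llu, len %llu\n",
			       (unsigned long long)sector, (unsigned long long)len);

			sector += len;
			count -= len;
		}
		return 0;
	}
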
@@ -1211,27 +1292,21 @@ static int __clone_and_map(struct clone_info *ci) | |||
1211 | if (unlikely(bio_empty_barrier(bio))) | 1292 | if (unlikely(bio_empty_barrier(bio))) |
1212 | return __clone_and_map_empty_barrier(ci); | 1293 | return __clone_and_map_empty_barrier(ci); |
1213 | 1294 | ||
1295 | if (unlikely(bio->bi_rw & REQ_DISCARD)) | ||
1296 | return __clone_and_map_discard(ci); | ||
1297 | |||
1214 | ti = dm_table_find_target(ci->map, ci->sector); | 1298 | ti = dm_table_find_target(ci->map, ci->sector); |
1215 | if (!dm_target_is_valid(ti)) | 1299 | if (!dm_target_is_valid(ti)) |
1216 | return -EIO; | 1300 | return -EIO; |
1217 | 1301 | ||
1218 | max = max_io_len(ci->md, ci->sector, ti); | 1302 | max = max_io_len(ci->sector, ti); |
1219 | |||
1220 | /* | ||
1221 | * Allocate a target io object. | ||
1222 | */ | ||
1223 | tio = alloc_tio(ci, ti); | ||
1224 | 1303 | ||
1225 | if (ci->sector_count <= max) { | 1304 | if (ci->sector_count <= max) { |
1226 | /* | 1305 | /* |
1227 | * Optimise for the simple case where we can do all of | 1306 | * Optimise for the simple case where we can do all of |
1228 | * the remaining io with a single clone. | 1307 | * the remaining io with a single clone. |
1229 | */ | 1308 | */ |
1230 | clone = clone_bio(bio, ci->sector, ci->idx, | 1309 | __clone_and_map_simple(ci, ti); |
1231 | bio->bi_vcnt - ci->idx, ci->sector_count, | ||
1232 | ci->md->bs); | ||
1233 | __map_bio(ti, clone, tio); | ||
1234 | ci->sector_count = 0; | ||
1235 | 1310 | ||
1236 | } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { | 1311 | } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { |
1237 | /* | 1312 | /* |
@@ -1252,6 +1327,7 @@ static int __clone_and_map(struct clone_info *ci) | |||
1252 | len += bv_len; | 1327 | len += bv_len; |
1253 | } | 1328 | } |
1254 | 1329 | ||
1330 | tio = alloc_tio(ci, ti); | ||
1255 | clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, | 1331 | clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, |
1256 | ci->md->bs); | 1332 | ci->md->bs); |
1257 | __map_bio(ti, clone, tio); | 1333 | __map_bio(ti, clone, tio); |
@@ -1274,13 +1350,12 @@ static int __clone_and_map(struct clone_info *ci) | |||
1274 | if (!dm_target_is_valid(ti)) | 1350 | if (!dm_target_is_valid(ti)) |
1275 | return -EIO; | 1351 | return -EIO; |
1276 | 1352 | ||
1277 | max = max_io_len(ci->md, ci->sector, ti); | 1353 | max = max_io_len(ci->sector, ti); |
1278 | |||
1279 | tio = alloc_tio(ci, ti); | ||
1280 | } | 1354 | } |
1281 | 1355 | ||
1282 | len = min(remaining, max); | 1356 | len = min(remaining, max); |
1283 | 1357 | ||
1358 | tio = alloc_tio(ci, ti); | ||
1284 | clone = split_bvec(bio, ci->sector, ci->idx, | 1359 | clone = split_bvec(bio, ci->sector, ci->idx, |
1285 | bv->bv_offset + offset, len, | 1360 | bv->bv_offset + offset, len, |
1286 | ci->md->bs); | 1361 | ci->md->bs); |
@@ -1362,7 +1437,7 @@ static int dm_merge_bvec(struct request_queue *q, | |||
1362 | /* | 1437 | /* |
1363 | * Find maximum amount of I/O that won't need splitting | 1438 | * Find maximum amount of I/O that won't need splitting |
1364 | */ | 1439 | */ |
1365 | max_sectors = min(max_io_len(md, bvm->bi_sector, ti), | 1440 | max_sectors = min(max_io_len(bvm->bi_sector, ti), |
1366 | (sector_t) BIO_MAX_SECTORS); | 1441 | (sector_t) BIO_MAX_SECTORS); |
1367 | max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; | 1442 | max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; |
1368 | if (max_size < 0) | 1443 | if (max_size < 0) |
@@ -1845,6 +1920,28 @@ static const struct block_device_operations dm_blk_dops; | |||
1845 | static void dm_wq_work(struct work_struct *work); | 1920 | static void dm_wq_work(struct work_struct *work); |
1846 | static void dm_rq_barrier_work(struct work_struct *work); | 1921 | static void dm_rq_barrier_work(struct work_struct *work); |
1847 | 1922 | ||
1923 | static void dm_init_md_queue(struct mapped_device *md) | ||
1924 | { | ||
1925 | /* | ||
1926 | * Request-based dm devices cannot be stacked on top of bio-based dm | ||
1927 | * devices. The type of this dm device has not been decided yet. | ||
1928 | * The type is decided at the first table loading time. | ||
1929 | * To prevent problematic device stacking, clear the queue flag | ||
1930 | * for request stacking support until then. | ||
1931 | * | ||
1932 | * This queue is new, so no concurrency on the queue_flags. | ||
1933 | */ | ||
1934 | queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); | ||
1935 | |||
1936 | md->queue->queuedata = md; | ||
1937 | md->queue->backing_dev_info.congested_fn = dm_any_congested; | ||
1938 | md->queue->backing_dev_info.congested_data = md; | ||
1939 | blk_queue_make_request(md->queue, dm_request); | ||
1940 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); | ||
1941 | md->queue->unplug_fn = dm_unplug_all; | ||
1942 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); | ||
1943 | } | ||
1944 | |||
1848 | /* | 1945 | /* |
1849 | * Allocate and initialise a blank device with a given minor. | 1946 | * Allocate and initialise a blank device with a given minor. |
1850 | */ | 1947 | */ |
@@ -1870,8 +1967,10 @@ static struct mapped_device *alloc_dev(int minor) | |||
1870 | if (r < 0) | 1967 | if (r < 0) |
1871 | goto bad_minor; | 1968 | goto bad_minor; |
1872 | 1969 | ||
1970 | md->type = DM_TYPE_NONE; | ||
1873 | init_rwsem(&md->io_lock); | 1971 | init_rwsem(&md->io_lock); |
1874 | mutex_init(&md->suspend_lock); | 1972 | mutex_init(&md->suspend_lock); |
1973 | mutex_init(&md->type_lock); | ||
1875 | spin_lock_init(&md->deferred_lock); | 1974 | spin_lock_init(&md->deferred_lock); |
1876 | spin_lock_init(&md->barrier_error_lock); | 1975 | spin_lock_init(&md->barrier_error_lock); |
1877 | rwlock_init(&md->map_lock); | 1976 | rwlock_init(&md->map_lock); |
@@ -1882,33 +1981,11 @@ static struct mapped_device *alloc_dev(int minor) | |||
1882 | INIT_LIST_HEAD(&md->uevent_list); | 1981 | INIT_LIST_HEAD(&md->uevent_list); |
1883 | spin_lock_init(&md->uevent_lock); | 1982 | spin_lock_init(&md->uevent_lock); |
1884 | 1983 | ||
1885 | md->queue = blk_init_queue(dm_request_fn, NULL); | 1984 | md->queue = blk_alloc_queue(GFP_KERNEL); |
1886 | if (!md->queue) | 1985 | if (!md->queue) |
1887 | goto bad_queue; | 1986 | goto bad_queue; |
1888 | 1987 | ||
1889 | /* | 1988 | dm_init_md_queue(md); |
1890 | * Request-based dm devices cannot be stacked on top of bio-based dm | ||
1891 | * devices. The type of this dm device has not been decided yet, | ||
1892 | * although we initialized the queue using blk_init_queue(). | ||
1893 | * The type is decided at the first table loading time. | ||
1894 | * To prevent problematic device stacking, clear the queue flag | ||
1895 | * for request stacking support until then. | ||
1896 | * | ||
1897 | * This queue is new, so no concurrency on the queue_flags. | ||
1898 | */ | ||
1899 | queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); | ||
1900 | md->saved_make_request_fn = md->queue->make_request_fn; | ||
1901 | md->queue->queuedata = md; | ||
1902 | md->queue->backing_dev_info.congested_fn = dm_any_congested; | ||
1903 | md->queue->backing_dev_info.congested_data = md; | ||
1904 | blk_queue_make_request(md->queue, dm_request); | ||
1905 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); | ||
1906 | md->queue->unplug_fn = dm_unplug_all; | ||
1907 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); | ||
1908 | blk_queue_softirq_done(md->queue, dm_softirq_done); | ||
1909 | blk_queue_prep_rq(md->queue, dm_prep_fn); | ||
1910 | blk_queue_lld_busy(md->queue, dm_lld_busy); | ||
1911 | blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH); | ||
1912 | 1989 | ||
1913 | md->disk = alloc_disk(1); | 1990 | md->disk = alloc_disk(1); |
1914 | if (!md->disk) | 1991 | if (!md->disk) |
@@ -2123,6 +2200,72 @@ int dm_create(int minor, struct mapped_device **result) | |||
2123 | return 0; | 2200 | return 0; |
2124 | } | 2201 | } |
2125 | 2202 | ||
2203 | /* | ||
2204 | * Functions to manage md->type. | ||
2205 | * All are required to hold md->type_lock. | ||
2206 | */ | ||
2207 | void dm_lock_md_type(struct mapped_device *md) | ||
2208 | { | ||
2209 | mutex_lock(&md->type_lock); | ||
2210 | } | ||
2211 | |||
2212 | void dm_unlock_md_type(struct mapped_device *md) | ||
2213 | { | ||
2214 | mutex_unlock(&md->type_lock); | ||
2215 | } | ||
2216 | |||
2217 | void dm_set_md_type(struct mapped_device *md, unsigned type) | ||
2218 | { | ||
2219 | md->type = type; | ||
2220 | } | ||
2221 | |||
2222 | unsigned dm_get_md_type(struct mapped_device *md) | ||
2223 | { | ||
2224 | return md->type; | ||
2225 | } | ||
2226 | |||
2227 | /* | ||
2228 | * Fully initialize a request-based queue (->elevator, ->request_fn, etc). | ||
2229 | */ | ||
2230 | static int dm_init_request_based_queue(struct mapped_device *md) | ||
2231 | { | ||
2232 | struct request_queue *q = NULL; | ||
2233 | |||
2234 | if (md->queue->elevator) | ||
2235 | return 1; | ||
2236 | |||
2237 | /* Fully initialize the queue */ | ||
2238 | q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); | ||
2239 | if (!q) | ||
2240 | return 0; | ||
2241 | |||
2242 | md->queue = q; | ||
2243 | md->saved_make_request_fn = md->queue->make_request_fn; | ||
2244 | dm_init_md_queue(md); | ||
2245 | blk_queue_softirq_done(md->queue, dm_softirq_done); | ||
2246 | blk_queue_prep_rq(md->queue, dm_prep_fn); | ||
2247 | blk_queue_lld_busy(md->queue, dm_lld_busy); | ||
2248 | blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH); | ||
2249 | |||
2250 | elv_register_queue(md->queue); | ||
2251 | |||
2252 | return 1; | ||
2253 | } | ||
2254 | |||
2255 | /* | ||
2256 | * Setup the DM device's queue based on md's type | ||
2257 | */ | ||
2258 | int dm_setup_md_queue(struct mapped_device *md) | ||
2259 | { | ||
2260 | if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) && | ||
2261 | !dm_init_request_based_queue(md)) { | ||
2262 | DMWARN("Cannot initialize queue for request-based mapped device"); | ||
2263 | return -EINVAL; | ||
2264 | } | ||
2265 | |||
2266 | return 0; | ||
2267 | } | ||
2268 | |||
2126 | static struct mapped_device *dm_find_md(dev_t dev) | 2269 | static struct mapped_device *dm_find_md(dev_t dev) |
2127 | { | 2270 | { |
2128 | struct mapped_device *md; | 2271 | struct mapped_device *md; |
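
The type_lock accessors and dm_setup_md_queue() let the device's bio-based or request-based nature be latched at the first table load and enforced afterwards. A hedged sketch of the intended calling pattern; the ioctl-side caller is not part of this section, so the control flow shown is an assumption:

	int r = 0;

	dm_lock_md_type(md);

	if (dm_get_md_type(md) == DM_TYPE_NONE)
		/* First table load: latch the type dm_table_set_type() chose. */
		dm_set_md_type(md, dm_table_get_type(t));
	else if (dm_get_md_type(md) != dm_table_get_type(t))
		r = -EINVAL;	/* the type is immutable once set */

	if (!r)
		/* For DM_TYPE_REQUEST_BASED this completes queue setup
		 * (elevator, request_fn) via dm_init_request_based_queue(). */
		r = dm_setup_md_queue(md);

	dm_unlock_md_type(md);
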
@@ -2136,6 +2279,7 @@ static struct mapped_device *dm_find_md(dev_t dev) | |||
2136 | md = idr_find(&_minor_idr, minor); | 2279 | md = idr_find(&_minor_idr, minor); |
2137 | if (md && (md == MINOR_ALLOCED || | 2280 | if (md && (md == MINOR_ALLOCED || |
2138 | (MINOR(disk_devt(dm_disk(md))) != minor) || | 2281 | (MINOR(disk_devt(dm_disk(md))) != minor) || |
2282 | dm_deleting_md(md) || | ||
2139 | test_bit(DMF_FREEING, &md->flags))) { | 2283 | test_bit(DMF_FREEING, &md->flags))) { |
2140 | md = NULL; | 2284 | md = NULL; |
2141 | goto out; | 2285 | goto out; |
@@ -2170,6 +2314,7 @@ void dm_set_mdptr(struct mapped_device *md, void *ptr) | |||
2170 | void dm_get(struct mapped_device *md) | 2314 | void dm_get(struct mapped_device *md) |
2171 | { | 2315 | { |
2172 | atomic_inc(&md->holders); | 2316 | atomic_inc(&md->holders); |
2317 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); | ||
2173 | } | 2318 | } |
2174 | 2319 | ||
2175 | const char *dm_device_name(struct mapped_device *md) | 2320 | const char *dm_device_name(struct mapped_device *md) |
@@ -2178,27 +2323,55 @@ const char *dm_device_name(struct mapped_device *md) | |||
2178 | } | 2323 | } |
2179 | EXPORT_SYMBOL_GPL(dm_device_name); | 2324 | EXPORT_SYMBOL_GPL(dm_device_name); |
2180 | 2325 | ||
2181 | void dm_put(struct mapped_device *md) | 2326 | static void __dm_destroy(struct mapped_device *md, bool wait) |
2182 | { | 2327 | { |
2183 | struct dm_table *map; | 2328 | struct dm_table *map; |
2184 | 2329 | ||
2185 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); | 2330 | might_sleep(); |
2186 | 2331 | ||
2187 | if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { | 2332 | spin_lock(&_minor_lock); |
2188 | map = dm_get_live_table(md); | 2333 | map = dm_get_live_table(md); |
2189 | idr_replace(&_minor_idr, MINOR_ALLOCED, | 2334 | idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); |
2190 | MINOR(disk_devt(dm_disk(md)))); | 2335 | set_bit(DMF_FREEING, &md->flags); |
2191 | set_bit(DMF_FREEING, &md->flags); | 2336 | spin_unlock(&_minor_lock); |
2192 | spin_unlock(&_minor_lock); | 2337 | |
2193 | if (!dm_suspended_md(md)) { | 2338 | if (!dm_suspended_md(md)) { |
2194 | dm_table_presuspend_targets(map); | 2339 | dm_table_presuspend_targets(map); |
2195 | dm_table_postsuspend_targets(map); | 2340 | dm_table_postsuspend_targets(map); |
2196 | } | ||
2197 | dm_sysfs_exit(md); | ||
2198 | dm_table_put(map); | ||
2199 | dm_table_destroy(__unbind(md)); | ||
2200 | free_dev(md); | ||
2201 | } | 2341 | } |
2342 | |||
2343 | /* | ||
2344 | * Rarely, there may be I/O requests still in flight that have yet | ||
2345 | * to complete. Wait for all references to disappear. | ||
2346 | * No one should increment the reference count of the mapped_device, | ||
2347 | * after the mapped_device state becomes DMF_FREEING. | ||
2348 | */ | ||
2349 | if (wait) | ||
2350 | while (atomic_read(&md->holders)) | ||
2351 | msleep(1); | ||
2352 | else if (atomic_read(&md->holders)) | ||
2353 | DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", | ||
2354 | dm_device_name(md), atomic_read(&md->holders)); | ||
2355 | |||
2356 | dm_sysfs_exit(md); | ||
2357 | dm_table_put(map); | ||
2358 | dm_table_destroy(__unbind(md)); | ||
2359 | free_dev(md); | ||
2360 | } | ||
2361 | |||
2362 | void dm_destroy(struct mapped_device *md) | ||
2363 | { | ||
2364 | __dm_destroy(md, true); | ||
2365 | } | ||
2366 | |||
2367 | void dm_destroy_immediate(struct mapped_device *md) | ||
2368 | { | ||
2369 | __dm_destroy(md, false); | ||
2370 | } | ||
2371 | |||
2372 | void dm_put(struct mapped_device *md) | ||
2373 | { | ||
2374 | atomic_dec(&md->holders); | ||
2202 | } | 2375 | } |
2203 | EXPORT_SYMBOL_GPL(dm_put); | 2376 | EXPORT_SYMBOL_GPL(dm_put); |
2204 | 2377 | ||
@@ -2253,7 +2426,12 @@ static void process_barrier(struct mapped_device *md, struct bio *bio) | |||
2253 | 2426 | ||
2254 | if (!bio_empty_barrier(bio)) { | 2427 | if (!bio_empty_barrier(bio)) { |
2255 | __split_and_process_bio(md, bio); | 2428 | __split_and_process_bio(md, bio); |
2256 | dm_flush(md); | 2429 | /* |
2430 | * If the request isn't supported, don't waste time with | ||
2431 | * the second flush. | ||
2432 | */ | ||
2433 | if (md->barrier_error != -EOPNOTSUPP) | ||
2434 | dm_flush(md); | ||
2257 | } | 2435 | } |
2258 | 2436 | ||
2259 | if (md->barrier_error != DM_ENDIO_REQUEUE) | 2437 | if (md->barrier_error != DM_ENDIO_REQUEUE) |
@@ -2310,11 +2488,11 @@ static void dm_queue_flush(struct mapped_device *md) | |||
2310 | queue_work(md->wq, &md->work); | 2488 | queue_work(md->wq, &md->work); |
2311 | } | 2489 | } |
2312 | 2490 | ||
2313 | static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr) | 2491 | static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr) |
2314 | { | 2492 | { |
2315 | struct dm_rq_target_io *tio = clone->end_io_data; | 2493 | struct dm_rq_target_io *tio = clone->end_io_data; |
2316 | 2494 | ||
2317 | tio->info.flush_request = flush_nr; | 2495 | tio->info.target_request_nr = request_nr; |
2318 | } | 2496 | } |
2319 | 2497 | ||
2320 | /* Issue barrier requests to targets and wait for their completion. */ | 2498 | /* Issue barrier requests to targets and wait for their completion. */ |
@@ -2332,7 +2510,7 @@ static int dm_rq_barrier(struct mapped_device *md) | |||
2332 | ti = dm_table_get_target(map, i); | 2510 | ti = dm_table_get_target(map, i); |
2333 | for (j = 0; j < ti->num_flush_requests; j++) { | 2511 | for (j = 0; j < ti->num_flush_requests; j++) { |
2334 | clone = clone_rq(md->flush_request, md, GFP_NOIO); | 2512 | clone = clone_rq(md->flush_request, md, GFP_NOIO); |
2335 | dm_rq_set_flush_nr(clone, j); | 2513 | dm_rq_set_target_request_nr(clone, j); |
2336 | atomic_inc(&md->pending[rq_data_dir(clone)]); | 2514 | atomic_inc(&md->pending[rq_data_dir(clone)]); |
2337 | map_request(ti, clone, md); | 2515 | map_request(ti, clone, md); |
2338 | } | 2516 | } |
@@ -2398,13 +2576,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) | |||
2398 | goto out; | 2576 | goto out; |
2399 | } | 2577 | } |
2400 | 2578 | ||
2401 | /* cannot change the device type, once a table is bound */ | ||
2402 | if (md->map && | ||
2403 | (dm_table_get_type(md->map) != dm_table_get_type(table))) { | ||
2404 | DMWARN("can't change the device type after a table is bound"); | ||
2405 | goto out; | ||
2406 | } | ||
2407 | |||
2408 | map = __bind(md, table, &limits); | 2579 | map = __bind(md, table, &limits); |
2409 | 2580 | ||
2410 | out: | 2581 | out: |
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index bad1724d4869..0c2dd5f4af76 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -59,13 +59,20 @@ void dm_table_postsuspend_targets(struct dm_table *t); | |||
59 | int dm_table_resume_targets(struct dm_table *t); | 59 | int dm_table_resume_targets(struct dm_table *t); |
60 | int dm_table_any_congested(struct dm_table *t, int bdi_bits); | 60 | int dm_table_any_congested(struct dm_table *t, int bdi_bits); |
61 | int dm_table_any_busy_target(struct dm_table *t); | 61 | int dm_table_any_busy_target(struct dm_table *t); |
62 | int dm_table_set_type(struct dm_table *t); | ||
63 | unsigned dm_table_get_type(struct dm_table *t); | 62 | unsigned dm_table_get_type(struct dm_table *t); |
64 | bool dm_table_request_based(struct dm_table *t); | 63 | bool dm_table_request_based(struct dm_table *t); |
64 | bool dm_table_supports_discards(struct dm_table *t); | ||
65 | int dm_table_alloc_md_mempools(struct dm_table *t); | 65 | int dm_table_alloc_md_mempools(struct dm_table *t); |
66 | void dm_table_free_md_mempools(struct dm_table *t); | 66 | void dm_table_free_md_mempools(struct dm_table *t); |
67 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); | 67 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); |
68 | 68 | ||
69 | void dm_lock_md_type(struct mapped_device *md); | ||
70 | void dm_unlock_md_type(struct mapped_device *md); | ||
71 | void dm_set_md_type(struct mapped_device *md, unsigned type); | ||
72 | unsigned dm_get_md_type(struct mapped_device *md); | ||
73 | |||
74 | int dm_setup_md_queue(struct mapped_device *md); | ||
75 | |||
69 | /* | 76 | /* |
70 | * To check the return value from dm_table_find_target(). | 77 | * To check the return value from dm_table_find_target(). |
71 | */ | 78 | */ |
@@ -122,6 +129,11 @@ void dm_linear_exit(void); | |||
122 | int dm_stripe_init(void); | 129 | int dm_stripe_init(void); |
123 | void dm_stripe_exit(void); | 130 | void dm_stripe_exit(void); |
124 | 131 | ||
132 | /* | ||
133 | * mapped_device operations | ||
134 | */ | ||
135 | void dm_destroy(struct mapped_device *md); | ||
136 | void dm_destroy_immediate(struct mapped_device *md); | ||
125 | int dm_open_count(struct mapped_device *md); | 137 | int dm_open_count(struct mapped_device *md); |
126 | int dm_lock_for_deletion(struct mapped_device *md); | 138 | int dm_lock_for_deletion(struct mapped_device *md); |
127 | 139 | ||
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 1381cd97b4ed..2970022faa63 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -22,7 +22,7 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; | |||
22 | union map_info { | 22 | union map_info { |
23 | void *ptr; | 23 | void *ptr; |
24 | unsigned long long ll; | 24 | unsigned long long ll; |
25 | unsigned flush_request; | 25 | unsigned target_request_nr; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | /* | 28 | /* |
@@ -174,12 +174,18 @@ struct dm_target { | |||
174 | * A number of zero-length barrier requests that will be submitted | 174 | * A number of zero-length barrier requests that will be submitted |
175 | * to the target for the purpose of flushing cache. | 175 | * to the target for the purpose of flushing cache. |
176 | * | 176 | * |
177 | * The request number will be placed in union map_info->flush_request. | 177 | * The request number will be placed in union map_info->target_request_nr. |
178 | * It is the responsibility of the target driver to remap these requests | 178 | * It is the responsibility of the target driver to remap these requests |
179 | * to the real underlying devices. | 179 | * to the real underlying devices. |
180 | */ | 180 | */ |
181 | unsigned num_flush_requests; | 181 | unsigned num_flush_requests; |
182 | 182 | ||
183 | /* | ||
184 | * The number of discard requests that will be submitted to the | ||
185 | * target. map_info->request_nr is used just like num_flush_requests. | ||
186 | */ | ||
187 | unsigned num_discard_requests; | ||
188 | |||
183 | /* target specific data */ | 189 | /* target specific data */ |
184 | void *private; | 190 | void *private; |
185 | 191 | ||
@@ -392,6 +398,12 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); | |||
392 | #define dm_array_too_big(fixed, obj, num) \ | 398 | #define dm_array_too_big(fixed, obj, num) \ |
393 | ((num) > (UINT_MAX - (fixed)) / (obj)) | 399 | ((num) > (UINT_MAX - (fixed)) / (obj)) |
394 | 400 | ||
401 | /* | ||
402 | * Sector offset taken relative to the start of the target instead of | ||
403 | * relative to the start of the device. | ||
404 | */ | ||
405 | #define dm_target_offset(ti, sector) ((sector) - (ti)->begin) | ||
406 | |||
395 | static inline sector_t to_sector(unsigned long n) | 407 | static inline sector_t to_sector(unsigned long n) |
396 | { | 408 | { |
397 | return (n >> SECTOR_SHIFT); | 409 | return (n >> SECTOR_SHIFT); |
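
As a worked example of dm_target_offset(), with values assumed for illustration:

	/* A target with ti->begin == 384: absolute sector 1000 sits 616
	 * sectors into the target. */
	sector_t offset = dm_target_offset(ti, 1000);	/* 1000 - 384 == 616 */
	sector_t room = ti->len - offset;	/* as in max_io_len_target_boundary() */
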
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index 2c445e113790..49eab360d5d4 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | 12 | ||
13 | #define DM_DIR "mapper" /* Slashes not supported */ | 13 | #define DM_DIR "mapper" /* Slashes not supported */ |
14 | #define DM_CONTROL_NODE "control" | ||
14 | #define DM_MAX_TYPE_NAME 16 | 15 | #define DM_MAX_TYPE_NAME 16 |
15 | #define DM_NAME_LEN 128 | 16 | #define DM_NAME_LEN 128 |
16 | #define DM_UUID_LEN 129 | 17 | #define DM_UUID_LEN 129 |
@@ -266,9 +267,9 @@ enum { | |||
266 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) | 267 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) |
267 | 268 | ||
268 | #define DM_VERSION_MAJOR 4 | 269 | #define DM_VERSION_MAJOR 4 |
269 | #define DM_VERSION_MINOR 17 | 270 | #define DM_VERSION_MINOR 18 |
270 | #define DM_VERSION_PATCHLEVEL 0 | 271 | #define DM_VERSION_PATCHLEVEL 0 |
271 | #define DM_VERSION_EXTRA "-ioctl (2010-03-05)" | 272 | #define DM_VERSION_EXTRA "-ioctl (2010-06-29)" |
272 | 273 | ||
273 | /* Status bits */ | 274 | /* Status bits */ |
274 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ | 275 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ |
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index f6c9b7dcb9fd..bafffc737903 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h | |||
@@ -38,6 +38,7 @@ | |||
38 | #define KVM_MINOR 232 | 38 | #define KVM_MINOR 232 |
39 | #define BTRFS_MINOR 234 | 39 | #define BTRFS_MINOR 234 |
40 | #define AUTOFS_MINOR 235 | 40 | #define AUTOFS_MINOR 235 |
41 | #define MAPPER_CTRL_MINOR 236 | ||
41 | #define MISC_DYNAMIC_MINOR 255 | 42 | #define MISC_DYNAMIC_MINOR 255 |
42 | 43 | ||
43 | struct device; | 44 | struct device; |