Diffstat (limited to 'crypto')
 crypto/lrw.c | 280 +++++++++--------------------------------------------
 1 file changed, 51 insertions(+), 229 deletions(-)
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 7377b5b486fd..6fcf0d431185 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -29,8 +29,6 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
-#define LRW_BUFFER_SIZE 128u
-
 #define LRW_BLOCK_SIZE 16
 
 struct priv {
@@ -56,19 +54,7 @@ struct priv {
 };
 
 struct rctx {
-	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
-
 	be128 t;
-
-	be128 *ext;
-
-	struct scatterlist srcbuf[2];
-	struct scatterlist dstbuf[2];
-	struct scatterlist *src;
-	struct scatterlist *dst;
-
-	unsigned int left;
-
 	struct skcipher_request subreq;
 };
 
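The request context above shrinks to just the initial tweak and the child subrequest; everything else (the bounce buffer, the chained scatterlists, the "left" progress counter) can go because the rewritten code below processes the whole request in place. A hedged sketch of how a template's init_tfm() typically reserves this per-request memory; the real init_tfm() body is outside this diff, and init_tfm_sketch is a hypothetical name:

	static int init_tfm_sketch(struct crypto_skcipher *tfm)
	{
		struct priv *ctx = crypto_skcipher_ctx(tfm);

		/* per-request memory: the child's request data plus our rctx */
		crypto_skcipher_set_reqsize(tfm,
					    crypto_skcipher_reqsize(ctx->child) +
					    sizeof(struct rctx));
		return 0;
	}

skcipher_request_ctx() then hands xor_tweak() and init_crypt() a pointer into that reservation, so nothing is allocated per request.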
@@ -152,86 +138,31 @@ static int next_index(u32 *counter)
 	return 127;
 }
 
-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the next_index() calls again.
+ */
+static int xor_tweak(struct skcipher_request *req, bool second_pass)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
-	be128 *buf = rctx->ext ?: rctx->buf;
-	struct skcipher_request *subreq;
 	const int bs = LRW_BLOCK_SIZE;
-	struct skcipher_walk w;
-	struct scatterlist *sg;
-	unsigned offset;
-	int err;
-
-	subreq = &rctx->subreq;
-	err = skcipher_walk_virt(&w, subreq, false);
-
-	while (w.nbytes) {
-		unsigned int avail = w.nbytes;
-		be128 *wdst;
-
-		wdst = w.dst.virt.addr;
-
-		do {
-			be128_xor(wdst, buf++, wdst);
-			wdst++;
-		} while ((avail -= bs) >= bs);
-
-		err = skcipher_walk_done(&w, avail);
-	}
-
-	rctx->left -= subreq->cryptlen;
-
-	if (err || !rctx->left)
-		goto out;
-
-	rctx->dst = rctx->dstbuf;
-
-	scatterwalk_done(&w.out, 0, 1);
-	sg = w.out.sg;
-	offset = w.out.offset;
-
-	if (rctx->dst != sg) {
-		rctx->dst[0] = *sg;
-		sg_unmark_end(rctx->dst);
-		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
-	}
-	rctx->dst[0].length -= offset - sg->offset;
-	rctx->dst[0].offset = offset;
-
-out:
-	return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct rctx *rctx = skcipher_request_ctx(req);
 	struct priv *ctx = crypto_skcipher_ctx(tfm);
-	be128 *buf = rctx->ext ?: rctx->buf;
-	struct skcipher_request *subreq;
-	const int bs = LRW_BLOCK_SIZE;
+	struct rctx *rctx = skcipher_request_ctx(req);
+	be128 t = rctx->t;
 	struct skcipher_walk w;
-	struct scatterlist *sg;
-	unsigned cryptlen;
-	unsigned offset;
-	bool more;
 	__be32 *iv;
 	u32 counter[4];
 	int err;
 
-	subreq = &rctx->subreq;
-	skcipher_request_set_tfm(subreq, tfm);
-
-	cryptlen = subreq->cryptlen;
-	more = rctx->left > cryptlen;
-	if (!more)
-		cryptlen = rctx->left;
-
-	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
-				   cryptlen, req->iv);
+	if (second_pass) {
+		req = &rctx->subreq;
+		/* set to our TFM to enforce correct alignment: */
+		skcipher_request_set_tfm(req, tfm);
+	}
 
-	err = skcipher_walk_virt(&w, subreq, false);
+	err = skcipher_walk_virt(&w, req, false);
 	iv = (__be32 *)w.iv;
 
 	counter[0] = be32_to_cpu(iv[3]);
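The new xor_tweak() makes the recompute-don't-store tradeoff explicit: the tweak stream is regenerated on the second pass instead of being parked in a buffer between passes. A self-contained, hypothetical user-space sketch of the same two-pass pattern, where next_tweak() is a toy stand-in for the GF(2^128) update and the ECB step would run between the two calls:

	#include <assert.h>
	#include <stdint.h>
	#include <stddef.h>

	/* toy stand-in for the tweak update; any cheap deterministic step works */
	static uint64_t next_tweak(uint64_t t)
	{
		return t * 6364136223846793005ULL + 1442695040888963407ULL;
	}

	static void xor_tweak_demo(uint64_t *blocks, size_t n, uint64_t t0)
	{
		for (size_t i = 0; i < n; i++) {
			blocks[i] ^= t0;	/* mask this block */
			t0 = next_tweak(t0);	/* recompute, don't store */
		}
	}

	int main(void)
	{
		uint64_t buf[4] = { 1, 2, 3, 4 };

		xor_tweak_demo(buf, 4, 42);	/* first pass, before ECB */
		/* ... ECB encryption of buf would happen here ... */
		xor_tweak_demo(buf, 4, 42);	/* second pass, same masks again */
		assert(buf[0] == 1 && buf[3] == 4);	/* masks cancel exactly */
		return 0;
	}

With no ECB step in between, the two passes cancel, which is what lets the real code drop the LRW_BUFFER_SIZE buffer: the mask sequence is a pure function of the IV-derived starting tweak.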
@@ -248,16 +179,14 @@ static int pre_crypt(struct skcipher_request *req)
 		wdst = w.dst.virt.addr;
 
 		do {
-			*buf++ = rctx->t;
-			be128_xor(wdst++, &rctx->t, wsrc++);
+			be128_xor(wdst++, &t, wsrc++);
 
 			/* T <- I*Key2, using the optimization
 			 * discussed in the specification */
-			be128_xor(&rctx->t, &rctx->t,
-				  &ctx->mulinc[next_index(counter)]);
+			be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
 		} while ((avail -= bs) >= bs);
 
-		if (w.nbytes == w.total) {
+		if (second_pass && w.nbytes == w.total) {
 			iv[0] = cpu_to_be32(counter[3]);
 			iv[1] = cpu_to_be32(counter[2]);
 			iv[2] = cpu_to_be32(counter[1]);
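The single-XOR update in this hunk leans on the XOR-linearity of multiplication by Key2: when the block index I is incremented, I and I+1 differ in a run of trailing ones plus one more bit, so (I+1)*Key2 equals I*Key2 XOR mask*Key2, and next_index() selects the precomputed mask product from ctx->mulinc[]. A hypothetical 64-bit demonstration of that identity, with a carry-less multiply standing in for the GF(2^128) product; the mulinc indexing is my reading of the table layout, not something this hunk shows:

	#include <assert.h>
	#include <stdint.h>

	/* carry-less multiply by a constant: XOR-linear, like I*Key2 */
	static uint64_t clmul(uint64_t x, uint64_t k)
	{
		uint64_t r = 0;

		for (; k; k >>= 1, x <<= 1)
			if (k & 1)
				r ^= x;
		return r;
	}

	int main(void)
	{
		const uint64_t key2 = 0x1b5;	/* arbitrary toy constant */
		uint64_t i = 0, t = clmul(i, key2);

		for (int step = 0; step < 100000; step++) {
			/* length of the trailing-ones run; stays small here */
			int k = __builtin_ctzll(~i);
			uint64_t mask = (1ULL << (k + 1)) - 1;

			t ^= clmul(mask, key2);		/* the "mulinc[k]" entry */
			i++;
			assert(t == clmul(i, key2));	/* incremental == direct */
		}
		return 0;
	}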
@@ -267,175 +196,68 @@ static int pre_crypt(struct skcipher_request *req)
 		err = skcipher_walk_done(&w, avail);
 	}
 
-	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
-				   cryptlen, NULL);
-
-	if (err || !more)
-		goto out;
-
-	rctx->src = rctx->srcbuf;
-
-	scatterwalk_done(&w.in, 0, 1);
-	sg = w.in.sg;
-	offset = w.in.offset;
-
-	if (rctx->src != sg) {
-		rctx->src[0] = *sg;
-		sg_unmark_end(rctx->src);
-		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
-	}
-	rctx->src[0].length -= offset - sg->offset;
-	rctx->src[0].offset = offset;
-
-out:
 	return err;
 }
 
-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int xor_tweak_pre(struct skcipher_request *req)
 {
-	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
-	gfp_t gfp;
-
-	subreq = &rctx->subreq;
-	skcipher_request_set_callback(subreq, req->base.flags, done, req);
-
-	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
-							   GFP_ATOMIC;
-	rctx->ext = NULL;
-
-	subreq->cryptlen = LRW_BUFFER_SIZE;
-	if (req->cryptlen > LRW_BUFFER_SIZE) {
-		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
-
-		rctx->ext = kmalloc(n, gfp);
-		if (rctx->ext)
-			subreq->cryptlen = n;
-	}
-
-	rctx->src = req->src;
-	rctx->dst = req->dst;
-	rctx->left = req->cryptlen;
-
-	/* calculate first value of T */
-	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
-
-	/* T <- I*Key2 */
-	gf128mul_64k_bbe(&rctx->t, ctx->table);
-
-	return 0;
+	return xor_tweak(req, false);
 }
 
-static void exit_crypt(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
-
-	rctx->left = 0;
-
-	if (rctx->ext)
-		kzfree(rctx->ext);
+	return xor_tweak(req, true);
 }
 
-static int do_encrypt(struct skcipher_request *req, int err)
-{
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
-
-	subreq = &rctx->subreq;
-
-	while (!err && rctx->left) {
-		err = pre_crypt(req) ?:
-		      crypto_skcipher_encrypt(subreq) ?:
-		      post_crypt(req);
-
-		if (err == -EINPROGRESS || err == -EBUSY)
-			return err;
-	}
-
-	exit_crypt(req);
-	return err;
-}
-
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void crypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
-	struct skcipher_request *subreq;
-	struct rctx *rctx;
-
-	rctx = skcipher_request_ctx(req);
-
-	if (err == -EINPROGRESS) {
-		if (rctx->left != req->cryptlen)
-			return;
-		goto out;
-	}
 
-	subreq = &rctx->subreq;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+	if (!err)
+		err = xor_tweak_post(req);
 
-	err = do_encrypt(req, err ?: post_crypt(req));
-	if (rctx->left)
-		return;
-
-out:
 	skcipher_request_complete(req, err);
 }
 
-static int encrypt(struct skcipher_request *req)
-{
-	return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
+static void init_crypt(struct skcipher_request *req)
 {
+	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
+	struct skcipher_request *subreq = &rctx->subreq;
 
-	subreq = &rctx->subreq;
-
-	while (!err && rctx->left) {
-		err = pre_crypt(req) ?:
-		      crypto_skcipher_decrypt(subreq) ?:
-		      post_crypt(req);
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
+	skcipher_request_set_crypt(subreq, req->dst, req->dst,
+				   req->cryptlen, req->iv);
 
-		if (err == -EINPROGRESS || err == -EBUSY)
-			return err;
-	}
+	/* calculate first value of T */
+	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
 
-	exit_crypt(req);
-	return err;
+	/* T <- I*Key2 */
+	gf128mul_64k_bbe(&rctx->t, ctx->table);
 }
 
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static int encrypt(struct skcipher_request *req)
 {
-	struct skcipher_request *req = areq->data;
-	struct skcipher_request *subreq;
-	struct rctx *rctx;
-
-	rctx = skcipher_request_ctx(req);
-
-	if (err == -EINPROGRESS) {
-		if (rctx->left != req->cryptlen)
-			return;
-		goto out;
-	}
-
-	subreq = &rctx->subreq;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-	err = do_decrypt(req, err ?: post_crypt(req));
-	if (rctx->left)
-		return;
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
 
-out:
-	skcipher_request_complete(req, err);
+	init_crypt(req);
+	return xor_tweak_pre(req) ?:
+		crypto_skcipher_encrypt(subreq) ?:
+		xor_tweak_post(req);
 }
 
 static int decrypt(struct skcipher_request *req)
 {
-	return do_decrypt(req, init_crypt(req, decrypt_done));
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+
+	init_crypt(req);
+	return xor_tweak_pre(req) ?:
+		crypto_skcipher_decrypt(subreq) ?:
+		xor_tweak_post(req);
 }
 
 static int init_tfm(struct crypto_skcipher *tfm)
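The rewritten encrypt() and decrypt() chain their three steps with GCC's binary conditional extension: a ?: b evaluates to a when a is non-zero and only evaluates b otherwise, so the chain returns the first non-zero code. That includes -EINPROGRESS from an asynchronous child cipher, in which case crypt_done() runs the second tweak pass on completion instead. A small stand-alone illustration of the short-circuiting; step() and the error value are made up for the demo:

	#include <stdio.h>

	static int step(const char *name, int err)
	{
		printf("%s -> %d\n", name, err);
		return err;
	}

	int main(void)
	{
		int err = step("xor_tweak_pre", 0) ?:
			  step("ecb", -5) ?:
			  step("xor_tweak_post", 0);

		/* prints only the first two steps; "xor_tweak_post" is skipped
		 * because "ecb" already produced a non-zero code: err == -5 */
		printf("err = %d\n", err);
		return 0;
	}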
