author      Herbert Xu <herbert@gondor.apana.org.au>  2015-08-20 03:21:45 -0400
committer   Herbert Xu <herbert@gondor.apana.org.au>  2015-08-21 10:21:19 -0400
commit      7a7ffe65c8c5fbf272b132d8980b2511d5e5fc98 (patch)
tree        23ca20c505ed5638a806d1d8f6ce9ff063e70bf8
parent      8f183751a8604be5aaf0ad6dedac4890bb6fa0d5 (diff)
crypto: skcipher - Add top-level skcipher interface
This patch introduces the crypto skcipher interface which aims
to replace both blkcipher and ablkcipher.

It's very similar to the existing ablkcipher interface.  The
main difference is the removal of the givcrypt interface.  In
order to make the transition easier for blkcipher users, there
is a helper SKCIPHER_REQUEST_ON_STACK which can be used to place
a request on the stack for synchronous transforms.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
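As an illustration of the transition path mentioned above, here is a minimal
sketch of a synchronous caller using SKCIPHER_REQUEST_ON_STACK.  The algorithm
name "cbc(aes)", the helper name encrypt_one() and the pre-built scatterlist
are assumptions made for the example, not part of this patch; a real caller
would also size its IV buffer with crypto_skcipher_ivsize():

    #include <crypto/skcipher.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Hypothetical helper: encrypt one buffer already mapped in @sg. */
    static int encrypt_one(const u8 *key, unsigned int keylen, u8 *iv,
                           struct scatterlist *sg, unsigned int len)
    {
            struct crypto_skcipher *tfm;
            int err;

            /* Mask out async implementations so a stack request is safe. */
            tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (!err) {
                    SKCIPHER_REQUEST_ON_STACK(req, tfm);

                    skcipher_request_set_tfm(req, tfm);
                    skcipher_request_set_callback(req, 0, NULL, NULL);
                    skcipher_request_set_crypt(req, sg, sg, len, iv);

                    err = crypto_skcipher_encrypt(req);
            }

            crypto_free_skcipher(tfm);
            return err;
    }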
-rw-r--r--  crypto/Makefile                       1
-rw-r--r--  crypto/skcipher.c                   245
-rw-r--r--  include/crypto/internal/skcipher.h   15
-rw-r--r--  include/crypto/skcipher.h           391
4 files changed, 651 insertions(+), 1 deletion(-)
diff --git a/crypto/Makefile b/crypto/Makefile
index f6229aef7595..e2c59819b236 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
 
 crypto_blkcipher-y := ablkcipher.o
 crypto_blkcipher-y += blkcipher.o
+crypto_blkcipher-y += skcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
new file mode 100644
index 000000000000..dd5fc1bf6447
--- /dev/null
+++ b/crypto/skcipher.c
@@ -0,0 +1,245 @@
+/*
+ * Symmetric key cipher operations.
+ *
+ * Generic encrypt/decrypt wrapper for ciphers, handles operations across
+ * multiple page boundaries by using temporary blocks.  In user context,
+ * the kernel is given a chance to schedule us once per page.
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/skcipher.h>
+#include <linux/bug.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
+{
+	if (alg->cra_type == &crypto_blkcipher_type)
+		return sizeof(struct crypto_blkcipher *);
+
+	BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
+	       alg->cra_type != &crypto_givcipher_type);
+
+	return sizeof(struct crypto_ablkcipher *);
+}
+
+static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
+				     const u8 *key, unsigned int keylen)
+{
+	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_blkcipher *blkcipher = *ctx;
+	int err;
+
+	crypto_blkcipher_clear_flags(blkcipher, ~0);
+	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
+					      CRYPTO_TFM_REQ_MASK);
+	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
+	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
+				       CRYPTO_TFM_RES_MASK);
+
+	return err;
+}
+
+static int skcipher_crypt_blkcipher(struct skcipher_request *req,
+				    int (*crypt)(struct blkcipher_desc *,
+						 struct scatterlist *,
+						 struct scatterlist *,
+						 unsigned int))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct blkcipher_desc desc = {
+		.tfm = *ctx,
+		.info = req->iv,
+		.flags = req->base.flags,
+	};
+
+
+	return crypt(&desc, req->dst, req->src, req->cryptlen);
+}
+
+static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+	return skcipher_crypt_blkcipher(req, alg->encrypt);
+}
+
+static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+	return skcipher_crypt_blkcipher(req, alg->decrypt);
+}
+
+static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(*ctx);
+}
+
+int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *calg = tfm->__crt_alg;
+	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
+	struct crypto_blkcipher *blkcipher;
+	struct crypto_tfm *btfm;
+
+	if (!crypto_mod_get(calg))
+		return -EAGAIN;
+
+	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
+				  CRYPTO_ALG_TYPE_MASK);
+	if (IS_ERR(btfm)) {
+		crypto_mod_put(calg);
+		return PTR_ERR(btfm);
+	}
+
+	blkcipher = __crypto_blkcipher_cast(btfm);
+	*ctx = blkcipher;
+	tfm->exit = crypto_exit_skcipher_ops_blkcipher;
+
+	skcipher->setkey = skcipher_setkey_blkcipher;
+	skcipher->encrypt = skcipher_encrypt_blkcipher;
+	skcipher->decrypt = skcipher_decrypt_blkcipher;
+
+	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
+
+	return 0;
+}
+
+static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
+				      const u8 *key, unsigned int keylen)
+{
+	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_ablkcipher *ablkcipher = *ctx;
+	int err;
+
+	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
+	crypto_ablkcipher_set_flags(ablkcipher,
+				    crypto_skcipher_get_flags(tfm) &
+				    CRYPTO_TFM_REQ_MASK);
+	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
+	crypto_skcipher_set_flags(tfm,
+				  crypto_ablkcipher_get_flags(ablkcipher) &
+				  CRYPTO_TFM_RES_MASK);
+
+	return err;
+}
+
+static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
+				     int (*crypt)(struct ablkcipher_request *))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct ablkcipher_request *subreq = skcipher_request_ctx(req);
+
+	ablkcipher_request_set_tfm(subreq, *ctx);
+	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
+					req->base.complete, req->base.data);
+	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				     req->iv);
+
+	return crypt(subreq);
+}
+
+static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
+
+	return skcipher_crypt_ablkcipher(req, alg->encrypt);
+}
+
+static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
+
+	return skcipher_crypt_ablkcipher(req, alg->decrypt);
+}
+
+static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_ablkcipher(*ctx);
+}
+
+int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *calg = tfm->__crt_alg;
+	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
+	struct crypto_ablkcipher *ablkcipher;
+	struct crypto_tfm *abtfm;
+
+	if (!crypto_mod_get(calg))
+		return -EAGAIN;
+
+	abtfm = __crypto_alloc_tfm(calg, 0, 0);
+	if (IS_ERR(abtfm)) {
+		crypto_mod_put(calg);
+		return PTR_ERR(abtfm);
+	}
+
+	ablkcipher = __crypto_ablkcipher_cast(abtfm);
+	*ctx = ablkcipher;
+	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
+
+	skcipher->setkey = skcipher_setkey_ablkcipher;
+	skcipher->encrypt = skcipher_encrypt_ablkcipher;
+	skcipher->decrypt = skcipher_decrypt_ablkcipher;
+
+	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
+			    sizeof(struct ablkcipher_request);
+
+	return 0;
+}
+
+static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
+{
+	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
+		return crypto_init_skcipher_ops_blkcipher(tfm);
+
+	BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
+	       tfm->__crt_alg->cra_type != &crypto_givcipher_type);
+
+	return crypto_init_skcipher_ops_ablkcipher(tfm);
+}
+
+static const struct crypto_type crypto_skcipher_type2 = {
+	.extsize = crypto_skcipher_extsize,
+	.init_tfm = crypto_skcipher_init_tfm,
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
+	.type = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.tfmsize = offsetof(struct crypto_skcipher, base),
+};
+
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask)
+{
+	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Symmetric key cipher type");
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index b3a46c515d1b..2cf7a61ece59 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -107,5 +107,20 @@ static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
 	return req->base.flags;
 }
 
+static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
+{
+	return req->base.flags;
+}
+
 #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
 
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 07d245f073d1..d8dd41fb034f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -1,7 +1,7 @@
 /*
  * Symmetric key ciphers.
  *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -18,6 +18,28 @@
 #include <linux/slab.h>
 
 /**
+ * struct skcipher_request - Symmetric key cipher request
+ * @cryptlen: Number of bytes to encrypt or decrypt
+ * @iv: Initialisation Vector
+ * @src: Source SG list
+ * @dst: Destination SG list
+ * @base: Underlying async request
+ * @__ctx: Start of private context data
+ */
+struct skcipher_request {
+	unsigned int cryptlen;
+
+	u8 *iv;
+
+	struct scatterlist *src;
+	struct scatterlist *dst;
+
+	struct crypto_async_request base;
+
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
  * struct skcipher_givcrypt_request - Crypto request with IV generation
  * @seq: Sequence number for IV generation
  * @giv: Space for generated IV
@@ -30,6 +52,23 @@ struct skcipher_givcrypt_request {
 	struct ablkcipher_request creq;
 };
 
+struct crypto_skcipher {
+	int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
+		      unsigned int keylen);
+	int (*encrypt)(struct skcipher_request *req);
+	int (*decrypt)(struct skcipher_request *req);
+
+	unsigned int ivsize;
+	unsigned int reqsize;
+
+	struct crypto_tfm base;
+};
+
+#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+	char __##name##_desc[sizeof(struct skcipher_request) + \
+		crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+	struct skcipher_request *name = (void *)__##name##_desc
+
 static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
 	struct skcipher_givcrypt_request *req)
 {
@@ -106,5 +145,355 @@ static inline void skcipher_givcrypt_set_giv(
 	req->seq = seq;
 }
 
+/**
+ * DOC: Symmetric Key Cipher API
+ *
+ * The symmetric key cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately before the completion of the operation.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support the asynchronous operation, more information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the skcipher_request data structure.
+ *
+ * For the symmetric key cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced from the request data structure
+ * in addition to the IV used for the cipher request. Maintaining such state
+ * information is important for a crypto driver implementer, because the
+ * callback function invoked upon completion of a cipher operation may need
+ * to know which operation just finished when several were issued in
+ * parallel. This state information is unused by the kernel crypto API.
+ */
+
+static inline struct crypto_skcipher *__crypto_skcipher_cast(
+	struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_skcipher, base);
+}
+
+/**
+ * crypto_alloc_skcipher() - allocate symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an skcipher. The returned struct
+ * crypto_skcipher is the cipher handle that is required for any subsequent
+ * API invocation for that skcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_skcipher_tfm(
+	struct crypto_skcipher *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_free_skcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_skcipher() - Search for the availability of an skcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the skcipher is known to the kernel crypto API; false
+ *	   otherwise
+ */
+static inline int crypto_has_skcipher(const char *alg_name, u32 type,
+				      u32 mask)
+{
+	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
+			      crypto_skcipher_mask(mask));
+}
+
+/**
+ * crypto_skcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the skcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
+{
+	return tfm->ivsize;
+}
+
+/**
+ * crypto_skcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the skcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_skcipher_blocksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_skcipher_alignmask(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
+}
+
+static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
+}
+
+static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
+					     u32 flags)
+{
+	crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
+					       u32 flags)
+{
+	crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_skcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the skcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher variant. Many block ciphers
+ * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
+ * providing a 16 byte key for an AES cipher handle, AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+					 const u8 *key, unsigned int keylen)
+{
+	return tfm->setkey(tfm, key, keylen);
+}
+
+/**
+ * crypto_skcipher_reqtfm() - obtain cipher handle from request
+ * @req: skcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_skcipher handle when furnishing an skcipher_request
+ * data structure.
+ *
+ * Return: crypto_skcipher handle
+ */
+static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
+	struct skcipher_request *req)
+{
+	return __crypto_skcipher_cast(req->base.tfm);
+}
+
+/**
+ * crypto_skcipher_encrypt() - encrypt plaintext
+ * @req: reference to the skcipher_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+	return tfm->encrypt(req);
+}
+
+/**
+ * crypto_skcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the skcipher_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+	return tfm->decrypt(req);
+}
+
+/**
+ * DOC: Symmetric Key Cipher Request Handle
+ *
+ * The skcipher_request data structure contains all pointers to data
+ * required for the symmetric key cipher operation. This includes the cipher
+ * handle (which can be used by multiple skcipher_request instances), pointers
+ * to plaintext and ciphertext, the asynchronous callback function, etc. It
+ * acts as a handle to the skcipher_request_* API calls in a similar way as
+ * the skcipher handle to the crypto_skcipher_* API calls.
+ */
+
+/**
+ * crypto_skcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
+{
+	return tfm->reqsize;
+}
+
+/**
+ * skcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing skcipher handle in the request
+ * data structure with a different one.
+ */
+static inline void skcipher_request_set_tfm(struct skcipher_request *req,
+					    struct crypto_skcipher *tfm)
+{
+	req->base.tfm = crypto_skcipher_tfm(tfm);
+}
+
+static inline struct skcipher_request *skcipher_request_cast(
+	struct crypto_async_request *req)
+{
+	return container_of(req, struct skcipher_request, base);
+}
+
+/**
+ * skcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the skcipher
+ * encrypt and decrypt API calls. During the allocation, the provided skcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of
+ *	   memory
+ */
+static inline struct skcipher_request *skcipher_request_alloc(
+	struct crypto_skcipher *tfm, gfp_t gfp)
+{
+	struct skcipher_request *req;
+
+	req = kmalloc(sizeof(struct skcipher_request) +
+		      crypto_skcipher_reqsize(tfm), gfp);
+
+	if (likely(req))
+		skcipher_request_set_tfm(req, tfm);
+
+	return req;
+}
+
+/**
+ * skcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure to be freed
+ */
+static inline void skcipher_request_free(struct skcipher_request *req)
+{
+	kzfree(req);
+}
+
+/**
+ * skcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   increase the wait queue beyond the initial maximum size;
+ *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ *	  crypto API, but provided to the callback function for it to use. Here,
+ *	  the caller can provide a reference to memory the callback function can
+ *	  operate on. As the callback function is invoked asynchronously to the
+ *	  related functionality, it may need to access data structures of the
+ *	  related functionality which can be referenced using this pointer. The
+ *	  callback function can access the memory via the "data" field in the
+ *	  crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once
+ * the cipher operation completes.
+ *
+ * The callback function is registered with the skcipher_request handle and
+ * must comply with the following template:
+ *
+ *	void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void skcipher_request_set_callback(struct skcipher_request *req,
+						 u32 flags,
+						 crypto_completion_t compl,
+						 void *data)
+{
+	req->base.complete = compl;
+	req->base.data = data;
+	req->base.flags = flags;
+}
+
+/**
+ * skcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ *	by crypto_skcipher_ivsize
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the
+ * plaintext.
+ */
+static inline void skcipher_request_set_crypt(
+	struct skcipher_request *req,
+	struct scatterlist *src, struct scatterlist *dst,
+	unsigned int cryptlen, void *iv)
+{
+	req->src = src;
+	req->dst = dst;
+	req->cryptlen = cryptlen;
+	req->iv = iv;
+}
+
 #endif /* _CRYPTO_SKCIPHER_H */
 
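A companion sketch of the asynchronous usage described in the DOC sections of
the new header: the caller registers a completion-based callback and sleeps
until the transform finishes.  The crypt_done()/encrypt_async() names and the
struct crypt_result wrapper are illustrative assumptions, not part of this
patch; the tfm is assumed to have been allocated with crypto_alloc_skcipher()
and keyed already.  The -EINPROGRESS/-EBUSY handling follows the usual pattern
for a possibly backlogged request:

    #include <crypto/skcipher.h>
    #include <linux/completion.h>
    #include <linux/scatterlist.h>

    /* Illustrative per-request state handed to the callback via @data. */
    struct crypt_result {
            struct completion done;
            int err;
    };

    /* Matches the callback template from skcipher_request_set_callback(). */
    static void crypt_done(struct crypto_async_request *base, int err)
    {
            struct crypt_result *res = base->data;

            /* A backlogged request reports -EINPROGRESS first; keep waiting. */
            if (err == -EINPROGRESS)
                    return;

            res->err = err;
            complete(&res->done);
    }

    static int encrypt_async(struct crypto_skcipher *tfm,
                             struct scatterlist *src, struct scatterlist *dst,
                             unsigned int len, u8 *iv)
    {
            struct skcipher_request *req;
            struct crypt_result res;
            int err;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req)
                    return -ENOMEM;

            init_completion(&res.done);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                               CRYPTO_TFM_REQ_MAY_SLEEP,
                                          crypt_done, &res);
            skcipher_request_set_crypt(req, src, dst, len, iv);

            err = crypto_skcipher_encrypt(req);
            if (err == -EINPROGRESS || err == -EBUSY) {
                    /* Operation queued; sleep until the callback fires. */
                    wait_for_completion(&res.done);
                    err = res.err;
            }

            skcipher_request_free(req);
            return err;
    }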