author    Gilad Ben-Yossef <gilad@benyossef.com>  2018-01-22 04:27:01 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>  2018-02-15 10:26:40 -0500
commit    63ee04c8b491ee148489347e7da9fbfd982ca2bb (patch)
tree      cb983e6d07e105a71918f47804e966fd5f9f87a0 /drivers/crypto/ccree
parent    4c3f97276e156820a0433bf7b59a4df1100829ae (diff)
crypto: ccree - add skcipher support
Add CryptoCell skcipher support

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
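For context (not part of the commit): once these algorithms are registered, they are reached through the generic kernel skcipher API rather than any ccree-specific interface. The sketch below is a minimal, hypothetical in-kernel caller encrypting one block with cbc(aes); the function name example_cbc_aes() is illustrative, but the API calls (crypto_alloc_skcipher(), crypto_wait_req(), etc.) are the standard crypto API ones. The ccree implementation can also be requested directly by its driver name, "cbc-aes-ccree".

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_cbc_aes(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[AES_MIN_KEY_SIZE] = { 0 };	/* dummy all-zero key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* dummy IV */
	u8 *buf;
	int err;

	/* "cbc(aes)" resolves to the highest-priority provider. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* kmalloc'd buffer so the driver can DMA-map it. */
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	/* One in-place block; src and dst are the same scatterlist. */
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* The HW queue completes asynchronously; wait on the callback. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return err;
}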
Diffstat (limited to 'drivers/crypto/ccree')
-rw-r--r--  drivers/crypto/ccree/Makefile        |    2
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c |  125
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.h |    8
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c     | 1130
-rw-r--r--  drivers/crypto/ccree/cc_cipher.h     |   59
-rw-r--r--  drivers/crypto/ccree/cc_driver.c     |   11
-rw-r--r--  drivers/crypto/ccree/cc_driver.h     |    6
7 files changed, 1339 insertions, 2 deletions
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index 6b204ab8e4a1..a7fecadbf7cc 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_ivgen.o cc_sram_mgr.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_ivgen.o cc_sram_mgr.o
 ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
 ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 4c6757966fd2..46be101ede56 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -8,6 +8,7 @@
 
 #include "cc_buffer_mgr.h"
 #include "cc_lli_defs.h"
+#include "cc_cipher.h"
 
 enum dma_buffer_type {
 	DMA_NULL_TYPE = -1,
@@ -347,6 +348,130 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 	return 0;
 }
 
+void cc_unmap_cipher_request(struct device *dev, void *ctx,
+			     unsigned int ivsize, struct scatterlist *src,
+			     struct scatterlist *dst)
+{
+	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+
+	if (req_ctx->gen_ctx.iv_dma_addr) {
+		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
+		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+				 ivsize,
+				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+				 DMA_TO_DEVICE);
+	}
+	/* Release pool */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+	    req_ctx->mlli_params.mlli_virt_addr) {
+		dma_pool_free(req_ctx->mlli_params.curr_pool,
+			      req_ctx->mlli_params.mlli_virt_addr,
+			      req_ctx->mlli_params.mlli_dma_addr);
+	}
+
+	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+
+	if (src != dst) {
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+	}
+}
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+			  unsigned int ivsize, unsigned int nbytes,
+			  void *info, struct scatterlist *src,
+			  struct scatterlist *dst, gfp_t flags)
+{
+	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+	struct mlli_params *mlli_params = &req_ctx->mlli_params;
+	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct buffer_array sg_data;
+	u32 dummy = 0;
+	int rc = 0;
+	u32 mapped_nents = 0;
+
+	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
+	mlli_params->curr_pool = NULL;
+	sg_data.num_of_buffers = 0;
+
+	/* Map IV buffer */
+	if (ivsize) {
+		dump_byte_array("iv", (u8 *)info, ivsize);
+		req_ctx->gen_ctx.iv_dma_addr =
+			dma_map_single(dev, (void *)info,
+				       ivsize,
+				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
+			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+				ivsize, info);
+			return -ENOMEM;
+		}
+		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
+	} else {
+		req_ctx->gen_ctx.iv_dma_addr = 0;
+	}
+
+	/* Map the src SGL */
+	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+	if (rc) {
+		rc = -ENOMEM;
+		goto cipher_exit;
+	}
+	if (mapped_nents > 1)
+		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+	if (src == dst) {
+		/* Handle inplace operation */
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			req_ctx->out_nents = 0;
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+		}
+	} else {
+		/* Map the dst sg */
+		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			      &dummy, &mapped_nents)) {
+			rc = -ENOMEM;
+			goto cipher_exit;
+		}
+		if (mapped_nents > 1)
+			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+					nbytes, 0, true,
+					&req_ctx->out_mlli_nents);
+		}
+	}
+
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
+			goto cipher_exit;
+	}
+
+	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+		cc_dma_buf_type(req_ctx->dma_buf_type));
+
+	return 0;
+
+cipher_exit:
+	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+	return rc;
+}
+
 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
 {
 	struct buff_mgr_handle *buff_mgr_handle;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 2a2eba68e0f9..614c5c5b2721 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -40,6 +40,14 @@ int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
 
 int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
 
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+			  unsigned int ivsize, unsigned int nbytes,
+			  void *info, struct scatterlist *src,
+			  struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_cipher_request(struct device *dev, void *ctx, unsigned int ivsize,
+			     struct scatterlist *src, struct scatterlist *dst);
+
 int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 			      struct scatterlist *src, unsigned int nbytes,
 			      bool do_update, gfp_t flags);
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
new file mode 100644
index 000000000000..5760ca9481ea
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
+
+#include "cc_driver.h"
+#include "cc_lli_defs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
+
+#define MAX_ABLKCIPHER_SEQ_LEN 6
+
+#define template_skcipher template_u.skcipher
+
+#define CC_MIN_AES_XTS_SIZE 0x10
+#define CC_MAX_AES_XTS_SIZE 0x2000
+struct cc_cipher_handle {
+	struct list_head alg_list;
+};
+
+struct cc_user_key_info {
+	u8 *key;
+	dma_addr_t key_dma_addr;
+};
+
+struct cc_hw_key_info {
+	enum cc_hw_crypto_key key1_slot;
+	enum cc_hw_crypto_key key2_slot;
+};
+
+struct cc_cipher_ctx {
+	struct cc_drvdata *drvdata;
+	int keylen;
+	int key_round_number;
+	int cipher_mode;
+	int flow_mode;
+	unsigned int flags;
+	struct cc_user_key_info user;
+	struct cc_hw_key_info hw;
+	struct crypto_shash *shash_tfm;
+};
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
+
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		switch (size) {
+		case CC_AES_128_BIT_KEY_SIZE:
+		case CC_AES_192_BIT_KEY_SIZE:
+			if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+			    ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+			    ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
+				return 0;
+			break;
+		case CC_AES_256_BIT_KEY_SIZE:
+			return 0;
+		case (CC_AES_192_BIT_KEY_SIZE * 2):
+		case (CC_AES_256_BIT_KEY_SIZE * 2):
+			if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+			    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+			    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
+				return 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case S_DIN_to_DES:
+		if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
+			return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+			      unsigned int size)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		switch (ctx_p->cipher_mode) {
+		case DRV_CIPHER_XTS:
+			if (size >= CC_MIN_AES_XTS_SIZE &&
+			    size <= CC_MAX_AES_XTS_SIZE &&
+			    IS_ALIGNED(size, AES_BLOCK_SIZE))
+				return 0;
+			break;
+		case DRV_CIPHER_CBC_CTS:
+			if (size >= AES_BLOCK_SIZE)
+				return 0;
+			break;
+		case DRV_CIPHER_OFB:
+		case DRV_CIPHER_CTR:
+			return 0;
+		case DRV_CIPHER_ECB:
+		case DRV_CIPHER_CBC:
+		case DRV_CIPHER_ESSIV:
+		case DRV_CIPHER_BITLOCKER:
+			if (IS_ALIGNED(size, AES_BLOCK_SIZE))
+				return 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case S_DIN_to_DES:
+		if (IS_ALIGNED(size, DES_BLOCK_SIZE))
+			return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int cc_cipher_init(struct crypto_tfm *tfm)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg,
+			     skcipher_alg.base);
+	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+	int rc = 0;
+
+	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+		crypto_tfm_alg_name(tfm));
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct cipher_req_ctx));
+
+	ctx_p->cipher_mode = cc_alg->cipher_mode;
+	ctx_p->flow_mode = cc_alg->flow_mode;
+	ctx_p->drvdata = cc_alg->drvdata;
+
+	/* Allocate key buffer, cache line aligned */
+	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+	if (!ctx_p->user.key)
+		return -ENOMEM;
+
+	dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+		ctx_p->user.key);
+
+	/* Map key buffer */
+	ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+						  max_key_buf_size,
+						  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
+		dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+			max_key_buf_size, ctx_p->user.key);
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+		max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* Alloc hash tfm for essiv */
+		ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+		if (IS_ERR(ctx_p->shash_tfm)) {
+			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+			return PTR_ERR(ctx_p->shash_tfm);
+		}
+	}
+
+	return rc;
+}
+
+static void cc_cipher_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct cc_crypto_alg *cc_alg =
+		container_of(alg, struct cc_crypto_alg,
+			     skcipher_alg.base);
+	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+	dev_dbg(dev, "Clearing context @%p for %s\n",
+		crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* Free hash tfm for essiv */
+		crypto_free_shash(ctx_p->shash_tfm);
+		ctx_p->shash_tfm = NULL;
+	}
+
+	/* Unmap key buffer */
+	dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
+			 DMA_TO_DEVICE);
+	dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
+		&ctx_p->user.key_dma_addr);
+
+	/* Free key buffer in context */
+	kzfree(ctx_p->user.key);
+	dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+}
+
+struct tdes_keys {
+	u8 key1[DES_KEY_SIZE];
+	u8 key2[DES_KEY_SIZE];
+	u8 key3[DES_KEY_SIZE];
+};
+
+static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
+{
+	switch (slot_num) {
+	case 0:
+		return KFDE0_KEY;
+	case 1:
+		return KFDE1_KEY;
+	case 2:
+		return KFDE2_KEY;
+	case 3:
+		return KFDE3_KEY;
+	}
+	return END_OF_KEYS;
+}
+
+static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	u32 tmp[DES3_EDE_EXPKEY_WORDS];
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg,
+			     skcipher_alg.base);
+	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+
+	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+		ctx_p, crypto_tfm_alg_name(tfm), keylen);
+	dump_byte_array("key", (u8 *)key, keylen);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	if (validate_keys_sizes(ctx_p, keylen)) {
+		dev_err(dev, "Unsupported key size %d.\n", keylen);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (cc_is_hw_key(tfm)) {
+		/* setting HW key slots */
+		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
+
+		if (ctx_p->flow_mode != S_DIN_to_AES) {
+			dev_err(dev, "HW key not supported for non-AES flows\n");
+			return -EINVAL;
+		}
+
+		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
+		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+			dev_err(dev, "Unsupported hw key1 number (%d)\n",
+				hki->hw_key1);
+			return -EINVAL;
+		}
+
+		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+			if (hki->hw_key1 == hki->hw_key2) {
+				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+					hki->hw_key1, hki->hw_key2);
+				return -EINVAL;
+			}
+			ctx_p->hw.key2_slot =
+				hw_key_to_cc_hw_key(hki->hw_key2);
+			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+				dev_err(dev, "Unsupported hw key2 number (%d)\n",
+					hki->hw_key2);
+				return -EINVAL;
+			}
+		}
+
+		ctx_p->keylen = keylen;
+		dev_dbg(dev, "cc_is_hw_key ret 0");
+
+		return 0;
+	}
+
+	/*
+	 * Verify DES weak keys
+	 * Note that we're dropping the expanded key since the
+	 * HW does the expansion on its own.
+	 */
+	if (ctx_p->flow_mode == S_DIN_to_DES) {
+		if (keylen == DES3_EDE_KEY_SIZE &&
+		    __des3_ede_setkey(tmp, &tfm->crt_flags, key,
+				      DES3_EDE_KEY_SIZE)) {
+			dev_dbg(dev, "weak 3DES key");
+			return -EINVAL;
+		} else if (!des_ekey(tmp, key) &&
+			   (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			dev_dbg(dev, "weak DES key");
+			return -EINVAL;
+		}
+	}
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+	    xts_check_key(tfm, key, keylen)) {
+		dev_dbg(dev, "weak XTS key");
+		return -EINVAL;
+	}
+
+	/* STAT_PHASE_1: Copy key to ctx */
+	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
+				max_key_buf_size, DMA_TO_DEVICE);
+
+	memcpy(ctx_p->user.key, key, keylen);
+	if (keylen == 24)
+		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* sha256 for key2 - use sw implementation */
+		int key_len = keylen >> 1;
+		int err;
+
+		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
+		desc->tfm = ctx_p->shash_tfm;
+
+		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
+					  ctx_p->user.key + key_len);
+		if (err) {
+			dev_err(dev, "Failed to hash ESSIV key.\n");
+			return err;
+		}
+	}
+	dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
+				   max_key_buf_size, DMA_TO_DEVICE);
+	ctx_p->keylen = keylen;
+
+	dev_dbg(dev, "return safely");
+	return 0;
+}
+
+static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+				 struct cipher_req_ctx *req_ctx,
+				 unsigned int ivsize, unsigned int nbytes,
+				 struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	int cipher_mode = ctx_p->cipher_mode;
+	int flow_mode = ctx_p->flow_mode;
+	int direction = req_ctx->gen_ctx.op_type;
+	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+	unsigned int key_len = ctx_p->keylen;
+	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+	unsigned int du_size = nbytes;
+
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg,
+			     skcipher_alg.base);
+
+	if (cc_alg->data_unit)
+		du_size = cc_alg->data_unit;
+
+	switch (cipher_mode) {
+	case DRV_CIPHER_CBC:
+	case DRV_CIPHER_CBC_CTS:
+	case DRV_CIPHER_CTR:
+	case DRV_CIPHER_OFB:
+		/* Load cipher state */
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
+			     NS_BIT);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		if (cipher_mode == DRV_CIPHER_CTR ||
+		    cipher_mode == DRV_CIPHER_OFB) {
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+		} else {
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
+		}
+		(*seq_size)++;
+		/*FALLTHROUGH*/
+	case DRV_CIPHER_ECB:
+		/* Load key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (flow_mode == S_DIN_to_AES) {
+			if (cc_is_hw_key(tfm)) {
+				set_hw_crypto_key(&desc[*seq_size],
+						  ctx_p->hw.key1_slot);
+			} else {
+				set_din_type(&desc[*seq_size], DMA_DLLI,
+					     key_dma_addr, ((key_len == 24) ?
+							    AES_MAX_KEY_SIZE :
+							    key_len), NS_BIT);
+			}
+			set_key_size_aes(&desc[*seq_size], key_len);
+		} else {
+			/*des*/
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     key_len, NS_BIT);
+			set_key_size_des(&desc[*seq_size], key_len);
+		}
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		(*seq_size)++;
+		break;
+	case DRV_CIPHER_XTS:
+	case DRV_CIPHER_ESSIV:
+	case DRV_CIPHER_BITLOCKER:
+		/* Load AES key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (cc_is_hw_key(tfm)) {
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key1_slot);
+		} else {
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     (key_len / 2), NS_BIT);
+		}
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		(*seq_size)++;
+
+		/* load XEX key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (cc_is_hw_key(tfm)) {
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key2_slot);
+		} else {
+			set_din_type(&desc[*seq_size], DMA_DLLI,
+				     (key_dma_addr + (key_len / 2)),
+				     (key_len / 2), NS_BIT);
+		}
+		set_xex_data_unit_size(&desc[*seq_size], du_size);
+		set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+		(*seq_size)++;
+
+		/* Set state */
+		hw_desc_init(&desc[*seq_size]);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+			     CC_AES_BLOCK_SIZE, NS_BIT);
+		(*seq_size)++;
+		break;
+	default:
+		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+	}
+}
+
+static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+				 struct cipher_req_ctx *req_ctx,
+				 struct scatterlist *dst,
+				 struct scatterlist *src, unsigned int nbytes,
+				 void *areq, struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	unsigned int flow_mode = ctx_p->flow_mode;
+
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		flow_mode = DIN_AES_DOUT;
+		break;
+	case S_DIN_to_DES:
+		flow_mode = DIN_DES_DOUT;
+		break;
+	default:
+		dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
+		return;
+	}
+	/* Process */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(src), nbytes);
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(dst), nbytes);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+			     nbytes, NS_BIT);
+		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+			      nbytes, NS_BIT, (!areq ? 0 : 1));
+		if (areq)
+			set_queue_last_ind(&desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		(*seq_size)++;
+	} else {
+		/* bypass */
+		dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+			&req_ctx->mlli_params.mlli_dma_addr,
+			req_ctx->mlli_params.mlli_len,
+			(unsigned int)ctx_p->drvdata->mlli_sram_addr);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI,
+			     req_ctx->mlli_params.mlli_dma_addr,
+			     req_ctx->mlli_params.mlli_len, NS_BIT);
+		set_dout_sram(&desc[*seq_size],
+			      ctx_p->drvdata->mlli_sram_addr,
+			      req_ctx->mlli_params.mlli_len);
+		set_flow_mode(&desc[*seq_size], BYPASS);
+		(*seq_size)++;
+
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_MLLI,
+			     ctx_p->drvdata->mlli_sram_addr,
+			     req_ctx->in_mlli_nents, NS_BIT);
+		if (req_ctx->out_nents == 0) {
+			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr);
+			set_dout_mlli(&desc[*seq_size],
+				      ctx_p->drvdata->mlli_sram_addr,
+				      req_ctx->in_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
+		} else {
+			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr +
+				(u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
+			set_dout_mlli(&desc[*seq_size],
+				      (ctx_p->drvdata->mlli_sram_addr +
+				       (LLI_ENTRY_BYTE_SIZE *
+					req_ctx->in_mlli_nents)),
+				      req_ctx->out_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
+		}
+		if (areq)
+			set_queue_last_ind(&desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		(*seq_size)++;
+	}
+}
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
+{
+	struct skcipher_request *req = (struct skcipher_request *)cc_req;
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *src = req->src;
+	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+
+	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+	kzfree(req_ctx->iv);
+
+	/*
+	 * The crypto API expects us to set the req->iv to the last
+	 * ciphertext block. For encrypt, simply copy from the result.
+	 * For decrypt, we must copy from a saved buffer since this
+	 * could be an in-place decryption operation and the src is
+	 * lost by this point.
+	 */
+	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		memcpy(req->iv, req_ctx->backup_info, ivsize);
+		kzfree(req_ctx->backup_info);
+	} else if (!err) {
+		scatterwalk_map_and_copy(req->iv, req->dst,
+					 (req->cryptlen - ivsize),
+					 ivsize, 0);
+	}
+
+	skcipher_request_complete(req, err);
+}
+
+static int cc_cipher_process(struct skcipher_request *req,
+			     enum drv_crypto_direction direction)
+{
+	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *src = req->src;
+	unsigned int nbytes = req->cryptlen;
+	void *iv = req->iv;
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+	struct cc_crypto_req cc_req = {};
+	int rc, cts_restore_flag = 0;
+	unsigned int seq_len = 0;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	dev_dbg(dev, "%s req=%p iv=%p nbytes=%d\n",
+		((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+		"Encrypt" : "Decrypt"), req, iv, nbytes);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	/* TODO: check data length according to mode */
+	if (validate_data_size(ctx_p, nbytes)) {
+		dev_err(dev, "Unsupported data size %d.\n", nbytes);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+		rc = -EINVAL;
+		goto exit_process;
+	}
+	if (nbytes == 0) {
+		/* No data to process is valid */
+		rc = 0;
+		goto exit_process;
+	}
+
+	/* The IV we are handed may be allocated from the stack so
+	 * we must copy it to a DMAable buffer before use.
+	 */
+	req_ctx->iv = kmalloc(ivsize, flags);
+	if (!req_ctx->iv) {
+		rc = -ENOMEM;
+		goto exit_process;
+	}
+	memcpy(req_ctx->iv, iv, ivsize);
+
+	/*For CTS in case of data size aligned to 16 use CBC mode*/
+	if (((nbytes % AES_BLOCK_SIZE) == 0) &&
+	    ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
+		ctx_p->cipher_mode = DRV_CIPHER_CBC;
+		cts_restore_flag = 1;
+	}
+
+	/* Setup request structure */
+	cc_req.user_cb = (void *)cc_cipher_complete;
+	cc_req.user_arg = (void *)req;
+
+#ifdef ENABLE_CYCLE_COUNT
+	cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
+
+#endif
+
+	/* Setup request context */
+	req_ctx->gen_ctx.op_type = direction;
+
+	/* STAT_PHASE_1: Map buffers */
+
+	rc = cc_map_cipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+				   req_ctx->iv, src, dst, flags);
+	if (rc) {
+		dev_err(dev, "map_request() failed\n");
+		goto exit_process;
+	}
+
+	/* STAT_PHASE_2: Create sequence */
+
+	/* Setup processing */
+	cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+	/* Data processing */
+	cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
+			     &seq_len);
+
+	/* do we need to generate IV? */
+	if (req_ctx->is_giv) {
+		cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+		cc_req.ivgen_dma_addr_len = 1;
+		/* set the IV size (8/16 B long)*/
+		cc_req.ivgen_size = ivsize;
+	}
+
+	/* STAT_PHASE_3: Lock HW and push sequence */
+
+	rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+			     &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		/* Failed to send the request or request completed
+		 * synchronously
+		 */
+		cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+	}
+
+exit_process:
+	if (cts_restore_flag)
+		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
+
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		kzfree(req_ctx->backup_info);
+		kzfree(req_ctx->iv);
+	}
+
+	return rc;
+}
+
+static int cc_cipher_encrypt(struct skcipher_request *req)
+{
+	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+
+	req_ctx->is_giv = false;
+	req_ctx->backup_info = NULL;
+
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+}
+
+static int cc_cipher_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	/*
+	 * Allocate and save the last IV sized bytes of the source, which will
+	 * be lost in case of in-place decryption and might be needed for CTS.
+	 */
+	req_ctx->backup_info = kmalloc(ivsize, flags);
+	if (!req_ctx->backup_info)
+		return -ENOMEM;
+
+	scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+				 (req->cryptlen - ivsize), ivsize, 0);
+	req_ctx->is_giv = false;
+
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+}
+
+/* Block cipher alg */
+static const struct cc_alg_template skcipher_algs[] = {
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "xts512(aes)",
+		.driver_name = "xts-aes-du512-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 512,
+	},
+	{
+		.name = "xts4096(aes)",
+		.driver_name = "xts-aes-du4096-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 4096,
+	},
+	{
+		.name = "essiv(aes)",
+		.driver_name = "essiv-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "essiv512(aes)",
+		.driver_name = "essiv-aes-du512-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 512,
+	},
+	{
+		.name = "essiv4096(aes)",
+		.driver_name = "essiv-aes-du4096-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 4096,
+	},
+	{
+		.name = "bitlocker(aes)",
+		.driver_name = "bitlocker-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "bitlocker512(aes)",
+		.driver_name = "bitlocker-aes-du512-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 512,
+	},
+	{
+		.name = "bitlocker4096(aes)",
+		.driver_name = "bitlocker-aes-du4096-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 4096,
+	},
+	{
+		.name = "ecb(aes)",
+		.driver_name = "ecb-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "ofb(aes)",
+		.driver_name = "ofb-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_OFB,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "cts1(cbc(aes))",
+		.driver_name = "cts1-cbc-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC_CTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-ccree",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CTR,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-ccree",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+	},
+	{
+		.name = "ecb(des3_ede)",
+		.driver_name = "ecb-3des-ccree",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_DES,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-ccree",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+	},
+	{
+		.name = "ecb(des)",
+		.driver_name = "ecb-des-ccree",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_DES,
+	},
+};
+
+static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
+					   struct device *dev)
+{
+	struct cc_crypto_alg *t_alg;
+	struct skcipher_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	alg = &t_alg->skcipher_alg;
+
+	memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));
+
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 tmpl->driver_name);
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CC_CRA_PRIO;
+	alg->base.cra_blocksize = tmpl->blocksize;
+	alg->base.cra_alignmask = 0;
+	alg->base.cra_ctxsize = sizeof(struct cc_cipher_ctx);
+
+	alg->base.cra_init = cc_cipher_init;
+	alg->base.cra_exit = cc_cipher_exit;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+				CRYPTO_ALG_TYPE_SKCIPHER;
+
+	t_alg->cipher_mode = tmpl->cipher_mode;
+	t_alg->flow_mode = tmpl->flow_mode;
+	t_alg->data_unit = tmpl->data_unit;
+
+	return t_alg;
+}
+
+int cc_cipher_free(struct cc_drvdata *drvdata)
+{
+	struct cc_crypto_alg *t_alg, *n;
+	struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle;
+
+	if (cipher_handle) {
+		/* Remove registered algs */
+		list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list,
+					 entry) {
+			crypto_unregister_skcipher(&t_alg->skcipher_alg);
+			list_del(&t_alg->entry);
+			kfree(t_alg);
+		}
+		kfree(cipher_handle);
+		drvdata->cipher_handle = NULL;
+	}
+	return 0;
+}
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
+{
+	struct cc_cipher_handle *cipher_handle;
+	struct cc_crypto_alg *t_alg;
+	struct device *dev = drvdata_to_dev(drvdata);
+	int rc = -ENOMEM;
+	int alg;
+
+	cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL);
+	if (!cipher_handle)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&cipher_handle->alg_list);
+	drvdata->cipher_handle = cipher_handle;
+
+	/* Linux crypto */
+	dev_dbg(dev, "Number of algorithms = %zu\n",
+		ARRAY_SIZE(skcipher_algs));
+	for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
+		dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
+		t_alg = cc_create_alg(&skcipher_algs[alg], dev);
+		if (IS_ERR(t_alg)) {
+			rc = PTR_ERR(t_alg);
+			dev_err(dev, "%s alg allocation failed\n",
+				skcipher_algs[alg].driver_name);
+			goto fail0;
+		}
+		t_alg->drvdata = drvdata;
+
+		dev_dbg(dev, "registering %s\n",
+			skcipher_algs[alg].driver_name);
+		rc = crypto_register_skcipher(&t_alg->skcipher_alg);
+		dev_dbg(dev, "%s alg registration rc = %x\n",
+			t_alg->skcipher_alg.base.cra_driver_name, rc);
+		if (rc) {
+			dev_err(dev, "%s alg registration failed\n",
+				t_alg->skcipher_alg.base.cra_driver_name);
+			kfree(t_alg);
+			goto fail0;
+		} else {
+			list_add_tail(&t_alg->entry,
+				      &cipher_handle->alg_list);
+			dev_dbg(dev, "Registered %s\n",
+				t_alg->skcipher_alg.base.cra_driver_name);
+		}
+	}
+	return 0;
+
+fail0:
+	cc_cipher_free(drvdata);
+	return rc;
+}
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
new file mode 100644
index 000000000000..2a2a6f46c515
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+/* Crypto cipher flags */
+#define CC_CRYPTO_CIPHER_KEY_KFDE0	BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1	BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2	BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3	BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B	BIT(4)
+
+#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE1 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE2 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE3)
+
+struct cipher_req_ctx {
+	struct async_gen_req_ctx gen_ctx;
+	enum cc_req_dma_buf_type dma_buf_type;
+	u32 in_nents;
+	u32 in_mlli_nents;
+	u32 out_nents;
+	u32 out_mlli_nents;
+	u8 *backup_info; /* store iv for generated IV flow */
+	u8 *iv;
+	bool is_giv;
+	struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
+struct arm_hw_key_info {
+	int hw_key1;
+	int hw_key2;
+};
+
+/*
+ * This is a stub function that will be replaced when we
+ * implement secure keys
+ */
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+	return false;
+}
+
+#endif /*__CC_CIPHER_H__*/
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 62b902acb5aa..286d0e3e8561 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -19,6 +19,7 @@
 #include "cc_request_mgr.h"
 #include "cc_buffer_mgr.h"
 #include "cc_debugfs.h"
+#include "cc_cipher.h"
 #include "cc_ivgen.h"
 #include "cc_sram_mgr.h"
 #include "cc_pm.h"
@@ -278,8 +279,17 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		goto post_power_mgr_err;
 	}
 
+	/* Allocate crypto algs */
+	rc = cc_cipher_alloc(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_cipher_alloc failed\n");
+		goto post_ivgen_err;
+	}
+
 	return 0;
 
+post_ivgen_err:
+	cc_ivgen_fini(new_drvdata);
 post_power_mgr_err:
 	cc_pm_fini(new_drvdata);
 post_buf_mgr_err:
@@ -308,6 +318,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
 	struct cc_drvdata *drvdata =
 		(struct cc_drvdata *)platform_get_drvdata(plat_dev);
 
+	cc_cipher_free(drvdata);
 	cc_ivgen_fini(drvdata);
 	cc_pm_fini(drvdata);
 	cc_buffer_mgr_fini(drvdata);
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index f1671bd01885..cd4f62b122c5 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -15,6 +15,7 @@
 #endif
 #include <linux/dma-mapping.h>
 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/aes.h>
 #include <crypto/sha.h>
 #include <crypto/aead.h>
@@ -111,6 +112,7 @@ struct cc_drvdata {
 	struct platform_device *plat_dev;
 	cc_sram_addr_t mlli_sram_addr;
 	void *buff_mgr_handle;
+	void *cipher_handle;
 	void *request_mgr_handle;
 	void *ivgen_handle;
 	void *sram_mgr_handle;
@@ -124,8 +126,9 @@ struct cc_crypto_alg {
 	int cipher_mode;
 	int flow_mode; /* Note: currently, refers to the cipher mode only. */
 	int auth_mode;
+	unsigned int data_unit;
 	struct cc_drvdata *drvdata;
-	struct crypto_alg crypto_alg;
+	struct skcipher_alg skcipher_alg;
 };
 
 struct cc_alg_template {
@@ -140,6 +143,7 @@ struct cc_alg_template {
 	int cipher_mode;
 	int flow_mode; /* Note: currently, refers to the cipher mode only. */
 	int auth_mode;
+	unsigned int data_unit;
 	struct cc_drvdata *drvdata;
 };
 