Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/Kconfig               |   19
-rw-r--r--	drivers/crypto/Makefile              |    4
-rw-r--r--	drivers/crypto/amcc/crypto4xx_core.c |    5
-rw-r--r--	drivers/crypto/caam/caamalg.c        | 1832
-rw-r--r--	drivers/crypto/caam/compat.h         |    1
-rw-r--r--	drivers/crypto/caam/ctrl.c           |   30
-rw-r--r--	drivers/crypto/caam/desc_constr.h    |   58
-rw-r--r--	drivers/crypto/mv_cesa.c             |   12
-rw-r--r--	drivers/crypto/n2_core.c             |   33
-rw-r--r--	drivers/crypto/omap-sham.c           |  180
-rw-r--r--	drivers/crypto/talitos.c             |   47
-rw-r--r--	drivers/crypto/tegra-aes.c           | 1496
-rw-r--r--	drivers/crypto/tegra-aes.h           |  100
-rw-r--r--	drivers/crypto/tegra-se.c            | 2455
-rw-r--r--	drivers/crypto/tegra-se.h            |  235
15 files changed, 5926 insertions(+), 581 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e0b25de1e33..b6994751719 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -18,7 +18,7 @@ config CRYPTO_DEV_PADLOCK
 	  (so called VIA PadLock ACE, Advanced Cryptography Engine)
 	  that provides instructions for very fast cryptographic
 	  operations with supported algorithms.
 
 	  The instructions are used only when the CPU supports them.
 	  Otherwise software encryption is used.
 
@@ -292,4 +292,21 @@ config CRYPTO_DEV_S5P
 	  Select this to offload Samsung S5PV210 or S5PC110 from AES
 	  algorithms execution.
 
+config CRYPTO_DEV_TEGRA_AES
+	tristate "Support for TEGRA AES hw engine"
+	depends on ARCH_TEGRA_2x_SOC || ARCH_TEGRA_3x_SOC
+	select CRYPTO_AES
+	select TEGRA_ARB_SEMAPHORE
+	help
+	  TEGRA processors have AES module accelerator. Select this if you
+	  want to use the TEGRA module for AES algorithms.
+
+config CRYPTO_DEV_TEGRA_SE
+	tristate "Tegra SE driver for crypto algorithms"
+	depends on ARCH_TEGRA_3x_SOC
+	select CRYPTO_AES
+	help
+	  This option allows you to have support of Security Engine for crypto
+	  acceleration.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 53ea5015531..e244cfcdd50 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,3 +1,5 @@
+GCOV_PROFILE := y
+
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
@@ -13,3 +15,5 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
+obj-$(CONFIG_CRYPTO_DEV_TEGRA_SE) += tegra-se.o
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 18912521a7a..1d103f997dc 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -51,6 +51,7 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
 	union ce_io_threshold io_threshold;
 	u32 rand_num;
 	union ce_pe_dma_cfg pe_dma_cfg;
+	u32 device_ctrl;
 
 	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
 	/* setup pe dma, include reset sg, pdr and pe, then release reset */
@@ -84,7 +85,9 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
 	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
 	ring_ctrl.w = 0;
 	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
-	writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+	device_ctrl |= PPC4XX_DC_3DES_EN;
+	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
 	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
 	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
 	part_ring_size.w = 0;
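
The crypto4xx hunk above replaces a blind write of PPC4XX_DC_3DES_EN with a read-modify-write, so any bits already programmed in CRYPTO4XX_DEVICE_CTRL survive the update. A minimal sketch of the same pattern, assuming readl()/writel() from <linux/io.h>; the helper name is illustrative and not part of the patch:

    /* Read-modify-write an MMIO register: preserve unrelated bits,
     * set only the requested ones. */
    static inline void rmw_setbits32(void __iomem *addr, u32 set)
    {
    	u32 val = readl(addr);	/* fetch current register contents */

    	val |= set;		/* merge in the new bits */
    	writel(val, addr);	/* write the combined value back */
    }

With such a helper the hunk above would reduce to rmw_setbits32(dev->ce_base + CRYPTO4XX_DEVICE_CTRL, PPC4XX_DC_3DES_EN);
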
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 676d957c22b..4159265b453 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -62,10 +62,22 @@
 #define CAAM_MAX_IV_LENGTH		16
 
 /* length of descriptors text */
-#define DESC_AEAD_SHARED_TEXT_LEN	4
-#define DESC_AEAD_ENCRYPT_TEXT_LEN	21
-#define DESC_AEAD_DECRYPT_TEXT_LEN	24
-#define DESC_AEAD_GIVENCRYPT_TEXT_LEN	27
+#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
+
+#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 15 * CAAM_CMD_SZ)
+
+#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
+					 CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
 
 #ifdef DEBUG
 /* for print_hex_dumps with line references */
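
The DESC_* macros above budget shared-descriptor space in 4-byte command words (CAAM_CMD_SZ), so the driver can decide whether key material fits inline while the job descriptor plus shared descriptor still fit the 64-word descriptor buffer; the patch applies that test later as keys_fit_inline. A standalone sketch of the arithmetic, with illustrative constants mirroring the macros above (not a new API):

    #include <stdbool.h>

    #define CMD_SZ		4			/* one CAAM command word */
    #define DESC_BYTES_MAX	(CMD_SZ * 64)		/* 64-word h/w descriptor buffer */
    #define JOB_IO_LEN		(CMD_SZ * 3 + 8 * 3)	/* job header + three 8-byte pointers */

    /* mirrors: DESC_*_LEN + DESC_JOB_IO_LEN + keys <= CAAM_DESC_BYTES_MAX */
    static bool keys_fit_inline(unsigned int desc_text_len,
    				unsigned int split_key_pad_len,
    				unsigned int enckeylen)
    {
    	return desc_text_len + JOB_IO_LEN +
    	       split_key_pad_len + enckeylen <= DESC_BYTES_MAX;
    }
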
@@ -76,30 +88,366 @@
 #define debug(format, arg...)
 #endif
 
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+	u32 *jump_cmd, *uncond_jump_cmd;
+
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT);
+	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+	set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/*
+ * Wait for completion of class 1 key loading before allowing
+ * error propagation
+ */
+static inline void append_dec_shr_done(u32 *desc)
+{
+	u32 *jump_cmd;
+
+	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * For aead encrypt and decrypt, read iv for both classes
+ */
+static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+{
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+		   LDST_CLASS_1_CCB | ivsize);
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+}
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * If all data, including src (with assoc and iv) or dst (with iv only) are
+ * contiguous
+ */
+#define GIV_SRC_CONTIG		1
+#define GIV_DST_CONTIG		(1 << 1)
+
 /*
  * per-session context
  */
 struct caam_ctx {
 	struct device *jrdev;
-	u32 *sh_desc;
-	dma_addr_t shared_desc_phys;
+	u32 sh_desc_enc[DESC_MAX_USED_LEN];
+	u32 sh_desc_dec[DESC_MAX_USED_LEN];
+	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+	dma_addr_t sh_desc_enc_dma;
+	dma_addr_t sh_desc_dec_dma;
+	dma_addr_t sh_desc_givenc_dma;
 	u32 class1_alg_type;
 	u32 class2_alg_type;
 	u32 alg_op;
-	u8 *key;
-	dma_addr_t key_phys;
+	u8 key[CAAM_MAX_KEY_SIZE];
+	dma_addr_t key_dma;
 	unsigned int enckeylen;
 	unsigned int split_key_len;
 	unsigned int split_key_pad_len;
 	unsigned int authsize;
 };
 
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
+static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+			    int keys_fit_inline)
+{
+	if (keys_fit_inline) {
+		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+				  ctx->split_key_len, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		append_key_as_imm(desc, (void *)ctx->key +
+				  ctx->split_key_pad_len, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	} else {
+		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	}
+}
+
+static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+				  int keys_fit_inline)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	append_key_aead(desc, ctx, keys_fit_inline);
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Propagate errors from shared to job descriptor */
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+	struct aead_tfm *tfm = &aead->base.crt_aead;
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool keys_fit_inline = 0;
+	u32 *key_jump_cmd, *jump_cmd;
+	u32 geniv, moveiv;
+	u32 *desc;
+
+	if (!ctx->enckeylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = 1;
+
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/* assoclen + cryptlen = seqinlen - ivsize */
+	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+	aead_append_ld_iv(desc, tfm->ivsize);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = 1;
+
+	desc = ctx->sh_desc_dec;
+
+	/* aead_decrypt shared descriptor */
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	append_key_aead(desc, ctx, keys_fit_inline);
+
+	/* Only propagate error immediately if shared */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, key_jump_cmd);
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqinlen - ivsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	aead_append_ld_iv(desc, tfm->ivsize);
+
+	append_dec_op1(desc, ctx->class1_alg_type);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+	append_dec_shr_done(desc);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = 1;
+
+	/* aead_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO |
+		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy IV to class 1 context */
+	append_move(desc, MOVE_SRC_CLASS1CTX |
+		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Return to encryption */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* ivsize + cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/* assoclen = seqinlen - (ivsize + cryptlen) */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Copy iv from class 1 ctx to class 2 fifo */
+	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Will write ivsize + cryptlen */
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* No need to reload iv */
+	append_seq_fifo_load(desc, tfm->ivsize,
+			     FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+						 desc_bytes(desc),
+						 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc,
 			    unsigned int authsize)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 
 	ctx->authsize = authsize;
+	aead_set_sh_desc(authenc);
 
 	return 0;
 }
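
A note on the key handling above: append_key_aead() assumes a single ctx->key buffer that holds the padded MDHA split authentication key first, with the raw encryption key appended at split_key_pad_len, and the same layout serves both the inline (append_key_as_imm) and by-reference (append_key) paths. A minimal sketch of that layout arithmetic; the struct and helper names are illustrative, not part of the patch:

    /* Layout of ctx->key as consumed by append_key_aead():
     * [ 0 .. split_key_len )                  MDHA split authentication key
     * [ split_key_len .. split_key_pad_len )  padding
     * [ split_key_pad_len .. +enckeylen )     raw encryption key */
    struct key_layout {
    	unsigned int split_key_len;
    	unsigned int split_key_pad_len;
    	unsigned int enckeylen;
    };

    static unsigned int enc_key_offset(const struct key_layout *l)
    {
    	return l->split_key_pad_len;	/* enc key follows the padded split key */
    }

    static unsigned int total_key_bytes(const struct key_layout *l)
    {
    	return l->split_key_pad_len + l->enckeylen;	/* bytes to dma_map_single() */
    }
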
@@ -117,6 +465,7 @@ static void split_key_done(struct device *dev, u32 *desc, u32 err,
 #ifdef DEBUG
 	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
+
 	if (err) {
 		char tmp[CAAM_ERROR_STR_MAX];
 
@@ -220,73 +569,7 @@ static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
 	return ret;
 }
 
-static int build_sh_desc_ipsec(struct caam_ctx *ctx)
-{
-	struct device *jrdev = ctx->jrdev;
-	u32 *sh_desc;
-	u32 *jump_cmd;
-	bool keys_fit_inline = 0;
-
-	/*
-	 * largest Job Descriptor and its Shared Descriptor
-	 * must both fit into the 64-word Descriptor h/w Buffer
-	 */
-	if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
-	     DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
-	    ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = 1;
-
-	/* build shared descriptor for this session */
-	sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
-			  (keys_fit_inline ?
-			   ctx->split_key_pad_len + ctx->enckeylen :
-			   CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
-	if (!sh_desc) {
-		dev_err(jrdev, "could not allocate shared descriptor\n");
-		return -ENOMEM;
-	}
-
-	init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);
-
-	jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
-			       JUMP_COND_SHRD | JUMP_COND_SELF);
-
-	/*
-	 * process keys, starting with class 2/authentication.
-	 */
-	if (keys_fit_inline) {
-		append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
-				  ctx->split_key_len,
-				  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
-
-		append_key_as_imm(sh_desc, (void *)ctx->key +
-				  ctx->split_key_pad_len, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	} else {
-		append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 |
-			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
-		append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len,
-			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	}
-
-	/* update jump cmd now that we are at the jump target */
-	set_jump_tgt_here(sh_desc, jump_cmd);
-
-	ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
-					       desc_bytes(sh_desc),
-					       DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		kfree(sh_desc);
-		return -ENOMEM;
-	}
-
-	ctx->sh_desc = sh_desc;
-
-	return 0;
-}
-
-static int aead_authenc_setkey(struct crypto_aead *aead,
+static int aead_setkey(struct crypto_aead *aead,
 		       const u8 *key, unsigned int keylen)
 {
 	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
@@ -326,27 +609,19 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
 	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
-	ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
-			   GFP_KERNEL | GFP_DMA);
-	if (!ctx->key) {
-		dev_err(jrdev, "could not allocate key output memory\n");
-		return -ENOMEM;
-	}
 
 	ret = gen_split_key(ctx, key, authkeylen);
 	if (ret) {
-		kfree(ctx->key);
 		goto badkey;
 	}
 
 	/* postpend encryption key to auth split key */
 	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
 
-	ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
 				      enckeylen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_phys)) {
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
-		kfree(ctx->key);
 		return -ENOMEM;
 	}
 #ifdef DEBUG
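
With the key now embedded in the context as a fixed array (u8 key[CAAM_MAX_KEY_SIZE]), setkey no longer allocates or frees a buffer; it only maps it for DMA and must test the returned handle with dma_mapping_error() before use. A minimal sketch of that map-and-check idiom, assuming <linux/dma-mapping.h>; the function name is illustrative:

    static int map_key(struct device *dev, void *key, size_t len,
    		   dma_addr_t *handle)
    {
    	*handle = dma_map_single(dev, key, len, DMA_TO_DEVICE);
    	if (dma_mapping_error(dev, *handle)) {
    		/* mapping failed; the handle is unusable */
    		dev_err(dev, "unable to map key i/o memory\n");
    		return -ENOMEM;	/* caller keeps ownership of the buffer */
    	}
    	return 0;
    }
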
@@ -357,11 +632,10 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
 
 	ctx->enckeylen = enckeylen;
 
-	ret = build_sh_desc_ipsec(ctx);
+	ret = aead_set_sh_desc(aead);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
 				 enckeylen, DMA_TO_DEVICE);
-		kfree(ctx->key);
 	}
 
 	return ret;
@@ -370,6 +644,119 @@ badkey:
 	return -EINVAL;
 }
 
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+	struct device *jrdev = ctx->jrdev;
+	int ret = 0;
+	u32 *key_jump_cmd, *jump_cmd;
+	u32 *desc;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+				      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
+		dev_err(jrdev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+	ctx->enckeylen = keylen;
+
+	/* ablkcipher_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Propagate errors from shared to job descriptor */
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+
+	/* Load iv */
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+		   LDST_CLASS_1_CCB | tfm->ivsize);
+
+	/* Load operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+	/* ablkcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	/* For aead, only propagate error immediately if shared */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, key_jump_cmd);
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	/* load IV */
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+		   LDST_CLASS_1_CCB | tfm->ivsize);
+
+	/* Choose operation */
+	append_dec_op1(desc, ctx->class1_alg_type);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	/* Wait for key to load before allowing propagating error */
+	append_dec_shr_done(desc);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return ret;
+}
+
 struct link_tbl_entry {
 	u64 ptr;
 	u32 len;
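
The hunk above builds one shared descriptor per direction at setkey time and keeps each DMA handle in the context, so per-request work shrinks to pointing a job descriptor at the prebuilt one. A minimal sketch of that selection, reusing the struct caam_ctx fields introduced earlier; the helper name is illustrative, not part of the patch:

    /* Pick the prebuilt shared descriptor for the requested direction;
     * init_job_desc_shared() (used by the init_*_job() helpers below)
     * then references it from the job descriptor header. */
    static dma_addr_t select_sh_desc_dma(struct caam_ctx *ctx, bool encrypt)
    {
    	return encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
    }
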
@@ -379,64 +766,109 @@ struct link_tbl_entry {
 };
 
 /*
- * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * aead_edesc - s/w-extended aead descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @link_tbl_bytes: length of dma mapped link_tbl space
  * @link_tbl_dma: bus physical mapped address of h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  */
-struct ipsec_esp_edesc {
+struct aead_edesc {
 	int assoc_nents;
 	int src_nents;
 	int dst_nents;
+	dma_addr_t iv_dma;
 	int link_tbl_bytes;
 	dma_addr_t link_tbl_dma;
 	struct link_tbl_entry *link_tbl;
 	u32 hw_desc[0];
 };
 
-static void ipsec_esp_unmap(struct device *dev,
-			    struct ipsec_esp_edesc *edesc,
-			    struct aead_request *areq)
-{
-	dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ * @link_tbl_bytes: length of dma mapped link_tbl space
+ * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct ablkcipher_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int link_tbl_bytes;
+	dma_addr_t link_tbl_dma;
+	struct link_tbl_entry *link_tbl;
+	u32 hw_desc[0];
+};
 
-	if (unlikely(areq->dst != areq->src)) {
-		dma_unmap_sg(dev, areq->src, edesc->src_nents,
-			     DMA_TO_DEVICE);
-		dma_unmap_sg(dev, areq->dst, edesc->dst_nents,
-			     DMA_FROM_DEVICE);
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+		       struct scatterlist *dst, int src_nents, int dst_nents,
+		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
+		       int link_tbl_bytes)
+{
+	if (unlikely(dst != src)) {
+		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 	} else {
-		dma_unmap_sg(dev, areq->src, edesc->src_nents,
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 	}
 
-	if (edesc->link_tbl_bytes)
-		dma_unmap_single(dev, edesc->link_tbl_dma,
-				 edesc->link_tbl_bytes,
+	if (iv_dma)
+		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+	if (link_tbl_bytes)
+		dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
 				 DMA_TO_DEVICE);
 }
 
-/*
- * ipsec_esp descriptor callbacks
- */
-static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_unmap(struct device *dev,
+		       struct aead_edesc *edesc,
+		       struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	int ivsize = crypto_aead_ivsize(aead);
+
+	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+
+	caam_unmap(dev, req->src, req->dst,
+		   edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+		   edesc->link_tbl_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+			     struct ablkcipher_edesc *edesc,
+			     struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	caam_unmap(dev, req->src, req->dst,
+		   edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+		   edesc->link_tbl_bytes);
+}
+
+static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 				   void *context)
 {
-	struct aead_request *areq = context;
-	struct ipsec_esp_edesc *edesc;
+	struct aead_request *req = context;
+	struct aead_edesc *edesc;
 #ifdef DEBUG
-	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
-	int ivsize = crypto_aead_ivsize(aead);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ivsize = crypto_aead_ivsize(aead);
 
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
-	edesc = (struct ipsec_esp_edesc *)((char *)desc -
-					   offsetof(struct ipsec_esp_edesc, hw_desc));
+
+	edesc = (struct aead_edesc *)((char *)desc -
+				      offsetof(struct aead_edesc, hw_desc));
 
 	if (err) {
 		char tmp[CAAM_ERROR_STR_MAX];
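
A note on the callback convention above: the job-ring completion handlers receive only the hardware descriptor pointer and recover the enclosing software-extended descriptor by subtracting offsetof(..., hw_desc), the same arithmetic container_of() performs. A minimal sketch, assuming the struct aead_edesc definition from the hunk above; the helper name is illustrative:

    static struct aead_edesc *desc_to_edesc(u32 *desc)
    {
    	/* hw_desc[] is the last member of struct aead_edesc, so walking
    	 * back from the h/w descriptor yields the containing struct */
    	return (struct aead_edesc *)((char *)desc -
    				     offsetof(struct aead_edesc, hw_desc));
    }
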
@@ -444,39 +876,50 @@ static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
 	}
 
-	ipsec_esp_unmap(jrdev, edesc, areq);
+	aead_unmap(jrdev, edesc, req);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
-		       areq->assoclen , 1);
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+		       req->assoclen , 1);
 	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
 		       edesc->src_nents ? 100 : ivsize, 1);
 	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
-		       edesc->src_nents ? 100 : areq->cryptlen +
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->src_nents ? 100 : req->cryptlen +
 		       ctx->authsize + 4, 1);
 #endif
 
 	kfree(edesc);
 
-	aead_request_complete(areq, err);
+	aead_request_complete(req, err);
 }
 
-static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 				   void *context)
 {
-	struct aead_request *areq = context;
-	struct ipsec_esp_edesc *edesc;
+	struct aead_request *req = context;
+	struct aead_edesc *edesc;
 #ifdef DEBUG
-	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ivsize = crypto_aead_ivsize(aead);
 
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
-	edesc = (struct ipsec_esp_edesc *)((char *)desc -
-					   offsetof(struct ipsec_esp_edesc, hw_desc));
+
+	edesc = (struct aead_edesc *)((char *)desc -
+				      offsetof(struct aead_edesc, hw_desc));
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+		       req->cryptlen, 1);
+#endif
 
 	if (err) {
 		char tmp[CAAM_ERROR_STR_MAX];
@@ -484,7 +927,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
 	}
 
-	ipsec_esp_unmap(jrdev, edesc, areq);
+	aead_unmap(jrdev, edesc, req);
 
 	/*
 	 * verify hw auth check passed else return -EBADMSG
@@ -495,255 +938,413 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
-		       sizeof(struct iphdr) + areq->assoclen +
-		       ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
+		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
+		       sizeof(struct iphdr) + req->assoclen +
+		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
 		       ctx->authsize + 36, 1);
 	if (!err && edesc->link_tbl_bytes) {
-		struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
+		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
 		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
 			       sg->length + ctx->authsize + 16, 1);
 	}
 #endif
+
 	kfree(edesc);
 
-	aead_request_complete(areq, err);
+	aead_request_complete(req, err);
+}
+
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				    void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = (struct ablkcipher_edesc *)((char *)desc -
+		 offsetof(struct ablkcipher_edesc, hw_desc));
+
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+	ablkcipher_unmap(jrdev, edesc, req);
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				    void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = (struct ablkcipher_edesc *)((char *)desc -
+		 offsetof(struct ablkcipher_edesc, hw_desc));
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+	ablkcipher_unmap(jrdev, edesc, req);
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
+static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
+			       dma_addr_t dma, u32 len, u32 offset)
+{
+	link_tbl_ptr->ptr = dma;
+	link_tbl_ptr->len = len;
+	link_tbl_ptr->reserved = 0;
+	link_tbl_ptr->buf_pool_id = 0;
+	link_tbl_ptr->offset = offset;
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
+		       sizeof(struct link_tbl_entry), 1);
+#endif
 }
 
 /*
  * convert scatterlist to h/w link table format
- * scatterlist must have been previously dma mapped
+ * but does not have final bit; instead, returns last entry
  */
-static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
-			   struct link_tbl_entry *link_tbl_ptr, u32 offset)
+static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
+					     int sg_count, struct link_tbl_entry
+					     *link_tbl_ptr, u32 offset)
 {
 	while (sg_count) {
-		link_tbl_ptr->ptr = sg_dma_address(sg);
-		link_tbl_ptr->len = sg_dma_len(sg);
-		link_tbl_ptr->reserved = 0;
-		link_tbl_ptr->buf_pool_id = 0;
-		link_tbl_ptr->offset = offset;
+		sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
+				   sg_dma_len(sg), offset);
 		link_tbl_ptr++;
 		sg = sg_next(sg);
 		sg_count--;
 	}
+	return link_tbl_ptr - 1;
+}
 
-	/* set Final bit (marks end of link table) */
-	link_tbl_ptr--;
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
+				struct link_tbl_entry *link_tbl_ptr, u32 offset)
+{
+	link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
 	link_tbl_ptr->len |= 0x40000000;
 }
 
 /*
- * fill in and submit ipsec_esp job descriptor
+ * Fill in aead job descriptor
  */
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
-		     u32 encrypt,
-		     void (*callback) (struct device *dev, u32 *desc,
-				       u32 err, void *context))
+static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
+			  struct aead_edesc *edesc,
+			  struct aead_request *req,
+			  bool all_contig, bool encrypt)
 {
-	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	struct device *jrdev = ctx->jrdev;
-	u32 *desc = edesc->hw_desc, options;
-	int ret, sg_count, assoc_sg_count;
 	int ivsize = crypto_aead_ivsize(aead);
 	int authsize = ctx->authsize;
-	dma_addr_t ptr, dst_dma, src_dma;
-#ifdef DEBUG
-	u32 *sh_desc = ctx->sh_desc;
+	u32 *desc = edesc->hw_desc;
+	u32 out_options = 0, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, link_tbl_index = 0;
 
+#ifdef DEBUG
 	debug("assoclen %d cryptlen %d authsize %d\n",
-	      areq->assoclen, areq->cryptlen, authsize);
+	      req->assoclen, req->cryptlen, authsize);
 	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
-		       areq->assoclen , 1);
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+		       req->assoclen , 1);
 	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 		       edesc->src_nents ? 100 : ivsize, 1);
 	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
-		       edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->src_nents ? 100 : req->cryptlen, 1);
 	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
 		       desc_bytes(sh_desc), 1);
 #endif
-	assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
-				    DMA_TO_DEVICE);
-	if (areq->src == areq->dst)
-		sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
-				      DMA_BIDIRECTIONAL);
-	else
-		sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
-				      DMA_TO_DEVICE);
 
-	/* start auth operation */
-	append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
-			 (encrypt ? : OP_ALG_ICV_ON));
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-	/* Load FIFO with data for Class 2 CHA */
-	options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
-	if (!edesc->assoc_nents) {
-		ptr = sg_dma_address(areq->assoc);
+	if (all_contig) {
+		src_dma = sg_dma_address(req->assoc);
+		in_options = 0;
 	} else {
-		sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
-			       edesc->link_tbl, 0);
-		ptr = edesc->link_tbl_dma;
-		options |= LDST_SGF;
+		src_dma = edesc->link_tbl_dma;
+		link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
+				  (edesc->src_nents ? : 1);
+		in_options = LDST_SGF;
 	}
-	append_fifo_load(desc, ptr, areq->assoclen, options);
-
-	/* copy iv from cipher/class1 input context to class2 infifo */
-	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
-
-	if (!encrypt) {
-		u32 *jump_cmd, *uncond_jump_cmd;
-
-		/* JUMP if shared */
-		jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+	if (encrypt)
+		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+				  req->cryptlen - authsize, in_options);
+	else
+		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+				  req->cryptlen, in_options);
 
-		/* start class 1 (cipher) operation, non-shared version */
-		append_operation(desc, ctx->class1_alg_type |
-				 OP_ALG_AS_INITFINAL);
+	if (likely(req->src == req->dst)) {
+		if (all_contig) {
+			dst_dma = sg_dma_address(req->src);
+		} else {
+			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+				  ((edesc->assoc_nents ? : 1) + 1);
+			out_options = LDST_SGF;
+		}
+	} else {
+		if (!edesc->dst_nents) {
+			dst_dma = sg_dma_address(req->dst);
+		} else {
+			dst_dma = edesc->link_tbl_dma +
+				  link_tbl_index *
+				  sizeof(struct link_tbl_entry);
+			out_options = LDST_SGF;
+		}
+	}
+	if (encrypt)
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+	else
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
+				   out_options);
+}
 
-		uncond_jump_cmd = append_jump(desc, 0);
+/*
+ * Fill in aead givencrypt job descriptor
+ */
+static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
+			      struct aead_edesc *edesc,
+			      struct aead_request *req,
+			      int contig)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ivsize = crypto_aead_ivsize(aead);
+	int authsize = ctx->authsize;
+	u32 *desc = edesc->hw_desc;
+	u32 out_options = 0, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, link_tbl_index = 0;
 
-		set_jump_tgt_here(desc, jump_cmd);
+#ifdef DEBUG
+	debug("assoclen %d cryptlen %d authsize %d\n",
+	      req->assoclen, req->cryptlen, authsize);
+	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+		       req->assoclen , 1);
+	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+		       desc_bytes(sh_desc), 1);
+#endif
 
-		/* start class 1 (cipher) operation, shared version */
-		append_operation(desc, ctx->class1_alg_type |
-				 OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
-		set_jump_tgt_here(desc, uncond_jump_cmd);
-	} else
-		append_operation(desc, ctx->class1_alg_type |
-				 OP_ALG_AS_INITFINAL | encrypt);
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-	/* load payload & instruct to class2 to snoop class 1 if encrypting */
-	options = 0;
-	if (!edesc->src_nents) {
-		src_dma = sg_dma_address(areq->src);
+	if (contig & GIV_SRC_CONTIG) {
+		src_dma = sg_dma_address(req->assoc);
+		in_options = 0;
 	} else {
-		sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
-			       edesc->assoc_nents, 0);
-		src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
-			  sizeof(struct link_tbl_entry);
-		options |= LDST_SGF;
+		src_dma = edesc->link_tbl_dma;
+		link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
+		in_options = LDST_SGF;
 	}
-	append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
-	append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
-			     FIFOLD_TYPE_LASTBOTH |
-			     (encrypt ? FIFOLD_TYPE_MSG1OUT2
-			      : FIFOLD_TYPE_MSG));
-
-	/* specify destination */
-	if (areq->src == areq->dst) {
-		dst_dma = src_dma;
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+			  req->cryptlen - authsize, in_options);
+
+	if (contig & GIV_DST_CONTIG) {
+		dst_dma = edesc->iv_dma;
 	} else {
-		sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
-				      DMA_FROM_DEVICE);
-		if (!edesc->dst_nents) {
-			dst_dma = sg_dma_address(areq->dst);
-			options = 0;
+		if (likely(req->src == req->dst)) {
+			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+				  edesc->assoc_nents;
+			out_options = LDST_SGF;
 		} else {
-			sg_to_link_tbl(areq->dst, edesc->dst_nents,
-				       edesc->link_tbl + edesc->assoc_nents +
-				       edesc->src_nents, 0);
-			dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
-					  edesc->src_nents) *
+			dst_dma = edesc->link_tbl_dma +
+				  link_tbl_index *
 				  sizeof(struct link_tbl_entry);
-			options = LDST_SGF;
+			out_options = LDST_SGF;
 		}
 	}
-	append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
-	append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
 
-	/* ICV */
-	if (encrypt)
-		append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
-				 LDST_SRCDST_BYTE_CONTEXT);
-	else
-		append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
-				     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+}
+
+/*
+ * Fill in ablkcipher job descriptor
+ */
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+				struct ablkcipher_edesc *edesc,
+				struct ablkcipher_request *req,
+				bool iv_contig)
+{
1224 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
1225 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
1226 | u32 *desc = edesc->hw_desc; | ||
1227 | u32 out_options = 0, in_options; | ||
1228 | dma_addr_t dst_dma, src_dma; | ||
1229 | int len, link_tbl_index = 0; | ||
667 | 1230 | ||
668 | #ifdef DEBUG | 1231 | #ifdef DEBUG |
669 | debug("job_desc_len %d\n", desc_len(desc)); | 1232 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", |
670 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 1233 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
671 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1); | 1234 | ivsize, 1); |
672 | print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ", | 1235 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", |
673 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, | 1236 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
674 | edesc->link_tbl_bytes, 1); | 1237 | edesc->src_nents ? 100 : req->nbytes, 1); |
675 | #endif | 1238 | #endif |
676 | 1239 | ||
677 | ret = caam_jr_enqueue(jrdev, desc, callback, areq); | 1240 | len = desc_len(sh_desc); |
678 | if (!ret) | 1241 | init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); |
679 | ret = -EINPROGRESS; | 1242 | |
680 | else { | 1243 | if (iv_contig) { |
681 | ipsec_esp_unmap(jrdev, edesc, areq); | 1244 | src_dma = edesc->iv_dma; |
682 | kfree(edesc); | 1245 | in_options = 0; |
1246 | } else { | ||
1247 | src_dma = edesc->link_tbl_dma; | ||
1248 | link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents; | ||
1249 | in_options = LDST_SGF; | ||
683 | } | 1250 | } |
1251 | append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); | ||
684 | 1252 | ||
685 | return ret; | 1253 | if (likely(req->src == req->dst)) { |
1254 | if (!edesc->src_nents && iv_contig) { | ||
1255 | dst_dma = sg_dma_address(req->src); | ||
1256 | } else { | ||
1257 | dst_dma = edesc->link_tbl_dma + | ||
1258 | sizeof(struct link_tbl_entry); | ||
1259 | out_options = LDST_SGF; | ||
1260 | } | ||
1261 | } else { | ||
1262 | if (!edesc->dst_nents) { | ||
1263 | dst_dma = sg_dma_address(req->dst); | ||
1264 | } else { | ||
1265 | dst_dma = edesc->link_tbl_dma + | ||
1266 | link_tbl_index * sizeof(struct link_tbl_entry); | ||
1267 | out_options = LDST_SGF; | ||
1268 | } | ||
1269 | } | ||
1270 | append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); | ||
686 | } | 1271 | } |
687 | 1272 | ||
688 | /* | 1273 | /* |
689 | * derive number of elements in scatterlist | 1274 | * derive number of elements in scatterlist |
690 | */ | 1275 | */ |
691 | static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) | 1276 | static int sg_count(struct scatterlist *sg_list, int nbytes) |
692 | { | 1277 | { |
693 | struct scatterlist *sg = sg_list; | 1278 | struct scatterlist *sg = sg_list; |
694 | int sg_nents = 0; | 1279 | int sg_nents = 0; |
695 | 1280 | ||
696 | *chained = 0; | ||
697 | while (nbytes > 0) { | 1281 | while (nbytes > 0) { |
698 | sg_nents++; | 1282 | sg_nents++; |
699 | nbytes -= sg->length; | 1283 | nbytes -= sg->length; |
700 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | 1284 | if (!sg_is_last(sg) && (sg + 1)->length == 0) |
701 | *chained = 1; | 1285 | BUG(); /* chaining not supported */ |
702 | sg = scatterwalk_sg_next(sg); | 1286 | sg = scatterwalk_sg_next(sg); |
703 | } | 1287 | } |
704 | 1288 | ||
1289 | if (likely(sg_nents == 1)) | ||
1290 | return 0; | ||
1291 | |||
705 | return sg_nents; | 1292 | return sg_nents; |
706 | } | 1293 | } |
707 | 1294 | ||
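The reworked sg_count() returns 0 for a single-entry list so callers can treat zero as "contiguous, no link table needed". A stand-alone model of that convention, using a hypothetical fake_sg type instead of the kernel's scatterlist so it can be compiled and run in user space:

	#include <stdio.h>

	/* Hypothetical stand-in for struct scatterlist: one length per segment. */
	struct fake_sg {
		unsigned int length;
	};

	/* Same counting rule as the reworked sg_count() above: walk entries
	 * until nbytes is consumed; report a single-entry list as 0 so the
	 * caller can skip building a link table. */
	static int fake_sg_count(const struct fake_sg *sg, int nbytes)
	{
		int nents = 0;

		while (nbytes > 0) {
			nents++;
			nbytes -= sg->length;
			sg++;
		}
		return nents == 1 ? 0 : nents;
	}

	int main(void)
	{
		struct fake_sg one[] = { { 64 } };
		struct fake_sg three[] = { { 16 }, { 16 }, { 32 } };

		/* prints "0 3": a single segment counts as contiguous */
		printf("%d %d\n", fake_sg_count(one, 64), fake_sg_count(three, 64));
		return 0;
	}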
708 | /* | 1295 | /* |
709 | * allocate and map the ipsec_esp extended descriptor | 1296 | * allocate and map the aead extended descriptor |
710 | */ | 1297 | */ |
711 | static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | 1298 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, |
712 | int desc_bytes) | 1299 | int desc_bytes, bool *all_contig_ptr) |
713 | { | 1300 | { |
714 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 1301 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
715 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1302 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
716 | struct device *jrdev = ctx->jrdev; | 1303 | struct device *jrdev = ctx->jrdev; |
717 | gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1304 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
718 | GFP_ATOMIC; | 1305 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
719 | int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes; | 1306 | int assoc_nents, src_nents, dst_nents = 0; |
720 | struct ipsec_esp_edesc *edesc; | 1307 | struct aead_edesc *edesc; |
721 | 1308 | dma_addr_t iv_dma = 0; | |
722 | assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained); | 1309 | int sgc; |
723 | BUG_ON(chained); | 1310 | bool all_contig = true; |
724 | if (likely(assoc_nents == 1)) | 1311 | int ivsize = crypto_aead_ivsize(aead); |
725 | assoc_nents = 0; | 1312 | int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; |
726 | 1313 | ||
727 | src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize, | 1314 | assoc_nents = sg_count(req->assoc, req->assoclen); |
728 | &chained); | 1315 | src_nents = sg_count(req->src, req->cryptlen); |
729 | BUG_ON(chained); | 1316 | |
730 | if (src_nents == 1) | 1317 | if (unlikely(req->dst != req->src)) |
731 | src_nents = 0; | 1318 | dst_nents = sg_count(req->dst, req->cryptlen); |
732 | 1319 | ||
733 | if (unlikely(areq->dst != areq->src)) { | 1320 | sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, |
734 | dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize, | 1321 | DMA_BIDIRECTIONAL); |
735 | &chained); | 1322 | if (likely(req->src == req->dst)) { |
736 | BUG_ON(chained); | 1323 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, |
737 | if (dst_nents == 1) | 1324 | DMA_BIDIRECTIONAL); |
738 | dst_nents = 0; | 1325 | } else { |
1326 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1327 | DMA_TO_DEVICE); | ||
1328 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | ||
1329 | DMA_FROM_DEVICE); | ||
739 | } | 1330 | } |
740 | 1331 | ||
741 | link_tbl_bytes = (assoc_nents + src_nents + dst_nents) * | 1332 | /* Check if data are contiguous */ |
742 | sizeof(struct link_tbl_entry); | 1333 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); |
743 | debug("link_tbl_bytes %d\n", link_tbl_bytes); | 1334 | if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != |
1335 | iv_dma || src_nents || iv_dma + ivsize != | ||
1336 | sg_dma_address(req->src)) { | ||
1337 | all_contig = false; | ||
1338 | assoc_nents = assoc_nents ? : 1; | ||
1339 | src_nents = src_nents ? : 1; | ||
1340 | link_tbl_len = assoc_nents + 1 + src_nents; | ||
1341 | } | ||
1342 | link_tbl_len += dst_nents; | ||
1343 | |||
1344 | link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); | ||
744 | 1345 | ||
745 | /* allocate space for base edesc and hw desc commands, link tables */ | 1346 | /* allocate space for base edesc and hw desc commands, link tables */ |
746 | edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes + | 1347 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + |
747 | link_tbl_bytes, GFP_DMA | flags); | 1348 | link_tbl_bytes, GFP_DMA | flags); |
748 | if (!edesc) { | 1349 | if (!edesc) { |
749 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1350 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
@@ -753,142 +1354,450 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | |||
753 | edesc->assoc_nents = assoc_nents; | 1354 | edesc->assoc_nents = assoc_nents; |
754 | edesc->src_nents = src_nents; | 1355 | edesc->src_nents = src_nents; |
755 | edesc->dst_nents = dst_nents; | 1356 | edesc->dst_nents = dst_nents; |
756 | edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) + | 1357 | edesc->iv_dma = iv_dma; |
1358 | edesc->link_tbl_bytes = link_tbl_bytes; | ||
1359 | edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + | ||
757 | desc_bytes; | 1360 | desc_bytes; |
758 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | 1361 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, |
759 | link_tbl_bytes, DMA_TO_DEVICE); | 1362 | link_tbl_bytes, DMA_TO_DEVICE); |
760 | edesc->link_tbl_bytes = link_tbl_bytes; | 1363 | *all_contig_ptr = all_contig; |
1364 | |||
1365 | link_tbl_index = 0; | ||
1366 | if (!all_contig) { | ||
1367 | sg_to_link_tbl(req->assoc, | ||
1368 | (assoc_nents ? : 1), | ||
1369 | edesc->link_tbl + | ||
1370 | link_tbl_index, 0); | ||
1371 | link_tbl_index += assoc_nents ? : 1; | ||
1372 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | ||
1373 | iv_dma, ivsize, 0); | ||
1374 | link_tbl_index += 1; | ||
1375 | sg_to_link_tbl_last(req->src, | ||
1376 | (src_nents ? : 1), | ||
1377 | edesc->link_tbl + | ||
1378 | link_tbl_index, 0); | ||
1379 | link_tbl_index += src_nents ? : 1; | ||
1380 | } | ||
1381 | if (dst_nents) { | ||
1382 | sg_to_link_tbl_last(req->dst, dst_nents, | ||
1383 | edesc->link_tbl + link_tbl_index, 0); | ||
1384 | } | ||
761 | 1385 | ||
762 | return edesc; | 1386 | return edesc; |
763 | } | 1387 | } |
764 | 1388 | ||
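The link table built by aead_edesc_alloc() has a fixed shape: assoc segments, then one IV entry, then src segments, with any separate dst segments appended at the end. A sizing helper that restates the rule (a simplified sketch of the logic above, not code from the patch):

	/* Entry count for the aead link table; the "?: 1" mirrors the
	 * convention that sg_count() reports a contiguous list as 0 but it
	 * still occupies one link-table entry once a table is needed. */
	static inline int aead_link_tbl_entries(int assoc_nents, int src_nents,
						int dst_nents, bool all_contig)
	{
		int n = 0;

		if (!all_contig)
			n = (assoc_nents ? : 1) + 1 /* IV */ + (src_nents ? : 1);
		return n + dst_nents;
	}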
765 | static int aead_authenc_encrypt(struct aead_request *areq) | 1389 | static int aead_encrypt(struct aead_request *req) |
766 | { | 1390 | { |
767 | struct ipsec_esp_edesc *edesc; | 1391 | struct aead_edesc *edesc; |
768 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 1392 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
769 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1393 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
770 | struct device *jrdev = ctx->jrdev; | 1394 | struct device *jrdev = ctx->jrdev; |
771 | int ivsize = crypto_aead_ivsize(aead); | 1395 | bool all_contig; |
772 | u32 *desc; | 1396 | u32 *desc; |
773 | dma_addr_t iv_dma; | 1397 | int ret = 0; |
1398 | |||
1399 | req->cryptlen += ctx->authsize; | ||
774 | 1400 | ||
775 | /* allocate extended descriptor */ | 1401 | /* allocate extended descriptor */ |
776 | edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN * | 1402 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * |
777 | CAAM_CMD_SZ); | 1403 | CAAM_CMD_SZ, &all_contig); |
778 | if (IS_ERR(edesc)) | 1404 | if (IS_ERR(edesc)) |
779 | return PTR_ERR(edesc); | 1405 | return PTR_ERR(edesc); |
780 | 1406 | ||
781 | desc = edesc->hw_desc; | 1407 | /* Create and submit job descriptor */ |
782 | 1408 | init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, | |
783 | /* insert shared descriptor pointer */ | 1409 | all_contig, true); |
784 | init_job_desc_shared(desc, ctx->shared_desc_phys, | 1410 | #ifdef DEBUG |
785 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | 1411 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", |
786 | 1412 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | |
787 | iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE); | 1413 | desc_bytes(edesc->hw_desc), 1); |
788 | /* check dma error */ | 1414 | #endif |
789 | 1415 | ||
790 | append_load(desc, iv_dma, ivsize, | 1416 | desc = edesc->hw_desc; |
791 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | 1417 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); |
1418 | if (!ret) { | ||
1419 | ret = -EINPROGRESS; | ||
1420 | } else { | ||
1421 | aead_unmap(jrdev, edesc, req); | ||
1422 | kfree(edesc); | ||
1423 | } | ||
792 | 1424 | ||
793 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | 1425 | return ret; |
794 | } | 1426 | } |
795 | 1427 | ||
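Every request entry point from here on ends with the same submit-or-roll-back tail. A hedged sketch of that idiom as a helper (caam_submit_aead() is hypothetical; caam_jr_enqueue() and aead_unmap() are the functions used above):

	static int caam_submit_aead(struct device *jrdev, struct aead_edesc *edesc,
				    struct aead_request *req,
				    void (*done)(struct device *, u32 *, u32, void *))
	{
		int ret = caam_jr_enqueue(jrdev, edesc->hw_desc, done, req);

		if (!ret)
			return -EINPROGRESS;	/* callback will complete req */

		/* enqueue failed: the descriptor never ran, so roll back now */
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ret;
	}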
796 | static int aead_authenc_decrypt(struct aead_request *req) | 1428 | static int aead_decrypt(struct aead_request *req) |
797 | { | 1429 | { |
1430 | struct aead_edesc *edesc; | ||
798 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1431 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
799 | int ivsize = crypto_aead_ivsize(aead); | ||
800 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1432 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
801 | struct device *jrdev = ctx->jrdev; | 1433 | struct device *jrdev = ctx->jrdev; |
802 | struct ipsec_esp_edesc *edesc; | 1434 | bool all_contig; |
803 | u32 *desc; | 1435 | u32 *desc; |
804 | dma_addr_t iv_dma; | 1436 | int ret = 0; |
805 | |||
806 | req->cryptlen -= ctx->authsize; | ||
807 | 1437 | ||
808 | /* allocate extended descriptor */ | 1438 | /* allocate extended descriptor */ |
809 | edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN * | 1439 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * |
810 | CAAM_CMD_SZ); | 1440 | CAAM_CMD_SZ, &all_contig); |
811 | if (IS_ERR(edesc)) | 1441 | if (IS_ERR(edesc)) |
812 | return PTR_ERR(edesc); | 1442 | return PTR_ERR(edesc); |
813 | 1443 | ||
1444 | #ifdef DEBUG | ||
1445 | print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ", | ||
1446 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
1447 | req->cryptlen, 1); | ||
1448 | #endif | ||
1449 | |||
1450 | /* Create and submit job descriptor */ | ||
1451 | init_aead_job(ctx->sh_desc_dec, | ||
1452 | ctx->sh_desc_dec_dma, edesc, req, all_contig, false); | ||
1453 | #ifdef DEBUG | ||
1454 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", | ||
1455 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
1456 | desc_bytes(edesc->hw_desc), 1); | ||
1457 | #endif | ||
1458 | |||
814 | desc = edesc->hw_desc; | 1459 | desc = edesc->hw_desc; |
1460 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); | ||
1461 | if (!ret) { | ||
1462 | ret = -EINPROGRESS; | ||
1463 | } else { | ||
1464 | aead_unmap(jrdev, edesc, req); | ||
1465 | kfree(edesc); | ||
1466 | } | ||
815 | 1467 | ||
816 | /* insert shared descriptor pointer */ | 1468 | return ret; |
817 | init_job_desc_shared(desc, ctx->shared_desc_phys, | 1469 | } |
818 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | ||
819 | 1470 | ||
820 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); | 1471 | /* |
821 | /* check dma error */ | 1472 | * allocate and map the aead extended descriptor for aead givencrypt |
1473 | */ | ||
1474 | static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | ||
1475 | *greq, int desc_bytes, | ||
1476 | u32 *contig_ptr) | ||
1477 | { | ||
1478 | struct aead_request *req = &greq->areq; | ||
1479 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
1480 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
1481 | struct device *jrdev = ctx->jrdev; | ||
1482 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1483 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
1484 | int assoc_nents, src_nents, dst_nents = 0; | ||
1485 | struct aead_edesc *edesc; | ||
1486 | dma_addr_t iv_dma = 0; | ||
1487 | int sgc; | ||
1488 | u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; | ||
1489 | int ivsize = crypto_aead_ivsize(aead); | ||
1490 | int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; | ||
1491 | |||
1492 | assoc_nents = sg_count(req->assoc, req->assoclen); | ||
1493 | src_nents = sg_count(req->src, req->cryptlen); | ||
822 | 1494 | ||
823 | append_load(desc, iv_dma, ivsize, | 1495 | if (unlikely(req->dst != req->src)) |
824 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | 1496 | dst_nents = sg_count(req->dst, req->cryptlen); |
825 | 1497 | ||
826 | return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done); | 1498 | sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, |
1499 | DMA_BIDIRECTIONAL); | ||
1500 | if (likely(req->src == req->dst)) { | ||
1501 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1502 | DMA_BIDIRECTIONAL); | ||
1503 | } else { | ||
1504 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1505 | DMA_TO_DEVICE); | ||
1506 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | ||
1507 | DMA_FROM_DEVICE); | ||
1508 | } | ||
1509 | |||
1510 | /* Check if data are contiguous */ | ||
1511 | iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); | ||
1512 | if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != | ||
1513 | iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) | ||
1514 | contig &= ~GIV_SRC_CONTIG; | ||
1515 | if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) | ||
1516 | contig &= ~GIV_DST_CONTIG; | ||
1517 | if (unlikely(req->src != req->dst)) { | ||
1518 | dst_nents = dst_nents ? : 1; | ||
1519 | link_tbl_len += 1; | ||
1520 | } | ||
1521 | if (!(contig & GIV_SRC_CONTIG)) { | ||
1522 | assoc_nents = assoc_nents ? : 1; | ||
1523 | src_nents = src_nents ? : 1; | ||
1524 | link_tbl_len += assoc_nents + 1 + src_nents; | ||
1525 | if (likely(req->src == req->dst)) | ||
1526 | contig &= ~GIV_DST_CONTIG; | ||
1527 | } | ||
1528 | link_tbl_len += dst_nents; | ||
1529 | |||
1530 | link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); | ||
1531 | |||
1532 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
1533 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + | ||
1534 | link_tbl_bytes, GFP_DMA | flags); | ||
1535 | if (!edesc) { | ||
1536 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
1537 | return ERR_PTR(-ENOMEM); | ||
1538 | } | ||
1539 | |||
1540 | edesc->assoc_nents = assoc_nents; | ||
1541 | edesc->src_nents = src_nents; | ||
1542 | edesc->dst_nents = dst_nents; | ||
1543 | edesc->iv_dma = iv_dma; | ||
1544 | edesc->link_tbl_bytes = link_tbl_bytes; | ||
1545 | edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + | ||
1546 | desc_bytes; | ||
1547 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | ||
1548 | link_tbl_bytes, DMA_TO_DEVICE); | ||
1549 | *contig_ptr = contig; | ||
1550 | |||
1551 | link_tbl_index = 0; | ||
1552 | if (!(contig & GIV_SRC_CONTIG)) { | ||
1553 | sg_to_link_tbl(req->assoc, assoc_nents, | ||
1554 | edesc->link_tbl + | ||
1555 | link_tbl_index, 0); | ||
1556 | link_tbl_index += assoc_nents; | ||
1557 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | ||
1558 | iv_dma, ivsize, 0); | ||
1559 | link_tbl_index += 1; | ||
1560 | sg_to_link_tbl_last(req->src, src_nents, | ||
1561 | edesc->link_tbl + | ||
1562 | link_tbl_index, 0); | ||
1563 | link_tbl_index += src_nents; | ||
1564 | } | ||
1565 | if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { | ||
1566 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | ||
1567 | iv_dma, ivsize, 0); | ||
1568 | link_tbl_index += 1; | ||
1569 | sg_to_link_tbl_last(req->dst, dst_nents, | ||
1570 | edesc->link_tbl + link_tbl_index, 0); | ||
1571 | } | ||
1572 | |||
1573 | return edesc; | ||
827 | } | 1574 | } |
828 | 1575 | ||
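The two flags cleared in aead_giv_edesc_alloc() encode independent questions: can the engine read assoc || IV || src as one DMA run, and can it write IV || dst as one run. A compilable user-space model of the test (the addresses and GIV_* values here are illustrative only):

	#include <stdio.h>

	#define GIV_SRC_CONTIG 1
	#define GIV_DST_CONTIG 2

	/* Model of the givencrypt contiguity check: buffers are expressed as
	 * bus addresses; nents != 0 means the buffer is itself scattered. */
	static unsigned int giv_contig(unsigned long assoc, unsigned int assoclen,
				       int assoc_nents, unsigned long iv,
				       unsigned int ivsize, unsigned long src,
				       int src_nents, unsigned long dst,
				       int dst_nents)
	{
		unsigned int contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;

		if (assoc_nents || assoc + assoclen != iv ||
		    src_nents || iv + ivsize != src)
			contig &= ~GIV_SRC_CONTIG;
		if (dst_nents || iv + ivsize != dst)
			contig &= ~GIV_DST_CONTIG;
		return contig;
	}

	int main(void)
	{
		/* assoc at 0xff0 (16 bytes), IV at 0x1000 (16 bytes), in-place
		 * payload at 0x1010: prints 3, both sides contiguous */
		printf("%u\n", giv_contig(0xff0, 16, 0, 0x1000, 16,
					  0x1010, 0, 0x1010, 0));
		return 0;
	}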
829 | static int aead_authenc_givencrypt(struct aead_givcrypt_request *req) | 1576 | static int aead_givencrypt(struct aead_givcrypt_request *areq) |
830 | { | 1577 | { |
831 | struct aead_request *areq = &req->areq; | 1578 | struct aead_request *req = &areq->areq; |
832 | struct ipsec_esp_edesc *edesc; | 1579 | struct aead_edesc *edesc; |
833 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 1580 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
834 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1581 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
835 | struct device *jrdev = ctx->jrdev; | 1582 | struct device *jrdev = ctx->jrdev; |
836 | int ivsize = crypto_aead_ivsize(aead); | 1583 | u32 contig; |
837 | dma_addr_t iv_dma; | ||
838 | u32 *desc; | 1584 | u32 *desc; |
1585 | int ret = 0; | ||
839 | 1586 | ||
840 | iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE); | 1587 | req->cryptlen += ctx->authsize; |
841 | |||
842 | debug("%s: giv %p\n", __func__, req->giv); | ||
843 | 1588 | ||
844 | /* allocate extended descriptor */ | 1589 | /* allocate extended descriptor */ |
845 | edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN * | 1590 | edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * |
846 | CAAM_CMD_SZ); | 1591 | CAAM_CMD_SZ, &contig); |
1592 | |||
847 | if (IS_ERR(edesc)) | 1593 | if (IS_ERR(edesc)) |
848 | return PTR_ERR(edesc); | 1594 | return PTR_ERR(edesc); |
849 | 1595 | ||
1596 | #ifdef DEBUG | ||
1597 | print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ", | ||
1598 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
1599 | req->cryptlen, 1); | ||
1600 | #endif | ||
1601 | |||
1602 | /* Create and submit job descriptor */ | ||
1603 | init_aead_giv_job(ctx->sh_desc_givenc, | ||
1604 | ctx->sh_desc_givenc_dma, edesc, req, contig); | ||
1605 | #ifdef DEBUG | ||
1606 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", | ||
1607 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
1608 | desc_bytes(edesc->hw_desc), 1); | ||
1609 | #endif | ||
1610 | |||
850 | desc = edesc->hw_desc; | 1611 | desc = edesc->hw_desc; |
1612 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); | ||
1613 | if (!ret) { | ||
1614 | ret = -EINPROGRESS; | ||
1615 | } else { | ||
1616 | aead_unmap(jrdev, edesc, req); | ||
1617 | kfree(edesc); | ||
1618 | } | ||
851 | 1619 | ||
852 | /* insert shared descriptor pointer */ | 1620 | return ret; |
853 | init_job_desc_shared(desc, ctx->shared_desc_phys, | 1621 | } |
854 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | ||
855 | 1622 | ||
856 | /* | 1623 | /* |
857 | * LOAD IMM Info FIFO | 1624 | * allocate and map the ablkcipher extended descriptor for ablkcipher |
858 | * to DECO, Last, Padding, Random, Message, 16 bytes | 1625 | */ |
859 | */ | 1626 | static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request |
860 | append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 | | 1627 | *req, int desc_bytes, |
861 | NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | | 1628 | bool *iv_contig_out) |
862 | NFIFOENTRY_PTYPE_RND | ivsize, | 1629 | { |
863 | LDST_SRCDST_WORD_INFO_FIFO); | 1630 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); |
1631 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | ||
1632 | struct device *jrdev = ctx->jrdev; | ||
1633 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1634 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? | ||
1635 | GFP_KERNEL : GFP_ATOMIC; | ||
1636 | int src_nents, dst_nents = 0, link_tbl_bytes; | ||
1637 | struct ablkcipher_edesc *edesc; | ||
1638 | dma_addr_t iv_dma = 0; | ||
1639 | bool iv_contig = false; | ||
1640 | int sgc; | ||
1641 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
1642 | int link_tbl_index; | ||
1643 | |||
1644 | src_nents = sg_count(req->src, req->nbytes); | ||
1645 | |||
1646 | if (unlikely(req->dst != req->src)) | ||
1647 | dst_nents = sg_count(req->dst, req->nbytes); | ||
1648 | |||
1649 | if (likely(req->src == req->dst)) { | ||
1650 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1651 | DMA_BIDIRECTIONAL); | ||
1652 | } else { | ||
1653 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1654 | DMA_TO_DEVICE); | ||
1655 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | ||
1656 | DMA_FROM_DEVICE); | ||
1657 | } | ||
864 | 1658 | ||
865 | /* | 1659 | /* |
866 | * disable info fifo entries since the above serves as the entry | 1660 | * Check if iv can be contiguous with source and destination. |
867 | * this way, the MOVE command won't generate an entry. | 1661 | * If so, include it. If not, create scatterlist. |
868 | * Note that this isn't required in more recent versions of | ||
869 | * SEC as a MOVE that doesn't do info FIFO entries is available. | ||
870 | */ | 1662 | */ |
871 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | 1663 | iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); |
1664 | if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) | ||
1665 | iv_contig = true; | ||
1666 | else | ||
1667 | src_nents = src_nents ? : 1; | ||
1668 | link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * | ||
1669 | sizeof(struct link_tbl_entry); | ||
872 | 1670 | ||
873 | /* MOVE DECO Alignment -> C1 Context 16 bytes */ | 1671 | /* allocate space for base edesc and hw desc commands, link tables */ |
874 | append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize); | 1672 | edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + |
1673 | link_tbl_bytes, GFP_DMA | flags); | ||
1674 | if (!edesc) { | ||
1675 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
1676 | return ERR_PTR(-ENOMEM); | ||
1677 | } | ||
875 | 1678 | ||
876 | /* re-enable info fifo entries */ | 1679 | edesc->src_nents = src_nents; |
877 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | 1680 | edesc->dst_nents = dst_nents; |
1681 | edesc->link_tbl_bytes = link_tbl_bytes; | ||
1682 | edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) + | ||
1683 | desc_bytes; | ||
1684 | |||
1685 | link_tbl_index = 0; | ||
1686 | if (!iv_contig) { | ||
1687 | sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0); | ||
1688 | sg_to_link_tbl_last(req->src, src_nents, | ||
1689 | edesc->link_tbl + 1, 0); | ||
1690 | link_tbl_index += 1 + src_nents; | ||
1691 | } | ||
1692 | |||
1693 | if (unlikely(dst_nents)) { | ||
1694 | sg_to_link_tbl_last(req->dst, dst_nents, | ||
1695 | edesc->link_tbl + link_tbl_index, 0); | ||
1696 | } | ||
1697 | |||
1698 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | ||
1699 | link_tbl_bytes, DMA_TO_DEVICE); | ||
1700 | edesc->iv_dma = iv_dma; | ||
1701 | |||
1702 | #ifdef DEBUG | ||
1703 | print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ", | ||
1704 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, | ||
1705 | link_tbl_bytes, 1); | ||
1706 | #endif | ||
1707 | |||
1708 | *iv_contig_out = iv_contig; | ||
1709 | return edesc; | ||
1710 | } | ||
1711 | |||
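For the ablkcipher path the table is simpler: one optional IV entry, then src segments, then dst segments. A restatement of the sizing rule from the allocation above (sketch, not patch code):

	/* Entry count for the ablkcipher link table; iv_contig means the IV
	 * immediately precedes src in DMA space, so neither it nor the
	 * single src segment costs an entry. When the IV is not contiguous,
	 * src_nents has already been normalized to at least 1 above. */
	static inline int ablkcipher_link_tbl_entries(bool iv_contig,
						      int src_nents,
						      int dst_nents)
	{
		return (iv_contig ? 0 : 1) + src_nents + dst_nents;
	}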
1712 | static int ablkcipher_encrypt(struct ablkcipher_request *req) | ||
1713 | { | ||
1714 | struct ablkcipher_edesc *edesc; | ||
1715 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
1716 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | ||
1717 | struct device *jrdev = ctx->jrdev; | ||
1718 | bool iv_contig; | ||
1719 | u32 *desc; | ||
1720 | int ret = 0; | ||
1721 | |||
1722 | /* allocate extended descriptor */ | ||
1723 | edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * | ||
1724 | CAAM_CMD_SZ, &iv_contig); | ||
1725 | if (IS_ERR(edesc)) | ||
1726 | return PTR_ERR(edesc); | ||
878 | 1727 | ||
879 | /* MOVE C1 Context -> OFIFO 16 bytes */ | 1728 | /* Create and submit job descriptor */ |
880 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize); | 1729 | init_ablkcipher_job(ctx->sh_desc_enc, |
1730 | ctx->sh_desc_enc_dma, edesc, req, iv_contig); | ||
1731 | #ifdef DEBUG | ||
1732 | print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", | ||
1733 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
1734 | desc_bytes(edesc->hw_desc), 1); | ||
1735 | #endif | ||
1736 | desc = edesc->hw_desc; | ||
1737 | ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); | ||
881 | 1738 | ||
882 | append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA); | 1739 | if (!ret) { |
1740 | ret = -EINPROGRESS; | ||
1741 | } else { | ||
1742 | ablkcipher_unmap(jrdev, edesc, req); | ||
1743 | kfree(edesc); | ||
1744 | } | ||
883 | 1745 | ||
884 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | 1746 | return ret; |
885 | } | 1747 | } |
886 | 1748 | ||
1749 | static int ablkcipher_decrypt(struct ablkcipher_request *req) | ||
1750 | { | ||
1751 | struct ablkcipher_edesc *edesc; | ||
1752 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
1753 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | ||
1754 | struct device *jrdev = ctx->jrdev; | ||
1755 | bool iv_contig; | ||
1756 | u32 *desc; | ||
1757 | int ret = 0; | ||
1758 | |||
1759 | /* allocate extended descriptor */ | ||
1760 | edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * | ||
1761 | CAAM_CMD_SZ, &iv_contig); | ||
1762 | if (IS_ERR(edesc)) | ||
1763 | return PTR_ERR(edesc); | ||
1764 | |||
1765 | /* Create and submit job descriptor */ | ||
1766 | init_ablkcipher_job(ctx->sh_desc_dec, | ||
1767 | ctx->sh_desc_dec_dma, edesc, req, iv_contig); | ||
1768 | desc = edesc->hw_desc; | ||
1769 | #ifdef DEBUG | ||
1770 | print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", | ||
1771 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
1772 | desc_bytes(edesc->hw_desc), 1); | ||
1773 | #endif | ||
1774 | |||
1775 | ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); | ||
1776 | if (!ret) { | ||
1777 | ret = -EINPROGRESS; | ||
1778 | } else { | ||
1779 | ablkcipher_unmap(jrdev, edesc, req); | ||
1780 | kfree(edesc); | ||
1781 | } | ||
1782 | |||
1783 | return ret; | ||
1784 | } | ||
1785 | |||
1786 | #define template_aead template_u.aead | ||
1787 | #define template_ablkcipher template_u.ablkcipher | ||
887 | struct caam_alg_template { | 1788 | struct caam_alg_template { |
888 | char name[CRYPTO_MAX_ALG_NAME]; | 1789 | char name[CRYPTO_MAX_ALG_NAME]; |
889 | char driver_name[CRYPTO_MAX_ALG_NAME]; | 1790 | char driver_name[CRYPTO_MAX_ALG_NAME]; |
890 | unsigned int blocksize; | 1791 | unsigned int blocksize; |
891 | struct aead_alg aead; | 1792 | u32 type; |
1793 | union { | ||
1794 | struct ablkcipher_alg ablkcipher; | ||
1795 | struct aead_alg aead; | ||
1796 | struct blkcipher_alg blkcipher; | ||
1797 | struct cipher_alg cipher; | ||
1798 | struct compress_alg compress; | ||
1799 | struct rng_alg rng; | ||
1800 | } template_u; | ||
892 | u32 class1_alg_type; | 1801 | u32 class1_alg_type; |
893 | u32 class2_alg_type; | 1802 | u32 class2_alg_type; |
894 | u32 alg_op; | 1803 | u32 alg_op; |
@@ -900,12 +1809,13 @@ static struct caam_alg_template driver_algs[] = { | |||
900 | .name = "authenc(hmac(sha1),cbc(aes))", | 1809 | .name = "authenc(hmac(sha1),cbc(aes))", |
901 | .driver_name = "authenc-hmac-sha1-cbc-aes-caam", | 1810 | .driver_name = "authenc-hmac-sha1-cbc-aes-caam", |
902 | .blocksize = AES_BLOCK_SIZE, | 1811 | .blocksize = AES_BLOCK_SIZE, |
903 | .aead = { | 1812 | .type = CRYPTO_ALG_TYPE_AEAD, |
904 | .setkey = aead_authenc_setkey, | 1813 | .template_aead = { |
905 | .setauthsize = aead_authenc_setauthsize, | 1814 | .setkey = aead_setkey, |
906 | .encrypt = aead_authenc_encrypt, | 1815 | .setauthsize = aead_setauthsize, |
907 | .decrypt = aead_authenc_decrypt, | 1816 | .encrypt = aead_encrypt, |
908 | .givencrypt = aead_authenc_givencrypt, | 1817 | .decrypt = aead_decrypt, |
1818 | .givencrypt = aead_givencrypt, | ||
909 | .geniv = "<built-in>", | 1819 | .geniv = "<built-in>", |
910 | .ivsize = AES_BLOCK_SIZE, | 1820 | .ivsize = AES_BLOCK_SIZE, |
911 | .maxauthsize = SHA1_DIGEST_SIZE, | 1821 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -918,12 +1828,13 @@ static struct caam_alg_template driver_algs[] = { | |||
918 | .name = "authenc(hmac(sha256),cbc(aes))", | 1828 | .name = "authenc(hmac(sha256),cbc(aes))", |
919 | .driver_name = "authenc-hmac-sha256-cbc-aes-caam", | 1829 | .driver_name = "authenc-hmac-sha256-cbc-aes-caam", |
920 | .blocksize = AES_BLOCK_SIZE, | 1830 | .blocksize = AES_BLOCK_SIZE, |
921 | .aead = { | 1831 | .type = CRYPTO_ALG_TYPE_AEAD, |
922 | .setkey = aead_authenc_setkey, | 1832 | .template_aead = { |
923 | .setauthsize = aead_authenc_setauthsize, | 1833 | .setkey = aead_setkey, |
924 | .encrypt = aead_authenc_encrypt, | 1834 | .setauthsize = aead_setauthsize, |
925 | .decrypt = aead_authenc_decrypt, | 1835 | .encrypt = aead_encrypt, |
926 | .givencrypt = aead_authenc_givencrypt, | 1836 | .decrypt = aead_decrypt, |
1837 | .givencrypt = aead_givencrypt, | ||
927 | .geniv = "<built-in>", | 1838 | .geniv = "<built-in>", |
928 | .ivsize = AES_BLOCK_SIZE, | 1839 | .ivsize = AES_BLOCK_SIZE, |
929 | .maxauthsize = SHA256_DIGEST_SIZE, | 1840 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -937,12 +1848,13 @@ static struct caam_alg_template driver_algs[] = { | |||
937 | .name = "authenc(hmac(sha512),cbc(aes))", | 1848 | .name = "authenc(hmac(sha512),cbc(aes))", |
938 | .driver_name = "authenc-hmac-sha512-cbc-aes-caam", | 1849 | .driver_name = "authenc-hmac-sha512-cbc-aes-caam", |
939 | .blocksize = AES_BLOCK_SIZE, | 1850 | .blocksize = AES_BLOCK_SIZE, |
940 | .aead = { | 1851 | .type = CRYPTO_ALG_TYPE_AEAD, |
941 | .setkey = aead_authenc_setkey, | 1852 | .template_aead = { |
942 | .setauthsize = aead_authenc_setauthsize, | 1853 | .setkey = aead_setkey, |
943 | .encrypt = aead_authenc_encrypt, | 1854 | .setauthsize = aead_setauthsize, |
944 | .decrypt = aead_authenc_decrypt, | 1855 | .encrypt = aead_encrypt, |
945 | .givencrypt = aead_authenc_givencrypt, | 1856 | .decrypt = aead_decrypt, |
1857 | .givencrypt = aead_givencrypt, | ||
946 | .geniv = "<built-in>", | 1858 | .geniv = "<built-in>", |
947 | .ivsize = AES_BLOCK_SIZE, | 1859 | .ivsize = AES_BLOCK_SIZE, |
948 | .maxauthsize = SHA512_DIGEST_SIZE, | 1860 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -956,12 +1868,13 @@ static struct caam_alg_template driver_algs[] = { | |||
956 | .name = "authenc(hmac(sha1),cbc(des3_ede))", | 1868 | .name = "authenc(hmac(sha1),cbc(des3_ede))", |
957 | .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", | 1869 | .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", |
958 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1870 | .blocksize = DES3_EDE_BLOCK_SIZE, |
959 | .aead = { | 1871 | .type = CRYPTO_ALG_TYPE_AEAD, |
960 | .setkey = aead_authenc_setkey, | 1872 | .template_aead = { |
961 | .setauthsize = aead_authenc_setauthsize, | 1873 | .setkey = aead_setkey, |
962 | .encrypt = aead_authenc_encrypt, | 1874 | .setauthsize = aead_setauthsize, |
963 | .decrypt = aead_authenc_decrypt, | 1875 | .encrypt = aead_encrypt, |
964 | .givencrypt = aead_authenc_givencrypt, | 1876 | .decrypt = aead_decrypt, |
1877 | .givencrypt = aead_givencrypt, | ||
965 | .geniv = "<built-in>", | 1878 | .geniv = "<built-in>", |
966 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1879 | .ivsize = DES3_EDE_BLOCK_SIZE, |
967 | .maxauthsize = SHA1_DIGEST_SIZE, | 1880 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -974,12 +1887,13 @@ static struct caam_alg_template driver_algs[] = { | |||
974 | .name = "authenc(hmac(sha256),cbc(des3_ede))", | 1887 | .name = "authenc(hmac(sha256),cbc(des3_ede))", |
975 | .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", | 1888 | .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", |
976 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1889 | .blocksize = DES3_EDE_BLOCK_SIZE, |
977 | .aead = { | 1890 | .type = CRYPTO_ALG_TYPE_AEAD, |
978 | .setkey = aead_authenc_setkey, | 1891 | .template_aead = { |
979 | .setauthsize = aead_authenc_setauthsize, | 1892 | .setkey = aead_setkey, |
980 | .encrypt = aead_authenc_encrypt, | 1893 | .setauthsize = aead_setauthsize, |
981 | .decrypt = aead_authenc_decrypt, | 1894 | .encrypt = aead_encrypt, |
982 | .givencrypt = aead_authenc_givencrypt, | 1895 | .decrypt = aead_decrypt, |
1896 | .givencrypt = aead_givencrypt, | ||
983 | .geniv = "<built-in>", | 1897 | .geniv = "<built-in>", |
984 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1898 | .ivsize = DES3_EDE_BLOCK_SIZE, |
985 | .maxauthsize = SHA256_DIGEST_SIZE, | 1899 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -993,12 +1907,13 @@ static struct caam_alg_template driver_algs[] = { | |||
993 | .name = "authenc(hmac(sha512),cbc(des3_ede))", | 1907 | .name = "authenc(hmac(sha512),cbc(des3_ede))", |
994 | .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", | 1908 | .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", |
995 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1909 | .blocksize = DES3_EDE_BLOCK_SIZE, |
996 | .aead = { | 1910 | .type = CRYPTO_ALG_TYPE_AEAD, |
997 | .setkey = aead_authenc_setkey, | 1911 | .template_aead = { |
998 | .setauthsize = aead_authenc_setauthsize, | 1912 | .setkey = aead_setkey, |
999 | .encrypt = aead_authenc_encrypt, | 1913 | .setauthsize = aead_setauthsize, |
1000 | .decrypt = aead_authenc_decrypt, | 1914 | .encrypt = aead_encrypt, |
1001 | .givencrypt = aead_authenc_givencrypt, | 1915 | .decrypt = aead_decrypt, |
1916 | .givencrypt = aead_givencrypt, | ||
1002 | .geniv = "<built-in>", | 1917 | .geniv = "<built-in>", |
1003 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1918 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1004 | .maxauthsize = SHA512_DIGEST_SIZE, | 1919 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -1012,12 +1927,13 @@ static struct caam_alg_template driver_algs[] = { | |||
1012 | .name = "authenc(hmac(sha1),cbc(des))", | 1927 | .name = "authenc(hmac(sha1),cbc(des))", |
1013 | .driver_name = "authenc-hmac-sha1-cbc-des-caam", | 1928 | .driver_name = "authenc-hmac-sha1-cbc-des-caam", |
1014 | .blocksize = DES_BLOCK_SIZE, | 1929 | .blocksize = DES_BLOCK_SIZE, |
1015 | .aead = { | 1930 | .type = CRYPTO_ALG_TYPE_AEAD, |
1016 | .setkey = aead_authenc_setkey, | 1931 | .template_aead = { |
1017 | .setauthsize = aead_authenc_setauthsize, | 1932 | .setkey = aead_setkey, |
1018 | .encrypt = aead_authenc_encrypt, | 1933 | .setauthsize = aead_setauthsize, |
1019 | .decrypt = aead_authenc_decrypt, | 1934 | .encrypt = aead_encrypt, |
1020 | .givencrypt = aead_authenc_givencrypt, | 1935 | .decrypt = aead_decrypt, |
1936 | .givencrypt = aead_givencrypt, | ||
1021 | .geniv = "<built-in>", | 1937 | .geniv = "<built-in>", |
1022 | .ivsize = DES_BLOCK_SIZE, | 1938 | .ivsize = DES_BLOCK_SIZE, |
1023 | .maxauthsize = SHA1_DIGEST_SIZE, | 1939 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -1030,12 +1946,13 @@ static struct caam_alg_template driver_algs[] = { | |||
1030 | .name = "authenc(hmac(sha256),cbc(des))", | 1946 | .name = "authenc(hmac(sha256),cbc(des))", |
1031 | .driver_name = "authenc-hmac-sha256-cbc-des-caam", | 1947 | .driver_name = "authenc-hmac-sha256-cbc-des-caam", |
1032 | .blocksize = DES_BLOCK_SIZE, | 1948 | .blocksize = DES_BLOCK_SIZE, |
1033 | .aead = { | 1949 | .type = CRYPTO_ALG_TYPE_AEAD, |
1034 | .setkey = aead_authenc_setkey, | 1950 | .template_aead = { |
1035 | .setauthsize = aead_authenc_setauthsize, | 1951 | .setkey = aead_setkey, |
1036 | .encrypt = aead_authenc_encrypt, | 1952 | .setauthsize = aead_setauthsize, |
1037 | .decrypt = aead_authenc_decrypt, | 1953 | .encrypt = aead_encrypt, |
1038 | .givencrypt = aead_authenc_givencrypt, | 1954 | .decrypt = aead_decrypt, |
1955 | .givencrypt = aead_givencrypt, | ||
1039 | .geniv = "<built-in>", | 1956 | .geniv = "<built-in>", |
1040 | .ivsize = DES_BLOCK_SIZE, | 1957 | .ivsize = DES_BLOCK_SIZE, |
1041 | .maxauthsize = SHA256_DIGEST_SIZE, | 1958 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -1049,12 +1966,13 @@ static struct caam_alg_template driver_algs[] = { | |||
1049 | .name = "authenc(hmac(sha512),cbc(des))", | 1966 | .name = "authenc(hmac(sha512),cbc(des))", |
1050 | .driver_name = "authenc-hmac-sha512-cbc-des-caam", | 1967 | .driver_name = "authenc-hmac-sha512-cbc-des-caam", |
1051 | .blocksize = DES_BLOCK_SIZE, | 1968 | .blocksize = DES_BLOCK_SIZE, |
1052 | .aead = { | 1969 | .type = CRYPTO_ALG_TYPE_AEAD, |
1053 | .setkey = aead_authenc_setkey, | 1970 | .template_aead = { |
1054 | .setauthsize = aead_authenc_setauthsize, | 1971 | .setkey = aead_setkey, |
1055 | .encrypt = aead_authenc_encrypt, | 1972 | .setauthsize = aead_setauthsize, |
1056 | .decrypt = aead_authenc_decrypt, | 1973 | .encrypt = aead_encrypt, |
1057 | .givencrypt = aead_authenc_givencrypt, | 1974 | .decrypt = aead_decrypt, |
1975 | .givencrypt = aead_givencrypt, | ||
1058 | .geniv = "<built-in>", | 1976 | .geniv = "<built-in>", |
1059 | .ivsize = DES_BLOCK_SIZE, | 1977 | .ivsize = DES_BLOCK_SIZE, |
1060 | .maxauthsize = SHA512_DIGEST_SIZE, | 1978 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -1064,6 +1982,55 @@ static struct caam_alg_template driver_algs[] = { | |||
1064 | OP_ALG_AAI_HMAC_PRECOMP, | 1982 | OP_ALG_AAI_HMAC_PRECOMP, |
1065 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | 1983 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, |
1066 | }, | 1984 | }, |
1985 | /* ablkcipher descriptor */ | ||
1986 | { | ||
1987 | .name = "cbc(aes)", | ||
1988 | .driver_name = "cbc-aes-caam", | ||
1989 | .blocksize = AES_BLOCK_SIZE, | ||
1990 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
1991 | .template_ablkcipher = { | ||
1992 | .setkey = ablkcipher_setkey, | ||
1993 | .encrypt = ablkcipher_encrypt, | ||
1994 | .decrypt = ablkcipher_decrypt, | ||
1995 | .geniv = "eseqiv", | ||
1996 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1997 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1998 | .ivsize = AES_BLOCK_SIZE, | ||
1999 | }, | ||
2000 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
2001 | }, | ||
2002 | { | ||
2003 | .name = "cbc(des3_ede)", | ||
2004 | .driver_name = "cbc-3des-caam", | ||
2005 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
2006 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
2007 | .template_ablkcipher = { | ||
2008 | .setkey = ablkcipher_setkey, | ||
2009 | .encrypt = ablkcipher_encrypt, | ||
2010 | .decrypt = ablkcipher_decrypt, | ||
2011 | .geniv = "eseqiv", | ||
2012 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
2013 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
2014 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
2015 | }, | ||
2016 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
2017 | }, | ||
2018 | { | ||
2019 | .name = "cbc(des)", | ||
2020 | .driver_name = "cbc-des-caam", | ||
2021 | .blocksize = DES_BLOCK_SIZE, | ||
2022 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
2023 | .template_ablkcipher = { | ||
2024 | .setkey = ablkcipher_setkey, | ||
2025 | .encrypt = ablkcipher_encrypt, | ||
2026 | .decrypt = ablkcipher_decrypt, | ||
2027 | .geniv = "eseqiv", | ||
2028 | .min_keysize = DES_KEY_SIZE, | ||
2029 | .max_keysize = DES_KEY_SIZE, | ||
2030 | .ivsize = DES_BLOCK_SIZE, | ||
2031 | }, | ||
2032 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
2033 | } | ||
1067 | }; | 2034 | }; |
1068 | 2035 | ||
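Once these templates are registered, consumers reach them through the standard crypto API by generic name; algorithm priority decides whether the caam backend wins. A hedged usage sketch (try_caam_cbc_aes() is illustrative and not part of the patch):

	#include <linux/kernel.h>
	#include <linux/crypto.h>
	#include <linux/err.h>

	static int try_caam_cbc_aes(void)
	{
		struct crypto_ablkcipher *tfm;

		/* resolves to cbc-aes-caam when the caam alg wins on priority */
		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		pr_info("cbc(aes) backed by %s\n",
			crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm)));
		crypto_free_ablkcipher(tfm);
		return 0;
	}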
1069 | struct caam_crypto_alg { | 2036 | struct caam_crypto_alg { |
@@ -1102,16 +2069,19 @@ static void caam_cra_exit(struct crypto_tfm *tfm) | |||
1102 | { | 2069 | { |
1103 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | 2070 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
1104 | 2071 | ||
1105 | if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys)) | 2072 | if (ctx->sh_desc_enc_dma && |
1106 | dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys, | 2073 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) |
1107 | desc_bytes(ctx->sh_desc), DMA_TO_DEVICE); | 2074 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, |
1108 | kfree(ctx->sh_desc); | 2075 | desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); |
1109 | 2076 | if (ctx->sh_desc_dec_dma && | |
1110 | if (!dma_mapping_error(ctx->jrdev, ctx->key_phys)) | 2077 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) |
1111 | dma_unmap_single(ctx->jrdev, ctx->key_phys, | 2078 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, |
1112 | ctx->split_key_pad_len + ctx->enckeylen, | 2079 | desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); |
2080 | if (ctx->sh_desc_givenc_dma && | ||
2081 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) | ||
2082 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, | ||
2083 | desc_bytes(ctx->sh_desc_givenc), | ||
1113 | DMA_TO_DEVICE); | 2084 | DMA_TO_DEVICE); |
1114 | kfree(ctx->key); | ||
1115 | } | 2085 | } |
1116 | 2086 | ||
1117 | static void __exit caam_algapi_exit(void) | 2087 | static void __exit caam_algapi_exit(void) |
@@ -1175,12 +2145,20 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | |||
1175 | alg->cra_init = caam_cra_init; | 2145 | alg->cra_init = caam_cra_init; |
1176 | alg->cra_exit = caam_cra_exit; | 2146 | alg->cra_exit = caam_cra_exit; |
1177 | alg->cra_priority = CAAM_CRA_PRIORITY; | 2147 | alg->cra_priority = CAAM_CRA_PRIORITY; |
1178 | alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | ||
1179 | alg->cra_blocksize = template->blocksize; | 2148 | alg->cra_blocksize = template->blocksize; |
1180 | alg->cra_alignmask = 0; | 2149 | alg->cra_alignmask = 0; |
1181 | alg->cra_type = &crypto_aead_type; | ||
1182 | alg->cra_ctxsize = sizeof(struct caam_ctx); | 2150 | alg->cra_ctxsize = sizeof(struct caam_ctx); |
1183 | alg->cra_u.aead = template->aead; | 2151 | alg->cra_flags = CRYPTO_ALG_ASYNC | template->type; |
2152 | switch (template->type) { | ||
2153 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
2154 | alg->cra_type = &crypto_ablkcipher_type; | ||
2155 | alg->cra_ablkcipher = template->template_ablkcipher; | ||
2156 | break; | ||
2157 | case CRYPTO_ALG_TYPE_AEAD: | ||
2158 | alg->cra_type = &crypto_aead_type; | ||
2159 | alg->cra_aead = template->template_aead; | ||
2160 | break; | ||
2161 | } | ||
1184 | 2162 | ||
1185 | t_alg->class1_alg_type = template->class1_alg_type; | 2163 | t_alg->class1_alg_type = template->class1_alg_type; |
1186 | t_alg->class2_alg_type = template->class2_alg_type; | 2164 | t_alg->class2_alg_type = template->class2_alg_type; |
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 950450346f7..d38f2afaa96 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h | |||
@@ -31,5 +31,6 @@ | |||
31 | #include <crypto/aead.h> | 31 | #include <crypto/aead.h> |
32 | #include <crypto/authenc.h> | 32 | #include <crypto/authenc.h> |
33 | #include <crypto/scatterwalk.h> | 33 | #include <crypto/scatterwalk.h> |
34 | #include <crypto/internal/skcipher.h> | ||
34 | 35 | ||
35 | #endif /* !defined(CAAM_COMPAT_H) */ | 36 | #endif /* !defined(CAAM_COMPAT_H) */ |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 9009713a3c2..73988bb7322 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -52,9 +52,11 @@ static int caam_probe(struct platform_device *pdev) | |||
52 | struct caam_ctrl __iomem *ctrl; | 52 | struct caam_ctrl __iomem *ctrl; |
53 | struct caam_full __iomem *topregs; | 53 | struct caam_full __iomem *topregs; |
54 | struct caam_drv_private *ctrlpriv; | 54 | struct caam_drv_private *ctrlpriv; |
55 | struct caam_perfmon *perfmon; | ||
56 | struct caam_deco **deco; | 55 | struct caam_deco **deco; |
57 | u32 deconum; | 56 | u32 deconum; |
57 | #ifdef CONFIG_DEBUG_FS | ||
58 | struct caam_perfmon *perfmon; | ||
59 | #endif | ||
58 | 60 | ||
59 | ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL); | 61 | ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL); |
60 | if (!ctrlpriv) | 62 | if (!ctrlpriv) |
@@ -164,52 +166,52 @@ static int caam_probe(struct platform_device *pdev) | |||
164 | /* Controller-level - performance monitor counters */ | 166 | /* Controller-level - performance monitor counters */ |
165 | ctrlpriv->ctl_rq_dequeued = | 167 | ctrlpriv->ctl_rq_dequeued = |
166 | debugfs_create_u64("rq_dequeued", | 168 | debugfs_create_u64("rq_dequeued", |
167 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 169 | S_IRUSR | S_IRGRP | S_IROTH, |
168 | ctrlpriv->ctl, &perfmon->req_dequeued); | 170 | ctrlpriv->ctl, &perfmon->req_dequeued); |
169 | ctrlpriv->ctl_ob_enc_req = | 171 | ctrlpriv->ctl_ob_enc_req = |
170 | debugfs_create_u64("ob_rq_encrypted", | 172 | debugfs_create_u64("ob_rq_encrypted", |
171 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 173 | S_IRUSR | S_IRGRP | S_IROTH, |
172 | ctrlpriv->ctl, &perfmon->ob_enc_req); | 174 | ctrlpriv->ctl, &perfmon->ob_enc_req); |
173 | ctrlpriv->ctl_ib_dec_req = | 175 | ctrlpriv->ctl_ib_dec_req = |
174 | debugfs_create_u64("ib_rq_decrypted", | 176 | debugfs_create_u64("ib_rq_decrypted", |
175 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 177 | S_IRUSR | S_IRGRP | S_IROTH, |
176 | ctrlpriv->ctl, &perfmon->ib_dec_req); | 178 | ctrlpriv->ctl, &perfmon->ib_dec_req); |
177 | ctrlpriv->ctl_ob_enc_bytes = | 179 | ctrlpriv->ctl_ob_enc_bytes = |
178 | debugfs_create_u64("ob_bytes_encrypted", | 180 | debugfs_create_u64("ob_bytes_encrypted", |
179 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 181 | S_IRUSR | S_IRGRP | S_IROTH, |
180 | ctrlpriv->ctl, &perfmon->ob_enc_bytes); | 182 | ctrlpriv->ctl, &perfmon->ob_enc_bytes); |
181 | ctrlpriv->ctl_ob_prot_bytes = | 183 | ctrlpriv->ctl_ob_prot_bytes = |
182 | debugfs_create_u64("ob_bytes_protected", | 184 | debugfs_create_u64("ob_bytes_protected", |
183 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 185 | S_IRUSR | S_IRGRP | S_IROTH, |
184 | ctrlpriv->ctl, &perfmon->ob_prot_bytes); | 186 | ctrlpriv->ctl, &perfmon->ob_prot_bytes); |
185 | ctrlpriv->ctl_ib_dec_bytes = | 187 | ctrlpriv->ctl_ib_dec_bytes = |
186 | debugfs_create_u64("ib_bytes_decrypted", | 188 | debugfs_create_u64("ib_bytes_decrypted", |
187 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 189 | S_IRUSR | S_IRGRP | S_IROTH, |
188 | ctrlpriv->ctl, &perfmon->ib_dec_bytes); | 190 | ctrlpriv->ctl, &perfmon->ib_dec_bytes); |
189 | ctrlpriv->ctl_ib_valid_bytes = | 191 | ctrlpriv->ctl_ib_valid_bytes = |
190 | debugfs_create_u64("ib_bytes_validated", | 192 | debugfs_create_u64("ib_bytes_validated", |
191 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 193 | S_IRUSR | S_IRGRP | S_IROTH, |
192 | ctrlpriv->ctl, &perfmon->ib_valid_bytes); | 194 | ctrlpriv->ctl, &perfmon->ib_valid_bytes); |
193 | 195 | ||
194 | /* Controller level - global status values */ | 196 | /* Controller level - global status values */ |
195 | ctrlpriv->ctl_faultaddr = | 197 | ctrlpriv->ctl_faultaddr = |
196 | debugfs_create_u64("fault_addr", | 198 | debugfs_create_u64("fault_addr", |
197 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 199 | S_IRUSR | S_IRGRP | S_IROTH, |
198 | ctrlpriv->ctl, &perfmon->faultaddr); | 200 | ctrlpriv->ctl, &perfmon->faultaddr); |
199 | ctrlpriv->ctl_faultdetail = | 201 | ctrlpriv->ctl_faultdetail = |
200 | debugfs_create_u32("fault_detail", | 202 | debugfs_create_u32("fault_detail", |
201 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 203 | S_IRUSR | S_IRGRP | S_IROTH, |
202 | ctrlpriv->ctl, &perfmon->faultdetail); | 204 | ctrlpriv->ctl, &perfmon->faultdetail); |
203 | ctrlpriv->ctl_faultstatus = | 205 | ctrlpriv->ctl_faultstatus = |
204 | debugfs_create_u32("fault_status", | 206 | debugfs_create_u32("fault_status", |
205 | S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH, | 207 | S_IRUSR | S_IRGRP | S_IROTH, |
206 | ctrlpriv->ctl, &perfmon->status); | 208 | ctrlpriv->ctl, &perfmon->status); |
207 | 209 | ||
208 | /* Internal covering keys (useful in non-secure mode only) */ | 210 | /* Internal covering keys (useful in non-secure mode only) */ |
209 | ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0]; | 211 | ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0]; |
210 | ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); | 212 | ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); |
211 | ctrlpriv->ctl_kek = debugfs_create_blob("kek", | 213 | ctrlpriv->ctl_kek = debugfs_create_blob("kek", |
212 | S_IFCHR | S_IRUSR | | 214 | S_IRUSR | |
213 | S_IRGRP | S_IROTH, | 215 | S_IRGRP | S_IROTH, |
214 | ctrlpriv->ctl, | 216 | ctrlpriv->ctl, |
215 | &ctrlpriv->ctl_kek_wrap); | 217 | &ctrlpriv->ctl_kek_wrap); |
@@ -217,7 +219,7 @@ static int caam_probe(struct platform_device *pdev) | |||
217 | ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0]; | 219 | ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0]; |
218 | ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); | 220 | ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); |
219 | ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", | 221 | ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", |
220 | S_IFCHR | S_IRUSR | | 222 | S_IRUSR | |
221 | S_IRGRP | S_IROTH, | 223 | S_IRGRP | S_IROTH, |
222 | ctrlpriv->ctl, | 224 | ctrlpriv->ctl, |
223 | &ctrlpriv->ctl_tkek_wrap); | 225 | &ctrlpriv->ctl_tkek_wrap); |
@@ -225,7 +227,7 @@ static int caam_probe(struct platform_device *pdev) | |||
225 | ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0]; | 227 | ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0]; |
226 | ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); | 228 | ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); |
227 | ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", | 229 | ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", |
228 | S_IFCHR | S_IRUSR | | 230 | S_IRUSR | |
229 | S_IRGRP | S_IROTH, | 231 | S_IRGRP | S_IROTH, |
230 | ctrlpriv->ctl, | 232 | ctrlpriv->ctl, |
231 | &ctrlpriv->ctl_tdsk_wrap); | 233 | &ctrlpriv->ctl_tdsk_wrap); |
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 46915800c26..0991323cf3f 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #define IMMEDIATE (1 << 23) | 9 | #define IMMEDIATE (1 << 23) |
10 | #define CAAM_CMD_SZ sizeof(u32) | 10 | #define CAAM_CMD_SZ sizeof(u32) |
11 | #define CAAM_PTR_SZ sizeof(dma_addr_t) | 11 | #define CAAM_PTR_SZ sizeof(dma_addr_t) |
12 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64) | 12 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) |
13 | 13 | ||
14 | #ifdef DEBUG | 14 | #ifdef DEBUG |
15 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ | 15 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ |
@@ -18,6 +18,9 @@ | |||
18 | #define PRINT_POS | 18 | #define PRINT_POS |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \ | ||
22 | LDST_SRCDST_WORD_DECOCTRL | \ | ||
23 | (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT)) | ||
21 | #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ | 24 | #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ |
22 | LDST_SRCDST_WORD_DECOCTRL | \ | 25 | LDST_SRCDST_WORD_DECOCTRL | \ |
23 | (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) | 26 | (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) |
@@ -203,3 +206,56 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ | |||
203 | append_cmd(desc, immediate); \ | 206 | append_cmd(desc, immediate); \ |
204 | } | 207 | } |
205 | APPEND_CMD_RAW_IMM(load, LOAD, u32); | 208 | APPEND_CMD_RAW_IMM(load, LOAD, u32); |
209 | |||
210 | /* | ||
211 | * Append math command. Only the last part of destination and source need to | ||
212 | * be specified | ||
213 | */ | ||
214 | #define APPEND_MATH(op, desc, dest, src_0, src_1, len) \ | ||
215 | append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \ | ||
216 | MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK)); | ||
217 | |||
218 | #define append_math_add(desc, dest, src0, src1, len) \ | ||
219 | APPEND_MATH(ADD, desc, dest, src0, src1, len) | ||
220 | #define append_math_sub(desc, dest, src0, src1, len) \ | ||
221 | APPEND_MATH(SUB, desc, dest, src0, src1, len) | ||
222 | #define append_math_add_c(desc, dest, src0, src1, len) \ | ||
223 | APPEND_MATH(ADDC, desc, dest, src0, src1, len) | ||
224 | #define append_math_sub_b(desc, dest, src0, src1, len) \ | ||
225 | APPEND_MATH(SUBB, desc, dest, src0, src1, len) | ||
226 | #define append_math_and(desc, dest, src0, src1, len) \ | ||
227 | APPEND_MATH(AND, desc, dest, src0, src1, len) | ||
228 | #define append_math_or(desc, dest, src0, src1, len) \ | ||
229 | APPEND_MATH(OR, desc, dest, src0, src1, len) | ||
230 | #define append_math_xor(desc, dest, src0, src1, len) \ | ||
231 | APPEND_MATH(XOR, desc, dest, src0, src1, len) | ||
232 | #define append_math_lshift(desc, dest, src0, src1, len) \ | ||
233 | APPEND_MATH(LSHIFT, desc, dest, src0, src1, len) | ||
234 | #define append_math_rshift(desc, dest, src0, src1, len) \ | ||
235 | APPEND_MATH(RSHIFT, desc, dest, src0, src1, len) | ||
236 | |||
237 | /* Exactly one source is IMM. Data is passed in as u32 value */ | ||
238 | #define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \ | ||
239 | do { \ | ||
240 | APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \ | ||
241 | append_cmd(desc, data); \ | ||
242 | } while (0) | ||
243 | |||
244 | #define append_math_add_imm_u32(desc, dest, src0, src1, data) \ | ||
245 | APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data) | ||
246 | #define append_math_sub_imm_u32(desc, dest, src0, src1, data) \ | ||
247 | APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data) | ||
248 | #define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \ | ||
249 | APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data) | ||
250 | #define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \ | ||
251 | APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data) | ||
252 | #define append_math_and_imm_u32(desc, dest, src0, src1, data) \ | ||
253 | APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data) | ||
254 | #define append_math_or_imm_u32(desc, dest, src0, src1, data) \ | ||
255 | APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data) | ||
256 | #define append_math_xor_imm_u32(desc, dest, src0, src1, data) \ | ||
257 | APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data) | ||
258 | #define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \ | ||
259 | APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data) | ||
260 | #define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \ | ||
261 | APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data) | ||
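A hedged usage sketch of the new math helpers: each append emits one 32-bit MATH command word, and the _imm_u32 variants emit the immediate as a second word (register names such as VARSEQINLEN, SEQINLEN, REG1, and IMM are assumed from the CAAM desc.h definitions; the local buffer is illustrative):

    u32 desc[MAX_CAAM_DESCSIZE];

    init_job_desc(desc, 0);
    /* VARSEQINLEN = SEQINLEN - REG1, operand width of one CAAM_CMD_SZ word */
    append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG1, CAAM_CMD_SZ);
    /* REG1 = REG1 & 0xff; the immediate follows as its own command word */
    append_math_and_imm_u32(desc, REG1, REG1, IMM, 0xff);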
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 3cf303ee3fe..38a3297ae2b 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
@@ -342,11 +342,13 @@ static void mv_process_hash_current(int first_block) | |||
342 | else | 342 | else |
343 | op.config |= CFG_MID_FRAG; | 343 | op.config |= CFG_MID_FRAG; |
344 | 344 | ||
345 | writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); | 345 | if (first_block) { |
346 | writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); | 346 | writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); |
347 | writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); | 347 | writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); |
348 | writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); | 348 | writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); |
349 | writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | 349 | writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); |
350 | writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | ||
351 | } | ||
350 | } | 352 | } |
351 | 353 | ||
352 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); | 354 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); |
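The new first_block guard matters because the engine carries its own running digest between fragments: DIGEST_INITIAL_VAL_A..E must be seeded exactly once, at the start of a stream, and rewriting them on a CFG_MID_FRAG pass would restart the hash. The intent, as an illustrative helper (types and globals as in this file):

    static void mv_seed_digest(struct mv_req_hash_ctx *req_ctx, int first_block)
    {
            if (!first_block)
                    return; /* engine continues from its internal state */
            writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
            writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
            writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
            writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
            writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
    }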
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 2e5b2044c96..d0183ddb307 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. | 1 | /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. |
2 | * | 2 | * |
3 | * Copyright (C) 2010 David S. Miller <davem@davemloft.net> | 3 | * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
@@ -31,8 +31,8 @@ | |||
31 | #include "n2_core.h" | 31 | #include "n2_core.h" |
32 | 32 | ||
33 | #define DRV_MODULE_NAME "n2_crypto" | 33 | #define DRV_MODULE_NAME "n2_crypto" |
34 | #define DRV_MODULE_VERSION "0.1" | 34 | #define DRV_MODULE_VERSION "0.2" |
35 | #define DRV_MODULE_RELDATE "April 29, 2010" | 35 | #define DRV_MODULE_RELDATE "July 28, 2011" |
36 | 36 | ||
37 | static char version[] __devinitdata = | 37 | static char version[] __devinitdata = |
38 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 38 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
@@ -1823,22 +1823,17 @@ static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *de | |||
1823 | static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, | 1823 | static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, |
1824 | struct spu_mdesc_info *ip) | 1824 | struct spu_mdesc_info *ip) |
1825 | { | 1825 | { |
1826 | const u64 *intr, *ino; | 1826 | const u64 *ino; |
1827 | int intr_len, ino_len; | 1827 | int ino_len; |
1828 | int i; | 1828 | int i; |
1829 | 1829 | ||
1830 | intr = mdesc_get_property(mdesc, node, "intr", &intr_len); | ||
1831 | if (!intr) | ||
1832 | return -ENODEV; | ||
1833 | |||
1834 | ino = mdesc_get_property(mdesc, node, "ino", &ino_len); | 1830 | ino = mdesc_get_property(mdesc, node, "ino", &ino_len); |
1835 | if (!ino) | 1831 | if (!ino) { |
1832 | printk("NO 'ino'\n"); | ||
1836 | return -ENODEV; | 1833 | return -ENODEV; |
1834 | } | ||
1837 | 1835 | ||
1838 | if (intr_len != ino_len) | 1836 | ip->num_intrs = ino_len / sizeof(u64); |
1839 | return -EINVAL; | ||
1840 | |||
1841 | ip->num_intrs = intr_len / sizeof(u64); | ||
1842 | ip->ino_table = kzalloc((sizeof(struct ino_blob) * | 1837 | ip->ino_table = kzalloc((sizeof(struct ino_blob) * |
1843 | ip->num_intrs), | 1838 | ip->num_intrs), |
1844 | GFP_KERNEL); | 1839 | GFP_KERNEL); |
@@ -1847,7 +1842,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, | |||
1847 | 1842 | ||
1848 | for (i = 0; i < ip->num_intrs; i++) { | 1843 | for (i = 0; i < ip->num_intrs; i++) { |
1849 | struct ino_blob *b = &ip->ino_table[i]; | 1844 | struct ino_blob *b = &ip->ino_table[i]; |
1850 | b->intr = intr[i]; | 1845 | b->intr = i + 1; |
1851 | b->ino = ino[i]; | 1846 | b->ino = ino[i]; |
1852 | } | 1847 | } |
1853 | 1848 | ||
@@ -2204,6 +2199,10 @@ static struct of_device_id n2_crypto_match[] = { | |||
2204 | .name = "n2cp", | 2199 | .name = "n2cp", |
2205 | .compatible = "SUNW,vf-cwq", | 2200 | .compatible = "SUNW,vf-cwq", |
2206 | }, | 2201 | }, |
2202 | { | ||
2203 | .name = "n2cp", | ||
2204 | .compatible = "SUNW,kt-cwq", | ||
2205 | }, | ||
2207 | {}, | 2206 | {}, |
2208 | }; | 2207 | }; |
2209 | 2208 | ||
@@ -2228,6 +2227,10 @@ static struct of_device_id n2_mau_match[] = { | |||
2228 | .name = "ncp", | 2227 | .name = "ncp", |
2229 | .compatible = "SUNW,vf-mau", | 2228 | .compatible = "SUNW,vf-mau", |
2230 | }, | 2229 | }, |
2230 | { | ||
2231 | .name = "ncp", | ||
2232 | .compatible = "SUNW,kt-mau", | ||
2233 | }, | ||
2231 | {}, | 2234 | {}, |
2232 | }; | 2235 | }; |
2233 | 2236 | ||
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index ba8f1ea84c5..6399a8f1938 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -72,17 +72,20 @@ | |||
72 | 72 | ||
73 | #define DEFAULT_TIMEOUT_INTERVAL HZ | 73 | #define DEFAULT_TIMEOUT_INTERVAL HZ |
74 | 74 | ||
75 | #define FLAGS_FINUP 0x0002 | 75 | /* mostly device flags */ |
76 | #define FLAGS_FINAL 0x0004 | 76 | #define FLAGS_BUSY 0 |
77 | #define FLAGS_SG 0x0008 | 77 | #define FLAGS_FINAL 1 |
78 | #define FLAGS_SHA1 0x0010 | 78 | #define FLAGS_DMA_ACTIVE 2 |
79 | #define FLAGS_DMA_ACTIVE 0x0020 | 79 | #define FLAGS_OUTPUT_READY 3 |
80 | #define FLAGS_OUTPUT_READY 0x0040 | 80 | #define FLAGS_INIT 4 |
81 | #define FLAGS_INIT 0x0100 | 81 | #define FLAGS_CPU 5 |
82 | #define FLAGS_CPU 0x0200 | 82 | #define FLAGS_DMA_READY 6 |
83 | #define FLAGS_HMAC 0x0400 | 83 | /* context flags */ |
84 | #define FLAGS_ERROR 0x0800 | 84 | #define FLAGS_FINUP 16 |
85 | #define FLAGS_BUSY 0x1000 | 85 | #define FLAGS_SG 17 |
86 | #define FLAGS_SHA1 18 | ||
87 | #define FLAGS_HMAC 19 | ||
88 | #define FLAGS_ERROR 20 | ||
86 | 89 | ||
87 | #define OP_UPDATE 1 | 90 | #define OP_UPDATE 1 |
88 | #define OP_FINAL 2 | 91 | #define OP_FINAL 2 |
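This block rewrites the flag constants from precomputed masks to bit numbers: device flags (bits 0..6) are now manipulated atomically on dd->flags with the bitops API, while context flags (bits 16 and up) remain single-owner and are combined with BIT(). A minimal sketch of the two idioms, assuming only the definitions above:

    #include <linux/bitops.h>

    static unsigned long dev_flags;  /* shared between irq, tasklet, queue */
    static unsigned long ctx_flags;  /* owned by a single request */

    static void flags_example(void)
    {
            /* device flags: atomic read-modify-write on a bit number */
            set_bit(FLAGS_BUSY, &dev_flags);
            if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dev_flags))
                    pr_debug("hash ready\n");

            /* context flags: plain masks built with BIT(), no atomics */
            ctx_flags |= BIT(FLAGS_FINUP);
            if (ctx_flags & BIT(FLAGS_SHA1))
                    pr_debug("sha1 request\n");
    }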
@@ -144,7 +147,6 @@ struct omap_sham_dev { | |||
144 | int dma; | 147 | int dma; |
145 | int dma_lch; | 148 | int dma_lch; |
146 | struct tasklet_struct done_task; | 149 | struct tasklet_struct done_task; |
147 | struct tasklet_struct queue_task; | ||
148 | 150 | ||
149 | unsigned long flags; | 151 | unsigned long flags; |
150 | struct crypto_queue queue; | 152 | struct crypto_queue queue; |
@@ -223,7 +225,7 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) | |||
223 | if (!hash) | 225 | if (!hash) |
224 | return; | 226 | return; |
225 | 227 | ||
226 | if (likely(ctx->flags & FLAGS_SHA1)) { | 228 | if (likely(ctx->flags & BIT(FLAGS_SHA1))) { |
227 | /* SHA1 results are in big endian */ | 229 | /* SHA1 results are in big endian */ |
228 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) | 230 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) |
229 | hash[i] = be32_to_cpu(in[i]); | 231 | hash[i] = be32_to_cpu(in[i]); |
@@ -238,7 +240,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd) | |||
238 | { | 240 | { |
239 | clk_enable(dd->iclk); | 241 | clk_enable(dd->iclk); |
240 | 242 | ||
241 | if (!(dd->flags & FLAGS_INIT)) { | 243 | if (!test_bit(FLAGS_INIT, &dd->flags)) { |
242 | omap_sham_write_mask(dd, SHA_REG_MASK, | 244 | omap_sham_write_mask(dd, SHA_REG_MASK, |
243 | SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); | 245 | SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); |
244 | 246 | ||
@@ -246,7 +248,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd) | |||
246 | SHA_REG_SYSSTATUS_RESETDONE)) | 248 | SHA_REG_SYSSTATUS_RESETDONE)) |
247 | return -ETIMEDOUT; | 249 | return -ETIMEDOUT; |
248 | 250 | ||
249 | dd->flags |= FLAGS_INIT; | 251 | set_bit(FLAGS_INIT, &dd->flags); |
250 | dd->err = 0; | 252 | dd->err = 0; |
251 | } | 253 | } |
252 | 254 | ||
@@ -269,7 +271,7 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, | |||
269 | * Setting ALGO_CONST only for the first iteration | 271 | * Setting ALGO_CONST only for the first iteration |
270 | * and CLOSE_HASH only for the last one. | 272 | * and CLOSE_HASH only for the last one. |
271 | */ | 273 | */ |
272 | if (ctx->flags & FLAGS_SHA1) | 274 | if (ctx->flags & BIT(FLAGS_SHA1)) |
273 | val |= SHA_REG_CTRL_ALGO; | 275 | val |= SHA_REG_CTRL_ALGO; |
274 | if (!ctx->digcnt) | 276 | if (!ctx->digcnt) |
275 | val |= SHA_REG_CTRL_ALGO_CONST; | 277 | val |= SHA_REG_CTRL_ALGO_CONST; |
@@ -301,7 +303,9 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | |||
301 | return -ETIMEDOUT; | 303 | return -ETIMEDOUT; |
302 | 304 | ||
303 | if (final) | 305 | if (final) |
304 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ | 306 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ |
307 | |||
308 | set_bit(FLAGS_CPU, &dd->flags); | ||
305 | 309 | ||
306 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | 310 | len32 = DIV_ROUND_UP(length, sizeof(u32)); |
307 | 311 | ||
@@ -334,9 +338,9 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
334 | ctx->digcnt += length; | 338 | ctx->digcnt += length; |
335 | 339 | ||
336 | if (final) | 340 | if (final) |
337 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ | 341 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ |
338 | 342 | ||
339 | dd->flags |= FLAGS_DMA_ACTIVE; | 343 | set_bit(FLAGS_DMA_ACTIVE, &dd->flags); |
340 | 344 | ||
341 | omap_start_dma(dd->dma_lch); | 345 | omap_start_dma(dd->dma_lch); |
342 | 346 | ||
@@ -392,7 +396,7 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, | |||
392 | return -EINVAL; | 396 | return -EINVAL; |
393 | } | 397 | } |
394 | 398 | ||
395 | ctx->flags &= ~FLAGS_SG; | 399 | ctx->flags &= ~BIT(FLAGS_SG); |
396 | 400 | ||
397 | /* next call does not fail... so no unmap in the case of error */ | 401 | /* next call does not fail... so no unmap in the case of error */ |
398 | return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); | 402 | return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); |
@@ -406,7 +410,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) | |||
406 | 410 | ||
407 | omap_sham_append_sg(ctx); | 411 | omap_sham_append_sg(ctx); |
408 | 412 | ||
409 | final = (ctx->flags & FLAGS_FINUP) && !ctx->total; | 413 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; |
410 | 414 | ||
411 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", | 415 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", |
412 | ctx->bufcnt, ctx->digcnt, final); | 416 | ctx->bufcnt, ctx->digcnt, final); |
@@ -452,7 +456,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
452 | length = min(ctx->total, sg->length); | 456 | length = min(ctx->total, sg->length); |
453 | 457 | ||
454 | if (sg_is_last(sg)) { | 458 | if (sg_is_last(sg)) { |
455 | if (!(ctx->flags & FLAGS_FINUP)) { | 459 | if (!(ctx->flags & BIT(FLAGS_FINUP))) { |
456 | /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ | 460 | /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ |
457 | tail = length & (SHA1_MD5_BLOCK_SIZE - 1); | 461 | tail = length & (SHA1_MD5_BLOCK_SIZE - 1); |
458 | /* without finup() we need one block to close hash */ | 462 | /* without finup() we need one block to close hash */ |
@@ -467,12 +471,12 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
467 | return -EINVAL; | 471 | return -EINVAL; |
468 | } | 472 | } |
469 | 473 | ||
470 | ctx->flags |= FLAGS_SG; | 474 | ctx->flags |= BIT(FLAGS_SG); |
471 | 475 | ||
472 | ctx->total -= length; | 476 | ctx->total -= length; |
473 | ctx->offset = length; /* offset where to start slow */ | 477 | ctx->offset = length; /* offset where to start slow */ |
474 | 478 | ||
475 | final = (ctx->flags & FLAGS_FINUP) && !ctx->total; | 479 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; |
476 | 480 | ||
477 | /* next call does not fail... so no unmap in the case of error */ | 481 | /* next call does not fail... so no unmap in the case of error */ |
478 | return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); | 482 | return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); |
@@ -495,7 +499,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) | |||
495 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 499 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
496 | 500 | ||
497 | omap_stop_dma(dd->dma_lch); | 501 | omap_stop_dma(dd->dma_lch); |
498 | if (ctx->flags & FLAGS_SG) { | 502 | if (ctx->flags & BIT(FLAGS_SG)) { |
499 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | 503 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); |
500 | if (ctx->sg->length == ctx->offset) { | 504 | if (ctx->sg->length == ctx->offset) { |
501 | ctx->sg = sg_next(ctx->sg); | 505 | ctx->sg = sg_next(ctx->sg); |
@@ -537,18 +541,18 @@ static int omap_sham_init(struct ahash_request *req) | |||
537 | crypto_ahash_digestsize(tfm)); | 541 | crypto_ahash_digestsize(tfm)); |
538 | 542 | ||
539 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | 543 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) |
540 | ctx->flags |= FLAGS_SHA1; | 544 | ctx->flags |= BIT(FLAGS_SHA1); |
541 | 545 | ||
542 | ctx->bufcnt = 0; | 546 | ctx->bufcnt = 0; |
543 | ctx->digcnt = 0; | 547 | ctx->digcnt = 0; |
544 | ctx->buflen = BUFLEN; | 548 | ctx->buflen = BUFLEN; |
545 | 549 | ||
546 | if (tctx->flags & FLAGS_HMAC) { | 550 | if (tctx->flags & BIT(FLAGS_HMAC)) { |
547 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 551 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
548 | 552 | ||
549 | memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); | 553 | memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); |
550 | ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; | 554 | ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; |
551 | ctx->flags |= FLAGS_HMAC; | 555 | ctx->flags |= BIT(FLAGS_HMAC); |
552 | } | 556 | } |
553 | 557 | ||
554 | return 0; | 558 | return 0; |
@@ -562,9 +566,9 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) | |||
562 | int err; | 566 | int err; |
563 | 567 | ||
564 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", | 568 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", |
565 | ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0); | 569 | ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); |
566 | 570 | ||
567 | if (ctx->flags & FLAGS_CPU) | 571 | if (ctx->flags & BIT(FLAGS_CPU)) |
568 | err = omap_sham_update_cpu(dd); | 572 | err = omap_sham_update_cpu(dd); |
569 | else | 573 | else |
570 | err = omap_sham_update_dma_start(dd); | 574 | err = omap_sham_update_dma_start(dd); |
@@ -624,7 +628,7 @@ static int omap_sham_finish(struct ahash_request *req) | |||
624 | 628 | ||
625 | if (ctx->digcnt) { | 629 | if (ctx->digcnt) { |
626 | omap_sham_copy_ready_hash(req); | 630 | omap_sham_copy_ready_hash(req); |
627 | if (ctx->flags & FLAGS_HMAC) | 631 | if (ctx->flags & BIT(FLAGS_HMAC)) |
628 | err = omap_sham_finish_hmac(req); | 632 | err = omap_sham_finish_hmac(req); |
629 | } | 633 | } |
630 | 634 | ||
@@ -639,18 +643,23 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) | |||
639 | struct omap_sham_dev *dd = ctx->dd; | 643 | struct omap_sham_dev *dd = ctx->dd; |
640 | 644 | ||
641 | if (!err) { | 645 | if (!err) { |
642 | omap_sham_copy_hash(ctx->dd->req, 1); | 646 | omap_sham_copy_hash(req, 1); |
643 | if (ctx->flags & FLAGS_FINAL) | 647 | if (test_bit(FLAGS_FINAL, &dd->flags)) |
644 | err = omap_sham_finish(req); | 648 | err = omap_sham_finish(req); |
645 | } else { | 649 | } else { |
646 | ctx->flags |= FLAGS_ERROR; | 650 | ctx->flags |= BIT(FLAGS_ERROR); |
647 | } | 651 | } |
648 | 652 | ||
653 | /* atomic operation is not needed here */ | ||
654 | dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | | ||
655 | BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); | ||
649 | clk_disable(dd->iclk); | 656 | clk_disable(dd->iclk); |
650 | dd->flags &= ~FLAGS_BUSY; | ||
651 | 657 | ||
652 | if (req->base.complete) | 658 | if (req->base.complete) |
653 | req->base.complete(&req->base, err); | 659 | req->base.complete(&req->base, err); |
660 | |||
661 | /* handle new request */ | ||
662 | tasklet_schedule(&dd->done_task); | ||
654 | } | 663 | } |
655 | 664 | ||
656 | static int omap_sham_handle_queue(struct omap_sham_dev *dd, | 665 | static int omap_sham_handle_queue(struct omap_sham_dev *dd, |
@@ -658,21 +667,20 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
658 | { | 667 | { |
659 | struct crypto_async_request *async_req, *backlog; | 668 | struct crypto_async_request *async_req, *backlog; |
660 | struct omap_sham_reqctx *ctx; | 669 | struct omap_sham_reqctx *ctx; |
661 | struct ahash_request *prev_req; | ||
662 | unsigned long flags; | 670 | unsigned long flags; |
663 | int err = 0, ret = 0; | 671 | int err = 0, ret = 0; |
664 | 672 | ||
665 | spin_lock_irqsave(&dd->lock, flags); | 673 | spin_lock_irqsave(&dd->lock, flags); |
666 | if (req) | 674 | if (req) |
667 | ret = ahash_enqueue_request(&dd->queue, req); | 675 | ret = ahash_enqueue_request(&dd->queue, req); |
668 | if (dd->flags & FLAGS_BUSY) { | 676 | if (test_bit(FLAGS_BUSY, &dd->flags)) { |
669 | spin_unlock_irqrestore(&dd->lock, flags); | 677 | spin_unlock_irqrestore(&dd->lock, flags); |
670 | return ret; | 678 | return ret; |
671 | } | 679 | } |
672 | backlog = crypto_get_backlog(&dd->queue); | 680 | backlog = crypto_get_backlog(&dd->queue); |
673 | async_req = crypto_dequeue_request(&dd->queue); | 681 | async_req = crypto_dequeue_request(&dd->queue); |
674 | if (async_req) | 682 | if (async_req) |
675 | dd->flags |= FLAGS_BUSY; | 683 | set_bit(FLAGS_BUSY, &dd->flags); |
676 | spin_unlock_irqrestore(&dd->lock, flags); | 684 | spin_unlock_irqrestore(&dd->lock, flags); |
677 | 685 | ||
678 | if (!async_req) | 686 | if (!async_req) |
@@ -682,16 +690,12 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
682 | backlog->complete(backlog, -EINPROGRESS); | 690 | backlog->complete(backlog, -EINPROGRESS); |
683 | 691 | ||
684 | req = ahash_request_cast(async_req); | 692 | req = ahash_request_cast(async_req); |
685 | |||
686 | prev_req = dd->req; | ||
687 | dd->req = req; | 693 | dd->req = req; |
688 | |||
689 | ctx = ahash_request_ctx(req); | 694 | ctx = ahash_request_ctx(req); |
690 | 695 | ||
691 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", | 696 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", |
692 | ctx->op, req->nbytes); | 697 | ctx->op, req->nbytes); |
693 | 698 | ||
694 | |||
695 | err = omap_sham_hw_init(dd); | 699 | err = omap_sham_hw_init(dd); |
696 | if (err) | 700 | if (err) |
697 | goto err1; | 701 | goto err1; |
@@ -712,18 +716,16 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
712 | 716 | ||
713 | if (ctx->op == OP_UPDATE) { | 717 | if (ctx->op == OP_UPDATE) { |
714 | err = omap_sham_update_req(dd); | 718 | err = omap_sham_update_req(dd); |
715 | if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP)) | 719 | if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP))) |
716 | /* no final() after finup() */ | 720 | /* no final() after finup() */ |
717 | err = omap_sham_final_req(dd); | 721 | err = omap_sham_final_req(dd); |
718 | } else if (ctx->op == OP_FINAL) { | 722 | } else if (ctx->op == OP_FINAL) { |
719 | err = omap_sham_final_req(dd); | 723 | err = omap_sham_final_req(dd); |
720 | } | 724 | } |
721 | err1: | 725 | err1: |
722 | if (err != -EINPROGRESS) { | 726 | if (err != -EINPROGRESS) |
723 | /* done_task will not finish it, so do it here */ | 727 | /* done_task will not finish it, so do it here */ |
724 | omap_sham_finish_req(req, err); | 728 | omap_sham_finish_req(req, err); |
725 | tasklet_schedule(&dd->queue_task); | ||
726 | } | ||
727 | 729 | ||
728 | dev_dbg(dd->dev, "exit, err: %d\n", err); | 730 | dev_dbg(dd->dev, "exit, err: %d\n", err); |
729 | 731 | ||
@@ -752,7 +754,7 @@ static int omap_sham_update(struct ahash_request *req) | |||
752 | ctx->sg = req->src; | 754 | ctx->sg = req->src; |
753 | ctx->offset = 0; | 755 | ctx->offset = 0; |
754 | 756 | ||
755 | if (ctx->flags & FLAGS_FINUP) { | 757 | if (ctx->flags & BIT(FLAGS_FINUP)) { |
756 | if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { | 758 | if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { |
757 | /* | 759 | /* |
758 | * OMAP HW accel works only with buffers >= 9 | 760 | * OMAP HW accel works only with buffers >= 9 |
@@ -765,7 +767,7 @@ static int omap_sham_update(struct ahash_request *req) | |||
765 | /* | 767 | /* |
766 | * faster to use CPU for short transfers | 768 | * faster to use CPU for short transfers |
767 | */ | 769 | */ |
768 | ctx->flags |= FLAGS_CPU; | 770 | ctx->flags |= BIT(FLAGS_CPU); |
769 | } | 771 | } |
770 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { | 772 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { |
771 | omap_sham_append_sg(ctx); | 773 | omap_sham_append_sg(ctx); |
@@ -802,9 +804,9 @@ static int omap_sham_final(struct ahash_request *req) | |||
802 | { | 804 | { |
803 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 805 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
804 | 806 | ||
805 | ctx->flags |= FLAGS_FINUP; | 807 | ctx->flags |= BIT(FLAGS_FINUP); |
806 | 808 | ||
807 | if (ctx->flags & FLAGS_ERROR) | 809 | if (ctx->flags & BIT(FLAGS_ERROR)) |
808 | return 0; /* uncompleted hash is not needed */ | 810 | return 0; /* uncompleted hash is not needed */ |
809 | 811 | ||
810 | /* OMAP HW accel works only with buffers >= 9 */ | 812 | /* OMAP HW accel works only with buffers >= 9 */ |
@@ -823,7 +825,7 @@ static int omap_sham_finup(struct ahash_request *req) | |||
823 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 825 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
824 | int err1, err2; | 826 | int err1, err2; |
825 | 827 | ||
826 | ctx->flags |= FLAGS_FINUP; | 828 | ctx->flags |= BIT(FLAGS_FINUP); |
827 | 829 | ||
828 | err1 = omap_sham_update(req); | 830 | err1 = omap_sham_update(req); |
829 | if (err1 == -EINPROGRESS || err1 == -EBUSY) | 831 | if (err1 == -EINPROGRESS || err1 == -EBUSY) |
@@ -895,7 +897,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | |||
895 | 897 | ||
896 | if (alg_base) { | 898 | if (alg_base) { |
897 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 899 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
898 | tctx->flags |= FLAGS_HMAC; | 900 | tctx->flags |= BIT(FLAGS_HMAC); |
899 | bctx->shash = crypto_alloc_shash(alg_base, 0, | 901 | bctx->shash = crypto_alloc_shash(alg_base, 0, |
900 | CRYPTO_ALG_NEED_FALLBACK); | 902 | CRYPTO_ALG_NEED_FALLBACK); |
901 | if (IS_ERR(bctx->shash)) { | 903 | if (IS_ERR(bctx->shash)) { |
@@ -932,7 +934,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm) | |||
932 | crypto_free_shash(tctx->fallback); | 934 | crypto_free_shash(tctx->fallback); |
933 | tctx->fallback = NULL; | 935 | tctx->fallback = NULL; |
934 | 936 | ||
935 | if (tctx->flags & FLAGS_HMAC) { | 937 | if (tctx->flags & BIT(FLAGS_HMAC)) { |
936 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 938 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
937 | crypto_free_shash(bctx->shash); | 939 | crypto_free_shash(bctx->shash); |
938 | } | 940 | } |
@@ -1036,51 +1038,46 @@ static struct ahash_alg algs[] = { | |||
1036 | static void omap_sham_done_task(unsigned long data) | 1038 | static void omap_sham_done_task(unsigned long data) |
1037 | { | 1039 | { |
1038 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | 1040 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; |
1039 | struct ahash_request *req = dd->req; | 1041 | int err = 0; |
1040 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
1041 | int ready = 0, err = 0; | ||
1042 | 1042 | ||
1043 | if (ctx->flags & FLAGS_OUTPUT_READY) { | 1043 | if (!test_bit(FLAGS_BUSY, &dd->flags)) { |
1044 | ctx->flags &= ~FLAGS_OUTPUT_READY; | 1044 | omap_sham_handle_queue(dd, NULL); |
1045 | ready = 1; | 1045 | return; |
1046 | } | 1046 | } |
1047 | 1047 | ||
1048 | if (dd->flags & FLAGS_DMA_ACTIVE) { | 1048 | if (test_bit(FLAGS_CPU, &dd->flags)) { |
1049 | dd->flags &= ~FLAGS_DMA_ACTIVE; | 1049 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) |
1050 | omap_sham_update_dma_stop(dd); | 1050 | goto finish; |
1051 | if (!dd->err) | 1051 | } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { |
1052 | if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { | ||
1053 | omap_sham_update_dma_stop(dd); | ||
1054 | if (dd->err) { | ||
1055 | err = dd->err; | ||
1056 | goto finish; | ||
1057 | } | ||
1058 | } | ||
1059 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { | ||
1060 | /* hash or semi-hash ready */ | ||
1061 | clear_bit(FLAGS_DMA_READY, &dd->flags); | ||
1052 | err = omap_sham_update_dma_start(dd); | 1062 | err = omap_sham_update_dma_start(dd); |
1063 | if (err != -EINPROGRESS) | ||
1064 | goto finish; | ||
1065 | } | ||
1053 | } | 1066 | } |
1054 | 1067 | ||
1055 | err = dd->err ? : err; | 1068 | return; |
1056 | |||
1057 | if (err != -EINPROGRESS && (ready || err)) { | ||
1058 | dev_dbg(dd->dev, "update done: err: %d\n", err); | ||
1059 | /* finish current request */ | ||
1060 | omap_sham_finish_req(req, err); | ||
1061 | /* start new request */ | ||
1062 | omap_sham_handle_queue(dd, NULL); | ||
1063 | } | ||
1064 | } | ||
1065 | |||
1066 | static void omap_sham_queue_task(unsigned long data) | ||
1067 | { | ||
1068 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | ||
1069 | 1069 | ||
1070 | omap_sham_handle_queue(dd, NULL); | 1070 | finish: |
1071 | dev_dbg(dd->dev, "update done: err: %d\n", err); | ||
1072 | /* finish current request */ | ||
1073 | omap_sham_finish_req(dd->req, err); | ||
1071 | } | 1074 | } |
1072 | 1075 | ||
1073 | static irqreturn_t omap_sham_irq(int irq, void *dev_id) | 1076 | static irqreturn_t omap_sham_irq(int irq, void *dev_id) |
1074 | { | 1077 | { |
1075 | struct omap_sham_dev *dd = dev_id; | 1078 | struct omap_sham_dev *dd = dev_id; |
1076 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
1077 | |||
1078 | if (!ctx) { | ||
1079 | dev_err(dd->dev, "unknown interrupt.\n"); | ||
1080 | return IRQ_HANDLED; | ||
1081 | } | ||
1082 | 1079 | ||
1083 | if (unlikely(ctx->flags & FLAGS_FINAL)) | 1080 | if (unlikely(test_bit(FLAGS_FINAL, &dd->flags))) |
1084 | /* final -> allow device to go to power-saving mode */ | 1081 | /* final -> allow device to go to power-saving mode */ |
1085 | omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); | 1082 | omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); |
1086 | 1083 | ||
@@ -1088,8 +1085,12 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id) | |||
1088 | SHA_REG_CTRL_OUTPUT_READY); | 1085 | SHA_REG_CTRL_OUTPUT_READY); |
1089 | omap_sham_read(dd, SHA_REG_CTRL); | 1086 | omap_sham_read(dd, SHA_REG_CTRL); |
1090 | 1087 | ||
1091 | ctx->flags |= FLAGS_OUTPUT_READY; | 1088 | if (!test_bit(FLAGS_BUSY, &dd->flags)) { |
1092 | dd->err = 0; | 1089 | dev_warn(dd->dev, "Interrupt when no active requests.\n"); |
1090 | return IRQ_HANDLED; | ||
1091 | } | ||
1092 | |||
1093 | set_bit(FLAGS_OUTPUT_READY, &dd->flags); | ||
1093 | tasklet_schedule(&dd->done_task); | 1094 | tasklet_schedule(&dd->done_task); |
1094 | 1095 | ||
1095 | return IRQ_HANDLED; | 1096 | return IRQ_HANDLED; |
@@ -1102,9 +1103,10 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) | |||
1102 | if (ch_status != OMAP_DMA_BLOCK_IRQ) { | 1103 | if (ch_status != OMAP_DMA_BLOCK_IRQ) { |
1103 | pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); | 1104 | pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); |
1104 | dd->err = -EIO; | 1105 | dd->err = -EIO; |
1105 | dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ | 1106 | clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */ |
1106 | } | 1107 | } |
1107 | 1108 | ||
1109 | set_bit(FLAGS_DMA_READY, &dd->flags); | ||
1108 | tasklet_schedule(&dd->done_task); | 1110 | tasklet_schedule(&dd->done_task); |
1109 | } | 1111 | } |
1110 | 1112 | ||
@@ -1151,7 +1153,6 @@ static int __devinit omap_sham_probe(struct platform_device *pdev) | |||
1151 | INIT_LIST_HEAD(&dd->list); | 1153 | INIT_LIST_HEAD(&dd->list); |
1152 | spin_lock_init(&dd->lock); | 1154 | spin_lock_init(&dd->lock); |
1153 | tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); | 1155 | tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); |
1154 | tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd); | ||
1155 | crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); | 1156 | crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); |
1156 | 1157 | ||
1157 | dd->irq = -1; | 1158 | dd->irq = -1; |
@@ -1260,7 +1261,6 @@ static int __devexit omap_sham_remove(struct platform_device *pdev) | |||
1260 | for (i = 0; i < ARRAY_SIZE(algs); i++) | 1261 | for (i = 0; i < ARRAY_SIZE(algs); i++) |
1261 | crypto_unregister_ahash(&algs[i]); | 1262 | crypto_unregister_ahash(&algs[i]); |
1262 | tasklet_kill(&dd->done_task); | 1263 | tasklet_kill(&dd->done_task); |
1263 | tasklet_kill(&dd->queue_task); | ||
1264 | iounmap(dd->io_base); | 1264 | iounmap(dd->io_base); |
1265 | clk_put(dd->iclk); | 1265 | clk_put(dd->iclk); |
1266 | omap_sham_dma_cleanup(dd); | 1266 | omap_sham_dma_cleanup(dd); |
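After this change a single tasklet serves both roles of the old pair: when the device is idle it pumps the request queue, otherwise it advances DMA or finishes the active request. Condensed control flow (a sketch, omitting the error paths shown above):

    static void done_task_sketch(struct omap_sham_dev *dd)
    {
            if (!test_bit(FLAGS_BUSY, &dd->flags)) {
                    omap_sham_handle_queue(dd, NULL); /* start next request */
                    return;
            }
            /* otherwise: stop/restart DMA or fall through to
             * omap_sham_finish_req(), exactly as in the hunk above */
    }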
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 854e2632f9a..8a0bb417aa1 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * talitos - Freescale Integrated Security Engine (SEC) device driver | 2 | * talitos - Freescale Integrated Security Engine (SEC) device driver |
3 | * | 3 | * |
4 | * Copyright (c) 2008-2010 Freescale Semiconductor, Inc. | 4 | * Copyright (c) 2008-2011 Freescale Semiconductor, Inc. |
5 | * | 5 | * |
6 | * Scatterlist Crypto API glue code copied from files with the following: | 6 | * Scatterlist Crypto API glue code copied from files with the following: |
7 | * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> |
@@ -282,6 +282,7 @@ static int init_device(struct device *dev) | |||
282 | /** | 282 | /** |
283 | * talitos_submit - submits a descriptor to the device for processing | 283 | * talitos_submit - submits a descriptor to the device for processing |
284 | * @dev: the SEC device to be used | 284 | * @dev: the SEC device to be used |
285 | * @ch: the SEC device channel to be used | ||
285 | * @desc: the descriptor to be processed by the device | 286 | * @desc: the descriptor to be processed by the device |
286 | * @callback: whom to call when processing is complete | 287 | * @callback: whom to call when processing is complete |
287 | * @context: a handle for use by caller (optional) | 288 | * @context: a handle for use by caller (optional) |
@@ -290,7 +291,7 @@ static int init_device(struct device *dev) | |||
290 | * callback must check err and feedback in descriptor header | 291 | * callback must check err and feedback in descriptor header |
291 | * for device processing status. | 292 | * for device processing status. |
292 | */ | 293 | */ |
293 | static int talitos_submit(struct device *dev, struct talitos_desc *desc, | 294 | static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, |
294 | void (*callback)(struct device *dev, | 295 | void (*callback)(struct device *dev, |
295 | struct talitos_desc *desc, | 296 | struct talitos_desc *desc, |
296 | void *context, int error), | 297 | void *context, int error), |
@@ -298,15 +299,9 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc, | |||
298 | { | 299 | { |
299 | struct talitos_private *priv = dev_get_drvdata(dev); | 300 | struct talitos_private *priv = dev_get_drvdata(dev); |
300 | struct talitos_request *request; | 301 | struct talitos_request *request; |
301 | unsigned long flags, ch; | 302 | unsigned long flags; |
302 | int head; | 303 | int head; |
303 | 304 | ||
304 | /* select done notification */ | ||
305 | desc->hdr |= DESC_HDR_DONE_NOTIFY; | ||
306 | |||
307 | /* emulate SEC's round-robin channel fifo polling scheme */ | ||
308 | ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); | ||
309 | |||
310 | spin_lock_irqsave(&priv->chan[ch].head_lock, flags); | 305 | spin_lock_irqsave(&priv->chan[ch].head_lock, flags); |
311 | 306 | ||
312 | if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) { | 307 | if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) { |
@@ -706,6 +701,7 @@ static void talitos_unregister_rng(struct device *dev) | |||
706 | 701 | ||
707 | struct talitos_ctx { | 702 | struct talitos_ctx { |
708 | struct device *dev; | 703 | struct device *dev; |
704 | int ch; | ||
709 | __be32 desc_hdr_template; | 705 | __be32 desc_hdr_template; |
710 | u8 key[TALITOS_MAX_KEY_SIZE]; | 706 | u8 key[TALITOS_MAX_KEY_SIZE]; |
711 | u8 iv[TALITOS_MAX_IV_LENGTH]; | 707 | u8 iv[TALITOS_MAX_IV_LENGTH]; |
@@ -1117,7 +1113,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1117 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, | 1113 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, |
1118 | DMA_FROM_DEVICE); | 1114 | DMA_FROM_DEVICE); |
1119 | 1115 | ||
1120 | ret = talitos_submit(dev, desc, callback, areq); | 1116 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
1121 | if (ret != -EINPROGRESS) { | 1117 | if (ret != -EINPROGRESS) { |
1122 | ipsec_esp_unmap(dev, edesc, areq); | 1118 | ipsec_esp_unmap(dev, edesc, areq); |
1123 | kfree(edesc); | 1119 | kfree(edesc); |
@@ -1382,22 +1378,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |||
1382 | const u8 *key, unsigned int keylen) | 1378 | const u8 *key, unsigned int keylen) |
1383 | { | 1379 | { |
1384 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1380 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1385 | struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher); | ||
1386 | |||
1387 | if (keylen > TALITOS_MAX_KEY_SIZE) | ||
1388 | goto badkey; | ||
1389 | |||
1390 | if (keylen < alg->min_keysize || keylen > alg->max_keysize) | ||
1391 | goto badkey; | ||
1392 | 1381 | ||
1393 | memcpy(&ctx->key, key, keylen); | 1382 | memcpy(&ctx->key, key, keylen); |
1394 | ctx->keylen = keylen; | 1383 | ctx->keylen = keylen; |
1395 | 1384 | ||
1396 | return 0; | 1385 | return 0; |
1397 | |||
1398 | badkey: | ||
1399 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1400 | return -EINVAL; | ||
1401 | } | 1386 | } |
1402 | 1387 | ||
1403 | static void common_nonsnoop_unmap(struct device *dev, | 1388 | static void common_nonsnoop_unmap(struct device *dev, |
@@ -1433,7 +1418,6 @@ static void ablkcipher_done(struct device *dev, | |||
1433 | 1418 | ||
1434 | static int common_nonsnoop(struct talitos_edesc *edesc, | 1419 | static int common_nonsnoop(struct talitos_edesc *edesc, |
1435 | struct ablkcipher_request *areq, | 1420 | struct ablkcipher_request *areq, |
1436 | u8 *giv, | ||
1437 | void (*callback) (struct device *dev, | 1421 | void (*callback) (struct device *dev, |
1438 | struct talitos_desc *desc, | 1422 | struct talitos_desc *desc, |
1439 | void *context, int error)) | 1423 | void *context, int error)) |
@@ -1453,7 +1437,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1453 | 1437 | ||
1454 | /* cipher iv */ | 1438 | /* cipher iv */ |
1455 | ivsize = crypto_ablkcipher_ivsize(cipher); | 1439 | ivsize = crypto_ablkcipher_ivsize(cipher); |
1456 | map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0, | 1440 | map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0, |
1457 | DMA_TO_DEVICE); | 1441 | DMA_TO_DEVICE); |
1458 | 1442 | ||
1459 | /* cipher key */ | 1443 | /* cipher key */ |
@@ -1524,7 +1508,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1524 | to_talitos_ptr(&desc->ptr[6], 0); | 1508 | to_talitos_ptr(&desc->ptr[6], 0); |
1525 | desc->ptr[6].j_extent = 0; | 1509 | desc->ptr[6].j_extent = 0; |
1526 | 1510 | ||
1527 | ret = talitos_submit(dev, desc, callback, areq); | 1511 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
1528 | if (ret != -EINPROGRESS) { | 1512 | if (ret != -EINPROGRESS) { |
1529 | common_nonsnoop_unmap(dev, edesc, areq); | 1513 | common_nonsnoop_unmap(dev, edesc, areq); |
1530 | kfree(edesc); | 1514 | kfree(edesc); |
@@ -1556,7 +1540,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq) | |||
1556 | /* set encrypt */ | 1540 | /* set encrypt */ |
1557 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; | 1541 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; |
1558 | 1542 | ||
1559 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | 1543 | return common_nonsnoop(edesc, areq, ablkcipher_done); |
1560 | } | 1544 | } |
1561 | 1545 | ||
1562 | static int ablkcipher_decrypt(struct ablkcipher_request *areq) | 1546 | static int ablkcipher_decrypt(struct ablkcipher_request *areq) |
@@ -1572,7 +1556,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) | |||
1572 | 1556 | ||
1573 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | 1557 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; |
1574 | 1558 | ||
1575 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | 1559 | return common_nonsnoop(edesc, areq, ablkcipher_done); |
1576 | } | 1560 | } |
1577 | 1561 | ||
1578 | static void common_nonsnoop_hash_unmap(struct device *dev, | 1562 | static void common_nonsnoop_hash_unmap(struct device *dev, |
@@ -1703,7 +1687,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1703 | /* last DWORD empty */ | 1687 | /* last DWORD empty */ |
1704 | desc->ptr[6] = zero_entry; | 1688 | desc->ptr[6] = zero_entry; |
1705 | 1689 | ||
1706 | ret = talitos_submit(dev, desc, callback, areq); | 1690 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
1707 | if (ret != -EINPROGRESS) { | 1691 | if (ret != -EINPROGRESS) { |
1708 | common_nonsnoop_hash_unmap(dev, edesc, areq); | 1692 | common_nonsnoop_hash_unmap(dev, edesc, areq); |
1709 | kfree(edesc); | 1693 | kfree(edesc); |
@@ -2244,6 +2228,7 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
2244 | struct crypto_alg *alg = tfm->__crt_alg; | 2228 | struct crypto_alg *alg = tfm->__crt_alg; |
2245 | struct talitos_crypto_alg *talitos_alg; | 2229 | struct talitos_crypto_alg *talitos_alg; |
2246 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | 2230 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); |
2231 | struct talitos_private *priv; | ||
2247 | 2232 | ||
2248 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) | 2233 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) |
2249 | talitos_alg = container_of(__crypto_ahash_alg(alg), | 2234 | talitos_alg = container_of(__crypto_ahash_alg(alg), |
@@ -2256,9 +2241,17 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
2256 | /* update context with ptr to dev */ | 2241 | /* update context with ptr to dev */ |
2257 | ctx->dev = talitos_alg->dev; | 2242 | ctx->dev = talitos_alg->dev; |
2258 | 2243 | ||
2244 | /* assign SEC channel to tfm in round-robin fashion */ | ||
2245 | priv = dev_get_drvdata(ctx->dev); | ||
2246 | ctx->ch = atomic_inc_return(&priv->last_chan) & | ||
2247 | (priv->num_channels - 1); | ||
2248 | |||
2259 | /* copy descriptor header template value */ | 2249 | /* copy descriptor header template value */ |
2260 | ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; | 2250 | ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; |
2261 | 2251 | ||
2252 | /* select done notification */ | ||
2253 | ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY; | ||
2254 | |||
2262 | return 0; | 2255 | return 0; |
2263 | } | 2256 | } |
2264 | 2257 | ||
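With the submit-time round-robin gone, each transform is pinned to a channel once at cra_init time and talitos_submit() uses ctx->ch directly; the masking assumes num_channels is a power of two. The selection logic in isolation (the helper name is illustrative):

    static atomic_t last_chan = ATOMIC_INIT(-1);

    /* returns 0..num_channels-1, cycling; num_channels must be 2^n */
    static int talitos_pick_channel(int num_channels)
    {
            return atomic_inc_return(&last_chan) & (num_channels - 1);
    }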
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c new file mode 100644 index 00000000000..22a96ed343e --- /dev/null +++ b/drivers/crypto/tegra-aes.c | |||
@@ -0,0 +1,1496 @@ | |||
1 | /* | ||
2 | * drivers/crypto/tegra-aes.c | ||
3 | * | ||
4 | * AES driver for the NVIDIA Tegra AES hardware | ||
5 | * | ||
6 | * Copyright (c) 2010-2011, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/clk.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/scatterlist.h> | ||
30 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/io.h> | ||
32 | #include <linux/mutex.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/completion.h> | ||
35 | #include <linux/workqueue.h> | ||
36 | #include <linux/delay.h> | ||
37 | |||
38 | #include <mach/arb_sema.h> | ||
39 | #include <mach/clk.h> | ||
40 | #include "../video/tegra/nvmap/nvmap.h" | ||
41 | |||
42 | #include <crypto/scatterwalk.h> | ||
43 | #include <crypto/aes.h> | ||
44 | #include <crypto/internal/rng.h> | ||
45 | |||
46 | #include "tegra-aes.h" | ||
47 | |||
48 | #define FLAGS_MODE_MASK 0x00ff | ||
49 | #define FLAGS_ENCRYPT BIT(0) | ||
50 | #define FLAGS_CBC BIT(1) | ||
51 | #define FLAGS_GIV BIT(2) | ||
52 | #define FLAGS_RNG BIT(3) | ||
53 | #define FLAGS_OFB BIT(4) | ||
54 | #define FLAGS_INIT BIT(5) | ||
55 | #define FLAGS_BUSY 1 | ||
56 | |||
57 | /* | ||
58 | * Defines the maximum number of bytes the AES engine processes in one go, | ||
59 | * sized so that one pass takes about 1 msec. The engine spends about 176 | ||
60 | * cycles per 16-byte block, i.e. 11 cycles/byte. The CPU may hold the BSE | ||
61 | * for 1 msec, giving ~216K AVP/BSE cycles, in which AES can process | ||
62 | * 216K/11 ~= 19KB; AES_HW_DMA_BUFFER_SIZE_BYTES is thus set to 16KB (0x4000). | ||
63 | */ | ||
64 | #define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000 | ||
65 | |||
66 | /* | ||
67 | * The key table length is 64 bytes | ||
68 | * (This includes up to 32 bytes of key, plus the 16-byte original initial | ||
69 | * vector and the 16-byte updated initial vector) | ||
70 | */ | ||
71 | #define AES_HW_KEY_TABLE_LENGTH_BYTES 64 | ||
72 | |||
73 | #define AES_HW_IV_SIZE 16 | ||
74 | #define AES_HW_KEYSCHEDULE_LEN 256 | ||
75 | #define ARB_SEMA_TIMEOUT 500 | ||
76 | |||
77 | /* | ||
78 | * The memory being used is divided as follows: | ||
79 | * 1. Key - 32 bytes | ||
80 | * 2. Original IV - 16 bytes | ||
81 | * 3. Updated IV - 16 bytes | ||
82 | * 4. Key schedule - 256 bytes | ||
83 | * | ||
84 | * 1+2+3 constitute the hw key table. | ||
85 | */ | ||
86 | #define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN) | ||
87 | |||
88 | #define DEFAULT_RNG_BLK_SZ 16 | ||
89 | |||
90 | /* As of now only 5 commands are used for AES encryption/decryption */ | ||
91 | #define AES_HW_MAX_ICQ_LENGTH 4 | ||
92 | |||
93 | #define ICQBITSHIFT_BLKCNT 0 | ||
94 | |||
95 | /* memdma_vd command */ | ||
96 | #define MEMDMA_DIR_DTOVRAM 0 | ||
97 | #define MEMDMA_DIR_VTODRAM 1 | ||
98 | #define MEMDMABITSHIFT_DIR 25 | ||
99 | #define MEMDMABITSHIFT_NUM_WORDS 12 | ||
100 | |||
101 | /* Define AES interactive command queue (ICQ) command bit positions */ | ||
102 | enum { | ||
103 | ICQBITSHIFT_KEYTABLEADDR = 0, | ||
104 | ICQBITSHIFT_KEYTABLEID = 17, | ||
105 | ICQBITSHIFT_VRAMSEL = 23, | ||
106 | ICQBITSHIFT_TABLESEL = 24, | ||
107 | ICQBITSHIFT_OPCODE = 26, | ||
108 | }; | ||
109 | |||
110 | /* Define UCQ opcodes required for AES operation */ | ||
111 | enum { | ||
112 | UCQOPCODE_BLKSTARTENGINE = 0x0E, | ||
113 | UCQOPCODE_DMASETUP = 0x10, | ||
114 | UCQOPCODE_DMACOMPLETE = 0x11, | ||
115 | UCQOPCODE_SETTABLE = 0x15, | ||
116 | UCQOPCODE_MEMDMAVD = 0x22, | ||
117 | }; | ||
118 | |||
119 | /* Define AES command values */ | ||
120 | enum { | ||
121 | UCQCMD_VRAM_SEL = 0x1, | ||
122 | UCQCMD_CRYPTO_TABLESEL = 0x3, | ||
123 | UCQCMD_KEYSCHEDTABLESEL = 0x4, | ||
124 | UCQCMD_KEYTABLESEL = 0x8, | ||
125 | }; | ||
126 | |||
127 | #define UCQCMD_KEYTABLEADDRMASK 0x1FFFF | ||
128 | |||
129 | #define AES_NR_KEYSLOTS 8 | ||
130 | #define SSK_SLOT_NUM 4 | ||
131 | |||
132 | struct tegra_aes_slot { | ||
133 | struct list_head node; | ||
134 | int slot_num; | ||
135 | bool available; | ||
136 | }; | ||
137 | |||
138 | struct tegra_aes_reqctx { | ||
139 | unsigned long mode; | ||
140 | }; | ||
141 | |||
142 | #define TEGRA_AES_QUEUE_LENGTH 500 | ||
143 | |||
144 | struct tegra_aes_engine { | ||
145 | struct tegra_aes_dev *dd; | ||
146 | struct tegra_aes_ctx *ctx; | ||
147 | struct clk *iclk; | ||
148 | struct clk *pclk; | ||
149 | struct ablkcipher_request *req; | ||
150 | struct scatterlist *in_sg; | ||
151 | struct completion op_complete; | ||
152 | struct scatterlist *out_sg; | ||
153 | void __iomem *io_base; | ||
154 | void __iomem *ivkey_base; | ||
155 | unsigned long phys_base; | ||
156 | unsigned long iram_phys; | ||
157 | void *iram_virt; | ||
158 | dma_addr_t ivkey_phys_base; | ||
159 | dma_addr_t dma_buf_in; | ||
160 | dma_addr_t dma_buf_out; | ||
161 | size_t total; | ||
162 | size_t in_offset; | ||
163 | size_t out_offset; | ||
164 | u32 engine_offset; | ||
165 | u32 *buf_in; | ||
166 | u32 *buf_out; | ||
167 | int res_id; | ||
168 | unsigned long busy; | ||
169 | u8 irq; | ||
170 | u32 status; | ||
171 | }; | ||
172 | |||
173 | struct tegra_aes_dev { | ||
174 | struct device *dev; | ||
175 | struct tegra_aes_slot *slots; | ||
176 | struct tegra_aes_engine bsev; | ||
177 | struct tegra_aes_engine bsea; | ||
178 | struct nvmap_client *client; | ||
179 | struct nvmap_handle_ref *h_ref; | ||
180 | struct crypto_queue queue; | ||
181 | spinlock_t lock; | ||
182 | u64 ctr; | ||
183 | unsigned long flags; | ||
184 | u8 dt[DEFAULT_RNG_BLK_SZ]; | ||
185 | }; | ||
186 | |||
187 | static struct tegra_aes_dev *aes_dev; | ||
188 | |||
189 | struct tegra_aes_ctx { | ||
190 | struct tegra_aes_dev *dd; | ||
191 | struct tegra_aes_engine *eng; | ||
192 | struct tegra_aes_slot *slot; | ||
193 | int key[AES_MAX_KEY_SIZE]; | ||
194 | int keylen; | ||
195 | bool use_ssk; | ||
196 | u8 dt[DEFAULT_RNG_BLK_SZ]; | ||
197 | }; | ||
198 | |||
199 | static struct tegra_aes_ctx rng_ctx; | ||
200 | |||
201 | /* keep registered devices data here */ | ||
202 | static LIST_HEAD(slot_list); | ||
203 | static DEFINE_SPINLOCK(list_lock); | ||
204 | |||
205 | /* Engine specific work queues */ | ||
206 | static void bsev_workqueue_handler(struct work_struct *work); | ||
207 | static void bsea_workqueue_handler(struct work_struct *work); | ||
208 | |||
209 | static DECLARE_WORK(bsev_work, bsev_workqueue_handler); | ||
210 | static DECLARE_WORK(bsea_work, bsea_workqueue_handler); | ||
211 | |||
212 | static struct workqueue_struct *bsev_wq; | ||
213 | static struct workqueue_struct *bsea_wq; | ||
214 | |||
215 | extern unsigned long long tegra_chip_uid(void); | ||
216 | |||
217 | static inline u32 aes_readl(struct tegra_aes_engine *engine, u32 offset) | ||
218 | { | ||
219 | return readl(engine->io_base + offset); | ||
220 | } | ||
221 | |||
222 | static inline void aes_writel(struct tegra_aes_engine *engine, | ||
223 | u32 val, u32 offset) | ||
224 | { | ||
225 | writel(val, engine->io_base + offset); | ||
226 | } | ||
227 | |||
228 | static int alloc_iram(struct tegra_aes_dev *dd) | ||
229 | { | ||
230 | size_t size; | ||
231 | int err; | ||
232 | |||
233 | dd->h_ref = NULL; | ||
234 | |||
235 | /* [key + iv + updated-iv = 64B] * 8 = 512 bytes */ | ||
236 | size = AES_MAX_KEY_SIZE; | ||
237 | dd->client = nvmap_create_client(nvmap_dev, "aes_bsea"); | ||
238 | if (IS_ERR(dd->client)) { | ||
239 | dev_err(dd->dev, "nvmap_create_client failed\n"); | ||
240 | goto out; | ||
241 | } | ||
242 | |||
243 | dd->h_ref = nvmap_create_handle(dd->client, size); | ||
244 | if (IS_ERR(dd->h_ref)) { | ||
245 | dev_err(dd->dev, "nvmap_create_handle failed\n"); | ||
246 | goto out; | ||
247 | } | ||
248 | |||
249 | /* Allocate memory in the iram */ | ||
250 | err = nvmap_alloc_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref), | ||
251 | NVMAP_HEAP_CARVEOUT_IRAM, size, 0); | ||
252 | if (err) { | ||
253 | dev_err(dd->dev, "nvmap_alloc_handle_id failed\n"); | ||
254 | nvmap_free_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref)); | ||
255 | goto out; | ||
256 | } | ||
257 | dd->bsea.iram_phys = nvmap_handle_address(dd->client, | ||
258 | nvmap_ref_to_id(dd->h_ref)); | ||
259 | |||
260 | dd->bsea.iram_virt = nvmap_mmap(dd->h_ref); /* get virtual address */ | ||
261 | if (!dd->bsea.iram_virt) { | ||
262 | dev_err(dd->dev, "%s: no mem, BSEA IRAM alloc failure\n", | ||
263 | __func__); | ||
264 | goto out; | ||
265 | } | ||
266 | |||
267 | memset(dd->bsea.iram_virt, 0, size); | ||
268 | return 0; | ||
269 | |||
270 | out: | ||
271 | if (dd->bsea.iram_virt) | ||
272 | nvmap_munmap(dd->h_ref, dd->bsea.iram_virt); | ||
273 | |||
274 | if (dd->client) { | ||
275 | nvmap_free_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref)); | ||
276 | nvmap_client_put(dd->client); | ||
277 | } | ||
278 | |||
279 | return -ENOMEM; | ||
280 | } | ||
281 | |||
282 | static void free_iram(struct tegra_aes_dev *dd) | ||
283 | { | ||
284 | if (dd->bsea.iram_virt) | ||
285 | nvmap_munmap(dd->h_ref, dd->bsea.iram_virt); | ||
286 | |||
287 | if (dd->client) { | ||
288 | nvmap_free_handle_id(dd->client, nvmap_ref_to_id(dd->h_ref)); | ||
289 | nvmap_client_put(dd->client); | ||
290 | } | ||
291 | } | ||
292 | |||
293 | static int aes_hw_init(struct tegra_aes_engine *engine) | ||
294 | { | ||
295 | struct tegra_aes_dev *dd = aes_dev; | ||
296 | int ret = 0; | ||
297 | |||
298 | if (engine->pclk) { | ||
299 | ret = clk_enable(engine->pclk); | ||
300 | if (ret < 0) { | ||
301 | dev_err(dd->dev, "%s: pclock enable fail(%d)\n", | ||
302 | __func__, ret); | ||
303 | return ret; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | if (engine->iclk) { | ||
308 | ret = clk_enable(engine->iclk); | ||
309 | if (ret < 0) { | ||
310 | dev_err(dd->dev, "%s: iclock enable fail(%d)\n", | ||
311 | __func__, ret); | ||
312 | if (engine->pclk) | ||
313 | clk_disable(engine->pclk); | ||
314 | return ret; | ||
315 | } | ||
316 | } | ||
317 | |||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | static void aes_hw_deinit(struct tegra_aes_engine *engine) | ||
322 | { | ||
323 | if (engine->pclk) | ||
324 | clk_disable(engine->pclk); | ||
325 | |||
326 | if (engine->iclk) | ||
327 | clk_disable(engine->iclk); | ||
328 | } | ||
329 | |||
330 | #define MIN_RETRIES 3 | ||
331 | static int aes_start_crypt(struct tegra_aes_engine *eng, u32 in_addr, | ||
332 | u32 out_addr, int nblocks, int mode, bool upd_iv) | ||
333 | { | ||
334 | u32 cmdq[AES_HW_MAX_ICQ_LENGTH]; | ||
335 | int qlen = 0, i, eng_busy, icq_empty, ret; | ||
336 | u32 value; | ||
337 | int retries = MIN_RETRIES; | ||
338 | |||
339 | start: | ||
340 | do { | ||
341 | value = aes_readl(eng, INTR_STATUS); | ||
342 | eng_busy = value & BIT(0); | ||
343 | icq_empty = value & BIT(3); | ||
344 | } while (eng_busy || (!icq_empty)); | ||
345 | |||
346 | aes_writel(eng, 0xFFFFFFFF, INTR_STATUS); | ||
347 | |||
348 | /* error, dma xfer complete */ | ||
349 | aes_writel(eng, 0x33, INT_ENB); | ||
350 | enable_irq(eng->irq); | ||
351 | |||
352 | cmdq[qlen++] = UCQOPCODE_DMASETUP << ICQBITSHIFT_OPCODE; | ||
353 | cmdq[qlen++] = in_addr; | ||
354 | cmdq[qlen++] = UCQOPCODE_BLKSTARTENGINE << ICQBITSHIFT_OPCODE | | ||
355 | (nblocks-1) << ICQBITSHIFT_BLKCNT; | ||
356 | cmdq[qlen++] = UCQOPCODE_DMACOMPLETE << ICQBITSHIFT_OPCODE; | ||
357 | |||
358 | value = aes_readl(eng, CMDQUE_CONTROL); | ||
359 | /* access SDRAM through AHB */ | ||
360 | value &= (~CMDQ_CTRL_SRC_STM_SEL_FIELD & ~CMDQ_CTRL_DST_STM_SEL_FIELD); | ||
361 | value |= (CMDQ_CTRL_SRC_STM_SEL_FIELD | CMDQ_CTRL_DST_STM_SEL_FIELD | | ||
362 | CMDQ_CTRL_ICMDQEN_FIELD | CMDQ_CTRL_ERROR_FLUSH_ENB); | ||
363 | aes_writel(eng, value, CMDQUE_CONTROL); | ||
364 | |||
365 | value = 0; | ||
366 | if (mode & FLAGS_CBC) { | ||
367 | value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) | | ||
368 | ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) | | ||
369 | ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) | | ||
370 | (((mode & FLAGS_ENCRYPT) ? 2 : 3) | ||
371 | << SECURE_XOR_POS_SHIFT) | | ||
372 | (0 << SECURE_INPUT_SEL_SHIFT) | | ||
373 | (((mode & FLAGS_ENCRYPT) ? 2 : 3) | ||
374 | << SECURE_VCTRAM_SEL_SHIFT) | | ||
375 | ((mode & FLAGS_ENCRYPT) ? 1 : 0) | ||
376 | << SECURE_CORE_SEL_SHIFT | | ||
377 | (0 << SECURE_RNG_ENB_SHIFT) | | ||
378 | (0 << SECURE_HASH_ENB_SHIFT)); | ||
379 | } else if (mode & FLAGS_OFB) { | ||
380 | value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) | | ||
381 | ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) | | ||
382 | ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) | | ||
383 | ((u32)0 << SECURE_IV_SELECT_SHIFT) | | ||
384 | (SECURE_XOR_POS_FIELD) | | ||
385 | (2 << SECURE_INPUT_SEL_SHIFT) | | ||
386 | (0 << SECURE_VCTRAM_SEL_SHIFT) | | ||
387 | (SECURE_CORE_SEL_FIELD) | | ||
388 | (0 << SECURE_RNG_ENB_SHIFT) | | ||
389 | (0 << SECURE_HASH_ENB_SHIFT)); | ||
390 | } else if (mode & FLAGS_RNG) { | ||
391 | value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) | | ||
392 | ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) | | ||
393 | ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) | | ||
394 | (0 << SECURE_XOR_POS_SHIFT) | | ||
395 | (0 << SECURE_INPUT_SEL_SHIFT) | | ||
396 | ((mode & FLAGS_ENCRYPT) ? 1 : 0) | ||
397 | << SECURE_CORE_SEL_SHIFT | | ||
398 | (1 << SECURE_RNG_ENB_SHIFT) | | ||
399 | (0 << SECURE_HASH_ENB_SHIFT)); | ||
400 | } else { | ||
401 | value = ((0x1 << SECURE_INPUT_ALG_SEL_SHIFT) | | ||
402 | ((eng->ctx->keylen * 8) << SECURE_INPUT_KEY_LEN_SHIFT) | | ||
403 | ((u32)upd_iv << SECURE_IV_SELECT_SHIFT) | | ||
404 | (0 << SECURE_XOR_POS_SHIFT) | | ||
405 | (0 << SECURE_INPUT_SEL_SHIFT) | | ||
406 | (((mode & FLAGS_ENCRYPT) ? 1 : 0) | ||
407 | << SECURE_CORE_SEL_SHIFT) | | ||
408 | (0 << SECURE_RNG_ENB_SHIFT) | | ||
409 | (0 << SECURE_HASH_ENB_SHIFT)); | ||
410 | } | ||
411 | aes_writel(eng, value, SECURE_INPUT_SELECT); | ||
412 | |||
413 | aes_writel(eng, out_addr, SECURE_DEST_ADDR); | ||
414 | INIT_COMPLETION(eng->op_complete); | ||
415 | |||
416 | for (i = 0; i < qlen - 1; i++) { | ||
417 | do { | ||
418 | value = aes_readl(eng, INTR_STATUS); | ||
419 | eng_busy = value & BIT(0); | ||
420 | icq_empty = value & BIT(3); | ||
421 | } while (eng_busy || (!icq_empty)); | ||
422 | aes_writel(eng, cmdq[i], ICMDQUE_WR); | ||
423 | } | ||
424 | |||
425 | ret = wait_for_completion_timeout(&eng->op_complete, | ||
426 | msecs_to_jiffies(150)); | ||
427 | if (ret == 0) { | ||
428 | dev_err(aes_dev->dev, "engine%d timed out (0x%x)\n", | ||
429 | eng->res_id, aes_readl(eng, INTR_STATUS)); | ||
430 | disable_irq(eng->irq); | ||
431 | return -ETIMEDOUT; | ||
432 | } | ||
433 | |||
434 | disable_irq(eng->irq); | ||
435 | aes_writel(eng, cmdq[qlen - 1], ICMDQUE_WR); | ||
436 | |||
437 | if ((eng->status != 0) && (retries-- > 0)) { | ||
438 | qlen = 0; | ||
439 | goto start; | ||
440 | } | ||
441 | |||
442 | return 0; | ||
443 | } | ||
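| /* | ||
| * A note on the flow above (a reading of the code, not original patch | ||
| * text): the internal command queue is fed DMASETUP, the source | ||
| * address and BLKSTARTENGINE before waiting on op_complete; the final | ||
| * DMACOMPLETE word is written only after the wait, so a retry can | ||
| * rebuild the queue from scratch if eng->status reports an error. | ||
| */ | ||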
444 | |||
445 | static void aes_release_key_slot(struct tegra_aes_ctx *ctx) | ||
446 | { | ||
447 | spin_lock(&list_lock); | ||
448 | ctx->slot->available = true; | ||
449 | ctx->slot = NULL; | ||
450 | spin_unlock(&list_lock); | ||
451 | } | ||
452 | |||
453 | static struct tegra_aes_slot *aes_find_key_slot(struct tegra_aes_dev *dd) | ||
454 | { | ||
455 | struct tegra_aes_slot *slot = NULL; | ||
456 | bool found = false; | ||
457 | |||
458 | spin_lock(&list_lock); | ||
459 | list_for_each_entry(slot, &slot_list, node) { | ||
460 | dev_dbg(dd->dev, "empty:%d, num:%d\n", slot->available, | ||
461 | slot->slot_num); | ||
462 | if (slot->available) { | ||
463 | slot->available = false; | ||
464 | found = true; | ||
465 | break; | ||
466 | } | ||
467 | } | ||
468 | |||
469 | spin_unlock(&list_lock); | ||
470 | return found ? slot : NULL; | ||
471 | } | ||
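| /* | ||
| * Slot lifecycle, as used elsewhere in this file: tegra_aes_setkey() | ||
| * claims a slot through aes_find_key_slot() and the tfm keeps it until | ||
| * tegra_aes_cra_exit() returns it via aes_release_key_slot(), so a | ||
| * long-lived tfm pins one of the AES_NR_KEYSLOTS hardware slots. | ||
| */ | ||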
472 | |||
473 | static int aes_set_key(struct tegra_aes_engine *eng, int slot_num) | ||
474 | { | ||
475 | struct tegra_aes_dev *dd = aes_dev; | ||
476 | u32 value, cmdq[2]; | ||
477 | int i, eng_busy, icq_empty, dma_busy; | ||
478 | |||
479 | if (!eng) { | ||
480 | dev_err(dd->dev, "%s: context invalid\n", __func__); | ||
481 | return -EINVAL; | ||
482 | } | ||
483 | |||
484 | /* enable key schedule generation in hardware */ | ||
485 | value = aes_readl(eng, SECURE_CONFIG_EXT); | ||
486 | value &= ~SECURE_KEY_SCH_DIS_FIELD; | ||
487 | aes_writel(eng, value, SECURE_CONFIG_EXT); | ||
488 | |||
489 | /* select the key slot */ | ||
490 | value = aes_readl(eng, SECURE_CONFIG); | ||
491 | value &= ~SECURE_KEY_INDEX_FIELD; | ||
492 | value |= (slot_num << SECURE_KEY_INDEX_SHIFT); | ||
493 | aes_writel(eng, value, SECURE_CONFIG); | ||
494 | |||
495 | if (slot_num == SSK_SLOT_NUM) | ||
496 | goto out; | ||
497 | |||
498 | if (eng->res_id == TEGRA_ARB_BSEV) { | ||
499 | memset(dd->bsev.ivkey_base, 0, AES_HW_KEY_TABLE_LENGTH_BYTES); | ||
500 | memcpy(dd->bsev.ivkey_base, eng->ctx->key, eng->ctx->keylen); | ||
501 | |||
502 | /* copy the key table from sdram to vram */ | ||
503 | cmdq[0] = 0; | ||
504 | cmdq[0] = UCQOPCODE_MEMDMAVD << ICQBITSHIFT_OPCODE | | ||
505 | (MEMDMA_DIR_DTOVRAM << MEMDMABITSHIFT_DIR) | | ||
506 | (AES_HW_KEY_TABLE_LENGTH_BYTES/sizeof(u32)) | ||
507 | << MEMDMABITSHIFT_NUM_WORDS; | ||
508 | cmdq[1] = (u32)eng->ivkey_phys_base; | ||
509 | for (i = 0; i < ARRAY_SIZE(cmdq); i++) | ||
510 | aes_writel(eng, cmdq[i], ICMDQUE_WR); | ||
511 | do { | ||
512 | value = aes_readl(eng, INTR_STATUS); | ||
513 | eng_busy = value & BIT(0); | ||
514 | icq_empty = value & BIT(3); | ||
515 | dma_busy = value & BIT(23); | ||
516 | } while (eng_busy && !icq_empty && dma_busy); | ||
517 | |||
518 | /* settable command to get key into internal registers */ | ||
519 | value = 0; | ||
520 | value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE | | ||
521 | UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL | | ||
522 | UCQCMD_VRAM_SEL << ICQBITSHIFT_VRAMSEL | | ||
523 | (UCQCMD_KEYTABLESEL | slot_num) | ||
524 | << ICQBITSHIFT_KEYTABLEID; | ||
525 | aes_writel(eng, value, ICMDQUE_WR); | ||
526 | do { | ||
527 | value = aes_readl(eng, INTR_STATUS); | ||
528 | eng_busy = value & BIT(0); | ||
529 | icq_empty = value & BIT(3); | ||
530 | } while (eng_busy && !icq_empty); | ||
531 | } else { | ||
532 | memset(dd->bsea.iram_virt, 0, AES_HW_KEY_TABLE_LENGTH_BYTES); | ||
533 | memcpy(dd->bsea.iram_virt, eng->ctx->key, eng->ctx->keylen); | ||
534 | |||
535 | /* set iram access cfg bit 0 if address >128K */ | ||
536 | if (dd->bsea.iram_phys > 0x00020000) | ||
537 | aes_writel(eng, BIT(0), IRAM_ACCESS_CFG); | ||
538 | else | ||
539 | aes_writel(eng, 0, IRAM_ACCESS_CFG); | ||
540 | |||
541 | /* settable command to get key into internal registers */ | ||
542 | value = 0; | ||
543 | value = UCQOPCODE_SETTABLE << ICQBITSHIFT_OPCODE | | ||
544 | UCQCMD_CRYPTO_TABLESEL << ICQBITSHIFT_TABLESEL | | ||
545 | (UCQCMD_KEYTABLESEL | slot_num) | ||
546 | << ICQBITSHIFT_KEYTABLEID | | ||
547 | dd->bsea.iram_phys >> 2; | ||
548 | aes_writel(eng, value, ICMDQUE_WR); | ||
549 | do { | ||
550 | value = aes_readl(eng, INTR_STATUS); | ||
551 | eng_busy = value & BIT(0); | ||
552 | icq_empty = value & BIT(3); | ||
553 | } while (eng_busy && !icq_empty); | ||
554 | } | ||
555 | |||
556 | out: | ||
557 | return 0; | ||
558 | } | ||
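| /* | ||
| * Summary of the two branches above: BSEV first MEMDMAs the key table | ||
| * from SDRAM into video RAM and then issues SETTABLE against VRAM, | ||
| * while BSEA stages the key in IRAM and passes the IRAM word address | ||
| * directly in the SETTABLE command. | ||
| */ | ||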
559 | |||
560 | static int tegra_aes_handle_req(struct tegra_aes_engine *eng) | ||
561 | { | ||
562 | struct tegra_aes_dev *dd = aes_dev; | ||
563 | struct tegra_aes_ctx *ctx; | ||
564 | struct crypto_async_request *async_req, *backlog; | ||
565 | struct tegra_aes_reqctx *rctx; | ||
566 | struct ablkcipher_request *req; | ||
567 | unsigned long irq_flags; | ||
568 | int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES; | ||
569 | int nblocks, total, ret = 0, count = 0; | ||
570 | dma_addr_t addr_in, addr_out; | ||
571 | struct scatterlist *in_sg, *out_sg; | ||
572 | |||
573 | spin_lock_irqsave(&dd->lock, irq_flags); | ||
574 | backlog = crypto_get_backlog(&dd->queue); | ||
575 | async_req = crypto_dequeue_request(&dd->queue); | ||
576 | if (!async_req) | ||
577 | clear_bit(FLAGS_BUSY, &eng->busy); | ||
578 | spin_unlock_irqrestore(&dd->lock, irq_flags); | ||
579 | |||
580 | if (!async_req) | ||
581 | return -ENODATA; | ||
582 | |||
583 | if (backlog) | ||
584 | backlog->complete(backlog, -EINPROGRESS); | ||
585 | |||
586 | req = ablkcipher_request_cast(async_req); | ||
587 | dev_dbg(dd->dev, "%s: get new req (engine #%d)\n", __func__, | ||
588 | eng->res_id); | ||
589 | |||
590 | if (!req->src || !req->dst) | ||
591 | return -EINVAL; | ||
592 | |||
593 | /* take the hardware semaphore */ | ||
594 | if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) { | ||
595 | dev_err(dd->dev, "aes hardware (%d) not available\n", | ||
596 | eng->res_id); | ||
597 | return -EBUSY; | ||
598 | } | ||
599 | |||
600 | /* assign new request to device */ | ||
601 | eng->req = req; | ||
602 | eng->total = req->nbytes; | ||
603 | eng->in_offset = 0; | ||
604 | eng->in_sg = req->src; | ||
605 | eng->out_offset = 0; | ||
606 | eng->out_sg = req->dst; | ||
607 | |||
608 | in_sg = eng->in_sg; | ||
609 | out_sg = eng->out_sg; | ||
610 | total = eng->total; | ||
611 | |||
612 | rctx = ablkcipher_request_ctx(req); | ||
613 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
614 | rctx->mode &= FLAGS_MODE_MASK; | ||
615 | dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; | ||
616 | eng->ctx = ctx; | ||
617 | |||
618 | if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && req->info) { | ||
619 | /* set the IV in the AES hardware slot. | ||
620 | * The hardware generates an updated IV only after the IV is set in | ||
621 | * the slot, so the key and IV are programmed asynchronously. | ||
622 | */ | ||
623 | memcpy(eng->buf_in, (u8 *)req->info, AES_BLOCK_SIZE); | ||
624 | |||
625 | ret = aes_start_crypt(eng, (u32)eng->dma_buf_in, | ||
626 | (u32)eng->dma_buf_out, 1, FLAGS_CBC, false); | ||
627 | if (ret < 0) { | ||
628 | dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret); | ||
629 | goto out; | ||
630 | } | ||
631 | } | ||
632 | |||
633 | while (total) { | ||
634 | dev_dbg(dd->dev, "remain: %d\n", total); | ||
635 | ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE); | ||
636 | if (!ret) { | ||
637 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
638 | goto out; | ||
639 | } | ||
640 | |||
641 | ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE); | ||
642 | if (!ret) { | ||
643 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
644 | dma_unmap_sg(dd->dev, eng->in_sg, | ||
645 | 1, DMA_TO_DEVICE); | ||
646 | goto out; | ||
647 | } | ||
648 | |||
649 | addr_in = sg_dma_address(in_sg); | ||
650 | addr_out = sg_dma_address(out_sg); | ||
651 | count = min((int)sg_dma_len(in_sg), (int)dma_max); | ||
652 | WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg)); | ||
653 | nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE); | ||
654 | |||
655 | ret = aes_start_crypt(eng, addr_in, addr_out, nblocks, | ||
656 | dd->flags, true); | ||
657 | |||
658 | dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE); | ||
659 | dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE); | ||
660 | if (ret < 0) { | ||
661 | dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret); | ||
662 | goto out; | ||
663 | } | ||
664 | |||
665 | dev_dbg(dd->dev, "out: copied %d\n", count); | ||
666 | total -= count; | ||
667 | in_sg = sg_next(in_sg); | ||
668 | out_sg = sg_next(out_sg); | ||
669 | WARN_ON(((total != 0) && (!in_sg || !out_sg))); | ||
670 | } | ||
671 | |||
672 | out: | ||
673 | /* release the hardware semaphore */ | ||
674 | tegra_arb_mutex_unlock(eng->res_id); | ||
675 | eng->total = total; | ||
676 | |||
677 | if (eng->req->base.complete) | ||
678 | eng->req->base.complete(&eng->req->base, ret); | ||
679 | |||
680 | dev_dbg(dd->dev, "%s: exit\n", __func__); | ||
681 | return ret; | ||
682 | } | ||
683 | |||
684 | static int tegra_aes_key_save(struct tegra_aes_ctx *ctx) | ||
685 | { | ||
686 | struct tegra_aes_dev *dd = aes_dev; | ||
687 | int retry_count, eng_busy, ret, eng_no; | ||
688 | struct tegra_aes_engine *eng[2] = {&dd->bsev, &dd->bsea}; | ||
689 | unsigned long flags; | ||
690 | |||
691 | /* check for engine free state */ | ||
692 | for (eng_no = 0; eng_no < ARRAY_SIZE(eng); eng_no++) { | ||
693 | for (retry_count = 0; retry_count <= 10; retry_count++) { | ||
694 | spin_lock_irqsave(&dd->lock, flags); | ||
695 | eng_busy = test_and_set_bit(FLAGS_BUSY, | ||
696 | &eng[eng_no]->busy); | ||
697 | spin_unlock_irqrestore(&dd->lock, flags); | ||
698 | |||
699 | if (!eng_busy) | ||
700 | break; | ||
701 | |||
702 | if (retry_count == 10) { | ||
703 | dev_err(dd->dev, | ||
704 | "%s: eng=%d busy, wait timeout\n", | ||
705 | __func__, eng[eng_no]->res_id); | ||
706 | ret = -EBUSY; | ||
707 | goto out; | ||
708 | } | ||
709 | mdelay(5); | ||
710 | } | ||
711 | } | ||
712 | |||
713 | /* save key in the engine */ | ||
714 | for (eng_no = 0; eng_no < ARRAY_SIZE(eng); eng_no++) { | ||
715 | ret = aes_hw_init(eng[eng_no]); | ||
716 | if (ret < 0) { | ||
717 | dev_err(dd->dev, "%s: eng=%d hw init fail(%d)\n", __func__, eng[eng_no]->res_id, ret); | ||
718 | eng_no = ARRAY_SIZE(eng); /* all engines were marked busy above */ | ||
719 | goto out; | ||
720 | } | ||
721 | eng[eng_no]->ctx = ctx; | ||
722 | if (ctx->use_ssk) | ||
723 | aes_set_key(eng[eng_no], SSK_SLOT_NUM); | ||
724 | else | ||
725 | aes_set_key(eng[eng_no], ctx->slot->slot_num); | ||
726 | |||
727 | aes_hw_deinit(eng[eng_no]); | ||
728 | } | ||
729 | out: | ||
730 | spin_lock_irqsave(&dd->lock, flags); | ||
731 | while (--eng_no >= 0) | ||
732 | clear_bit(FLAGS_BUSY, &eng[eng_no]->busy); | ||
733 | spin_unlock_irqrestore(&dd->lock, flags); | ||
734 | |||
735 | return ret; | ||
736 | } | ||
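| /* | ||
| * The key is deliberately programmed into both BSEV and BSEA above, so | ||
| * whichever engine later dequeues a request for this tfm already has | ||
| * the key material in its slot. | ||
| */ | ||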
737 | |||
738 | static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
739 | unsigned int keylen) | ||
740 | { | ||
741 | struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
742 | struct tegra_aes_dev *dd = aes_dev; | ||
743 | struct tegra_aes_slot *key_slot; | ||
744 | int ret = 0; | ||
745 | |||
746 | if (!ctx || !dd) { | ||
747 | pr_err("%s: invalid context, ctx=%p, dd=%p\n", | ||
748 | __func__, ctx, dd); | ||
749 | return -EINVAL; | ||
750 | } | ||
751 | |||
752 | if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) && | ||
753 | (keylen != AES_KEYSIZE_256)) { | ||
754 | dev_err(dd->dev, "unsupported key size\n"); | ||
755 | return -EINVAL; | ||
756 | } | ||
757 | |||
758 | /* take the hardware semaphore */ | ||
759 | if (tegra_arb_mutex_lock_timeout(dd->bsev.res_id, ARB_SEMA_TIMEOUT) < 0) { | ||
760 | dev_err(dd->dev, "aes hardware (%d) not available\n", dd->bsev.res_id); | ||
761 | return -EBUSY; | ||
762 | } | ||
763 | |||
764 | if (tegra_arb_mutex_lock_timeout(dd->bsea.res_id, ARB_SEMA_TIMEOUT) < 0) { | ||
765 | dev_err(dd->dev, "aes hardware (%d) not available\n", dd->bsea.res_id); | ||
766 | tegra_arb_mutex_unlock(dd->bsev.res_id); | ||
767 | return -EBUSY; | ||
768 | } | ||
769 | |||
770 | dev_dbg(dd->dev, "keylen: %d\n", keylen); | ||
771 | ctx->dd = dd; | ||
772 | |||
773 | if (key) { | ||
774 | if (!ctx->slot) { | ||
775 | key_slot = aes_find_key_slot(dd); | ||
776 | if (!key_slot) { | ||
777 | dev_err(dd->dev, "no empty slot\n"); | ||
778 | ret = -EBUSY; | ||
779 | goto out; | ||
780 | } | ||
781 | ctx->slot = key_slot; | ||
782 | } | ||
783 | |||
784 | /* copy the key to the proper slot */ | ||
785 | memset(ctx->key, 0, AES_MAX_KEY_SIZE); | ||
786 | memcpy(ctx->key, key, keylen); | ||
787 | ctx->keylen = keylen; | ||
788 | ctx->use_ssk = false; | ||
789 | } else { | ||
790 | ctx->use_ssk = true; | ||
791 | ctx->keylen = AES_KEYSIZE_128; | ||
792 | } | ||
793 | |||
794 | ret = tegra_aes_key_save(ctx); | ||
795 | if (ret != 0) | ||
796 | dev_err(dd->dev, "%s failed\n", __func__); | ||
797 | out: | ||
798 | tegra_arb_mutex_unlock(dd->bsev.res_id); | ||
799 | tegra_arb_mutex_unlock(dd->bsea.res_id); | ||
800 | dev_dbg(dd->dev, "done\n"); | ||
801 | return ret; | ||
802 | } | ||
803 | |||
804 | static void bsev_workqueue_handler(struct work_struct *work) | ||
805 | { | ||
806 | struct tegra_aes_dev *dd = aes_dev; | ||
807 | struct tegra_aes_engine *engine = &dd->bsev; | ||
808 | int ret; | ||
809 | |||
810 | aes_hw_init(engine); | ||
811 | |||
812 | /* empty the crypto queue and then return */ | ||
813 | do { | ||
814 | ret = tegra_aes_handle_req(engine); | ||
815 | } while (!ret); | ||
816 | |||
817 | aes_hw_deinit(engine); | ||
818 | } | ||
819 | |||
820 | static void bsea_workqueue_handler(struct work_struct *work) | ||
821 | { | ||
822 | struct tegra_aes_dev *dd = aes_dev; | ||
823 | struct tegra_aes_engine *engine = &dd->bsea; | ||
824 | int ret; | ||
825 | |||
826 | aes_hw_init(engine); | ||
827 | |||
828 | /* empty the crypto queue and then return */ | ||
829 | do { | ||
830 | ret = tegra_aes_handle_req(engine); | ||
831 | } while (!ret); | ||
832 | |||
833 | aes_hw_deinit(engine); | ||
834 | } | ||
835 | |||
836 | #define INT_ERROR_MASK 0xFFF000 | ||
837 | static irqreturn_t aes_bsev_irq(int irq, void *dev_id) | ||
838 | { | ||
839 | struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id; | ||
840 | u32 value = aes_readl(&dd->bsev, INTR_STATUS); | ||
841 | |||
842 | dev_dbg(dd->dev, "bsev irq_stat: 0x%x", value); | ||
843 | dd->bsev.status = 0; | ||
844 | if (value & INT_ERROR_MASK) { | ||
845 | aes_writel(&dd->bsev, INT_ERROR_MASK, INTR_STATUS); | ||
846 | dd->bsev.status = value & INT_ERROR_MASK; | ||
847 | } | ||
848 | |||
849 | value = aes_readl(&dd->bsev, INTR_STATUS); | ||
850 | if (!(value & ENGINE_BUSY_FIELD)) | ||
851 | complete(&dd->bsev.op_complete); | ||
852 | |||
853 | return IRQ_HANDLED; | ||
854 | } | ||
855 | |||
856 | static irqreturn_t aes_bsea_irq(int irq, void *dev_id) | ||
857 | { | ||
858 | struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id; | ||
859 | u32 value = aes_readl(&dd->bsea, INTR_STATUS); | ||
860 | |||
861 | dev_dbg(dd->dev, "bsea irq_stat: 0x%x", value); | ||
862 | dd->bsea.status = 0; | ||
863 | if (value & INT_ERROR_MASK) { | ||
864 | aes_writel(&dd->bsea, INT_ERROR_MASK, INTR_STATUS); | ||
865 | dd->bsea.status = value & INT_ERROR_MASK; | ||
866 | } | ||
867 | |||
868 | value = aes_readl(&dd->bsea, INTR_STATUS); | ||
869 | if (!(value & ENGINE_BUSY_FIELD)) | ||
870 | complete(&dd->bsea.op_complete); | ||
871 | |||
872 | return IRQ_HANDLED; | ||
873 | } | ||
874 | |||
875 | static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
876 | { | ||
877 | struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
878 | struct tegra_aes_dev *dd = aes_dev; | ||
879 | unsigned long flags; | ||
880 | int err = 0; | ||
881 | int bsev_busy; | ||
882 | int bsea_busy; | ||
883 | |||
884 | dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n", req->nbytes, | ||
885 | !!(mode & FLAGS_ENCRYPT), | ||
886 | !!(mode & FLAGS_CBC), | ||
887 | !!(mode & FLAGS_OFB)); | ||
888 | |||
889 | rctx->mode = mode; | ||
890 | |||
891 | spin_lock_irqsave(&dd->lock, flags); | ||
892 | err = ablkcipher_enqueue_request(&dd->queue, req); | ||
893 | bsev_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsev.busy); | ||
894 | bsea_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsea.busy); | ||
895 | spin_unlock_irqrestore(&dd->lock, flags); | ||
896 | |||
897 | if (!bsev_busy) | ||
898 | queue_work(bsev_wq, &bsev_work); | ||
899 | if (!bsea_busy) | ||
900 | queue_work(bsea_wq, &bsea_work); | ||
901 | |||
902 | return err; | ||
903 | } | ||
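| /* | ||
| * Requests are enqueued once on the shared dd->queue; both engine work | ||
| * handlers drain that same queue, so kicking whichever engine is idle | ||
| * lets the first free one pick the request up. | ||
| */ | ||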
904 | |||
905 | static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
906 | { | ||
907 | return tegra_aes_crypt(req, FLAGS_ENCRYPT); | ||
908 | } | ||
909 | |||
910 | static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
911 | { | ||
912 | return tegra_aes_crypt(req, 0); | ||
913 | } | ||
914 | |||
915 | static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
916 | { | ||
917 | return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); | ||
918 | } | ||
919 | |||
920 | static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
921 | { | ||
922 | return tegra_aes_crypt(req, FLAGS_CBC); | ||
923 | } | ||
924 | static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req) | ||
925 | { | ||
926 | return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB); | ||
927 | } | ||
928 | |||
929 | static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req) | ||
930 | { | ||
931 | return tegra_aes_crypt(req, FLAGS_OFB); | ||
932 | } | ||
933 | |||
934 | static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata, | ||
935 | unsigned int dlen) | ||
936 | { | ||
937 | struct tegra_aes_dev *dd = aes_dev; | ||
938 | struct tegra_aes_engine *eng = rng_ctx.eng; | ||
939 | unsigned long flags; | ||
940 | int ret, i; | ||
941 | u8 *dest = rdata, *dt = rng_ctx.dt; | ||
942 | |||
943 | /* take the hardware semaphore */ | ||
944 | if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) { | ||
945 | dev_err(dd->dev, "aes hardware (%d) not available\n", | ||
946 | eng->res_id); | ||
947 | return -EBUSY; | ||
948 | } | ||
949 | |||
950 | ret = aes_hw_init(eng); | ||
951 | if (ret < 0) { | ||
952 | dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret); | ||
953 | dlen = ret; | ||
954 | goto fail; | ||
955 | } | ||
956 | |||
957 | memset(eng->buf_in, 0, AES_BLOCK_SIZE); | ||
958 | memcpy(eng->buf_in, dt, DEFAULT_RNG_BLK_SZ); | ||
959 | |||
960 | ret = aes_start_crypt(eng, (u32)eng->dma_buf_in, (u32)eng->dma_buf_out, | ||
961 | 1, FLAGS_ENCRYPT | FLAGS_RNG, true); | ||
962 | if (ret < 0) { | ||
963 | dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret); | ||
964 | dlen = ret; | ||
965 | goto out; | ||
966 | } | ||
967 | memcpy(dest, eng->buf_out, dlen); | ||
968 | |||
969 | /* update the DT */ | ||
970 | for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) { | ||
971 | dt[i] += 1; | ||
972 | if (dt[i] != 0) | ||
973 | break; | ||
974 | } | ||
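| /* In effect the loop above is a big-endian increment of the ANSI | ||
| * X9.31 date/time vector: the last byte is bumped first and any | ||
| * carry ripples toward byte 0. */ | ||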
975 | |||
976 | out: | ||
977 | aes_hw_deinit(eng); | ||
978 | |||
979 | spin_lock_irqsave(&dd->lock, flags); | ||
980 | clear_bit(FLAGS_BUSY, &eng->busy); | ||
981 | spin_unlock_irqrestore(&dd->lock, flags); | ||
982 | |||
983 | fail: | ||
984 | /* release the hardware semaphore */ | ||
985 | tegra_arb_mutex_unlock(eng->res_id); | ||
986 | dev_dbg(dd->dev, "%s: done\n", __func__); | ||
987 | return dlen; | ||
988 | } | ||
989 | |||
990 | static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, | ||
991 | unsigned int slen) | ||
992 | { | ||
993 | struct tegra_aes_dev *dd = aes_dev; | ||
994 | struct tegra_aes_ctx *ctx = &rng_ctx; | ||
995 | struct tegra_aes_engine *eng = NULL; | ||
996 | struct tegra_aes_slot *key_slot; | ||
997 | int bsea_busy = false; | ||
998 | unsigned long flags; | ||
999 | struct timespec ts; | ||
1000 | u64 nsec, tmp[2]; | ||
1001 | int ret = 0; | ||
1002 | u8 *dt; | ||
1003 | |||
1004 | if (!dd) | ||
1005 | return -EINVAL; | ||
1006 | |||
1007 | if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { | ||
1008 | return -EINVAL; | ||
1009 | } | ||
1010 | |||
1011 | spin_lock_irqsave(&dd->lock, flags); | ||
1012 | bsea_busy = test_and_set_bit(FLAGS_BUSY, &dd->bsea.busy); | ||
1013 | spin_unlock_irqrestore(&dd->lock, flags); | ||
1014 | |||
1015 | if (!bsea_busy) | ||
1016 | eng = &dd->bsea; | ||
1017 | else | ||
1018 | return -EBUSY; | ||
1019 | |||
1020 | ctx->eng = eng; | ||
1021 | dd->flags = FLAGS_ENCRYPT | FLAGS_RNG; | ||
1022 | |||
1023 | if (!ctx->slot) { | ||
1024 | key_slot = aes_find_key_slot(dd); | ||
1025 | if (!key_slot) { | ||
1026 | dev_err(dd->dev, "no empty slot\n"); | ||
1027 | clear_bit(FLAGS_BUSY, &dd->bsea.busy); | ||
1028 | return -ENOMEM; | ||
| } | ||
1029 | ctx->slot = key_slot; | ||
1030 | } | ||
1031 | |||
1032 | /* take the hardware semaphore */ | ||
1033 | if (tegra_arb_mutex_lock_timeout(eng->res_id, ARB_SEMA_TIMEOUT) < 0) { | ||
1034 | dev_err(dd->dev, "aes hardware (%d) not available\n", | ||
1035 | eng->res_id); | ||
1036 | return -EBUSY; | ||
1037 | } | ||
1038 | |||
1039 | ret = aes_hw_init(eng); | ||
1040 | if (ret < 0) { | ||
1041 | dev_err(dd->dev, "%s: hw init fail(%d)\n", __func__, ret); | ||
1042 | goto fail; | ||
1043 | } | ||
1044 | |||
1045 | memcpy(ctx->key, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128); | ||
1046 | |||
1047 | eng->ctx = ctx; | ||
1048 | eng->ctx->keylen = AES_KEYSIZE_128; | ||
1049 | aes_set_key(eng, ctx->slot->slot_num); | ||
1050 | |||
1051 | /* set seed to the aes hw slot */ | ||
1052 | memset(eng->buf_in, 0, AES_BLOCK_SIZE); | ||
1053 | memcpy(eng->buf_in, seed, DEFAULT_RNG_BLK_SZ); | ||
1054 | ret = aes_start_crypt(eng, (u32)eng->dma_buf_in, | ||
1055 | (u32)eng->dma_buf_out, 1, FLAGS_CBC, false); | ||
1056 | if (ret < 0) { | ||
1057 | dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret); | ||
1058 | goto out; | ||
1059 | } | ||
1060 | |||
1061 | if (slen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { | ||
1062 | dt = seed + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; | ||
1063 | } else { | ||
1064 | getnstimeofday(&ts); | ||
1065 | nsec = timespec_to_ns(&ts); | ||
1066 | do_div(nsec, 1000); | ||
1067 | nsec ^= dd->ctr << 56; | ||
1068 | dd->ctr++; | ||
1069 | tmp[0] = nsec; | ||
1070 | tmp[1] = tegra_chip_uid(); | ||
1071 | dt = (u8 *)tmp; | ||
1072 | } | ||
1073 | memcpy(ctx->dt, dt, DEFAULT_RNG_BLK_SZ); | ||
1074 | |||
1075 | out: | ||
1076 | aes_hw_deinit(eng); | ||
1077 | |||
1078 | fail: | ||
1079 | /* release the hardware semaphore */ | ||
1080 | tegra_arb_mutex_unlock(eng->res_id); | ||
1081 | |||
1082 | dev_dbg(dd->dev, "%s: done\n", __func__); | ||
1083 | return ret; | ||
1084 | } | ||
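| /* | ||
| * Seed layout consumed above (matching the seedsize advertised in the | ||
| * rng alg below): DEFAULT_RNG_BLK_SZ bytes of V (the initial vector), | ||
| * then AES_KEYSIZE_128 bytes of key, then optionally another | ||
| * DEFAULT_RNG_BLK_SZ bytes of DT; when DT is not supplied it is | ||
| * synthesized from the microsecond clock, a counter and the chip UID. | ||
| */ | ||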
1085 | |||
1086 | static int tegra_aes_cra_init(struct crypto_tfm *tfm) | ||
1087 | { | ||
1088 | tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx); | ||
1089 | return 0; | ||
1090 | } | ||
1091 | |||
1092 | static void tegra_aes_cra_exit(struct crypto_tfm *tfm) | ||
1093 | { | ||
1094 | struct tegra_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1095 | |||
1096 | if (ctx && ctx->slot) | ||
1097 | aes_release_key_slot(ctx); | ||
1098 | } | ||
1099 | |||
1100 | static struct crypto_alg algs[] = { | ||
1101 | { | ||
1102 | .cra_name = "disabled_ecb(aes)", | ||
1103 | .cra_driver_name = "ecb-aes-tegra", | ||
1104 | .cra_priority = 100, | ||
1105 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1106 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1107 | .cra_ctxsize = sizeof(struct tegra_aes_ctx), | ||
1108 | .cra_alignmask = 3, | ||
1109 | .cra_type = &crypto_ablkcipher_type, | ||
1110 | .cra_module = THIS_MODULE, | ||
1111 | .cra_init = tegra_aes_cra_init, | ||
1112 | .cra_exit = tegra_aes_cra_exit, | ||
1113 | .cra_u.ablkcipher = { | ||
1114 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1115 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1116 | .setkey = tegra_aes_setkey, | ||
1117 | .encrypt = tegra_aes_ecb_encrypt, | ||
1118 | .decrypt = tegra_aes_ecb_decrypt, | ||
1119 | }, | ||
1120 | }, { | ||
1121 | .cra_name = "disabled_cbc(aes)", | ||
1122 | .cra_driver_name = "cbc-aes-tegra", | ||
1123 | .cra_priority = 100, | ||
1124 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1125 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1126 | .cra_ctxsize = sizeof(struct tegra_aes_ctx), | ||
1127 | .cra_alignmask = 3, | ||
1128 | .cra_type = &crypto_ablkcipher_type, | ||
1129 | .cra_module = THIS_MODULE, | ||
1130 | .cra_init = tegra_aes_cra_init, | ||
1131 | .cra_exit = tegra_aes_cra_exit, | ||
1132 | .cra_u.ablkcipher = { | ||
1133 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1134 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1135 | .ivsize = AES_BLOCK_SIZE, | ||
1136 | .setkey = tegra_aes_setkey, | ||
1137 | .encrypt = tegra_aes_cbc_encrypt, | ||
1138 | .decrypt = tegra_aes_cbc_decrypt, | ||
1139 | } | ||
1140 | }, { | ||
1141 | .cra_name = "disabled_ofb(aes)", | ||
1142 | .cra_driver_name = "ofb-aes-tegra", | ||
1143 | .cra_priority = 100, | ||
1144 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1145 | .cra_blocksize = AES_BLOCK_SIZE, | ||
1146 | .cra_ctxsize = sizeof(struct tegra_aes_ctx), | ||
1147 | .cra_alignmask = 3, | ||
1148 | .cra_type = &crypto_ablkcipher_type, | ||
1149 | .cra_module = THIS_MODULE, | ||
1150 | .cra_init = tegra_aes_cra_init, | ||
1151 | .cra_exit = tegra_aes_cra_exit, | ||
1152 | .cra_u.ablkcipher = { | ||
1153 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1154 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1155 | .ivsize = AES_BLOCK_SIZE, | ||
1156 | .setkey = tegra_aes_setkey, | ||
1157 | .encrypt = tegra_aes_ofb_encrypt, | ||
1158 | .decrypt = tegra_aes_ofb_decrypt, | ||
1159 | } | ||
1160 | }, { | ||
1161 | .cra_name = "disabled_ansi_cprng", | ||
1162 | .cra_driver_name = "rng-aes-tegra", | ||
1163 | .cra_priority = 100, | ||
1164 | .cra_flags = CRYPTO_ALG_TYPE_RNG, | ||
1165 | .cra_ctxsize = sizeof(struct tegra_aes_ctx), | ||
1166 | .cra_type = &crypto_rng_type, | ||
1167 | .cra_module = THIS_MODULE, | ||
1168 | .cra_init = tegra_aes_cra_init, | ||
1169 | .cra_exit = tegra_aes_cra_exit, | ||
1170 | .cra_u.rng = { | ||
1171 | .rng_make_random = tegra_aes_get_random, | ||
1172 | .rng_reset = tegra_aes_rng_reset, | ||
1173 | .seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ), | ||
1174 | } | ||
1175 | } | ||
1176 | }; | ||
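| /* | ||
| * The "disabled_" prefix in cra_name means a generic lookup such as | ||
| * crypto_alloc_ablkcipher("cbc(aes)", 0, 0) never selects these | ||
| * implementations; callers must opt in by the full name. A minimal | ||
| * usage sketch (illustration only): | ||
| * | ||
| *	tfm = crypto_alloc_ablkcipher("disabled_cbc(aes)", 0, 0); | ||
| *	if (IS_ERR(tfm)) | ||
| *		return PTR_ERR(tfm); | ||
| *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128); | ||
| */ | ||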
1177 | |||
1178 | static int tegra_aes_probe(struct platform_device *pdev) | ||
1179 | { | ||
1180 | struct device *dev = &pdev->dev; | ||
1181 | struct tegra_aes_dev *dd; | ||
1182 | struct resource *res[2]; | ||
1183 | int err = -ENOMEM, i = 0, j; | ||
1184 | |||
1185 | if (aes_dev) | ||
1186 | return -EEXIST; | ||
1187 | |||
1188 | dd = kzalloc(sizeof(struct tegra_aes_dev), GFP_KERNEL); | ||
1189 | if (dd == NULL) { | ||
1190 | dev_err(dev, "unable to alloc data struct.\n"); | ||
1191 | return -ENOMEM; | ||
1192 | } | ||
1193 | dd->dev = dev; | ||
1194 | platform_set_drvdata(pdev, dd); | ||
1195 | |||
1196 | dd->slots = kzalloc(sizeof(struct tegra_aes_slot) * AES_NR_KEYSLOTS, | ||
1197 | GFP_KERNEL); | ||
1198 | if (dd->slots == NULL) { | ||
1199 | dev_err(dev, "unable to alloc slot struct.\n"); | ||
1200 | goto out; | ||
1201 | } | ||
1202 | |||
1203 | spin_lock_init(&dd->lock); | ||
1204 | crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH); | ||
1205 | |||
1206 | /* Get the module base address */ | ||
1207 | res[0] = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1208 | res[1] = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1209 | if (!res[0] || !res[1]) { | ||
1210 | dev_err(dev, "invalid resource type: base\n"); | ||
1211 | err = -ENODEV; | ||
1212 | goto out; | ||
1213 | } | ||
1214 | dd->bsev.phys_base = res[0]->start; | ||
1215 | dd->bsev.io_base = ioremap(dd->bsev.phys_base, resource_size(res[0])); | ||
1216 | dd->bsea.phys_base = res[1]->start; | ||
1217 | dd->bsea.io_base = ioremap(dd->bsea.phys_base, resource_size(res[1])); | ||
1218 | |||
1219 | if (!dd->bsev.io_base || !dd->bsea.io_base) { | ||
1220 | dev_err(dev, "can't ioremap phys_base\n"); | ||
1221 | err = -ENOMEM; | ||
1222 | goto out; | ||
1223 | } | ||
1224 | |||
1225 | err = alloc_iram(dd); | ||
1226 | if (err < 0) { | ||
1227 | dev_err(dev, "Failed to allocate IRAM for BSEA\n"); | ||
1228 | goto out; | ||
1229 | } | ||
1230 | |||
1231 | dd->bsev.res_id = TEGRA_ARB_BSEV; | ||
1232 | dd->bsea.res_id = TEGRA_ARB_BSEA; | ||
1233 | |||
1234 | dd->bsev.pclk = clk_get(dev, "bsev"); | ||
1235 | if (IS_ERR(dd->bsev.pclk)) { | ||
1236 | dev_err(dev, "v: pclock intialization failed.\n"); | ||
1237 | err = -ENODEV; | ||
1238 | goto out; | ||
1239 | } | ||
1240 | |||
1241 | dd->bsev.iclk = clk_get(dev, "vde"); | ||
1242 | if (IS_ERR(dd->bsev.iclk)) { | ||
1243 | dev_err(dev, "v: iclock intialization failed.\n"); | ||
1244 | err = -ENODEV; | ||
1245 | goto out; | ||
1246 | } | ||
1247 | |||
1248 | dd->bsea.pclk = clk_get(dev, "bsea"); | ||
1249 | if (IS_ERR(dd->bsea.pclk)) { | ||
1250 | dev_err(dev, "a: pclock intialization failed.\n"); | ||
1251 | err = -ENODEV; | ||
1252 | goto out; | ||
1253 | } | ||
1254 | |||
1255 | dd->bsea.iclk = clk_get(dev, "sclk"); | ||
1256 | if (IS_ERR(dd->bsea.iclk)) { | ||
1257 | dev_err(dev, "a: iclock intialization failed.\n"); | ||
1258 | err = -ENODEV; | ||
1259 | goto out; | ||
1260 | } | ||
1261 | |||
1262 | err = clk_set_rate(dd->bsev.iclk, ULONG_MAX); | ||
1263 | if (err) { | ||
1264 | dev_err(dd->dev, "bsev iclk set_rate fail(%d)\n", err); | ||
1265 | goto out; | ||
1266 | } | ||
1267 | |||
1268 | err = clk_set_rate(dd->bsea.iclk, ULONG_MAX); | ||
1269 | if (err) { | ||
1270 | dev_err(dd->dev, "bsea iclk set_rate fail(%d)\n", err); | ||
1271 | goto out; | ||
1272 | } | ||
1273 | |||
1274 | /* | ||
1275 | * the following contiguous memory is allocated as follows - | ||
1276 | * - hardware key table | ||
1277 | * - key schedule | ||
1278 | */ | ||
1279 | dd->bsea.ivkey_base = NULL; | ||
1280 | dd->bsev.ivkey_base = dma_alloc_coherent(dev, SZ_512, | ||
1281 | &dd->bsev.ivkey_phys_base, GFP_KERNEL); | ||
1282 | if (!dd->bsev.ivkey_base) { | ||
1283 | dev_err(dev, "can not allocate iv/key buffer for BSEV\n"); | ||
1284 | err = -ENOMEM; | ||
1285 | goto out; | ||
1286 | } | ||
1287 | memset(dd->bsev.ivkey_base, 0, SZ_512); | ||
1288 | |||
1289 | dd->bsev.buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1290 | &dd->bsev.dma_buf_in, GFP_KERNEL); | ||
1291 | dd->bsea.buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1292 | &dd->bsea.dma_buf_in, GFP_KERNEL); | ||
1293 | if (!dd->bsev.buf_in || !dd->bsea.buf_in) { | ||
1294 | dev_err(dev, "can not allocate dma-in buffer\n"); | ||
1295 | err = -ENOMEM; | ||
1296 | goto out; | ||
1297 | } | ||
1298 | |||
1299 | dd->bsev.buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1300 | &dd->bsev.dma_buf_out, GFP_KERNEL); | ||
1301 | dd->bsea.buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1302 | &dd->bsea.dma_buf_out, GFP_KERNEL); | ||
1303 | if (!dd->bsev.buf_out || !dd->bsea.buf_out) { | ||
1304 | dev_err(dev, "can not allocate dma-out buffer\n"); | ||
1305 | err = -ENOMEM; | ||
1306 | goto out; | ||
1307 | } | ||
1308 | |||
1309 | init_completion(&dd->bsev.op_complete); | ||
1310 | init_completion(&dd->bsea.op_complete); | ||
1311 | |||
1312 | bsev_wq = alloc_workqueue("bsev_wq", WQ_HIGHPRI | WQ_UNBOUND, 1); | ||
1313 | bsea_wq = alloc_workqueue("bsea_wq", WQ_HIGHPRI | WQ_UNBOUND, 1); | ||
1314 | if (!bsev_wq || !bsea_wq) { | ||
1315 | dev_err(dev, "alloc_workqueue failed\n"); | ||
1316 | err = -ENOMEM; | ||
1317 | goto out; | ||
| } | ||
1318 | |||
1319 | /* get the irq */ | ||
1320 | dd->bsev.irq = INT_VDE_BSE_V; | ||
1321 | err = request_irq(dd->bsev.irq, aes_bsev_irq, IRQF_TRIGGER_HIGH, | ||
1322 | "tegra-aes", dd); | ||
1323 | if (err) { | ||
1324 | dev_err(dev, "request_irq failed for BSEV Engine\n"); | ||
1325 | dd->bsev.irq = 0; | ||
1326 | goto out; | ||
| } | ||
1327 | disable_irq(dd->bsev.irq); | ||
1328 | |||
1329 | dd->bsea.irq = INT_VDE_BSE_A; | ||
1330 | err = request_irq(dd->bsea.irq, aes_bsea_irq, IRQF_TRIGGER_HIGH, | ||
1331 | "tegra-aes", dd); | ||
1332 | if (err) { | ||
1333 | dev_err(dev, "request_irq failed for BSEA Engine\n"); | ||
1334 | dd->bsea.irq = 0; | ||
1335 | goto out; | ||
| } | ||
1336 | disable_irq(dd->bsea.irq); | ||
1337 | |||
1338 | spin_lock_init(&list_lock); | ||
1339 | spin_lock(&list_lock); | ||
1340 | for (i = 0; i < AES_NR_KEYSLOTS; i++) { | ||
1341 | if (i == SSK_SLOT_NUM) | ||
1342 | continue; | ||
1343 | dd->slots[i].available = true; | ||
1344 | dd->slots[i].slot_num = i; | ||
1345 | INIT_LIST_HEAD(&dd->slots[i].node); | ||
1346 | list_add_tail(&dd->slots[i].node, &slot_list); | ||
1347 | } | ||
1348 | spin_unlock(&list_lock); | ||
1349 | |||
1350 | aes_dev = dd; | ||
1351 | |||
1352 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | ||
1353 | INIT_LIST_HEAD(&algs[i].cra_list); | ||
1354 | err = crypto_register_alg(&algs[i]); | ||
1355 | if (err) | ||
1356 | goto out; | ||
1357 | } | ||
1358 | |||
1359 | dev_info(dev, "registered\n"); | ||
1360 | return 0; | ||
1361 | |||
1362 | out: | ||
1363 | for (j = 0; j < i; j++) | ||
1364 | crypto_unregister_alg(&algs[j]); | ||
1365 | |||
1366 | free_iram(dd); | ||
1367 | |||
1368 | if (dd->bsev.ivkey_base) { | ||
1369 | dma_free_coherent(dev, SZ_512, dd->bsev.ivkey_base, | ||
1370 | dd->bsev.ivkey_phys_base); | ||
1371 | } | ||
1372 | |||
1373 | if (dd->bsev.buf_in) | ||
1374 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1375 | dd->bsev.buf_in, dd->bsev.dma_buf_in); | ||
1376 | if (dd->bsea.buf_in) | ||
1377 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1378 | dd->bsea.buf_in, dd->bsea.dma_buf_in); | ||
1379 | |||
1380 | if (dd->bsev.buf_out) | ||
1381 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1382 | dd->bsev.buf_out, dd->bsev.dma_buf_out); | ||
1383 | if (dd->bsea.buf_out) | ||
1384 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1385 | dd->bsea.buf_out, dd->bsea.dma_buf_out); | ||
1386 | |||
1387 | if (dd->bsev.io_base) | ||
1388 | iounmap(dd->bsev.io_base); | ||
1389 | if (dd->bsea.io_base) | ||
1390 | iounmap(dd->bsea.io_base); | ||
1391 | |||
1392 | if (!IS_ERR_OR_NULL(dd->bsev.pclk)) | ||
1393 | clk_put(dd->bsev.pclk); | ||
1394 | if (!IS_ERR_OR_NULL(dd->bsev.iclk)) | ||
1395 | clk_put(dd->bsev.iclk); | ||
1396 | if (!IS_ERR_OR_NULL(dd->bsea.pclk)) | ||
1397 | clk_put(dd->bsea.pclk); | ||
1398 | if (!IS_ERR_OR_NULL(dd->bsea.iclk)) | ||
1399 | clk_put(dd->bsea.iclk); | ||
1400 | | ||
1401 | if (bsev_wq) | ||
1402 | destroy_workqueue(bsev_wq); | ||
1403 | |||
1404 | if (bsea_wq) | ||
1405 | destroy_workqueue(bsea_wq); | ||
1406 | |||
1407 | if (dd->bsev.irq) | ||
1408 | free_irq(dd->bsev.irq, dd); | ||
1409 | |||
1410 | if (dd->bsea.irq) | ||
1411 | free_irq(dd->bsea.irq, dd); | ||
1412 | |||
1413 | spin_lock(&list_lock); | ||
1414 | INIT_LIST_HEAD(&slot_list); | ||
1415 | spin_unlock(&list_lock); | ||
1416 | |||
1417 | kfree(dd->slots); | ||
1418 | kfree(dd); | ||
1419 | aes_dev = NULL; | ||
1420 | |||
1421 | dev_err(dev, "%s: initialization failed.\n", __func__); | ||
1422 | return err; | ||
1423 | } | ||
1424 | |||
1425 | static int __devexit tegra_aes_remove(struct platform_device *pdev) | ||
1426 | { | ||
1427 | struct device *dev = &pdev->dev; | ||
1428 | struct tegra_aes_dev *dd = platform_get_drvdata(pdev); | ||
1429 | int i; | ||
1430 | |||
1431 | if (!dd) | ||
1432 | return -ENODEV; | ||
1433 | |||
1434 | cancel_work_sync(&bsev_work); | ||
1435 | cancel_work_sync(&bsea_work); | ||
1436 | destroy_workqueue(bsev_wq); | ||
1437 | destroy_workqueue(bsea_wq); | ||
1438 | free_irq(dd->bsev.irq, dd); | ||
1439 | free_irq(dd->bsea.irq, dd); | ||
1440 | spin_lock(&list_lock); | ||
1441 | INIT_LIST_HEAD(&slot_list); | ||
1442 | spin_unlock(&list_lock); | ||
1443 | |||
1444 | for (i = 0; i < ARRAY_SIZE(algs); i++) | ||
1445 | crypto_unregister_alg(&algs[i]); | ||
1446 | |||
1447 | free_iram(dd); | ||
1448 | dma_free_coherent(dev, SZ_512, dd->bsev.ivkey_base, | ||
1449 | dd->bsev.ivkey_phys_base); | ||
1450 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsev.buf_in, | ||
1451 | dd->bsev.dma_buf_in); | ||
1452 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsea.buf_in, | ||
1453 | dd->bsea.dma_buf_in); | ||
1454 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsev.buf_out, | ||
1455 | dd->bsev.dma_buf_out); | ||
1456 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->bsea.buf_out, | ||
1457 | dd->bsea.dma_buf_out); | ||
1458 | |||
1459 | iounmap(dd->bsev.io_base); | ||
1460 | iounmap(dd->bsea.io_base); | ||
1461 | clk_put(dd->bsev.iclk); | ||
1462 | clk_put(dd->bsev.pclk); | ||
1463 | clk_put(dd->bsea.pclk); | ||
| clk_put(dd->bsea.iclk); | ||
1464 | kfree(dd->slots); | ||
1465 | kfree(dd); | ||
1466 | aes_dev = NULL; | ||
1467 | |||
1468 | return 0; | ||
1469 | } | ||
1470 | |||
1471 | static struct platform_driver tegra_aes_driver = { | ||
1472 | .probe = tegra_aes_probe, | ||
1473 | .remove = __devexit_p(tegra_aes_remove), | ||
1474 | .driver = { | ||
1475 | .name = "tegra-aes", | ||
1476 | .owner = THIS_MODULE, | ||
1477 | }, | ||
1478 | }; | ||
1479 | |||
1480 | static int __init tegra_aes_mod_init(void) | ||
1481 | { | ||
1482 | INIT_LIST_HEAD(&slot_list); | ||
1483 | return platform_driver_register(&tegra_aes_driver); | ||
1484 | } | ||
1485 | |||
1486 | static void __exit tegra_aes_mod_exit(void) | ||
1487 | { | ||
1488 | platform_driver_unregister(&tegra_aes_driver); | ||
1489 | } | ||
1490 | |||
1491 | module_init(tegra_aes_mod_init); | ||
1492 | module_exit(tegra_aes_mod_exit); | ||
1493 | |||
1494 | MODULE_DESCRIPTION("Tegra AES hw acceleration support."); | ||
1495 | MODULE_AUTHOR("NVIDIA Corporation"); | ||
1496 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/crypto/tegra-aes.h b/drivers/crypto/tegra-aes.h new file mode 100644 index 00000000000..45696467cdf --- /dev/null +++ b/drivers/crypto/tegra-aes.h | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010, NVIDIA Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __CRYPTODEV_TEGRA_AES_H | ||
20 | #define __CRYPTODEV_TEGRA_AES_H | ||
21 | |||
22 | #define ICMDQUE_WR 0x1000 | ||
23 | #define CMDQUE_CONTROL 0x1008 | ||
24 | #define INTR_STATUS 0x1018 | ||
25 | #define INT_ENB 0x1040 | ||
26 | #define CONFIG 0x1044 | ||
27 | #define IRAM_ACCESS_CFG 0x10A0 | ||
28 | #define SECURE_DEST_ADDR 0x1100 | ||
29 | #define SECURE_INPUT_SELECT 0x1104 | ||
30 | #define SECURE_CONFIG 0x1108 | ||
31 | #define SECURE_CONFIG_EXT 0x110C | ||
32 | #define SECURE_SECURITY 0x1110 | ||
33 | #define SECURE_HASH_RESULT0 0x1120 | ||
34 | #define SECURE_HASH_RESULT1 0x1124 | ||
35 | #define SECURE_HASH_RESULT2 0x1128 | ||
36 | #define SECURE_HASH_RESULT3 0x112C | ||
37 | #define SECURE_SEC_SEL0 0x1140 | ||
38 | #define SECURE_SEC_SEL1 0x1144 | ||
39 | #define SECURE_SEC_SEL2 0x1148 | ||
40 | #define SECURE_SEC_SEL3 0x114C | ||
41 | #define SECURE_SEC_SEL4 0x1150 | ||
42 | #define SECURE_SEC_SEL5 0x1154 | ||
43 | #define SECURE_SEC_SEL6 0x1158 | ||
44 | #define SECURE_SEC_SEL7 0x115C | ||
45 | |||
46 | /* interrupt status reg masks and shifts */ | ||
47 | #define DMA_BUSY_FIELD BIT(9) | ||
48 | #define ICQ_EMPTY_FIELD BIT(3) | ||
49 | #define ENGINE_BUSY_FIELD BIT(0) | ||
50 | |||
51 | /* secure select reg masks and shifts */ | ||
52 | #define SECURE_SEL0_KEYREAD_ENB0_FIELD BIT(0) | ||
53 | |||
54 | /* secure config ext masks and shifts */ | ||
55 | #define SECURE_KEY_SCH_DIS_FIELD BIT(15) | ||
56 | |||
57 | /* secure config masks and shifts */ | ||
58 | #define SECURE_KEY_INDEX_SHIFT 20 | ||
59 | #define SECURE_KEY_INDEX_FIELD (0x1F << SECURE_KEY_INDEX_SHIFT) | ||
60 | #define SECURE_BLOCK_CNT_FIELD (0xFFFFF) | ||
61 | |||
62 | /* stream interface select masks and shifts */ | ||
63 | #define CMDQ_CTRL_DST_STM_SEL_FIELD BIT(5) | ||
64 | #define CMDQ_CTRL_SRC_STM_SEL_FIELD BIT(4) | ||
65 | #define CMDQ_CTRL_ERROR_FLUSH_ENB BIT(2) | ||
66 | #define CMDQ_CTRL_ICMDQEN_FIELD BIT(1) | ||
67 | #define CMDQ_CTRL_UCMDQEN_FIELD BIT(0) | ||
68 | |||
69 | /* config register masks and shifts */ | ||
70 | #define CONFIG_ENDIAN_ENB_FIELD BIT(10) | ||
71 | #define CONFIG_MODE_SEL_FIELD BIT(0) | ||
72 | |||
73 | /* extended config */ | ||
74 | #define SECURE_OFFSET_CNT_FIELD (0xFF << 24) | ||
75 | #define SECURE_KEYSCHED_GEN_FIELD BIT(15) | ||
76 | |||
77 | /* init vector select */ | ||
78 | #define SECURE_IV_SELECT_SHIFT 10 | ||
79 | #define SECURE_IV_SELECT_FIELD BIT(10) | ||
80 | |||
81 | /* secure engine input */ | ||
82 | #define SECURE_INPUT_ALG_SEL_SHIFT 28 | ||
83 | #define SECURE_INPUT_ALG_SEL_FIELD (0xF << SECURE_INPUT_ALG_SEL_SHIFT) | ||
84 | #define SECURE_INPUT_KEY_LEN_SHIFT 16 | ||
85 | #define SECURE_INPUT_KEY_LEN_FIELD (0xFFF << SECURE_INPUT_KEY_LEN_SHIFT) | ||
86 | #define SECURE_RNG_ENB_SHIFT 11 | ||
87 | #define SECURE_RNG_ENB_FIELD BIT(11) | ||
88 | #define SECURE_CORE_SEL_SHIFT 9 | ||
89 | #define SECURE_CORE_SEL_FIELD BIT(9) | ||
90 | #define SECURE_VCTRAM_SEL_SHIFT 7 | ||
91 | #define SECURE_VCTRAM_SEL_FIELD (0x3 << SECURE_VCTRAM_SEL_SHIFT) | ||
92 | #define SECURE_INPUT_SEL_SHIFT 5 | ||
93 | #define SECURE_INPUT_SEL_FIELD (0x3 << SECURE_INPUT_SEL_SHIFT) | ||
94 | #define SECURE_XOR_POS_SHIFT 3 | ||
95 | #define SECURE_XOR_POS_FIELD (0x3 << SECURE_XOR_POS_SHIFT) | ||
96 | #define SECURE_HASH_ENB_SHIFT 2 | ||
97 | #define SECURE_HASH_ENB_FIELD BIT(2) | ||
98 | #define SECURE_ON_THE_FLY_FIELD BIT(0) | ||
99 | |||
100 | #endif | ||
diff --git a/drivers/crypto/tegra-se.c b/drivers/crypto/tegra-se.c new file mode 100644 index 00000000000..3d2e9187b94 --- /dev/null +++ b/drivers/crypto/tegra-se.c | |||
@@ -0,0 +1,2455 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * drivers/crypto/tegra-se.c | ||
4 | * | ||
5 | * Support for Tegra Security Engine hardware crypto algorithms. | ||
6 | * | ||
7 | * Copyright (c) 2011, NVIDIA Corporation. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/clk.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | #include <linux/scatterlist.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | #include <linux/io.h> | ||
33 | #include <linux/mutex.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/types.h> | ||
36 | #include <linux/slab.h> | ||
37 | #include <crypto/scatterwalk.h> | ||
38 | #include <crypto/algapi.h> | ||
39 | #include <crypto/aes.h> | ||
40 | #include <crypto/internal/rng.h> | ||
41 | #include <crypto/internal/hash.h> | ||
42 | #include <crypto/sha.h> | ||
43 | #include <linux/pm_runtime.h> | ||
44 | |||
45 | #include "tegra-se.h" | ||
46 | |||
47 | #define DRIVER_NAME "tegra-se" | ||
48 | |||
49 | /* Security Engine operation modes */ | ||
50 | enum tegra_se_aes_op_mode { | ||
51 | SE_AES_OP_MODE_CBC, /* Cipher Block Chaining (CBC) mode */ | ||
52 | SE_AES_OP_MODE_ECB, /* Electronic Codebook (ECB) mode */ | ||
53 | SE_AES_OP_MODE_CTR, /* Counter (CTR) mode */ | ||
54 | SE_AES_OP_MODE_OFB, /* Output feedback (OFB) mode */ | ||
55 | SE_AES_OP_MODE_RNG_X931, /* Random number generator (RNG) mode */ | ||
56 | SE_AES_OP_MODE_CMAC, /* Cipher-based MAC (CMAC) mode */ | ||
57 | SE_AES_OP_MODE_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */ | ||
58 | SE_AES_OP_MODE_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */ | ||
59 | SE_AES_OP_MODE_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */ | ||
60 | SE_AES_OP_MODE_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */ | ||
61 | SE_AES_OP_MODE_SHA512 /* Secure Hash Algorithm-512 (SHA512) mode */ | ||
62 | }; | ||
63 | |||
64 | /* Security Engine key table type */ | ||
65 | enum tegra_se_key_table_type { | ||
66 | SE_KEY_TABLE_TYPE_KEY, /* Key */ | ||
67 | SE_KEY_TABLE_TYPE_ORGIV, /* Original IV */ | ||
68 | SE_KEY_TABLE_TYPE_UPDTDIV /* Updated IV */ | ||
69 | }; | ||
70 | |||
71 | /* Security Engine request context */ | ||
72 | struct tegra_se_req_context { | ||
73 | enum tegra_se_aes_op_mode op_mode; /* Security Engine operation mode */ | ||
74 | bool encrypt; /* Operation type */ | ||
75 | }; | ||
76 | |||
77 | struct tegra_se_dev { | ||
78 | struct device *dev; | ||
79 | void __iomem *io_reg; /* se device memory/io */ | ||
80 | void __iomem *pmc_io_reg; /* pmc device memory/io */ | ||
81 | int irq; /* irq allocated */ | ||
82 | spinlock_t lock; /* spin lock */ | ||
83 | struct clk *pclk; /* Security Engine clock */ | ||
84 | struct crypto_queue queue; /* Security Engine crypto queue */ | ||
85 | struct tegra_se_slot *slot_list; /* pointer to key slots */ | ||
86 | u64 ctr; | ||
87 | u32 *src_ll_buf; /* pointer to source linked list buffer */ | ||
88 | dma_addr_t src_ll_buf_adr; /* Source linked list buffer dma address */ | ||
89 | u32 src_ll_size; /* Size of source linked list buffer */ | ||
90 | u32 *dst_ll_buf; /* pointer to destination linked list buffer */ | ||
91 | dma_addr_t dst_ll_buf_adr; /* Destination linked list dma address */ | ||
92 | u32 dst_ll_size; /* Size of destination linked list buffer */ | ||
93 | u32 *ctx_save_buf; /* LP context buffer pointer */ | ||
94 | dma_addr_t ctx_save_buf_adr; /* LP context buffer dma address */ | ||
95 | struct completion complete; /* Tells the task completion */ | ||
96 | bool work_q_busy; /* Work queue busy status */ | ||
97 | }; | ||
98 | |||
99 | static struct tegra_se_dev *sg_tegra_se_dev; | ||
100 | |||
101 | /* Security Engine AES context */ | ||
102 | struct tegra_se_aes_context { | ||
103 | struct tegra_se_dev *se_dev; /* Security Engine device */ | ||
104 | struct tegra_se_slot *slot; /* Security Engine key slot */ | ||
105 | u32 keylen; /* key length in bytes */ | ||
106 | u32 op_mode; /* AES operation mode */ | ||
107 | }; | ||
108 | |||
109 | /* Security Engine random number generator context */ | ||
110 | struct tegra_se_rng_context { | ||
111 | struct tegra_se_dev *se_dev; /* Security Engine device */ | ||
112 | struct tegra_se_slot *slot; /* Security Engine key slot */ | ||
113 | u32 *dt_buf; /* Destination buffer pointer */ | ||
114 | dma_addr_t dt_buf_adr; /* Destination buffer dma address */ | ||
115 | u32 *rng_buf; /* RNG buffer pointer */ | ||
116 | dma_addr_t rng_buf_adr; /* RNG buffer dma address */ | ||
117 | bool use_org_iv; /* Tells whether the original IV is to be used | ||
118 | or not. If it is false, the updated IV is used */ | ||
119 | }; | ||
120 | |||
121 | /* Security Engine SHA context */ | ||
122 | struct tegra_se_sha_context { | ||
123 | struct tegra_se_dev *se_dev; /* Security Engine device */ | ||
124 | u32 op_mode; /* SHA operation mode */ | ||
125 | }; | ||
126 | |||
127 | /* Security Engine AES CMAC context */ | ||
128 | struct tegra_se_aes_cmac_context { | ||
129 | struct tegra_se_dev *se_dev; /* Security Engine device */ | ||
130 | struct tegra_se_slot *slot; /* Security Engine key slot */ | ||
131 | u32 keylen; /* key length in bytes */ | ||
132 | u8 K1[TEGRA_SE_KEY_128_SIZE]; /* Key1 */ | ||
133 | u8 K2[TEGRA_SE_KEY_128_SIZE]; /* Key2 */ | ||
134 | dma_addr_t dma_addr; /* DMA address of local buffer */ | ||
135 | u32 buflen; /* local buffer length */ | ||
136 | u8 *buffer; /* local buffer pointer */ | ||
137 | }; | ||
138 | |||
139 | /* Security Engine key slot */ | ||
140 | struct tegra_se_slot { | ||
141 | struct list_head node; | ||
142 | u8 slot_num; /* Key slot number */ | ||
143 | bool available; /* Tells whether key slot is free to use */ | ||
144 | }; | ||
145 | |||
146 | static struct tegra_se_slot ssk_slot = { | ||
147 | .slot_num = 15, | ||
148 | .available = false, | ||
149 | }; | ||
150 | |||
151 | static struct tegra_se_slot srk_slot = { | ||
152 | .slot_num = 0, | ||
153 | .available = false, | ||
154 | }; | ||
155 | |||
156 | /* Security Engine Linked List */ | ||
157 | struct tegra_se_ll { | ||
158 | dma_addr_t addr; /* DMA buffer address */ | ||
159 | u32 data_len; /* Data length in DMA buffer */ | ||
160 | }; | ||
161 | |||
162 | static LIST_HEAD(key_slot); | ||
163 | static DEFINE_SPINLOCK(key_slot_lock); | ||
164 | static DEFINE_MUTEX(se_hw_lock); | ||
165 | |||
166 | /* create a work for handling the async transfers */ | ||
167 | static void tegra_se_work_handler(struct work_struct *work); | ||
168 | static DECLARE_WORK(se_work, tegra_se_work_handler); | ||
169 | static struct workqueue_struct *se_work_q; | ||
170 | |||
171 | #define PMC_SCRATCH43_REG_OFFSET 0x22c | ||
172 | #define GET_MSB(x) ((x) >> (8*sizeof(x)-1)) | ||
173 | static void tegra_se_leftshift_onebit(u8 *in_buf, u32 size, u8 *org_msb) | ||
174 | { | ||
175 | u8 carry; | ||
176 | u32 i; | ||
177 | |||
178 | *org_msb = GET_MSB(in_buf[0]); | ||
179 | |||
180 | /* left shift one bit */ | ||
181 | in_buf[0] <<= 1; | ||
182 | for (carry = 0, i = 1; i < size; i++) { | ||
183 | carry = GET_MSB(in_buf[i]); | ||
184 | in_buf[i-1] |= carry; | ||
185 | in_buf[i] <<= 1; | ||
186 | } | ||
187 | } | ||
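| /* | ||
| * The one-bit shift above is the CMAC subkey primitive from NIST | ||
| * SP 800-38B: K1 is L << 1, XORed with Rb (0x87 in the last byte) when | ||
| * the shifted-out MSB was set, and K2 is derived from K1 the same way. | ||
| * A minimal sketch, assuming 'l' already holds E_K(0^128) as produced | ||
| * by the engine (sketch only, not wired into the driver): | ||
| */ | ||
| static inline void tegra_se_cmac_subkey(u8 *out, const u8 *l, u32 len) | ||
| { | ||
| 	u8 msb; | ||
| | ||
| 	memcpy(out, l, len); | ||
| 	tegra_se_leftshift_onebit(out, len, &msb); | ||
| 	if (msb) | ||
| 		out[len - 1] ^= 0x87;	/* Rb for a 128-bit block */ | ||
| } | ||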
188 | |||
189 | extern unsigned long long tegra_chip_uid(void); | ||
190 | |||
191 | static inline void se_writel(struct tegra_se_dev *se_dev, | ||
192 | unsigned int val, unsigned int reg_offset) | ||
193 | { | ||
194 | writel(val, se_dev->io_reg + reg_offset); | ||
195 | } | ||
196 | |||
197 | static inline unsigned int se_readl(struct tegra_se_dev *se_dev, | ||
198 | unsigned int reg_offset) | ||
199 | { | ||
200 | unsigned int val; | ||
201 | |||
202 | val = readl(se_dev->io_reg + reg_offset); | ||
203 | |||
204 | return val; | ||
205 | } | ||
206 | |||
207 | static void tegra_se_free_key_slot(struct tegra_se_slot *slot) | ||
208 | { | ||
209 | if (slot) { | ||
210 | spin_lock(&key_slot_lock); | ||
211 | slot->available = true; | ||
212 | spin_unlock(&key_slot_lock); | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static struct tegra_se_slot *tegra_se_alloc_key_slot(void) | ||
217 | { | ||
218 | struct tegra_se_slot *slot = NULL; | ||
219 | bool found = false; | ||
220 | |||
221 | spin_lock(&key_slot_lock); | ||
222 | list_for_each_entry(slot, &key_slot, node) { | ||
223 | if (slot->available) { | ||
224 | slot->available = false; | ||
225 | found = true; | ||
226 | break; | ||
227 | } | ||
228 | } | ||
229 | spin_unlock(&key_slot_lock); | ||
230 | return found ? slot : NULL; | ||
231 | } | ||
232 | |||
233 | static int tegra_init_key_slot(struct tegra_se_dev *se_dev) | ||
234 | { | ||
235 | int i; | ||
236 | |||
237 | se_dev->slot_list = kzalloc(sizeof(struct tegra_se_slot) * | ||
238 | TEGRA_SE_KEYSLOT_COUNT, GFP_KERNEL); | ||
239 | if (se_dev->slot_list == NULL) { | ||
240 | dev_err(se_dev->dev, "slot list memory allocation failed\n"); | ||
241 | return -ENOMEM; | ||
242 | } | ||
243 | spin_lock_init(&key_slot_lock); | ||
244 | spin_lock(&key_slot_lock); | ||
245 | for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) { | ||
246 | /* | ||
247 | * Slot 0 and 15 are reserved and will not be added to the | ||
248 | * free slots pool. Slot 0 is used for SRK generation and | ||
249 | * Slot 15 is used for SSK operation | ||
250 | */ | ||
251 | if ((i == srk_slot.slot_num) || (i == ssk_slot.slot_num)) | ||
252 | continue; | ||
253 | se_dev->slot_list[i].available = true; | ||
254 | se_dev->slot_list[i].slot_num = i; | ||
255 | INIT_LIST_HEAD(&se_dev->slot_list[i].node); | ||
256 | list_add_tail(&se_dev->slot_list[i].node, &key_slot); | ||
257 | } | ||
258 | spin_unlock(&key_slot_lock); | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | static void tegra_se_key_read_disable(u8 slot_num) | ||
264 | { | ||
265 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
266 | u32 val; | ||
267 | |||
268 | val = se_readl(se_dev, | ||
269 | (SE_KEY_TABLE_ACCESS_REG_OFFSET + (slot_num * 4))); | ||
270 | val &= ~(1 << SE_KEY_READ_DISABLE_SHIFT); | ||
271 | se_writel(se_dev, | ||
272 | val, (SE_KEY_TABLE_ACCESS_REG_OFFSET + (slot_num * 4))); | ||
273 | } | ||
274 | |||
275 | static void tegra_se_key_read_disable_all(void) | ||
276 | { | ||
277 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
278 | u8 slot_num; | ||
279 | |||
280 | mutex_lock(&se_hw_lock); | ||
281 | pm_runtime_get_sync(se_dev->dev); | ||
282 | |||
283 | for (slot_num = 0; slot_num < TEGRA_SE_KEYSLOT_COUNT; slot_num++) | ||
284 | tegra_se_key_read_disable(slot_num); | ||
285 | |||
286 | pm_runtime_put(se_dev->dev); | ||
287 | mutex_unlock(&se_hw_lock); | ||
288 | } | ||
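| /* | ||
| * Note the bracketing above: the register writes happen only between | ||
| * pm_runtime_get_sync() and pm_runtime_put(), under se_hw_lock, so the | ||
| * engine is guaranteed to be powered while the key-read-disable bits | ||
| * are written. | ||
| */ | ||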
289 | |||
290 | static void tegra_se_config_algo(struct tegra_se_dev *se_dev, | ||
291 | enum tegra_se_aes_op_mode mode, bool encrypt, u32 key_len) | ||
292 | { | ||
293 | u32 val = 0; | ||
294 | |||
295 | switch (mode) { | ||
296 | case SE_AES_OP_MODE_CBC: | ||
297 | case SE_AES_OP_MODE_CMAC: | ||
298 | if (encrypt) { | ||
299 | val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); | ||
300 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
301 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
302 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
303 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
304 | else | ||
305 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
306 | val |= SE_CONFIG_DEC_ALG(ALG_NOP); | ||
307 | } else { | ||
308 | val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); | ||
309 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
310 | val |= SE_CONFIG_DEC_MODE(MODE_KEY256); | ||
311 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
312 | val |= SE_CONFIG_DEC_MODE(MODE_KEY192); | ||
313 | else | ||
314 | val |= SE_CONFIG_DEC_MODE(MODE_KEY128); | ||
315 | } | ||
316 | if (mode == SE_AES_OP_MODE_CMAC) | ||
317 | val |= SE_CONFIG_DST(DST_HASHREG); | ||
318 | else | ||
319 | val |= SE_CONFIG_DST(DST_MEMORY); | ||
320 | break; | ||
321 | case SE_AES_OP_MODE_RNG_X931: | ||
322 | val = SE_CONFIG_ENC_ALG(ALG_RNG) | | ||
323 | SE_CONFIG_ENC_MODE(MODE_KEY128) | | ||
324 | SE_CONFIG_DST(DST_MEMORY); | ||
325 | break; | ||
326 | case SE_AES_OP_MODE_ECB: | ||
327 | if (encrypt) { | ||
328 | val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); | ||
329 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
330 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
331 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
332 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
333 | else | ||
334 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
335 | } else { | ||
336 | val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); | ||
337 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
338 | val |= SE_CONFIG_DEC_MODE(MODE_KEY256); | ||
339 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
340 | val |= SE_CONFIG_DEC_MODE(MODE_KEY192); | ||
341 | else | ||
342 | val |= SE_CONFIG_DEC_MODE(MODE_KEY128); | ||
343 | } | ||
344 | val |= SE_CONFIG_DST(DST_MEMORY); | ||
345 | break; | ||
346 | case SE_AES_OP_MODE_CTR: | ||
347 | if (encrypt) { | ||
348 | val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); | ||
349 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
350 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
351 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
352 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
353 | else | ||
354 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
355 | } else { | ||
356 | val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); | ||
357 | if (key_len == TEGRA_SE_KEY_256_SIZE) { | ||
358 | val |= SE_CONFIG_DEC_MODE(MODE_KEY256); | ||
359 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
360 | } else if (key_len == TEGRA_SE_KEY_192_SIZE) { | ||
361 | val |= SE_CONFIG_DEC_MODE(MODE_KEY192); | ||
362 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
363 | } else { | ||
364 | val |= SE_CONFIG_DEC_MODE(MODE_KEY128); | ||
365 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
366 | } | ||
367 | } | ||
368 | val |= SE_CONFIG_DST(DST_MEMORY); | ||
369 | break; | ||
370 | case SE_AES_OP_MODE_OFB: | ||
371 | if (encrypt) { | ||
372 | val = SE_CONFIG_ENC_ALG(ALG_AES_ENC); | ||
373 | if (key_len == TEGRA_SE_KEY_256_SIZE) | ||
374 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
375 | else if (key_len == TEGRA_SE_KEY_192_SIZE) | ||
376 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
377 | else | ||
378 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
379 | } else { | ||
380 | val = SE_CONFIG_DEC_ALG(ALG_AES_DEC); | ||
381 | if (key_len == TEGRA_SE_KEY_256_SIZE) { | ||
382 | val |= SE_CONFIG_DEC_MODE(MODE_KEY256); | ||
383 | val |= SE_CONFIG_ENC_MODE(MODE_KEY256); | ||
384 | } else if (key_len == TEGRA_SE_KEY_192_SIZE) { | ||
385 | val |= SE_CONFIG_DEC_MODE(MODE_KEY192); | ||
386 | val |= SE_CONFIG_ENC_MODE(MODE_KEY192); | ||
387 | } else { | ||
388 | val |= SE_CONFIG_DEC_MODE(MODE_KEY128); | ||
389 | val |= SE_CONFIG_ENC_MODE(MODE_KEY128); | ||
390 | } | ||
391 | } | ||
392 | val |= SE_CONFIG_DST(DST_MEMORY); | ||
393 | break; | ||
394 | case SE_AES_OP_MODE_SHA1: | ||
395 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
396 | SE_CONFIG_ENC_MODE(MODE_SHA1) | | ||
397 | SE_CONFIG_DST(DST_HASHREG); | ||
398 | break; | ||
399 | case SE_AES_OP_MODE_SHA224: | ||
400 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
401 | SE_CONFIG_ENC_MODE(MODE_SHA224) | | ||
402 | SE_CONFIG_DST(DST_HASHREG); | ||
403 | break; | ||
404 | case SE_AES_OP_MODE_SHA256: | ||
405 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
406 | SE_CONFIG_ENC_MODE(MODE_SHA256) | | ||
407 | SE_CONFIG_DST(DST_HASHREG); | ||
408 | break; | ||
409 | case SE_AES_OP_MODE_SHA384: | ||
410 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
411 | SE_CONFIG_ENC_MODE(MODE_SHA384) | | ||
412 | SE_CONFIG_DST(DST_HASHREG); | ||
413 | break; | ||
414 | case SE_AES_OP_MODE_SHA512: | ||
415 | val = SE_CONFIG_ENC_ALG(ALG_SHA) | | ||
416 | SE_CONFIG_ENC_MODE(MODE_SHA512) | | ||
417 | SE_CONFIG_DST(DST_HASHREG); | ||
418 | break; | ||
419 | default: | ||
420 | dev_warn(se_dev->dev, "Invalid operation mode\n"); | ||
421 | break; | ||
422 | } | ||
423 | |||
424 | se_writel(se_dev, val, SE_CONFIG_REG_OFFSET); | ||
425 | } | ||
426 | |||
427 | static void tegra_se_write_seed(struct tegra_se_dev *se_dev, u32 *pdata) | ||
428 | { | ||
429 | u32 i; | ||
430 | |||
431 | for (i = 0; i < SE_CRYPTO_CTR_REG_COUNT; i++) | ||
432 | se_writel(se_dev, pdata[i], SE_CRYPTO_CTR_REG_OFFSET + (i * 4)); | ||
433 | } | ||
434 | |||
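| /* | ||
|  * Write key/IV material into a hardware key-table slot. Data is | ||
|  * staged 128 bits at a time through the KEYTABLE_DATA registers and | ||
|  * committed with a keytable write op aimed at the selected quad; for | ||
|  * 192/256-bit keys the second pass targets the upper-word quad | ||
|  * (QUAD_KEYS_256). Key writes to the SSK slot are silently ignored. | ||
|  */ | ||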
435 | static void tegra_se_write_key_table(u8 *pdata, u32 data_len, | ||
436 | u8 slot_num, enum tegra_se_key_table_type type) | ||
437 | { | ||
438 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
439 | u32 data_size = SE_KEYTABLE_REG_MAX_DATA; | ||
440 | u32 *pdata_buf = (u32 *)pdata; | ||
441 | u8 pkt = 0, quad = 0; | ||
442 | u32 val = 0, i; | ||
443 | |||
444 | if ((type == SE_KEY_TABLE_TYPE_KEY) && (slot_num == ssk_slot.slot_num)) | ||
445 | return; | ||
446 | |||
447 | if (type == SE_KEY_TABLE_TYPE_ORGIV) | ||
448 | quad = QUAD_ORG_IV; | ||
449 | else if (type == SE_KEY_TABLE_TYPE_UPDTDIV) | ||
450 | quad = QUAD_UPDTD_IV; | ||
451 | else | ||
452 | quad = QUAD_KEYS_128; | ||
453 | |||
454 | /* write data to the key table */ | ||
455 | do { | ||
456 | for (i = 0; i < data_size; i += 4, data_len -= 4) | ||
457 | se_writel(se_dev, *pdata_buf++, | ||
458 | SE_KEYTABLE_DATA0_REG_OFFSET + i); | ||
459 | |||
460 | pkt = SE_KEYTABLE_SLOT(slot_num) | SE_KEYTABLE_QUAD(quad); | ||
461 | val = SE_KEYTABLE_OP_TYPE(OP_WRITE) | | ||
462 | SE_KEYTABLE_TABLE_SEL(TABLE_KEYIV) | | ||
463 | SE_KEYTABLE_PKT(pkt); | ||
464 | |||
465 | se_writel(se_dev, val, SE_KEYTABLE_REG_OFFSET); | ||
466 | |||
467 | data_size = data_len; | ||
468 | quad = QUAD_KEYS_256; | ||
469 | |||
470 | } while (data_len); | ||
471 | } | ||
472 | |||
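| /* | ||
|  * Program SE_CRYPTO for the given block-cipher mode. The XOR | ||
|  * position, VCTRAM source and core direction together encode the | ||
|  * mode's data flow: CBC encrypt XORs the IV in before the encrypt | ||
|  * core, CBC decrypt XORs the previous ciphertext in after the | ||
|  * decrypt core, and CTR/OFB always run the core in the encrypt | ||
|  * direction. | ||
|  */ | ||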
473 | static void tegra_se_config_crypto(struct tegra_se_dev *se_dev, | ||
474 | enum tegra_se_aes_op_mode mode, bool encrypt, u8 slot_num, bool org_iv) | ||
475 | { | ||
476 | u32 val = 0; | ||
477 | |||
478 | switch (mode) { | ||
479 | case SE_AES_OP_MODE_CMAC: | ||
480 | case SE_AES_OP_MODE_CBC: | ||
481 | if (encrypt) { | ||
482 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
483 | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) | | ||
484 | SE_CRYPTO_XOR_POS(XOR_TOP) | | ||
485 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
486 | } else { | ||
487 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
488 | SE_CRYPTO_VCTRAM_SEL(VCTRAM_PREVAHB) | | ||
489 | SE_CRYPTO_XOR_POS(XOR_BOTTOM) | | ||
490 | SE_CRYPTO_CORE_SEL(CORE_DECRYPT); | ||
491 | } | ||
492 | break; | ||
493 | case SE_AES_OP_MODE_RNG_X931: | ||
494 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
495 | SE_CRYPTO_XOR_POS(XOR_BYPASS) | | ||
496 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
497 | break; | ||
498 | case SE_AES_OP_MODE_ECB: | ||
499 | if (encrypt) { | ||
500 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
501 | SE_CRYPTO_XOR_POS(XOR_BYPASS) | | ||
502 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
503 | } else { | ||
504 | val = SE_CRYPTO_INPUT_SEL(INPUT_AHB) | | ||
505 | SE_CRYPTO_XOR_POS(XOR_BYPASS) | | ||
506 | SE_CRYPTO_CORE_SEL(CORE_DECRYPT); | ||
507 | } | ||
508 | break; | ||
509 | case SE_AES_OP_MODE_CTR: | ||
510 | val = SE_CRYPTO_INPUT_SEL(INPUT_LNR_CTR) | | ||
511 | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AHB) | | ||
512 | SE_CRYPTO_XOR_POS(XOR_BOTTOM) | | ||
513 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
514 | break; | ||
515 | case SE_AES_OP_MODE_OFB: | ||
516 | val = SE_CRYPTO_INPUT_SEL(INPUT_AESOUT) | | ||
517 | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AHB) | | ||
518 | SE_CRYPTO_XOR_POS(XOR_BOTTOM) | | ||
519 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT); | ||
520 | break; | ||
521 | default: | ||
522 | dev_warn(se_dev->dev, "Invalid operation mode\n"); | ||
523 | break; | ||
524 | } | ||
525 | |||
526 | if (mode == SE_AES_OP_MODE_CTR) { | ||
527 | val |= SE_CRYPTO_HASH(HASH_DISABLE) | | ||
528 | SE_CRYPTO_KEY_INDEX(slot_num) | | ||
529 | SE_CRYPTO_CTR_CNTN(1); | ||
530 | } else { | ||
531 | val |= SE_CRYPTO_HASH(HASH_DISABLE) | | ||
532 | SE_CRYPTO_KEY_INDEX(slot_num) | | ||
533 | (org_iv ? SE_CRYPTO_IV_SEL(IV_ORIGINAL) : | ||
534 | SE_CRYPTO_IV_SEL(IV_UPDATED)); | ||
535 | } | ||
536 | |||
537 | /* enable hash for CMAC */ | ||
538 | if (mode == SE_AES_OP_MODE_CMAC) | ||
539 | val |= SE_CRYPTO_HASH(HASH_ENABLE); | ||
540 | |||
541 | se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET); | ||
542 | |||
543 | if ((mode == SE_AES_OP_MODE_CTR) || (mode == SE_AES_OP_MODE_OFB)) | ||
544 | se_writel(se_dev, 1, SE_SPARE_0_REG_OFFSET); | ||
548 | |||
549 | } | ||
550 | |||
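| /* | ||
|  * Program the SHA message-length registers. The hardware takes the | ||
|  * length in bits (hence count * 8); the three upper 32-bit words of | ||
|  * both MSG_LENGTH and MSG_LEFT are cleared, so only messages shorter | ||
|  * than 2^32 bits are supported here. | ||
|  */ | ||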
551 | static void tegra_se_config_sha(struct tegra_se_dev *se_dev, u32 count) | ||
552 | { | ||
553 | int i; | ||
554 | |||
555 | se_writel(se_dev, (count * 8), SE_SHA_MSG_LENGTH_REG_OFFSET); | ||
556 | se_writel(se_dev, (count * 8), SE_SHA_MSG_LEFT_REG_OFFSET); | ||
557 | for (i = 1; i < 4; i++) { | ||
558 | se_writel(se_dev, 0, SE_SHA_MSG_LENGTH_REG_OFFSET + (4 * i)); | ||
559 | se_writel(se_dev, 0, SE_SHA_MSG_LEFT_REG_OFFSET + (4 * i)); | ||
560 | } | ||
561 | se_writel(se_dev, SHA_ENABLE, SE_SHA_CONFIG_REG_OFFSET); | ||
562 | } | ||
563 | |||
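| /* | ||
|  * Kick off one SE operation and wait for it to finish. BLOCK_COUNT | ||
|  * is programmed as N - 1 blocks; completion is signalled by the | ||
|  * OP_DONE interrupt from tegra_se_irq(), with a one-second timeout | ||
|  * as a backstop. | ||
|  */ | ||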
564 | static int tegra_se_start_operation(struct tegra_se_dev *se_dev, u32 nbytes, | ||
565 | bool context_save) | ||
566 | { | ||
567 | u32 nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE; | ||
568 | int ret = 0; | ||
569 | u32 val = 0; | ||
570 | |||
571 | /* clear any pending interrupts */ | ||
572 | val = se_readl(se_dev, SE_INT_STATUS_REG_OFFSET); | ||
573 | se_writel(se_dev, val, SE_INT_STATUS_REG_OFFSET); | ||
574 | se_writel(se_dev, se_dev->src_ll_buf_adr, SE_IN_LL_ADDR_REG_OFFSET); | ||
575 | se_writel(se_dev, se_dev->dst_ll_buf_adr, SE_OUT_LL_ADDR_REG_OFFSET); | ||
576 | |||
577 | if (nblocks) | ||
578 | se_writel(se_dev, nblocks-1, SE_BLOCK_COUNT_REG_OFFSET); | ||
579 | |||
580 | /* enable interrupts */ | ||
581 | val = SE_INT_ERROR(INT_ENABLE) | SE_INT_OP_DONE(INT_ENABLE); | ||
582 | se_writel(se_dev, val, SE_INT_ENABLE_REG_OFFSET); | ||
583 | |||
584 | INIT_COMPLETION(se_dev->complete); | ||
585 | |||
586 | if (context_save) | ||
587 | se_writel(se_dev, SE_OPERATION(OP_CTX_SAVE), | ||
588 | SE_OPERATION_REG_OFFSET); | ||
589 | else | ||
590 | se_writel(se_dev, SE_OPERATION(OP_SRART), | ||
591 | SE_OPERATION_REG_OFFSET); | ||
592 | |||
593 | ret = wait_for_completion_timeout(&se_dev->complete, | ||
594 | msecs_to_jiffies(1000)); | ||
595 | if (ret == 0) { | ||
596 | dev_err(se_dev->dev, "operation timed out, no interrupt received\n"); | ||
597 | return -ETIMEDOUT; | ||
598 | } | ||
599 | |||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | static void tegra_se_read_hash_result(struct tegra_se_dev *se_dev, | ||
604 | u8 *pdata, u32 nbytes, bool swap32) | ||
605 | { | ||
606 | u32 *result = (u32 *)pdata; | ||
607 | u32 i; | ||
608 | |||
609 | for (i = 0; i < nbytes/4; i++) { | ||
610 | result[i] = se_readl(se_dev, SE_HASH_RESULT_REG_OFFSET + | ||
611 | (i * sizeof(u32))); | ||
612 | if (swap32) | ||
613 | result[i] = be32_to_cpu(result[i]); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | static int tegra_se_count_sgs(struct scatterlist *sl, u32 total_bytes) | ||
618 | { | ||
619 | int i = 0; | ||
620 | |||
621 | if (!total_bytes) | ||
622 | return 0; | ||
623 | |||
624 | do { | ||
625 | total_bytes -= min(sl[i].length, total_bytes); | ||
626 | i++; | ||
627 | } while (total_bytes); | ||
628 | |||
629 | return i; | ||
630 | } | ||
631 | |||
632 | static int tegra_se_alloc_ll_buf(struct tegra_se_dev *se_dev, | ||
633 | u32 num_src_sgs, u32 num_dst_sgs) | ||
634 | { | ||
635 | if (se_dev->src_ll_buf || se_dev->dst_ll_buf) { | ||
636 | dev_err(se_dev->dev, "ll buffers are already allocated\n"); | ||
637 | return -EBUSY; | ||
638 | } | ||
639 | |||
640 | if (num_src_sgs) { | ||
641 | se_dev->src_ll_size = | ||
642 | (sizeof(struct tegra_se_ll) * num_src_sgs) + | ||
643 | sizeof(u32); | ||
644 | se_dev->src_ll_buf = dma_alloc_coherent(se_dev->dev, | ||
645 | se_dev->src_ll_size, | ||
646 | &se_dev->src_ll_buf_adr, GFP_KERNEL); | ||
647 | if (!se_dev->src_ll_buf) { | ||
648 | dev_err(se_dev->dev, "cannot allocate src ll dma buffer\n"); | ||
649 | return -ENOMEM; | ||
650 | } | ||
651 | } | ||
652 | if (num_dst_sgs) { | ||
653 | se_dev->dst_ll_size = | ||
654 | (sizeof(struct tegra_se_ll) * num_dst_sgs) + | ||
655 | sizeof(u32); | ||
656 | se_dev->dst_ll_buf = dma_alloc_coherent(se_dev->dev, | ||
657 | se_dev->dst_ll_size, | ||
658 | &se_dev->dst_ll_buf_adr, GFP_KERNEL); | ||
659 | if (!se_dev->dst_ll_buf) { | ||
660 | dev_err(se_dev->dev, "cannot allocate dst ll dma buffer\n"); | ||
661 | return -ENOMEM; | ||
662 | } | ||
663 | } | ||
664 | |||
665 | return 0; | ||
666 | } | ||
667 | |||
668 | static void tegra_se_free_ll_buf(struct tegra_se_dev *se_dev) | ||
669 | { | ||
670 | if (se_dev->src_ll_buf) { | ||
671 | dma_free_coherent(se_dev->dev, se_dev->src_ll_size, | ||
672 | se_dev->src_ll_buf, se_dev->src_ll_buf_adr); | ||
673 | se_dev->src_ll_buf = NULL; | ||
674 | } | ||
675 | |||
676 | if (se_dev->dst_ll_buf) { | ||
677 | dma_free_coherent(se_dev->dev, se_dev->dst_ll_size, | ||
678 | se_dev->dst_ll_buf, se_dev->dst_ll_buf_adr); | ||
679 | se_dev->dst_ll_buf = NULL; | ||
680 | } | ||
681 | } | ||
682 | |||
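| /* | ||
|  * Build the source/destination linked lists for an ablkcipher | ||
|  * request. The LL buffer layout is a 32-bit entry count (stored as | ||
|  * count - 1) followed by tegra_se_ll {addr, data_len} descriptors, | ||
|  * one per mapped SG entry. | ||
|  */ | ||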
683 | static int tegra_se_setup_ablk_req(struct tegra_se_dev *se_dev, | ||
684 | struct ablkcipher_request *req) | ||
685 | { | ||
686 | struct scatterlist *src_sg, *dst_sg; | ||
687 | struct tegra_se_ll *src_ll, *dst_ll; | ||
688 | u32 total, num_src_sgs, num_dst_sgs; | ||
689 | int ret = 0; | ||
690 | |||
691 | num_src_sgs = tegra_se_count_sgs(req->src, req->nbytes); | ||
692 | num_dst_sgs = tegra_se_count_sgs(req->dst, req->nbytes); | ||
693 | |||
694 | if ((num_src_sgs > SE_MAX_SRC_SG_COUNT) || | ||
695 | (num_dst_sgs > SE_MAX_DST_SG_COUNT)) { | ||
696 | dev_err(se_dev->dev, "number of src/dst SG buffers exceeds the limit\n"); | ||
697 | return -EINVAL; | ||
698 | } | ||
699 | |||
700 | *se_dev->src_ll_buf = num_src_sgs-1; | ||
701 | *se_dev->dst_ll_buf = num_dst_sgs-1; | ||
702 | |||
703 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
704 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
705 | |||
706 | src_sg = req->src; | ||
707 | dst_sg = req->dst; | ||
708 | total = req->nbytes; | ||
709 | |||
710 | while (total) { | ||
711 | ret = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
712 | if (!ret) { | ||
713 | dev_err(se_dev->dev, "dma_map_sg() error\n"); | ||
714 | return -EINVAL; | ||
715 | } | ||
716 | |||
717 | ret = dma_map_sg(se_dev->dev, dst_sg, 1, DMA_FROM_DEVICE); | ||
718 | if (!ret) { | ||
719 | dev_err(se_dev->dev, "dma_map_sg() error\n"); | ||
720 | dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
721 | return -EINVAL; | ||
722 | } | ||
723 | |||
724 | WARN_ON(src_sg->length != dst_sg->length); | ||
725 | src_ll->addr = sg_dma_address(src_sg); | ||
726 | src_ll->data_len = min(src_sg->length, total); | ||
727 | dst_ll->addr = sg_dma_address(dst_sg); | ||
728 | dst_ll->data_len = min(dst_sg->length, total); | ||
729 | total -= min(src_sg->length, total); | ||
730 | |||
731 | src_sg = sg_next(src_sg); | ||
732 | dst_sg = sg_next(dst_sg); | ||
733 | dst_ll++; | ||
734 | src_ll++; | ||
735 | WARN_ON(((total != 0) && (!src_sg || !dst_sg))); | ||
736 | } | ||
737 | return 0; | ||
738 | } | ||
739 | |||
740 | static void tegra_se_dequeue_complete_req(struct tegra_se_dev *se_dev, | ||
741 | struct ablkcipher_request *req) | ||
742 | { | ||
743 | struct scatterlist *src_sg, *dst_sg; | ||
744 | u32 total; | ||
745 | |||
746 | if (req) { | ||
747 | src_sg = req->src; | ||
748 | dst_sg = req->dst; | ||
749 | total = req->nbytes; | ||
750 | while (total) { | ||
751 | dma_unmap_sg(se_dev->dev, dst_sg, 1, DMA_FROM_DEVICE); | ||
752 | dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
753 | total -= min(src_sg->length, total); | ||
754 | src_sg = sg_next(src_sg); | ||
755 | dst_sg = sg_next(dst_sg); | ||
756 | } | ||
757 | } | ||
758 | } | ||
759 | |||
760 | static void tegra_se_process_new_req(struct crypto_async_request *async_req) | ||
761 | { | ||
762 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
763 | struct ablkcipher_request *req = ablkcipher_request_cast(async_req); | ||
764 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
765 | struct tegra_se_aes_context *aes_ctx = | ||
766 | crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
767 | int ret = 0; | ||
768 | |||
769 | /* take access to the hw */ | ||
770 | mutex_lock(&se_hw_lock); | ||
771 | |||
772 | /* write IV */ | ||
773 | if (req->info) { | ||
774 | if (req_ctx->op_mode == SE_AES_OP_MODE_CTR) { | ||
775 | tegra_se_write_seed(se_dev, (u32 *)req->info); | ||
776 | } else { | ||
777 | tegra_se_write_key_table(req->info, | ||
778 | TEGRA_SE_AES_IV_SIZE, | ||
779 | aes_ctx->slot->slot_num, | ||
780 | SE_KEY_TABLE_TYPE_ORGIV); | ||
781 | } | ||
782 | } | ||
783 | ret = tegra_se_setup_ablk_req(se_dev, req); | ||
784 | if (!ret) { | ||
785 | tegra_se_config_algo(se_dev, req_ctx->op_mode, | ||
786 | req_ctx->encrypt, aes_ctx->keylen); | ||
787 | tegra_se_config_crypto(se_dev, req_ctx->op_mode, | ||
788 | req_ctx->encrypt, aes_ctx->slot->slot_num, | ||
789 | req->info ? true : false); | ||
790 | ret = tegra_se_start_operation(se_dev, req->nbytes, false); | ||
791 | tegra_se_dequeue_complete_req(se_dev, req); | ||
792 | } | ||
790 | |||
791 | mutex_unlock(&se_hw_lock); | ||
792 | req->base.complete(&req->base, ret); | ||
793 | } | ||
794 | |||
795 | static irqreturn_t tegra_se_irq(int irq, void *dev) | ||
796 | { | ||
797 | struct tegra_se_dev *se_dev = dev; | ||
798 | u32 val; | ||
799 | |||
800 | val = se_readl(se_dev, SE_INT_STATUS_REG_OFFSET); | ||
801 | se_writel(se_dev, val, SE_INT_STATUS_REG_OFFSET); | ||
802 | |||
803 | if (val & SE_INT_ERROR(INT_SET)) | ||
804 | dev_err(se_dev->dev, "tegra_se_irq: error interrupt\n"); | ||
805 | |||
806 | if (val & SE_INT_OP_DONE(INT_SET)) | ||
807 | complete(&se_dev->complete); | ||
808 | |||
809 | return IRQ_HANDLED; | ||
810 | } | ||
811 | |||
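| /* | ||
|  * Work handler: drain the request queue one entry at a time. | ||
|  * work_q_busy is cleared under se_dev->lock once the queue is empty, | ||
|  * which tells tegra_se_aes_queue_req() when the work must be | ||
|  * requeued. | ||
|  */ | ||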
812 | static void tegra_se_work_handler(struct work_struct *work) | ||
813 | { | ||
814 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
815 | struct crypto_async_request *async_req = NULL; | ||
816 | struct crypto_async_request *backlog = NULL; | ||
817 | |||
818 | pm_runtime_get_sync(se_dev->dev); | ||
819 | |||
820 | do { | ||
821 | spin_lock_irq(&se_dev->lock); | ||
822 | backlog = crypto_get_backlog(&se_dev->queue); | ||
823 | async_req = crypto_dequeue_request(&se_dev->queue); | ||
824 | if (!async_req) | ||
825 | se_dev->work_q_busy = false; | ||
826 | |||
827 | spin_unlock_irq(&se_dev->lock); | ||
828 | |||
829 | if (backlog) { | ||
830 | backlog->complete(backlog, -EINPROGRESS); | ||
831 | backlog = NULL; | ||
832 | } | ||
833 | |||
834 | if (async_req) { | ||
835 | tegra_se_process_new_req(async_req); | ||
836 | async_req = NULL; | ||
837 | } | ||
838 | } while (se_dev->work_q_busy); | ||
839 | pm_runtime_put(se_dev->dev); | ||
840 | } | ||
841 | |||
842 | static int tegra_se_aes_queue_req(struct ablkcipher_request *req) | ||
843 | { | ||
844 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
845 | unsigned long flags; | ||
846 | bool idle = true; | ||
847 | int err = 0; | ||
848 | |||
849 | if (!req->nbytes) | ||
850 | return -EINVAL; | ||
851 | |||
852 | spin_lock_irqsave(&se_dev->lock, flags); | ||
853 | err = ablkcipher_enqueue_request(&se_dev->queue, req); | ||
854 | if (se_dev->work_q_busy) | ||
855 | idle = false; | ||
856 | spin_unlock_irqrestore(&se_dev->lock, flags); | ||
857 | |||
858 | if (idle) { | ||
859 | spin_lock_irq(&se_dev->lock); | ||
860 | se_dev->work_q_busy = true; | ||
861 | spin_unlock_irq(&se_dev->lock); | ||
862 | queue_work(se_work_q, &se_work); | ||
863 | } | ||
864 | |||
865 | return err; | ||
866 | } | ||
867 | |||
868 | static int tegra_se_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
869 | { | ||
870 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
871 | |||
872 | req_ctx->encrypt = true; | ||
873 | req_ctx->op_mode = SE_AES_OP_MODE_CBC; | ||
874 | |||
875 | return tegra_se_aes_queue_req(req); | ||
876 | } | ||
877 | |||
878 | static int tegra_se_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
879 | { | ||
880 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
881 | |||
882 | req_ctx->encrypt = false; | ||
883 | req_ctx->op_mode = SE_AES_OP_MODE_CBC; | ||
884 | |||
885 | return tegra_se_aes_queue_req(req); | ||
886 | } | ||
887 | |||
888 | static int tegra_se_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
889 | { | ||
890 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
891 | |||
892 | req_ctx->encrypt = true; | ||
893 | req_ctx->op_mode = SE_AES_OP_MODE_ECB; | ||
894 | |||
895 | return tegra_se_aes_queue_req(req); | ||
896 | } | ||
897 | |||
898 | static int tegra_se_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
899 | { | ||
900 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
901 | |||
902 | req_ctx->encrypt = false; | ||
903 | req_ctx->op_mode = SE_AES_OP_MODE_ECB; | ||
904 | |||
905 | return tegra_se_aes_queue_req(req); | ||
906 | } | ||
907 | |||
908 | static int tegra_se_aes_ctr_encrypt(struct ablkcipher_request *req) | ||
909 | { | ||
910 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
911 | |||
912 | req_ctx->encrypt = true; | ||
913 | req_ctx->op_mode = SE_AES_OP_MODE_CTR; | ||
914 | |||
915 | return tegra_se_aes_queue_req(req); | ||
916 | } | ||
917 | |||
918 | static int tegra_se_aes_ctr_decrypt(struct ablkcipher_request *req) | ||
919 | { | ||
920 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
921 | |||
922 | req_ctx->encrypt = false; | ||
923 | req_ctx->op_mode = SE_AES_OP_MODE_CTR; | ||
924 | |||
925 | return tegra_se_aes_queue_req(req); | ||
926 | } | ||
927 | |||
928 | static int tegra_se_aes_ofb_encrypt(struct ablkcipher_request *req) | ||
929 | { | ||
930 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
931 | |||
932 | req_ctx->encrypt = true; | ||
933 | req_ctx->op_mode = SE_AES_OP_MODE_OFB; | ||
934 | |||
935 | return tegra_se_aes_queue_req(req); | ||
936 | } | ||
937 | |||
938 | static int tegra_se_aes_ofb_decrypt(struct ablkcipher_request *req) | ||
939 | { | ||
940 | struct tegra_se_req_context *req_ctx = ablkcipher_request_ctx(req); | ||
941 | |||
942 | req_ctx->encrypt = false; | ||
943 | req_ctx->op_mode = SE_AES_OP_MODE_OFB; | ||
944 | |||
945 | return tegra_se_aes_queue_req(req); | ||
946 | } | ||
947 | |||
948 | static int tegra_se_aes_setkey(struct crypto_ablkcipher *tfm, | ||
949 | const u8 *key, u32 keylen) | ||
950 | { | ||
951 | struct tegra_se_aes_context *ctx = crypto_ablkcipher_ctx(tfm); | ||
952 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
953 | struct tegra_se_slot *pslot; | ||
954 | u8 *pdata = (u8 *)key; | ||
955 | |||
956 | if (!ctx) { | ||
957 | dev_err(se_dev->dev, "invalid context\n"); | ||
958 | return -EINVAL; | ||
959 | } | ||
960 | |||
961 | if ((keylen != TEGRA_SE_KEY_128_SIZE) && | ||
962 | (keylen != TEGRA_SE_KEY_192_SIZE) && | ||
963 | (keylen != TEGRA_SE_KEY_256_SIZE)) { | ||
964 | dev_err(se_dev->dev, "invalid key size\n"); | ||
965 | return -EINVAL; | ||
966 | } | ||
967 | |||
968 | if (key) { | ||
969 | if (!ctx->slot || | ||
970 | (ctx->slot->slot_num == ssk_slot.slot_num)) { | ||
971 | pslot = tegra_se_alloc_key_slot(); | ||
972 | if (!pslot) { | ||
973 | dev_err(se_dev->dev, "no free key slot\n"); | ||
974 | return -ENOMEM; | ||
975 | } | ||
976 | ctx->slot = pslot; | ||
977 | } | ||
978 | ctx->keylen = keylen; | ||
979 | } else { | ||
980 | tegra_se_free_key_slot(ctx->slot); | ||
981 | ctx->slot = &ssk_slot; | ||
982 | ctx->keylen = AES_KEYSIZE_128; | ||
983 | } | ||
984 | |||
985 | /* take access to the hw */ | ||
986 | mutex_lock(&se_hw_lock); | ||
987 | pm_runtime_get_sync(se_dev->dev); | ||
988 | |||
989 | /* load the key */ | ||
990 | tegra_se_write_key_table(pdata, keylen, ctx->slot->slot_num, | ||
991 | SE_KEY_TABLE_TYPE_KEY); | ||
992 | |||
993 | pm_runtime_put(se_dev->dev); | ||
994 | mutex_unlock(&se_hw_lock); | ||
995 | |||
996 | return 0; | ||
997 | } | ||
998 | |||
999 | static int tegra_se_aes_cra_init(struct crypto_tfm *tfm) | ||
1000 | { | ||
1001 | struct tegra_se_aes_context *ctx = crypto_tfm_ctx(tfm); | ||
1002 | |||
1003 | ctx->se_dev = sg_tegra_se_dev; | ||
1004 | tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_se_req_context); | ||
1005 | |||
1006 | return 0; | ||
1007 | } | ||
1008 | |||
1009 | static void tegra_se_aes_cra_exit(struct crypto_tfm *tfm) | ||
1010 | { | ||
1011 | struct tegra_se_aes_context *ctx = crypto_tfm_ctx(tfm); | ||
1012 | |||
1013 | tegra_se_free_key_slot(ctx->slot); | ||
1014 | ctx->slot = NULL; | ||
1015 | } | ||
1016 | |||
1017 | static int tegra_se_rng_init(struct crypto_tfm *tfm) | ||
1018 | { | ||
1019 | struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm); | ||
1020 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
1021 | |||
1022 | rng_ctx->se_dev = se_dev; | ||
1023 | rng_ctx->dt_buf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1024 | &rng_ctx->dt_buf_adr, GFP_KERNEL); | ||
1025 | if (!rng_ctx->dt_buf) { | ||
1026 | dev_err(se_dev->dev, "cannot allocate rng dt dma buffer\n"); | ||
1027 | return -ENOMEM; | ||
1028 | } | ||
1029 | |||
1030 | rng_ctx->rng_buf = dma_alloc_coherent(rng_ctx->se_dev->dev, | ||
1031 | TEGRA_SE_RNG_DT_SIZE, &rng_ctx->rng_buf_adr, GFP_KERNEL); | ||
1032 | if (!rng_ctx->rng_buf) { | ||
1033 | dev_err(se_dev->dev, "cannot allocate rng result dma buffer\n"); | ||
1034 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1035 | rng_ctx->dt_buf, rng_ctx->dt_buf_adr); | ||
1036 | return -ENOMEM; | ||
1037 | } | ||
1038 | |||
1039 | rng_ctx->slot = tegra_se_alloc_key_slot(); | ||
1040 | |||
1041 | if (!rng_ctx->slot) { | ||
1042 | dev_err(rng_ctx->se_dev->dev, "no free slot\n"); | ||
1043 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1044 | rng_ctx->dt_buf, rng_ctx->dt_buf_adr); | ||
1045 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1046 | rng_ctx->rng_buf, rng_ctx->rng_buf_adr); | ||
1047 | return -ENOMEM; | ||
1048 | } | ||
1049 | |||
1050 | return 0; | ||
1051 | } | ||
1052 | |||
1053 | static void tegra_se_rng_exit(struct crypto_tfm *tfm) | ||
1054 | { | ||
1055 | struct tegra_se_rng_context *rng_ctx = crypto_tfm_ctx(tfm); | ||
1056 | |||
1057 | if (rng_ctx->dt_buf) { | ||
1058 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1059 | rng_ctx->dt_buf, rng_ctx->dt_buf_adr); | ||
1060 | } | ||
1061 | |||
1062 | if (rng_ctx->rng_buf) { | ||
1063 | dma_free_coherent(rng_ctx->se_dev->dev, TEGRA_SE_RNG_DT_SIZE, | ||
1064 | rng_ctx->rng_buf, rng_ctx->rng_buf_adr); | ||
1065 | } | ||
1066 | |||
1067 | tegra_se_free_key_slot(rng_ctx->slot); | ||
1068 | rng_ctx->slot = NULL; | ||
1069 | rng_ctx->se_dev = NULL; | ||
1070 | } | ||
1071 | |||
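| /* | ||
|  * Generate random bytes with the X9.31-style AES RNG: each pass | ||
|  * produces TEGRA_SE_RNG_DT_SIZE bytes into rng_buf, a trailing | ||
|  * partial block is copied out separately, and the DT vector is | ||
|  * incremented as a big-endian counter between passes. | ||
|  */ | ||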
1072 | static int tegra_se_rng_get_random(struct crypto_rng *tfm, u8 *rdata, u32 dlen) | ||
1073 | { | ||
1074 | struct tegra_se_rng_context *rng_ctx = crypto_rng_ctx(tfm); | ||
1075 | struct tegra_se_dev *se_dev = rng_ctx->se_dev; | ||
1076 | struct tegra_se_ll *src_ll, *dst_ll; | ||
1077 | unsigned char *dt_buf = (unsigned char *)rng_ctx->dt_buf; | ||
1078 | u8 *rdata_addr; | ||
1079 | int ret = 0, i, j, num_blocks, data_len = 0; | ||
1080 | |||
1081 | num_blocks = (dlen / TEGRA_SE_RNG_DT_SIZE); | ||
1082 | |||
1083 | data_len = (dlen % TEGRA_SE_RNG_DT_SIZE); | ||
1084 | if (data_len == 0) | ||
1085 | num_blocks = num_blocks - 1; | ||
1086 | |||
1087 | /* take access to the hw */ | ||
1088 | mutex_lock(&se_hw_lock); | ||
1089 | pm_runtime_get_sync(se_dev->dev); | ||
1090 | |||
1091 | *se_dev->src_ll_buf = 0; | ||
1092 | *se_dev->dst_ll_buf = 0; | ||
1093 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
1094 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
1095 | src_ll->addr = rng_ctx->dt_buf_adr; | ||
1096 | src_ll->data_len = TEGRA_SE_RNG_DT_SIZE; | ||
1097 | dst_ll->addr = rng_ctx->rng_buf_adr; | ||
1098 | dst_ll->data_len = TEGRA_SE_RNG_DT_SIZE; | ||
1099 | |||
1100 | tegra_se_config_algo(se_dev, SE_AES_OP_MODE_RNG_X931, true, | ||
1101 | TEGRA_SE_KEY_128_SIZE); | ||
1102 | tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_RNG_X931, true, | ||
1103 | rng_ctx->slot->slot_num, rng_ctx->use_org_iv); | ||
1104 | for (j = 0; j <= num_blocks; j++) { | ||
1105 | ret = tegra_se_start_operation(se_dev, | ||
1106 | TEGRA_SE_RNG_DT_SIZE, false); | ||
1107 | |||
1108 | if (!ret) { | ||
1109 | rdata_addr = (rdata + (j * TEGRA_SE_RNG_DT_SIZE)); | ||
1110 | |||
1111 | if (data_len && num_blocks == j) { | ||
1112 | memcpy(rdata_addr, rng_ctx->rng_buf, data_len); | ||
1113 | } else { | ||
1114 | memcpy(rdata_addr, | ||
1115 | rng_ctx->rng_buf, TEGRA_SE_RNG_DT_SIZE); | ||
1116 | } | ||
1117 | |||
1118 | /* update DT vector */ | ||
1119 | for (i = TEGRA_SE_RNG_DT_SIZE - 1; i >= 0; i--) { | ||
1120 | dt_buf[i] += 1; | ||
1121 | if (dt_buf[i] != 0) | ||
1122 | break; | ||
1123 | } | ||
1124 | } else { | ||
1125 | dlen = 0; | ||
1126 | } | ||
1127 | if (rng_ctx->use_org_iv) { | ||
1128 | rng_ctx->use_org_iv = false; | ||
1129 | tegra_se_config_crypto(se_dev, | ||
1130 | SE_AES_OP_MODE_RNG_X931, true, | ||
1131 | rng_ctx->slot->slot_num, rng_ctx->use_org_iv); | ||
1132 | } | ||
1133 | } | ||
1134 | |||
1135 | pm_runtime_put(se_dev->dev); | ||
1136 | mutex_unlock(&se_hw_lock); | ||
1137 | |||
1138 | return dlen; | ||
1139 | } | ||
1140 | |||
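| /* | ||
|  * Reseed the RNG. The seed is laid out as IV | key | DT; when the | ||
|  * caller supplies fewer than TEGRA_SE_RNG_SEED_SIZE bytes, the DT | ||
|  * vector is instead derived from the current time (XORed with a | ||
|  * per-device counter) and the chip UID. | ||
|  */ | ||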
1141 | static int tegra_se_rng_reset(struct crypto_rng *tfm, u8 *seed, u32 slen) | ||
1142 | { | ||
1143 | struct tegra_se_rng_context *rng_ctx = crypto_rng_ctx(tfm); | ||
1144 | struct tegra_se_dev *se_dev = rng_ctx->se_dev; | ||
1145 | u8 *iv = seed; | ||
1146 | u8 *key = (u8 *)(seed + TEGRA_SE_RNG_IV_SIZE); | ||
1147 | u8 *dt = key + TEGRA_SE_RNG_KEY_SIZE; | ||
1148 | struct timespec ts; | ||
1149 | u64 nsec, tmp[2]; | ||
1150 | |||
1151 | BUG_ON(!seed); | ||
1152 | |||
1153 | /* take access to the hw */ | ||
1154 | mutex_lock(&se_hw_lock); | ||
1155 | pm_runtime_get_sync(se_dev->dev); | ||
1156 | |||
1157 | tegra_se_write_key_table(key, TEGRA_SE_RNG_KEY_SIZE, | ||
1158 | rng_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_KEY); | ||
1159 | |||
1160 | tegra_se_write_key_table(iv, TEGRA_SE_RNG_IV_SIZE, | ||
1161 | rng_ctx->slot->slot_num, SE_KEY_TABLE_TYPE_ORGIV); | ||
1162 | |||
1163 | pm_runtime_put(se_dev->dev); | ||
1164 | mutex_unlock(&se_hw_lock); | ||
1165 | |||
1166 | if (slen < TEGRA_SE_RNG_SEED_SIZE) { | ||
1167 | getnstimeofday(&ts); | ||
1168 | nsec = timespec_to_ns(&ts); | ||
1169 | do_div(nsec, 1000); | ||
1170 | nsec ^= se_dev->ctr << 56; | ||
1171 | se_dev->ctr++; | ||
1172 | tmp[0] = nsec; | ||
1173 | tmp[1] = tegra_chip_uid(); | ||
1174 | memcpy(rng_ctx->dt_buf, (u8 *)tmp, TEGRA_SE_RNG_DT_SIZE); | ||
1175 | } else { | ||
1176 | memcpy(rng_ctx->dt_buf, dt, TEGRA_SE_RNG_DT_SIZE); | ||
1177 | } | ||
1178 | |||
1179 | rng_ctx->use_org_iv = true; | ||
1180 | |||
1181 | return 0; | ||
1182 | } | ||
1183 | |||
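| /* | ||
|  * The SHA path is one-shot: init/update/finup are no-ops and | ||
|  * tegra_se_sha_final() hashes the whole request, so callers must | ||
|  * supply the complete message in a single request. | ||
|  */ | ||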
1184 | int tegra_se_sha_init(struct ahash_request *req) | ||
1185 | { | ||
1186 | return 0; | ||
1187 | } | ||
1188 | |||
1189 | int tegra_se_sha_update(struct ahash_request *req) | ||
1190 | { | ||
1191 | return 0; | ||
1192 | } | ||
1193 | |||
1194 | int tegra_se_sha_finup(struct ahash_request *req) | ||
1195 | { | ||
1196 | return 0; | ||
1197 | } | ||
1198 | |||
1199 | int tegra_se_sha_final(struct ahash_request *req) | ||
1200 | { | ||
1201 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1202 | struct tegra_se_sha_context *sha_ctx = crypto_ahash_ctx(tfm); | ||
1203 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
1204 | struct scatterlist *src_sg; | ||
1205 | struct tegra_se_ll *src_ll; | ||
1206 | u32 total, num_sgs; | ||
1207 | int err = 0; | ||
1208 | |||
1209 | if (!req->nbytes) | ||
1210 | return -EINVAL; | ||
1211 | |||
1212 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | ||
1213 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA1; | ||
1214 | |||
1215 | if (crypto_ahash_digestsize(tfm) == SHA224_DIGEST_SIZE) | ||
1216 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA224; | ||
1217 | |||
1218 | if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE) | ||
1219 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA256; | ||
1220 | |||
1221 | if (crypto_ahash_digestsize(tfm) == SHA384_DIGEST_SIZE) | ||
1222 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA384; | ||
1223 | |||
1224 | if (crypto_ahash_digestsize(tfm) == SHA512_DIGEST_SIZE) | ||
1225 | sha_ctx->op_mode = SE_AES_OP_MODE_SHA512; | ||
1226 | |||
1227 | /* take access to the hw */ | ||
1228 | mutex_lock(&se_hw_lock); | ||
1229 | pm_runtime_get_sync(se_dev->dev); | ||
1230 | |||
1231 | num_sgs = tegra_se_count_sgs(req->src, req->nbytes); | ||
1232 | if (num_sgs > SE_MAX_SRC_SG_COUNT) { | ||
1233 | dev_err(se_dev->dev, "number of src SG buffers exceeds the limit\n"); | ||
1234 | pm_runtime_put(se_dev->dev); | ||
1235 | mutex_unlock(&se_hw_lock); | ||
1236 | return -EINVAL; | ||
1237 | } | ||
1238 | *se_dev->src_ll_buf = num_sgs-1; | ||
1239 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
1240 | src_sg = req->src; | ||
1241 | total = req->nbytes; | ||
1242 | |||
1243 | while (total) { | ||
1244 | err = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
1245 | if (!err) { | ||
1246 | dev_err(se_dev->dev, "dma_map_sg() error\n"); | ||
1247 | pm_runtime_put(se_dev->dev); | ||
1248 | mutex_unlock(&se_hw_lock); | ||
1249 | return -EINVAL; | ||
1250 | } | ||
1251 | src_ll->addr = sg_dma_address(src_sg); | ||
1252 | src_ll->data_len = src_sg->length; | ||
1253 | |||
1254 | total -= src_sg->length; | ||
1255 | src_sg = sg_next(src_sg); | ||
1256 | src_ll++; | ||
1257 | } | ||
1258 | |||
1259 | tegra_se_config_algo(se_dev, sha_ctx->op_mode, false, 0); | ||
1260 | tegra_se_config_sha(se_dev, req->nbytes); | ||
1261 | err = tegra_se_start_operation(se_dev, 0, false); | ||
1262 | if (!err) { | ||
1263 | tegra_se_read_hash_result(se_dev, req->result, | ||
1264 | crypto_ahash_digestsize(tfm), true); | ||
1265 | if ((sha_ctx->op_mode == SE_AES_OP_MODE_SHA384) || | ||
1266 | (sha_ctx->op_mode == SE_AES_OP_MODE_SHA512)) { | ||
1267 | u32 *result = (u32 *)req->result; | ||
1268 | u32 temp, i; | ||
1269 | |||
1270 | for (i = 0; i < crypto_ahash_digestsize(tfm)/4; | ||
1271 | i += 2) { | ||
1272 | temp = result[i]; | ||
1273 | result[i] = result[i+1]; | ||
1274 | result[i+1] = temp; | ||
1275 | } | ||
1276 | } | ||
1277 | } | ||
1278 | |||
1279 | src_sg = req->src; | ||
1280 | total = req->nbytes; | ||
1281 | while (total) { | ||
1282 | dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
1283 | total -= src_sg->length; | ||
1284 | src_sg = sg_next(src_sg); | ||
1285 | } | ||
1286 | pm_runtime_put(se_dev->dev); | ||
1287 | mutex_unlock(&se_hw_lock); | ||
1288 | |||
1289 | return err; | ||
1290 | } | ||
1291 | |||
1292 | static int tegra_se_sha_digest(struct ahash_request *req) | ||
1293 | { | ||
1294 | return tegra_se_sha_init(req) ?: tegra_se_sha_final(req); | ||
1295 | } | ||
1296 | |||
1297 | int tegra_se_sha_cra_init(struct crypto_tfm *tfm) | ||
1298 | { | ||
1299 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
1300 | sizeof(struct tegra_se_sha_context)); | ||
1301 | return 0; | ||
1302 | } | ||
1303 | |||
1304 | void tegra_se_sha_cra_exit(struct crypto_tfm *tfm) | ||
1305 | { | ||
1306 | /* do nothing */ | ||
1307 | } | ||
1308 | |||
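| /* | ||
|  * CMAC: all blocks except the last are run through AES-CBC with the | ||
|  * hash output enabled and a zero IV; the last block is gathered into | ||
|  * a bounce buffer, padded with 0x80 00 ... if partial, XORed with | ||
|  * subkey K2 (partial block) or K1 (complete block), and processed | ||
|  * against the updated IV. | ||
|  */ | ||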
1309 | int tegra_se_aes_cmac_init(struct ahash_request *req) | ||
1310 | { | ||
1312 | return 0; | ||
1313 | } | ||
1314 | |||
1315 | int tegra_se_aes_cmac_update(struct ahash_request *req) | ||
1316 | { | ||
1317 | return 0; | ||
1318 | } | ||
1319 | |||
1320 | int tegra_se_aes_cmac_final(struct ahash_request *req) | ||
1321 | { | ||
1322 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
1323 | struct tegra_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(tfm); | ||
1324 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
1325 | struct scatterlist *src_sg; | ||
1326 | struct tegra_se_ll *src_ll; | ||
1327 | struct sg_mapping_iter miter; | ||
1328 | u32 num_sgs, blocks_to_process, last_block_bytes = 0, bytes_to_copy = 0; | ||
1329 | u8 piv[TEGRA_SE_AES_IV_SIZE]; | ||
1330 | int total, ret = 0, i = 0, mapped_sg_count = 0; | ||
1331 | bool padding_needed = false; | ||
1332 | unsigned long flags; | ||
1333 | unsigned int sg_flags = SG_MITER_ATOMIC; | ||
1334 | u8 *temp_buffer = NULL; | ||
1335 | bool use_orig_iv = true; | ||
1336 | |||
1337 | /* take access to the hw */ | ||
1338 | mutex_lock(&se_hw_lock); | ||
1339 | pm_runtime_get_sync(se_dev->dev); | ||
1340 | |||
1341 | blocks_to_process = req->nbytes / TEGRA_SE_AES_BLOCK_SIZE; | ||
1342 | /* num of bytes less than block size */ | ||
1343 | if ((req->nbytes % TEGRA_SE_AES_BLOCK_SIZE) || !blocks_to_process) { | ||
1344 | padding_needed = true; | ||
1345 | last_block_bytes = req->nbytes % TEGRA_SE_AES_BLOCK_SIZE; | ||
1346 | } else { | ||
1347 | /* decrement num of blocks */ | ||
1348 | blocks_to_process--; | ||
1349 | if (blocks_to_process) { | ||
1350 | /* blocks remain to process; compute the number of | ||
1351 | bytes in the last block */ | ||
1352 | last_block_bytes = req->nbytes - | ||
1353 | (blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE); | ||
1354 | } else { | ||
1355 | /* the request is exactly one block long */ | ||
1356 | last_block_bytes = req->nbytes; | ||
1357 | } | ||
1358 | } | ||
1359 | |||
1360 | /* first process all blocks except last block */ | ||
1361 | if (blocks_to_process) { | ||
1362 | num_sgs = tegra_se_count_sgs(req->src, req->nbytes); | ||
1363 | if (num_sgs > SE_MAX_SRC_SG_COUNT) { | ||
1364 | dev_err(se_dev->dev, "number of src SG buffers exceeds the limit\n"); | ||
1365 | ret = -EINVAL; | ||
1366 | goto out; | ||
1367 | } | ||
1367 | *se_dev->src_ll_buf = num_sgs - 1; | ||
1368 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
1369 | src_sg = req->src; | ||
1370 | total = blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE; | ||
1371 | while (total > 0) { | ||
1372 | ret = dma_map_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
1373 | if (!ret) { | ||
1374 | dev_err(se_dev->dev, "dma_map_sg() error\n"); | ||
1375 | ret = -EINVAL; | ||
1376 | goto out; | ||
1377 | } | ||
1378 | mapped_sg_count++; | ||
1378 | src_ll->addr = sg_dma_address(src_sg); | ||
1379 | if (total > src_sg->length) | ||
1380 | src_ll->data_len = src_sg->length; | ||
1381 | else | ||
1382 | src_ll->data_len = total; | ||
1383 | |||
1384 | total -= src_sg->length; | ||
1385 | if (total > 0) { | ||
1386 | src_sg = sg_next(src_sg); | ||
1387 | src_ll++; | ||
1388 | } | ||
1389 | WARN_ON(((total != 0) && (!src_sg))); | ||
1390 | } | ||
1391 | tegra_se_config_algo(se_dev, SE_AES_OP_MODE_CMAC, true, | ||
1392 | cmac_ctx->keylen); | ||
1393 | /* write zero IV */ | ||
1394 | memset(piv, 0, TEGRA_SE_AES_IV_SIZE); | ||
1395 | tegra_se_write_key_table(piv, TEGRA_SE_AES_IV_SIZE, | ||
1396 | cmac_ctx->slot->slot_num, | ||
1397 | SE_KEY_TABLE_TYPE_ORGIV); | ||
1398 | tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CMAC, true, | ||
1399 | cmac_ctx->slot->slot_num, true); | ||
1400 | ret = tegra_se_start_operation(se_dev, | ||
1401 | blocks_to_process * TEGRA_SE_AES_BLOCK_SIZE, false); | ||
1402 | src_sg = req->src; | ||
1403 | while (mapped_sg_count--) { | ||
1404 | dma_unmap_sg(se_dev->dev, src_sg, 1, DMA_TO_DEVICE); | ||
1405 | src_sg = sg_next(src_sg); | ||
1406 | } | ||
1407 | if (ret) | ||
1408 | goto out; | ||
1409 | use_orig_iv = false; | ||
1410 | } | ||
1409 | |||
1410 | /* get the last block bytes from the sg_dma buffer using miter */ | ||
1411 | src_sg = req->src; | ||
1412 | num_sgs = tegra_se_count_sgs(req->src, req->nbytes); | ||
1413 | sg_flags |= SG_MITER_FROM_SG; | ||
1414 | /* allocate the bounce buffer before disabling irqs, | ||
1415 | since GFP_KERNEL may sleep */ | ||
1416 | cmac_ctx->buffer = dma_alloc_coherent(se_dev->dev, | ||
1417 | TEGRA_SE_AES_BLOCK_SIZE, | ||
1418 | &cmac_ctx->dma_addr, GFP_KERNEL); | ||
1419 | if (!cmac_ctx->buffer) { | ||
1420 | ret = -ENOMEM; | ||
1421 | goto out; | ||
1422 | } | ||
1423 | |||
1424 | sg_miter_start(&miter, req->src, num_sgs, sg_flags); | ||
1425 | local_irq_save(flags); | ||
1426 | total = 0; | ||
1423 | |||
1424 | temp_buffer = cmac_ctx->buffer; | ||
1425 | while (sg_miter_next(&miter) && total < req->nbytes) { | ||
1426 | unsigned int len; | ||
1427 | len = min(miter.length, req->nbytes - total); | ||
1428 | if ((req->nbytes - (total + len)) <= last_block_bytes) { | ||
1429 | bytes_to_copy = | ||
1430 | last_block_bytes - | ||
1431 | (req->nbytes - (total + len)); | ||
1432 | memcpy(temp_buffer, miter.addr + (len - bytes_to_copy), | ||
1433 | bytes_to_copy); | ||
1434 | last_block_bytes -= bytes_to_copy; | ||
1435 | temp_buffer += bytes_to_copy; | ||
1436 | } | ||
1437 | total += len; | ||
1438 | } | ||
1439 | sg_miter_stop(&miter); | ||
1440 | local_irq_restore(flags); | ||
1441 | |||
1442 | /* process last block */ | ||
1443 | if (padding_needed) { | ||
1444 | /* pad with 0x80, 0, 0 ... */ | ||
1445 | last_block_bytes = req->nbytes % TEGRA_SE_AES_BLOCK_SIZE; | ||
1446 | cmac_ctx->buffer[last_block_bytes] = 0x80; | ||
1447 | for (i = last_block_bytes+1; i < TEGRA_SE_AES_BLOCK_SIZE; i++) | ||
1448 | cmac_ctx->buffer[i] = 0; | ||
1449 | /* XOR with K2 */ | ||
1450 | for (i = 0; i < TEGRA_SE_AES_BLOCK_SIZE; i++) | ||
1451 | cmac_ctx->buffer[i] ^= cmac_ctx->K2[i]; | ||
1452 | } else { | ||
1453 | /* XOR with K1 */ | ||
1454 | for (i = 0; i < TEGRA_SE_AES_BLOCK_SIZE; i++) | ||
1455 | cmac_ctx->buffer[i] ^= cmac_ctx->K1[i]; | ||
1456 | } | ||
1457 | *se_dev->src_ll_buf = 0; | ||
1458 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
1459 | src_ll->addr = cmac_ctx->dma_addr; | ||
1460 | src_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE; | ||
1461 | |||
1462 | if (use_orig_iv) { | ||
1463 | /* use zero IV; this is the case when the total | ||
1464 | number of bytes is <= one block size */ | ||
1465 | memset(piv, 0, TEGRA_SE_AES_IV_SIZE); | ||
1466 | tegra_se_write_key_table(piv, TEGRA_SE_AES_IV_SIZE, | ||
1467 | cmac_ctx->slot->slot_num, | ||
1468 | SE_KEY_TABLE_TYPE_ORGIV); | ||
1469 | } | ||
1470 | |||
1471 | tegra_se_config_algo(se_dev, SE_AES_OP_MODE_CMAC, true, | ||
1472 | cmac_ctx->keylen); | ||
1473 | tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CMAC, true, | ||
1474 | cmac_ctx->slot->slot_num, use_orig_iv); | ||
1475 | ret = tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE, false); | ||
1476 | if (!ret) | ||
1477 | tegra_se_read_hash_result(se_dev, req->result, | ||
1478 | TEGRA_SE_AES_CMAC_DIGEST_SIZE, false); | ||
1478 | |||
1479 | out: | ||
1480 | pm_runtime_put(se_dev->dev); | ||
1481 | mutex_unlock(&se_hw_lock); | ||
1482 | |||
1483 | if (cmac_ctx->buffer) | ||
1484 | dma_free_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, | ||
1485 | cmac_ctx->buffer, cmac_ctx->dma_addr); | ||
1486 | |||
1487 | return ret; | ||
1488 | } | ||
1489 | |||
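| /* | ||
|  * Load the CMAC key and derive the K1/K2 subkeys per the CMAC spec: | ||
|  * L = AES-CBC(key, 0^128); K1 = L << 1, XORed with Rb (0x87) if the | ||
|  * shifted-out bit was set; K2 is derived from K1 the same way. | ||
|  */ | ||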
1490 | int tegra_se_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
1491 | unsigned int keylen) | ||
1492 | { | ||
1493 | struct tegra_se_aes_cmac_context *ctx = crypto_ahash_ctx(tfm); | ||
1494 | struct tegra_se_dev *se_dev = sg_tegra_se_dev; | ||
1495 | struct tegra_se_ll *src_ll, *dst_ll; | ||
1496 | struct tegra_se_slot *pslot; | ||
1497 | u8 piv[TEGRA_SE_AES_IV_SIZE]; | ||
1498 | u32 *pbuf; | ||
1499 | dma_addr_t pbuf_adr; | ||
1500 | int ret = 0; | ||
1501 | u8 const rb = 0x87; | ||
1502 | u8 msb; | ||
1503 | |||
1504 | if (!ctx) { | ||
1505 | dev_err(se_dev->dev, "invalid context"); | ||
1506 | return -EINVAL; | ||
1507 | } | ||
1508 | |||
1509 | if ((keylen != TEGRA_SE_KEY_128_SIZE) && | ||
1510 | (keylen != TEGRA_SE_KEY_192_SIZE) && | ||
1511 | (keylen != TEGRA_SE_KEY_256_SIZE)) { | ||
1512 | dev_err(se_dev->dev, "invalid key size"); | ||
1513 | return -EINVAL; | ||
1514 | } | ||
1515 | |||
1516 | if (key) { | ||
1517 | if (!ctx->slot || | ||
1518 | (ctx->slot->slot_num == ssk_slot.slot_num)) { | ||
1519 | pslot = tegra_se_alloc_key_slot(); | ||
1520 | if (!pslot) { | ||
1521 | dev_err(se_dev->dev, "no free key slot\n"); | ||
1522 | return -ENOMEM; | ||
1523 | } | ||
1524 | ctx->slot = pslot; | ||
1525 | } | ||
1526 | ctx->keylen = keylen; | ||
1527 | } else { | ||
1528 | tegra_se_free_key_slot(ctx->slot); | ||
1529 | ctx->slot = &ssk_slot; | ||
1530 | ctx->keylen = AES_KEYSIZE_128; | ||
1531 | } | ||
1532 | |||
1533 | pbuf = dma_alloc_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, | ||
1534 | &pbuf_adr, GFP_KERNEL); | ||
1535 | if (!pbuf) { | ||
1536 | dev_err(se_dev->dev, "can not allocate dma buffer"); | ||
1537 | return -ENOMEM; | ||
1538 | } | ||
1539 | memset(pbuf, 0, TEGRA_SE_AES_BLOCK_SIZE); | ||
1540 | |||
1541 | /* take access to the hw */ | ||
1542 | mutex_lock(&se_hw_lock); | ||
1543 | pm_runtime_get_sync(se_dev->dev); | ||
1544 | |||
1545 | *se_dev->src_ll_buf = 0; | ||
1546 | *se_dev->dst_ll_buf = 0; | ||
1547 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
1548 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
1549 | |||
1550 | src_ll->addr = pbuf_adr; | ||
1551 | src_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE; | ||
1552 | dst_ll->addr = pbuf_adr; | ||
1553 | dst_ll->data_len = TEGRA_SE_AES_BLOCK_SIZE; | ||
1554 | |||
1555 | /* load the key */ | ||
1556 | tegra_se_write_key_table((u8 *)key, keylen, | ||
1557 | ctx->slot->slot_num, SE_KEY_TABLE_TYPE_KEY); | ||
1558 | |||
1559 | /* write zero IV */ | ||
1560 | memset(piv, 0, TEGRA_SE_AES_IV_SIZE); | ||
1561 | |||
1562 | /* load IV */ | ||
1563 | tegra_se_write_key_table(piv, TEGRA_SE_AES_IV_SIZE, | ||
1564 | ctx->slot->slot_num, SE_KEY_TABLE_TYPE_ORGIV); | ||
1565 | |||
1566 | /* config crypto algo */ | ||
1567 | tegra_se_config_algo(se_dev, SE_AES_OP_MODE_CBC, true, keylen); | ||
1568 | |||
1569 | tegra_se_config_crypto(se_dev, SE_AES_OP_MODE_CBC, true, | ||
1570 | ctx->slot->slot_num, true); | ||
1571 | |||
1572 | ret = tegra_se_start_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE, false); | ||
1573 | if (ret) { | ||
1574 | dev_err(se_dev->dev, "tegra_se_aes_cmac_setkey: start op failed\n"); | ||
1575 | goto out; | ||
1576 | } | ||
1577 | |||
1578 | /* compute K1 subkey */ | ||
1579 | memcpy(ctx->K1, pbuf, TEGRA_SE_AES_BLOCK_SIZE); | ||
1580 | tegra_se_leftshift_onebit(ctx->K1, TEGRA_SE_AES_BLOCK_SIZE, &msb); | ||
1581 | if (msb) | ||
1582 | ctx->K1[TEGRA_SE_AES_BLOCK_SIZE - 1] ^= rb; | ||
1583 | |||
1584 | /* compute K2 subkey */ | ||
1585 | memcpy(ctx->K2, ctx->K1, TEGRA_SE_AES_BLOCK_SIZE); | ||
1586 | tegra_se_leftshift_onebit(ctx->K2, TEGRA_SE_AES_BLOCK_SIZE, &msb); | ||
1587 | |||
1588 | if (msb) | ||
1589 | ctx->K2[TEGRA_SE_AES_BLOCK_SIZE - 1] ^= rb; | ||
1590 | |||
1591 | out: | ||
1592 | pm_runtime_put(se_dev->dev); | ||
1593 | mutex_unlock(&se_hw_lock); | ||
1594 | |||
1595 | if (pbuf) { | ||
1596 | dma_free_coherent(se_dev->dev, TEGRA_SE_AES_BLOCK_SIZE, | ||
1597 | pbuf, pbuf_adr); | ||
1598 | } | ||
1599 | |||
1600 | return ret; | ||
1601 | } | ||
1602 | |||
1603 | int tegra_se_aes_cmac_digest(struct ahash_request *req) | ||
1604 | { | ||
1605 | return tegra_se_aes_cmac_init(req) ?: tegra_se_aes_cmac_final(req); | ||
1606 | } | ||
1607 | |||
1608 | int tegra_se_aes_cmac_finup(struct ahash_request *req) | ||
1609 | { | ||
1610 | return 0; | ||
1611 | } | ||
1612 | |||
1613 | int tegra_se_aes_cmac_cra_init(struct crypto_tfm *tfm) | ||
1614 | { | ||
1615 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
1616 | sizeof(struct tegra_se_aes_cmac_context)); | ||
1617 | |||
1618 | return 0; | ||
1619 | } | ||
| |||
1620 | void tegra_se_aes_cmac_cra_exit(struct crypto_tfm *tfm) | ||
1621 | { | ||
1622 | struct tegra_se_aes_cmac_context *ctx = crypto_tfm_ctx(tfm); | ||
1623 | |||
1624 | tegra_se_free_key_slot(ctx->slot); | ||
1625 | ctx->slot = NULL; | ||
1626 | } | ||
1627 | |||
1628 | static struct crypto_alg aes_algs[] = { | ||
1629 | { | ||
1630 | .cra_name = "cbc(aes)", | ||
1631 | .cra_driver_name = "cbc-aes-tegra", | ||
1632 | .cra_priority = 300, | ||
1633 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1634 | .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, | ||
1635 | .cra_ctxsize = sizeof(struct tegra_se_aes_context), | ||
1636 | .cra_alignmask = 0, | ||
1637 | .cra_type = &crypto_ablkcipher_type, | ||
1638 | .cra_module = THIS_MODULE, | ||
1639 | .cra_init = tegra_se_aes_cra_init, | ||
1640 | .cra_exit = tegra_se_aes_cra_exit, | ||
1641 | .cra_u.ablkcipher = { | ||
1642 | .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE, | ||
1643 | .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE, | ||
1644 | .ivsize = TEGRA_SE_AES_IV_SIZE, | ||
1645 | .setkey = tegra_se_aes_setkey, | ||
1646 | .encrypt = tegra_se_aes_cbc_encrypt, | ||
1647 | .decrypt = tegra_se_aes_cbc_decrypt, | ||
1648 | } | ||
1649 | }, { | ||
1650 | .cra_name = "ecb(aes)", | ||
1651 | .cra_driver_name = "ecb-aes-tegra", | ||
1652 | .cra_priority = 300, | ||
1653 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1654 | .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, | ||
1655 | .cra_ctxsize = sizeof(struct tegra_se_aes_context), | ||
1656 | .cra_alignmask = 0, | ||
1657 | .cra_type = &crypto_ablkcipher_type, | ||
1658 | .cra_module = THIS_MODULE, | ||
1659 | .cra_init = tegra_se_aes_cra_init, | ||
1660 | .cra_exit = tegra_se_aes_cra_exit, | ||
1661 | .cra_u.ablkcipher = { | ||
1662 | .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE, | ||
1663 | .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE, | ||
1664 | .ivsize = TEGRA_SE_AES_IV_SIZE, | ||
1665 | .setkey = tegra_se_aes_setkey, | ||
1666 | .encrypt = tegra_se_aes_ecb_encrypt, | ||
1667 | .decrypt = tegra_se_aes_ecb_decrypt, | ||
1668 | } | ||
1669 | }, { | ||
1670 | .cra_name = "ctr(aes)", | ||
1671 | .cra_driver_name = "ctr-aes-tegra", | ||
1672 | .cra_priority = 300, | ||
1673 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1674 | .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, | ||
1675 | .cra_ctxsize = sizeof(struct tegra_se_aes_context), | ||
1676 | .cra_alignmask = 0, | ||
1677 | .cra_type = &crypto_ablkcipher_type, | ||
1678 | .cra_module = THIS_MODULE, | ||
1679 | .cra_init = tegra_se_aes_cra_init, | ||
1680 | .cra_exit = tegra_se_aes_cra_exit, | ||
1681 | .cra_u.ablkcipher = { | ||
1682 | .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE, | ||
1683 | .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE, | ||
1684 | .ivsize = TEGRA_SE_AES_IV_SIZE, | ||
1685 | .setkey = tegra_se_aes_setkey, | ||
1686 | .encrypt = tegra_se_aes_ctr_encrypt, | ||
1687 | .decrypt = tegra_se_aes_ctr_decrypt, | ||
1688 | .geniv = "eseqiv", | ||
1689 | } | ||
1690 | }, { | ||
1691 | .cra_name = "ofb(aes)", | ||
1692 | .cra_driver_name = "ofb-aes-tegra", | ||
1693 | .cra_priority = 300, | ||
1694 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
1695 | .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, | ||
1696 | .cra_ctxsize = sizeof(struct tegra_se_aes_context), | ||
1697 | .cra_alignmask = 0, | ||
1698 | .cra_type = &crypto_ablkcipher_type, | ||
1699 | .cra_module = THIS_MODULE, | ||
1700 | .cra_init = tegra_se_aes_cra_init, | ||
1701 | .cra_exit = tegra_se_aes_cra_exit, | ||
1702 | .cra_u.ablkcipher = { | ||
1703 | .min_keysize = TEGRA_SE_AES_MIN_KEY_SIZE, | ||
1704 | .max_keysize = TEGRA_SE_AES_MAX_KEY_SIZE, | ||
1705 | .ivsize = TEGRA_SE_AES_IV_SIZE, | ||
1706 | .setkey = tegra_se_aes_setkey, | ||
1707 | .encrypt = tegra_se_aes_ofb_encrypt, | ||
1708 | .decrypt = tegra_se_aes_ofb_decrypt, | ||
1709 | .geniv = "eseqiv", | ||
1710 | } | ||
1711 | }, { | ||
1712 | .cra_name = "ansi_cprng", | ||
1713 | .cra_driver_name = "rng-aes-tegra", | ||
1714 | .cra_priority = 100, | ||
1715 | .cra_flags = CRYPTO_ALG_TYPE_RNG, | ||
1716 | .cra_ctxsize = sizeof(struct tegra_se_rng_context), | ||
1717 | .cra_type = &crypto_rng_type, | ||
1718 | .cra_module = THIS_MODULE, | ||
1719 | .cra_init = tegra_se_rng_init, | ||
1720 | .cra_exit = tegra_se_rng_exit, | ||
1721 | .cra_u = { | ||
1722 | .rng = { | ||
1723 | .rng_make_random = tegra_se_rng_get_random, | ||
1724 | .rng_reset = tegra_se_rng_reset, | ||
1725 | .seedsize = TEGRA_SE_RNG_SEED_SIZE, | ||
1726 | } | ||
1727 | } | ||
1728 | } | ||
1729 | }; | ||
1730 | |||
1731 | static struct ahash_alg hash_algs[] = { | ||
1732 | { | ||
1733 | .init = tegra_se_aes_cmac_init, | ||
1734 | .update = tegra_se_aes_cmac_update, | ||
1735 | .final = tegra_se_aes_cmac_final, | ||
1736 | .finup = tegra_se_aes_cmac_finup, | ||
1737 | .digest = tegra_se_aes_cmac_digest, | ||
1738 | .setkey = tegra_se_aes_cmac_setkey, | ||
1739 | .halg.digestsize = TEGRA_SE_AES_CMAC_DIGEST_SIZE, | ||
1740 | .halg.base = { | ||
1741 | .cra_name = "cmac(aes)", | ||
1742 | .cra_driver_name = "tegra-se-cmac(aes)", | ||
1743 | .cra_priority = 100, | ||
1744 | .cra_flags = CRYPTO_ALG_TYPE_AHASH, | ||
1745 | .cra_blocksize = TEGRA_SE_AES_BLOCK_SIZE, | ||
1746 | .cra_ctxsize = sizeof(struct tegra_se_aes_cmac_context), | ||
1747 | .cra_alignmask = 0, | ||
1748 | .cra_module = THIS_MODULE, | ||
1749 | .cra_init = tegra_se_aes_cmac_cra_init, | ||
1750 | .cra_exit = tegra_se_aes_cmac_cra_exit, | ||
1751 | } | ||
1752 | }, { | ||
1753 | .init = tegra_se_sha_init, | ||
1754 | .update = tegra_se_sha_update, | ||
1755 | .final = tegra_se_sha_final, | ||
1756 | .finup = tegra_se_sha_finup, | ||
1757 | .digest = tegra_se_sha_digest, | ||
1758 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
1759 | .halg.base = { | ||
1760 | .cra_name = "sha1", | ||
1761 | .cra_driver_name = "tegra-se-sha1", | ||
1762 | .cra_priority = 100, | ||
1763 | .cra_flags = CRYPTO_ALG_TYPE_AHASH, | ||
1764 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
1765 | .cra_ctxsize = sizeof(struct tegra_se_sha_context), | ||
1766 | .cra_alignmask = 0, | ||
1767 | .cra_module = THIS_MODULE, | ||
1768 | .cra_init = tegra_se_sha_cra_init, | ||
1769 | .cra_exit = tegra_se_sha_cra_exit, | ||
1770 | } | ||
1771 | }, { | ||
1772 | .init = tegra_se_sha_init, | ||
1773 | .update = tegra_se_sha_update, | ||
1774 | .final = tegra_se_sha_final, | ||
1775 | .finup = tegra_se_sha_finup, | ||
1776 | .digest = tegra_se_sha_digest, | ||
1777 | .halg.digestsize = SHA224_DIGEST_SIZE, | ||
1778 | .halg.base = { | ||
1779 | .cra_name = "sha224", | ||
1780 | .cra_driver_name = "tegra-se-sha224", | ||
1781 | .cra_priority = 100, | ||
1782 | .cra_flags = CRYPTO_ALG_TYPE_AHASH, | ||
1783 | .cra_blocksize = SHA224_BLOCK_SIZE, | ||
1784 | .cra_ctxsize = sizeof(struct tegra_se_sha_context), | ||
1785 | .cra_alignmask = 0, | ||
1786 | .cra_module = THIS_MODULE, | ||
1787 | .cra_init = tegra_se_sha_cra_init, | ||
1788 | .cra_exit = tegra_se_sha_cra_exit, | ||
1789 | } | ||
1790 | }, { | ||
1791 | .init = tegra_se_sha_init, | ||
1792 | .update = tegra_se_sha_update, | ||
1793 | .final = tegra_se_sha_final, | ||
1794 | .finup = tegra_se_sha_finup, | ||
1795 | .digest = tegra_se_sha_digest, | ||
1796 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
1797 | .halg.base = { | ||
1798 | .cra_name = "sha256", | ||
1799 | .cra_driver_name = "tegra-se-sha256", | ||
1800 | .cra_priority = 100, | ||
1801 | .cra_flags = CRYPTO_ALG_TYPE_AHASH, | ||
1802 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
1803 | .cra_ctxsize = sizeof(struct tegra_se_sha_context), | ||
1804 | .cra_alignmask = 0, | ||
1805 | .cra_module = THIS_MODULE, | ||
1806 | .cra_init = tegra_se_sha_cra_init, | ||
1807 | .cra_exit = tegra_se_sha_cra_exit, | ||
1808 | } | ||
1809 | }, { | ||
1810 | .init = tegra_se_sha_init, | ||
1811 | .update = tegra_se_sha_update, | ||
1812 | .final = tegra_se_sha_final, | ||
1813 | .finup = tegra_se_sha_finup, | ||
1814 | .digest = tegra_se_sha_digest, | ||
1815 | .halg.digestsize = SHA384_DIGEST_SIZE, | ||
1816 | .halg.base = { | ||
1817 | .cra_name = "sha384", | ||
1818 | .cra_driver_name = "tegra-se-sha384", | ||
1819 | .cra_priority = 100, | ||
1820 | .cra_flags = CRYPTO_ALG_TYPE_AHASH, | ||
1821 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
1822 | .cra_ctxsize = sizeof(struct tegra_se_sha_context), | ||
1823 | .cra_alignmask = 0, | ||
1824 | .cra_module = THIS_MODULE, | ||
1825 | .cra_init = tegra_se_sha_cra_init, | ||
1826 | .cra_exit = tegra_se_sha_cra_exit, | ||
1827 | } | ||
1828 | }, { | ||
1829 | .init = tegra_se_sha_init, | ||
1830 | .update = tegra_se_sha_update, | ||
1831 | .final = tegra_se_sha_final, | ||
1832 | .finup = tegra_se_sha_finup, | ||
1833 | .digest = tegra_se_sha_digest, | ||
1834 | .halg.digestsize = SHA512_DIGEST_SIZE, | ||
1835 | .halg.base = { | ||
1836 | .cra_name = "sha512", | ||
1837 | .cra_driver_name = "tegra-se-sha512", | ||
1838 | .cra_priority = 100, | ||
1839 | .cra_flags = CRYPTO_ALG_TYPE_AHASH, | ||
1840 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
1841 | .cra_ctxsize = sizeof(struct tegra_se_sha_context), | ||
1842 | .cra_alignmask = 0, | ||
1843 | .cra_module = THIS_MODULE, | ||
1844 | .cra_init = tegra_se_sha_cra_init, | ||
1845 | .cra_exit = tegra_se_sha_cra_exit, | ||
1846 | } | ||
1847 | } | ||
1848 | }; | ||
1849 | |||
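     | /* | ||
     |  * Probe flow (summarized from the code below): map the SE and PMC | ||
     |  * register apertures, hook the SE interrupt, bring up the "se" | ||
     |  * clock, set up key slots and the DMA linked-list buffers, then | ||
     |  * register the AES and SHA algorithms declared above. | ||
     |  */ | ||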
1850 | static int tegra_se_probe(struct platform_device *pdev) | ||
1851 | { | ||
1852 | struct tegra_se_dev *se_dev = NULL; | ||
1853 | struct resource *res = NULL; | ||
1854 | int err = 0, i = 0, j = 0, k = 0; | ||
1855 | |||
1856 | se_dev = kzalloc(sizeof(struct tegra_se_dev), GFP_KERNEL); | ||
1857 | if (!se_dev) { | ||
1858 | dev_err(&pdev->dev, "memory allocation failed\n"); | ||
1859 | return -ENOMEM; | ||
1860 | } | ||
1861 | |||
1862 | spin_lock_init(&se_dev->lock); | ||
1863 | crypto_init_queue(&se_dev->queue, TEGRA_SE_CRYPTO_QUEUE_LENGTH); | ||
1864 | platform_set_drvdata(pdev, se_dev); | ||
1865 | se_dev->dev = &pdev->dev; | ||
1866 | |||
1867 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1868 | if (!res) { | ||
1869 | err = -ENXIO; | ||
1870 | dev_err(se_dev->dev, "platform_get_resource failed\n"); | ||
1871 | goto fail; | ||
1872 | } | ||
1873 | |||
1874 | se_dev->io_reg = ioremap(res->start, resource_size(res)); | ||
1875 | if (!se_dev->io_reg) { | ||
1876 | err = -ENOMEM; | ||
1877 | dev_err(se_dev->dev, "ioremap failed\n"); | ||
1878 | goto fail; | ||
1879 | } | ||
1880 | |||
1881 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1882 | if (!res) { | ||
1883 | err = -ENXIO; | ||
1884 | dev_err(se_dev->dev, "platform_get_resource failed\n"); | ||
1885 | goto err_pmc; | ||
1886 | } | ||
1887 | |||
1888 | se_dev->pmc_io_reg = ioremap(res->start, resource_size(res)); | ||
1889 | if (!se_dev->pmc_io_reg) { | ||
1890 | err = -ENOMEM; | ||
1891 | dev_err(se_dev->dev, "pmc ioremap failed\n"); | ||
1892 | goto err_pmc; | ||
1893 | } | ||
1894 | |||
1895 | se_dev->irq = platform_get_irq(pdev, 0); | ||
1896 | 	if (se_dev->irq < 0) { | ||
1897 | 		err = se_dev->irq; | ||
1898 | dev_err(se_dev->dev, "platform_get_irq failed\n"); | ||
1899 | goto err_irq; | ||
1900 | } | ||
1901 | |||
1902 | err = request_irq(se_dev->irq, tegra_se_irq, IRQF_DISABLED, | ||
1903 | DRIVER_NAME, se_dev); | ||
1904 | if (err) { | ||
1905 | dev_err(se_dev->dev, "request_irq failed - irq[%d] err[%d]\n", | ||
1906 | se_dev->irq, err); | ||
1907 | goto err_irq; | ||
1908 | } | ||
1909 | |||
1910 | /* Initialize the clock */ | ||
1911 | se_dev->pclk = clk_get(se_dev->dev, "se"); | ||
1912 | if (IS_ERR(se_dev->pclk)) { | ||
1913 | 		dev_err(se_dev->dev, "clock initialization failed (%ld)\n", | ||
1914 | 			PTR_ERR(se_dev->pclk)); | ||
1915 | err = -ENODEV; | ||
1916 | goto clean; | ||
1917 | } | ||
1918 | |||
1919 | err = clk_set_rate(se_dev->pclk, ULONG_MAX); | ||
1920 | if (err) { | ||
1921 | dev_err(se_dev->dev, "clock set_rate failed.\n"); | ||
1922 | goto clean; | ||
1923 | } | ||
1924 | |||
1925 | err = tegra_init_key_slot(se_dev); | ||
1926 | if (err) { | ||
1927 | dev_err(se_dev->dev, "init_key_slot failed\n"); | ||
1928 | goto clean; | ||
1929 | } | ||
1930 | |||
1931 | init_completion(&se_dev->complete); | ||
1932 | se_work_q = alloc_workqueue("se_work_q", WQ_HIGHPRI | WQ_UNBOUND, 16); | ||
1933 | if (!se_work_q) { | ||
1934 | 		dev_err(se_dev->dev, "alloc_workqueue failed\n"); | ||
     | 		err = -ENOMEM; | ||
1935 | 		goto clean; | ||
1936 | } | ||
1937 | |||
1938 | sg_tegra_se_dev = se_dev; | ||
1939 | pm_runtime_enable(se_dev->dev); | ||
1940 | tegra_se_key_read_disable_all(); | ||
1941 | |||
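     | 	/* | ||
     | 	 * DMA linked-list buffers: the first 32-bit word appears to hold | ||
     | 	 * the index of the last entry (0 == a single buffer), followed by | ||
     | 	 * tegra_se_ll {addr, data_len} entries, hence the "ll_buf + 1" | ||
     | 	 * arithmetic used throughout this file. | ||
     | 	 */ | ||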
1942 | err = tegra_se_alloc_ll_buf(se_dev, SE_MAX_SRC_SG_COUNT, | ||
1943 | SE_MAX_DST_SG_COUNT); | ||
1944 | if (err) { | ||
1945 | dev_err(se_dev->dev, "can not allocate ll dma buffer\n"); | ||
1946 | goto clean; | ||
1947 | } | ||
1948 | |||
1949 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { | ||
1950 | INIT_LIST_HEAD(&aes_algs[i].cra_list); | ||
1951 | err = crypto_register_alg(&aes_algs[i]); | ||
1952 | if (err) { | ||
1953 | dev_err(se_dev->dev, | ||
1954 | "crypto_register_alg failed index[%d]\n", i); | ||
1955 | goto clean; | ||
1956 | } | ||
1957 | } | ||
1958 | |||
1959 | for (j = 0; j < ARRAY_SIZE(hash_algs); j++) { | ||
1960 | err = crypto_register_ahash(&hash_algs[j]); | ||
1961 | if (err) { | ||
1962 | dev_err(se_dev->dev, | ||
1963 | 				"crypto_register_ahash failed index[%d]\n", j); | ||
1964 | goto clean; | ||
1965 | } | ||
1966 | } | ||
1967 | |||
1968 | #if defined(CONFIG_PM) | ||
1969 | se_dev->ctx_save_buf = dma_alloc_coherent(se_dev->dev, | ||
1970 | SE_CONTEXT_BUFER_SIZE, &se_dev->ctx_save_buf_adr, GFP_KERNEL); | ||
1971 | if (!se_dev->ctx_save_buf) { | ||
1972 | 		dev_err(se_dev->dev, "Context save buffer alloc failed\n"); | ||
     | 		err = -ENOMEM; | ||
1973 | 		goto clean; | ||
1974 | } | ||
1975 | #endif | ||
1976 | |||
1977 | 	dev_info(se_dev->dev, "%s: complete\n", __func__); | ||
1978 | return 0; | ||
1979 | |||
1980 | clean: | ||
1981 | pm_runtime_disable(se_dev->dev); | ||
1982 | for (k = 0; k < i; k++) | ||
1983 | crypto_unregister_alg(&aes_algs[k]); | ||
1984 | |||
1985 | for (k = 0; k < j; k++) | ||
1986 | 		crypto_unregister_ahash(&hash_algs[k]); | ||
1987 | |||
1988 | tegra_se_free_ll_buf(se_dev); | ||
1989 | |||
1990 | if (se_work_q) | ||
1991 | destroy_workqueue(se_work_q); | ||
1992 | |||
1993 | 	if (!IS_ERR_OR_NULL(se_dev->pclk)) | ||
1994 | clk_put(se_dev->pclk); | ||
1995 | |||
1996 | 	free_irq(se_dev->irq, se_dev); | ||
1997 | |||
1998 | err_irq: | ||
1999 | iounmap(se_dev->pmc_io_reg); | ||
2000 | err_pmc: | ||
2001 | iounmap(se_dev->io_reg); | ||
2002 | |||
2003 | fail: | ||
2004 | platform_set_drvdata(pdev, NULL); | ||
2005 | kfree(se_dev); | ||
2006 | sg_tegra_se_dev = NULL; | ||
2007 | |||
2008 | return err; | ||
2009 | } | ||
2010 | |||
2011 | static int __devexit tegra_se_remove(struct platform_device *pdev) | ||
2012 | { | ||
2013 | struct tegra_se_dev *se_dev = platform_get_drvdata(pdev); | ||
2014 | int i; | ||
2015 | |||
2016 | if (!se_dev) | ||
2017 | return -ENODEV; | ||
2018 | |||
2019 | pm_runtime_disable(se_dev->dev); | ||
2020 | |||
2021 | cancel_work_sync(&se_work); | ||
2022 | if (se_work_q) | ||
2023 | destroy_workqueue(se_work_q); | ||
2024 | 	free_irq(se_dev->irq, se_dev); | ||
2025 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) | ||
2026 | crypto_unregister_alg(&aes_algs[i]); | ||
2027 | for (i = 0; i < ARRAY_SIZE(hash_algs); i++) | ||
2028 | crypto_unregister_ahash(&hash_algs[i]); | ||
2029 | if (se_dev->pclk) | ||
2030 | clk_put(se_dev->pclk); | ||
2031 | tegra_se_free_ll_buf(se_dev); | ||
2032 | if (se_dev->ctx_save_buf) { | ||
2033 | dma_free_coherent(se_dev->dev, SE_CONTEXT_BUFER_SIZE, | ||
2034 | se_dev->ctx_save_buf, se_dev->ctx_save_buf_adr); | ||
2035 | se_dev->ctx_save_buf = NULL; | ||
2036 | } | ||
2037 | iounmap(se_dev->io_reg); | ||
2038 | iounmap(se_dev->pmc_io_reg); | ||
2039 | kfree(se_dev); | ||
2040 | sg_tegra_se_dev = NULL; | ||
2041 | |||
2042 | return 0; | ||
2043 | } | ||
2044 | |||
2045 | #if defined(CONFIG_PM) | ||
2046 | static int tegra_se_resume(struct device *dev) | ||
2047 | { | ||
2048 | return 0; | ||
2049 | } | ||
2050 | |||
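     | /* | ||
     |  * Load a random 128-bit key into the SRK key slot: the AES core runs | ||
     |  * in encrypt mode with the hardware LFSR as input, keyed from the SSK | ||
     |  * slot, and the result is steered into the key table (DST_KEYTAB) at | ||
     |  * srk_slot instead of to memory. | ||
     |  */ | ||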
2051 | static int tegra_se_generate_rng_key(struct tegra_se_dev *se_dev) | ||
2052 | { | ||
2053 | int ret = 0; | ||
2054 | u32 val = 0; | ||
2055 | |||
2056 | *se_dev->src_ll_buf = 0; | ||
2057 | *se_dev->dst_ll_buf = 0; | ||
2058 | |||
2059 | /* Configure algorithm */ | ||
2060 | val = SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_ENC_MODE(MODE_KEY128) | | ||
2061 | SE_CONFIG_DST(DST_KEYTAB); | ||
2062 | se_writel(se_dev, val, SE_CONFIG_REG_OFFSET); | ||
2063 | |||
2064 | /* Configure destination key index number */ | ||
2065 | val = SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(srk_slot.slot_num) | | ||
2066 | SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(KEYS_0_3); | ||
2067 | se_writel(se_dev, val, SE_CRYPTO_KEYTABLE_DST_REG_OFFSET); | ||
2068 | |||
2069 | /* Configure crypto */ | ||
2070 | val = SE_CRYPTO_INPUT_SEL(INPUT_LFSR) | SE_CRYPTO_XOR_POS(XOR_BYPASS) | | ||
2071 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | | ||
2072 | SE_CRYPTO_HASH(HASH_DISABLE) | | ||
2073 | SE_CRYPTO_KEY_INDEX(ssk_slot.slot_num) | | ||
2074 | SE_CRYPTO_IV_SEL(IV_ORIGINAL); | ||
2075 | se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET); | ||
2076 | |||
2077 | ret = tegra_se_start_operation(se_dev, TEGRA_SE_KEY_128_SIZE, false); | ||
2078 | |||
2079 | return ret; | ||
2080 | } | ||
2081 | |||
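     | /* | ||
     |  * Generate the SRK that protects the LP0 context: first load a random | ||
     |  * key into the SRK key slot, then run the RNG once more with DST_SRK | ||
     |  * so the engine captures the result in its internal SRK register; | ||
     |  * tegra_se_save_SRK() later saves it to secure scratch. | ||
     |  */ | ||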
2082 | static int tegra_se_generate_srk(struct tegra_se_dev *se_dev) | ||
2083 | { | ||
2084 | int ret = 0; | ||
2085 | u32 val = 0; | ||
2086 | |||
2087 | mutex_lock(&se_hw_lock); | ||
2088 | pm_runtime_get_sync(se_dev->dev); | ||
2089 | |||
2090 | ret = tegra_se_generate_rng_key(se_dev); | ||
2091 | if (ret) { | ||
2092 | pm_runtime_put(se_dev->dev); | ||
2093 | mutex_unlock(&se_hw_lock); | ||
2094 | return ret; | ||
2095 | } | ||
2096 | |||
2097 | *se_dev->src_ll_buf = 0; | ||
2098 | *se_dev->dst_ll_buf = 0; | ||
2099 | |||
2100 | val = SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_ENC_MODE(MODE_KEY128) | | ||
2101 | SE_CONFIG_DEC_ALG(ALG_NOP) | SE_CONFIG_DST(DST_SRK); | ||
2102 | |||
2103 | se_writel(se_dev, val, SE_CONFIG_REG_OFFSET); | ||
2104 | |||
2105 | val = SE_CRYPTO_XOR_POS(XOR_BYPASS) | | ||
2106 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | | ||
2107 | SE_CRYPTO_HASH(HASH_DISABLE) | | ||
2108 | SE_CRYPTO_KEY_INDEX(srk_slot.slot_num) | | ||
2109 | SE_CRYPTO_IV_SEL(IV_UPDATED); | ||
2110 | |||
2111 | se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET); | ||
2112 | ret = tegra_se_start_operation(se_dev, TEGRA_SE_KEY_128_SIZE, false); | ||
2113 | |||
2114 | pm_runtime_put(se_dev->dev); | ||
2115 | mutex_unlock(&se_hw_lock); | ||
2116 | |||
2117 | return ret; | ||
2118 | } | ||
2119 | |||
2120 | static int tegra_se_lp_generate_random_data(struct tegra_se_dev *se_dev) | ||
2121 | { | ||
2122 | struct tegra_se_ll *src_ll, *dst_ll; | ||
2123 | int ret = 0; | ||
2124 | u32 val; | ||
2125 | |||
2126 | mutex_lock(&se_hw_lock); | ||
2127 | pm_runtime_get_sync(se_dev->dev); | ||
2128 | |||
2129 | *se_dev->src_ll_buf = 0; | ||
2130 | *se_dev->dst_ll_buf = 0; | ||
2131 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
2132 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
2133 | src_ll->addr = se_dev->ctx_save_buf_adr; | ||
2134 | src_ll->data_len = SE_CONTEXT_SAVE_RANDOM_DATA_SIZE; | ||
2135 | dst_ll->addr = se_dev->ctx_save_buf_adr; | ||
2136 | dst_ll->data_len = SE_CONTEXT_SAVE_RANDOM_DATA_SIZE; | ||
2137 | |||
2138 | tegra_se_config_algo(se_dev, SE_AES_OP_MODE_RNG_X931, true, | ||
2139 | TEGRA_SE_KEY_128_SIZE); | ||
2140 | |||
2141 | /* Configure crypto */ | ||
2142 | val = SE_CRYPTO_INPUT_SEL(INPUT_LFSR) | SE_CRYPTO_XOR_POS(XOR_BYPASS) | | ||
2143 | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | | ||
2144 | SE_CRYPTO_HASH(HASH_DISABLE) | | ||
2145 | SE_CRYPTO_KEY_INDEX(srk_slot.slot_num) | | ||
2146 | SE_CRYPTO_IV_SEL(IV_ORIGINAL); | ||
2147 | |||
2148 | se_writel(se_dev, val, SE_CRYPTO_REG_OFFSET); | ||
2149 | ret = tegra_se_start_operation(se_dev, | ||
2150 | SE_CONTEXT_SAVE_RANDOM_DATA_SIZE, false); | ||
2151 | |||
2152 | pm_runtime_put(se_dev->dev); | ||
2153 | mutex_unlock(&se_hw_lock); | ||
2154 | |||
2155 | return ret; | ||
2157 | } | ||
2158 | |||
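     | /* | ||
     |  * Encrypt a region of the context save buffer in place (the source | ||
     |  * and destination lists point at the same physical range) using the | ||
     |  * context-save operation with MEM as the source. | ||
     |  */ | ||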
2159 | static int tegra_se_lp_encrypt_context_data(struct tegra_se_dev *se_dev, | ||
2160 | u32 context_offset, u32 data_size) | ||
2161 | { | ||
2162 | struct tegra_se_ll *src_ll, *dst_ll; | ||
2163 | int ret = 0; | ||
2164 | |||
2165 | mutex_lock(&se_hw_lock); | ||
2166 | pm_runtime_get_sync(se_dev->dev); | ||
2167 | |||
2168 | *se_dev->src_ll_buf = 0; | ||
2169 | *se_dev->dst_ll_buf = 0; | ||
2170 | src_ll = (struct tegra_se_ll *)(se_dev->src_ll_buf + 1); | ||
2171 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
2172 | src_ll->addr = se_dev->ctx_save_buf_adr + context_offset; | ||
2173 | src_ll->data_len = data_size; | ||
2174 | dst_ll->addr = se_dev->ctx_save_buf_adr + context_offset; | ||
2175 | dst_ll->data_len = data_size; | ||
2176 | |||
2177 | se_writel(se_dev, SE_CONTEXT_SAVE_SRC(MEM), | ||
2178 | SE_CONTEXT_SAVE_CONFIG_REG_OFFSET); | ||
2179 | |||
2180 | ret = tegra_se_start_operation(se_dev, data_size, true); | ||
2181 | |||
2182 | pm_runtime_put(se_dev->dev); | ||
2183 | |||
2184 | mutex_unlock(&se_hw_lock); | ||
2185 | |||
2186 | return ret; | ||
2187 | } | ||
2188 | |||
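     | /* | ||
     |  * Save the 16-byte sticky-bits block (presumably the key-slot | ||
     |  * security/permission state) into the context buffer. | ||
     |  */ | ||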
2189 | static int tegra_se_lp_sticky_bits_context_save(struct tegra_se_dev *se_dev) | ||
2190 | { | ||
2191 | struct tegra_se_ll *dst_ll; | ||
2192 | int ret = 0; | ||
2193 | |||
2194 | mutex_lock(&se_hw_lock); | ||
2195 | pm_runtime_get_sync(se_dev->dev); | ||
2196 | |||
2197 | *se_dev->src_ll_buf = 0; | ||
2198 | *se_dev->dst_ll_buf = 0; | ||
2199 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
2200 | dst_ll->addr = (se_dev->ctx_save_buf_adr + | ||
2201 | SE_CONTEXT_SAVE_STICKY_BITS_OFFSET); | ||
2202 | dst_ll->data_len = SE_CONTEXT_SAVE_STICKY_BITS_SIZE; | ||
2203 | |||
2204 | se_writel(se_dev, SE_CONTEXT_SAVE_SRC(STICKY_BITS), | ||
2205 | SE_CONTEXT_SAVE_CONFIG_REG_OFFSET); | ||
2206 | |||
2207 | ret = tegra_se_start_operation(se_dev, | ||
2208 | SE_CONTEXT_SAVE_STICKY_BITS_SIZE, true); | ||
2209 | |||
2210 | pm_runtime_put(se_dev->dev); | ||
2211 | mutex_unlock(&se_hw_lock); | ||
2212 | |||
2213 | return ret; | ||
2214 | } | ||
2215 | |||
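     | /* | ||
     |  * Save all 16 key slots: each 256-bit slot is written as two 128-bit | ||
     |  * word-quads (KEYS_0_3, then KEYS_4_7), i.e. 16 * 2 * 16 = 512 bytes, | ||
     |  * matching SE_CONTEXT_SAVE_KEY_LENGTH. | ||
     |  */ | ||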
2216 | static int tegra_se_lp_keytable_context_save(struct tegra_se_dev *se_dev) | ||
2217 | { | ||
2218 | struct tegra_se_ll *dst_ll; | ||
2219 | int ret = 0, i, j; | ||
2220 | u32 val = 0; | ||
2221 | |||
2222 | /* take access to the hw */ | ||
2223 | mutex_lock(&se_hw_lock); | ||
2224 | pm_runtime_get_sync(se_dev->dev); | ||
2225 | |||
2226 | *se_dev->dst_ll_buf = 0; | ||
2227 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
2228 | dst_ll->addr = (se_dev->ctx_save_buf_adr + SE_CONTEXT_SAVE_KEYS_OFFSET); | ||
2229 | dst_ll->data_len = TEGRA_SE_KEY_128_SIZE; | ||
2230 | |||
2231 | for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) { | ||
2232 | for (j = 0; j < 2; j++) { | ||
2233 | val = SE_CONTEXT_SAVE_SRC(KEYTABLE) | | ||
2234 | SE_CONTEXT_SAVE_KEY_INDEX(i) | | ||
2235 | SE_CONTEXT_SAVE_WORD_QUAD(j); | ||
2236 | se_writel(se_dev, | ||
2237 | val, SE_CONTEXT_SAVE_CONFIG_REG_OFFSET); | ||
2238 | ret = tegra_se_start_operation(se_dev, | ||
2239 | TEGRA_SE_KEY_128_SIZE, true); | ||
2240 | if (ret) | ||
2241 | break; | ||
2242 | dst_ll->addr += TEGRA_SE_KEY_128_SIZE; | ||
2243 | } | ||
2244 | } | ||
2245 | |||
2246 | pm_runtime_put(se_dev->dev); | ||
2247 | mutex_unlock(&se_hw_lock); | ||
2248 | |||
2249 | return ret; | ||
2250 | } | ||
2251 | |||
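     | /* | ||
     |  * Save the original or updated IV of every key slot: 16 slots of | ||
     |  * TEGRA_SE_AES_IV_SIZE each, i.e. 256 bytes, matching the | ||
     |  * ORIGINAL/UPDATED IV lengths in tegra-se.h. | ||
     |  */ | ||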
2252 | static int tegra_se_lp_iv_context_save(struct tegra_se_dev *se_dev, | ||
2253 | bool org_iv, u32 context_offset) | ||
2254 | { | ||
2255 | struct tegra_se_ll *dst_ll; | ||
2256 | int ret = 0, i; | ||
2257 | u32 val = 0; | ||
2258 | |||
2259 | mutex_lock(&se_hw_lock); | ||
2260 | pm_runtime_get_sync(se_dev->dev); | ||
2261 | |||
2262 | *se_dev->dst_ll_buf = 0; | ||
2263 | dst_ll = (struct tegra_se_ll *)(se_dev->dst_ll_buf + 1); | ||
2264 | dst_ll->addr = (se_dev->ctx_save_buf_adr + context_offset); | ||
2265 | dst_ll->data_len = TEGRA_SE_AES_IV_SIZE; | ||
2266 | |||
2267 | for (i = 0; i < TEGRA_SE_KEYSLOT_COUNT; i++) { | ||
2268 | val = SE_CONTEXT_SAVE_SRC(KEYTABLE) | | ||
2269 | SE_CONTEXT_SAVE_KEY_INDEX(i) | | ||
2270 | (org_iv ? SE_CONTEXT_SAVE_WORD_QUAD(ORIG_IV) : | ||
2271 | SE_CONTEXT_SAVE_WORD_QUAD(UPD_IV)); | ||
2272 | se_writel(se_dev, val, SE_CONTEXT_SAVE_CONFIG_REG_OFFSET); | ||
2273 | ret = tegra_se_start_operation(se_dev, | ||
2274 | TEGRA_SE_AES_IV_SIZE, true); | ||
2275 | if (ret) | ||
2276 | break; | ||
2277 | dst_ll->addr += TEGRA_SE_AES_IV_SIZE; | ||
2278 | } | ||
2279 | |||
2280 | pm_runtime_put(se_dev->dev); | ||
2281 | mutex_unlock(&se_hw_lock); | ||
2282 | |||
2283 | return ret; | ||
2284 | } | ||
2285 | |||
2286 | static int tegra_se_save_SRK(struct tegra_se_dev *se_dev) | ||
2287 | { | ||
2288 | int ret = 0; | ||
2289 | |||
2290 | mutex_lock(&se_hw_lock); | ||
2291 | pm_runtime_get_sync(se_dev->dev); | ||
2292 | |||
2293 | se_writel(se_dev, SE_CONTEXT_SAVE_SRC(SRK), | ||
2294 | SE_CONTEXT_SAVE_CONFIG_REG_OFFSET); | ||
2295 | ret = tegra_se_start_operation(se_dev, 0, true); | ||
2296 | |||
2297 | pm_runtime_put(se_dev->dev); | ||
2298 | mutex_unlock(&se_hw_lock); | ||
2299 | |||
2300 | return ret; | ||
2301 | } | ||
2302 | |||
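     | /* | ||
     |  * LP0 entry: fill and encrypt the 1072-byte context save buffer | ||
     |  * (random block, sticky bits, keys, IVs, known pattern; see the | ||
     |  * offsets in tegra-se.h), publish its address via PMC_SCRATCH43 and | ||
     |  * save the SRK. The known pattern presumably lets the resume path | ||
     |  * verify that the context was restored intact. | ||
     |  */ | ||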
2303 | static int tegra_se_suspend(struct device *dev) | ||
2304 | { | ||
2305 | struct platform_device *pdev = to_platform_device(dev); | ||
2306 | struct tegra_se_dev *se_dev = platform_get_drvdata(pdev); | ||
2307 | int err = 0, i; | ||
2308 | unsigned char *dt_buf = NULL; | ||
2309 | u8 pdata[SE_CONTEXT_KNOWN_PATTERN_SIZE] = { | ||
2310 | 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, | ||
2311 | 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}; | ||
2312 | |||
2313 | if (!se_dev) | ||
2314 | return -ENODEV; | ||
2315 | |||
2316 | /* Generate SRK */ | ||
2317 | err = tegra_se_generate_srk(se_dev); | ||
2318 | if (err) { | ||
2319 | 		dev_err(se_dev->dev, "LP SRK generation failed\n"); | ||
2320 | goto out; | ||
2321 | } | ||
2322 | |||
2323 | 	/* Generate random data */ | ||
2324 | err = tegra_se_lp_generate_random_data(se_dev); | ||
2325 | if (err) { | ||
2326 | 		dev_err(se_dev->dev, "LP random pattern generation failed\n"); | ||
2327 | goto out; | ||
2328 | } | ||
2329 | |||
2330 | /* Encrypt random data */ | ||
2331 | err = tegra_se_lp_encrypt_context_data(se_dev, | ||
2332 | SE_CONTEXT_SAVE_RANDOM_DATA_OFFSET, | ||
2333 | SE_CONTEXT_SAVE_RANDOM_DATA_SIZE); | ||
2334 | if (err) { | ||
2335 | 		dev_err(se_dev->dev, "LP random pattern encryption failed\n"); | ||
2336 | goto out; | ||
2337 | } | ||
2338 | |||
2339 | 	/* Sticky bits context save */ | ||
2340 | err = tegra_se_lp_sticky_bits_context_save(se_dev); | ||
2341 | if (err) { | ||
2342 | 		dev_err(se_dev->dev, "LP sticky bits context save failure\n"); | ||
2343 | goto out; | ||
2344 | } | ||
2345 | |||
2346 | 	/* Key table context save */ | ||
2347 | err = tegra_se_lp_keytable_context_save(se_dev); | ||
2348 | if (err) { | ||
2349 | 		dev_err(se_dev->dev, "LP key table save failure\n"); | ||
2350 | goto out; | ||
2351 | } | ||
2352 | |||
2353 | 	/* Original iv context save */ | ||
2354 | err = tegra_se_lp_iv_context_save(se_dev, | ||
2355 | true, SE_CONTEXT_ORIGINAL_IV_OFFSET); | ||
2356 | if (err) { | ||
2357 | 		dev_err(se_dev->dev, "LP original iv save failure\n"); | ||
2358 | goto out; | ||
2359 | } | ||
2360 | |||
2361 | 	/* Updated iv context save */ | ||
2362 | err = tegra_se_lp_iv_context_save(se_dev, | ||
2363 | false, SE_CONTEXT_UPDATED_IV_OFFSET); | ||
2364 | if (err) { | ||
2365 | 		dev_err(se_dev->dev, "LP updated iv save failure\n"); | ||
2366 | goto out; | ||
2367 | } | ||
2368 | |||
2369 | /* Encrypt known pattern */ | ||
2370 | dt_buf = (unsigned char *)se_dev->ctx_save_buf; | ||
2371 | dt_buf += SE_CONTEXT_KNOWN_PATTERN_OFFSET; | ||
2372 | for (i = 0; i < SE_CONTEXT_KNOWN_PATTERN_SIZE; i++) | ||
2373 | dt_buf[i] = pdata[i]; | ||
2374 | err = tegra_se_lp_encrypt_context_data(se_dev, | ||
2375 | SE_CONTEXT_KNOWN_PATTERN_OFFSET, SE_CONTEXT_KNOWN_PATTERN_SIZE); | ||
2376 | if (err) { | ||
2377 | dev_err(se_dev->dev, "LP known pattern save failure\n"); | ||
2378 | goto out; | ||
2379 | } | ||
2380 | |||
2381 | /* Write lp context buffer address into PMC scratch register */ | ||
2382 | writel(se_dev->ctx_save_buf_adr, | ||
2383 | se_dev->pmc_io_reg + PMC_SCRATCH43_REG_OFFSET); | ||
2384 | |||
2385 | 	/* Save SRK in secure scratch */ | ||
2386 | err = tegra_se_save_SRK(se_dev); | ||
2387 | if (err) { | ||
2388 | dev_err(se_dev->dev, "LP SRK save failure\n"); | ||
2389 | goto out; | ||
2390 | } | ||
2391 | |||
2392 | out: | ||
2393 | return err; | ||
2394 | } | ||
2395 | #endif | ||
2396 | |||
2397 | #if defined(CONFIG_PM_RUNTIME) | ||
2398 | static int tegra_se_runtime_suspend(struct device *dev) | ||
2399 | { | ||
2400 | 	/* | ||
2401 | 	 * Do a dummy read to flush any posted writes still on the bus | ||
2402 | 	 * before the clock is disabled. | ||
2403 | 	 */ | ||
2404 | se_readl(sg_tegra_se_dev, SE_CONFIG_REG_OFFSET); | ||
2405 | |||
2406 | clk_disable(sg_tegra_se_dev->pclk); | ||
2407 | return 0; | ||
2408 | } | ||
2409 | |||
2410 | static int tegra_se_runtime_resume(struct device *dev) | ||
2411 | { | ||
2412 | clk_enable(sg_tegra_se_dev->pclk); | ||
2413 | return 0; | ||
2414 | } | ||
2415 | |||
2416 | static const struct dev_pm_ops tegra_se_dev_pm_ops = { | ||
2417 | .runtime_suspend = tegra_se_runtime_suspend, | ||
2418 | .runtime_resume = tegra_se_runtime_resume, | ||
2419 | #if defined(CONFIG_PM) | ||
2420 | .suspend = tegra_se_suspend, | ||
2421 | .resume = tegra_se_resume, | ||
2422 | #endif | ||
2423 | }; | ||
2424 | #endif | ||
2425 | |||
2426 | static struct platform_driver tegra_se_driver = { | ||
2427 | .probe = tegra_se_probe, | ||
2428 | .remove = __devexit_p(tegra_se_remove), | ||
2429 | .driver = { | ||
2430 | .name = "tegra-se", | ||
2431 | .owner = THIS_MODULE, | ||
2432 | #if defined(CONFIG_PM_RUNTIME) | ||
2433 | .pm = &tegra_se_dev_pm_ops, | ||
2434 | #endif | ||
2435 | }, | ||
2436 | }; | ||
2437 | |||
2438 | static int __init tegra_se_module_init(void) | ||
2439 | { | ||
2440 | return platform_driver_register(&tegra_se_driver); | ||
2441 | } | ||
2442 | |||
2443 | static void __exit tegra_se_module_exit(void) | ||
2444 | { | ||
2445 | platform_driver_unregister(&tegra_se_driver); | ||
2446 | } | ||
2447 | |||
2448 | module_init(tegra_se_module_init); | ||
2449 | module_exit(tegra_se_module_exit); | ||
2450 | |||
2451 | MODULE_DESCRIPTION("Tegra Crypto algorithm support"); | ||
2452 | MODULE_AUTHOR("NVIDIA Corporation"); | ||
2453 | MODULE_LICENSE("GPL"); | ||
2454 | MODULE_ALIAS("platform:tegra-se"); | ||
2455 | |||
diff --git a/drivers/crypto/tegra-se.h b/drivers/crypto/tegra-se.h new file mode 100644 index 00000000000..8c54df8991e --- /dev/null +++ b/drivers/crypto/tegra-se.h | |||
@@ -0,0 +1,235 @@ | |||
1 | /* | ||
2 | * Driver for Tegra Security Engine | ||
3 | * | ||
4 | * Copyright (c) 2011, NVIDIA Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along | ||
17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
19 | */ | ||
20 | |||
21 | #ifndef _CRYPTO_TEGRA_SE_H | ||
22 | #define _CRYPTO_TEGRA_SE_H | ||
23 | |||
24 | #include <crypto/hash.h> | ||
25 | #include <crypto/sha.h> | ||
26 | |||
27 | #define PFX "tegra-se: " | ||
28 | |||
29 | #define TEGRA_SE_CRA_PRIORITY 300 | ||
30 | #define TEGRA_SE_COMPOSITE_PRIORITY 400 | ||
31 | #define TEGRA_SE_CRYPTO_QUEUE_LENGTH 50 | ||
32 | #define SE_MAX_SRC_SG_COUNT 50 | ||
33 | #define SE_MAX_DST_SG_COUNT 50 | ||
34 | |||
35 | #define TEGRA_SE_KEYSLOT_COUNT 16 | ||
36 | |||
37 | /* SE register definitions */ | ||
38 | #define SE_CONFIG_REG_OFFSET 0x014 | ||
39 | #define SE_CONFIG_ENC_ALG_SHIFT 12 | ||
40 | #define SE_CONFIG_DEC_ALG_SHIFT 8 | ||
41 | #define ALG_AES_ENC 1 | ||
42 | #define ALG_RNG 2 | ||
43 | #define ALG_SHA 3 | ||
44 | #define ALG_NOP 0 | ||
45 | #define ALG_AES_DEC 1 | ||
46 | #define SE_CONFIG_ENC_ALG(x) (x << SE_CONFIG_ENC_ALG_SHIFT) | ||
47 | #define SE_CONFIG_DEC_ALG(x) (x << SE_CONFIG_DEC_ALG_SHIFT) | ||
48 | #define SE_CONFIG_DST_SHIFT 2 | ||
49 | #define DST_MEMORY 0 | ||
50 | #define DST_HASHREG 1 | ||
51 | #define DST_KEYTAB 2 | ||
52 | #define DST_SRK 3 | ||
53 | #define SE_CONFIG_DST(x) (x << SE_CONFIG_DST_SHIFT) | ||
54 | #define SE_CONFIG_ENC_MODE_SHIFT 24 | ||
55 | #define SE_CONFIG_DEC_MODE_SHIFT 16 | ||
56 | #define MODE_KEY128 0 | ||
57 | #define MODE_KEY192 1 | ||
58 | #define MODE_KEY256 2 | ||
59 | #define MODE_SHA1 0 | ||
60 | #define MODE_SHA224 4 | ||
61 | #define MODE_SHA256 5 | ||
62 | #define MODE_SHA384 6 | ||
63 | #define MODE_SHA512 7 | ||
64 | #define SE_CONFIG_ENC_MODE(x) (x << SE_CONFIG_ENC_MODE_SHIFT) | ||
65 | #define SE_CONFIG_DEC_MODE(x) (x << SE_CONFIG_DEC_MODE_SHIFT) | ||
66 | |||
67 | #define SE_KEYTABLE_REG_OFFSET 0x31c | ||
68 | #define SE_KEYTABLE_SLOT_SHIFT 4 | ||
69 | #define SE_KEYTABLE_SLOT(x) (x << SE_KEYTABLE_SLOT_SHIFT) | ||
70 | #define SE_KEYTABLE_QUAD_SHIFT 2 | ||
71 | #define QUAD_KEYS_128 0 | ||
72 | #define QUAD_KEYS_192 1 | ||
73 | #define QUAD_KEYS_256 1 | ||
74 | #define QUAD_ORG_IV 2 | ||
75 | #define QUAD_UPDTD_IV 3 | ||
76 | #define SE_KEYTABLE_QUAD(x) (x << SE_KEYTABLE_QUAD_SHIFT) | ||
77 | #define SE_KEYTABLE_OP_TYPE_SHIFT 9 | ||
78 | #define OP_READ 0 | ||
79 | #define OP_WRITE 1 | ||
80 | #define SE_KEYTABLE_OP_TYPE(x) (x << SE_KEYTABLE_OP_TYPE_SHIFT) | ||
81 | #define SE_KEYTABLE_TABLE_SEL_SHIFT 8 | ||
82 | #define TABLE_KEYIV 0 | ||
83 | #define TABLE_SCHEDULE 1 | ||
84 | #define SE_KEYTABLE_TABLE_SEL(x) (x << SE_KEYTABLE_TABLE_SEL_SHIFT) | ||
85 | #define SE_KEYTABLE_PKT_SHIFT 0 | ||
86 | #define SE_KEYTABLE_PKT(x) (x << SE_KEYTABLE_PKT_SHIFT) | ||
87 | |||
88 | #define SE_CRYPTO_REG_OFFSET 0x304 | ||
89 | #define SE_CRYPTO_HASH_SHIFT 0 | ||
90 | #define HASH_DISABLE 0 | ||
91 | #define HASH_ENABLE 1 | ||
92 | #define SE_CRYPTO_HASH(x) (x << SE_CRYPTO_HASH_SHIFT) | ||
93 | #define SE_CRYPTO_XOR_POS_SHIFT 1 | ||
94 | #define XOR_BYPASS 0 | ||
95 | #define XOR_TOP 2 | ||
96 | #define XOR_BOTTOM 3 | ||
97 | #define SE_CRYPTO_XOR_POS(x) (x << SE_CRYPTO_XOR_POS_SHIFT) | ||
98 | #define SE_CRYPTO_INPUT_SEL_SHIFT 3 | ||
99 | #define INPUT_AHB 0 | ||
100 | #define INPUT_LFSR 1 | ||
101 | #define INPUT_AESOUT 2 | ||
102 | #define INPUT_LNR_CTR 3 | ||
103 | #define SE_CRYPTO_INPUT_SEL(x) (x << SE_CRYPTO_INPUT_SEL_SHIFT) | ||
104 | #define SE_CRYPTO_VCTRAM_SEL_SHIFT 5 | ||
105 | #define VCTRAM_AHB 0 | ||
106 | #define VCTRAM_AESOUT 2 | ||
107 | #define VCTRAM_PREVAHB 3 | ||
108 | #define SE_CRYPTO_VCTRAM_SEL(x) (x << SE_CRYPTO_VCTRAM_SEL_SHIFT) | ||
109 | #define SE_CRYPTO_IV_SEL_SHIFT 7 | ||
110 | #define IV_ORIGINAL 0 | ||
111 | #define IV_UPDATED 1 | ||
112 | #define SE_CRYPTO_IV_SEL(x) (x << SE_CRYPTO_IV_SEL_SHIFT) | ||
113 | #define SE_CRYPTO_CORE_SEL_SHIFT 8 | ||
114 | #define CORE_DECRYPT 0 | ||
115 | #define CORE_ENCRYPT 1 | ||
116 | #define SE_CRYPTO_CORE_SEL(x) (x << SE_CRYPTO_CORE_SEL_SHIFT) | ||
117 | #define SE_CRYPTO_CTR_VAL_SHIFT 11 | ||
118 | #define SE_CRYPTO_CTR_VAL(x) (x << SE_CRYPTO_CTR_VAL_SHIFT) | ||
119 | #define SE_CRYPTO_KEY_INDEX_SHIFT 24 | ||
120 | #define SE_CRYPTO_KEY_INDEX(x) (x << SE_CRYPTO_KEY_INDEX_SHIFT) | ||
121 | #define SE_CRYPTO_CTR_CNTN_SHIFT 11 | ||
122 | #define SE_CRYPTO_CTR_CNTN(x) (x << SE_CRYPTO_CTR_CNTN_SHIFT) | ||
123 | |||
124 | #define SE_CRYPTO_CTR_REG_COUNT 4 | ||
125 | #define SE_CRYPTO_CTR_REG_OFFSET 0x308 | ||
126 | |||
127 | #define SE_OPERATION_REG_OFFSET 0x008 | ||
128 | #define SE_OPERATION_SHIFT 0 | ||
129 | #define OP_ABORT 0 | ||
130 | #define OP_SRART 1 | ||
131 | #define OP_RESTART 2 | ||
132 | #define OP_CTX_SAVE 3 | ||
133 | #define SE_OPERATION(x) (x << SE_OPERATION_SHIFT) | ||
134 | |||
135 | #define SE_CONTEXT_SAVE_CONFIG_REG_OFFSET 0x070 | ||
136 | #define SE_CONTEXT_SAVE_WORD_QUAD_SHIFT 0 | ||
137 | #define KEYS_0_3 0 | ||
138 | #define KEYS_4_7 1 | ||
139 | #define ORIG_IV 2 | ||
140 | #define UPD_IV 3 | ||
141 | #define SE_CONTEXT_SAVE_WORD_QUAD(x) (x << SE_CONTEXT_SAVE_WORD_QUAD_SHIFT) | ||
142 | |||
143 | #define SE_CONTEXT_SAVE_KEY_INDEX_SHIFT 8 | ||
144 | #define SE_CONTEXT_SAVE_KEY_INDEX(x) (x << SE_CONTEXT_SAVE_KEY_INDEX_SHIFT) | ||
145 | |||
146 | |||
147 | #define SE_CONTEXT_SAVE_SRC_SHIFT 30 | ||
148 | #define STICKY_BITS 0 | ||
149 | #define KEYTABLE 1 | ||
150 | #define MEM 2 | ||
151 | #define SRK 3 | ||
152 | #define SE_CONTEXT_SAVE_SRC(x) (x << SE_CONTEXT_SAVE_SRC_SHIFT) | ||
153 | |||
154 | #define SE_INT_ENABLE_REG_OFFSET 0x00c | ||
155 | #define SE_INT_STATUS_REG_OFFSET 0x010 | ||
156 | #define INT_DISABLE 0 | ||
157 | #define INT_ENABLE 1 | ||
158 | #define INT_UNSET 0 | ||
159 | #define INT_SET 1 | ||
160 | #define SE_INT_OP_DONE_SHIFT 4 | ||
161 | #define SE_INT_OP_DONE(x) (x << SE_INT_OP_DONE_SHIFT) | ||
162 | #define SE_INT_ERROR_SHIFT 16 | ||
163 | #define SE_INT_ERROR(x) (x << SE_INT_ERROR_SHIFT) | ||
164 | |||
165 | #define SE_CRYPTO_KEYTABLE_DST_REG_OFFSET 0x330 | ||
166 | #define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT 0 | ||
167 | #define SE_CRYPTO_KEYTABLE_DST_WORD_QUAD(x) \ | ||
168 | (x << SE_CRYPTO_KEYTABLE_DST_WORD_QUAD_SHIFT) | ||
169 | |||
170 | #define SE_KEY_INDEX_SHIFT 8 | ||
171 | #define SE_CRYPTO_KEYTABLE_DST_KEY_INDEX(x) (x << SE_KEY_INDEX_SHIFT) | ||
172 | |||
173 | #define SE_IN_LL_ADDR_REG_OFFSET 0x018 | ||
174 | #define SE_OUT_LL_ADDR_REG_OFFSET 0x024 | ||
175 | |||
176 | #define SE_KEYTABLE_DATA0_REG_OFFSET 0x320 | ||
177 | #define SE_KEYTABLE_REG_MAX_DATA 16 | ||
178 | |||
179 | #define SE_BLOCK_COUNT_REG_OFFSET 0x318 | ||
180 | |||
181 | #define SE_SPARE_0_REG_OFFSET 0x80c | ||
182 | |||
183 | #define SE_SHA_CONFIG_REG_OFFSET 0x200 | ||
184 | #define SHA_DISABLE 0 | ||
185 | #define SHA_ENABLE 1 | ||
186 | |||
187 | #define SE_SHA_MSG_LENGTH_REG_OFFSET 0x204 | ||
188 | #define SE_SHA_MSG_LEFT_REG_OFFSET 0x214 | ||
189 | |||
190 | |||
191 | #define SE_HASH_RESULT_REG_COUNT 16 | ||
192 | #define SE_HASH_RESULT_REG_OFFSET 0x030 | ||
193 | |||
194 | |||
195 | #define TEGRA_SE_KEY_256_SIZE 32 | ||
196 | #define TEGRA_SE_KEY_192_SIZE 24 | ||
197 | #define TEGRA_SE_KEY_128_SIZE 16 | ||
198 | #define TEGRA_SE_AES_BLOCK_SIZE 16 | ||
199 | #define TEGRA_SE_AES_MIN_KEY_SIZE 16 | ||
200 | #define TEGRA_SE_AES_MAX_KEY_SIZE 32 | ||
201 | #define TEGRA_SE_AES_IV_SIZE 16 | ||
202 | #define TEGRA_SE_RNG_IV_SIZE 16 | ||
203 | #define TEGRA_SE_RNG_DT_SIZE 16 | ||
204 | #define TEGRA_SE_RNG_KEY_SIZE 16 | ||
205 | #define TEGRA_SE_RNG_SEED_SIZE (TEGRA_SE_RNG_IV_SIZE + \ | ||
206 | TEGRA_SE_RNG_KEY_SIZE + \ | ||
207 | TEGRA_SE_RNG_DT_SIZE) | ||
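     | /* | ||
     |  * 48-byte seed = 16-byte key + 16-byte V + 16-byte DT, the inputs of | ||
     |  * an ANSI X9.31 AES-128 generator (cf. the SE_AES_OP_MODE_RNG_X931 | ||
     |  * mode used by tegra-se.c). | ||
     |  */ | ||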
208 | #define TEGRA_SE_AES_CMAC_DIGEST_SIZE 16 | ||
209 | |||
210 | #define SE_KEY_TABLE_ACCESS_REG_OFFSET 0x284 | ||
211 | #define SE_KEY_READ_DISABLE_SHIFT 0 | ||
212 | |||
213 | #define SE_CONTEXT_BUFER_SIZE 1072 | ||
214 | #define SE_CONTEXT_SAVE_RANDOM_DATA_OFFSET 0 | ||
215 | #define SE_CONTEXT_SAVE_RANDOM_DATA_SIZE 16 | ||
216 | #define SE_CONTEXT_SAVE_STICKY_BITS_OFFSET \ | ||
217 | (SE_CONTEXT_SAVE_RANDOM_DATA_OFFSET + SE_CONTEXT_SAVE_RANDOM_DATA_SIZE) | ||
218 | #define SE_CONTEXT_SAVE_STICKY_BITS_SIZE 16 | ||
219 | #define SE_CONTEXT_SAVE_KEYS_OFFSET (SE_CONTEXT_SAVE_STICKY_BITS_OFFSET + \ | ||
220 | SE_CONTEXT_SAVE_STICKY_BITS_SIZE) | ||
221 | #define SE_CONTEXT_SAVE_KEY_LENGTH 512 | ||
222 | #define SE_CONTEXT_ORIGINAL_IV_OFFSET (SE_CONTEXT_SAVE_KEYS_OFFSET + \ | ||
223 | SE_CONTEXT_SAVE_KEY_LENGTH) | ||
224 | #define SE_CONTEXT_ORIGINAL_IV_LENGTH 256 | ||
225 | |||
226 | #define SE_CONTEXT_UPDATED_IV_OFFSET (SE_CONTEXT_ORIGINAL_IV_OFFSET + \ | ||
227 | SE_CONTEXT_ORIGINAL_IV_LENGTH) | ||
228 | |||
229 | #define SE_CONTEXT_UPDATED_IV_LENGTH 256 | ||
230 | #define SE_CONTEXT_KNOWN_PATTERN_OFFSET (SE_CONTEXT_UPDATED_IV_OFFSET + \ | ||
231 | SE_CONTEXT_UPDATED_IV_LENGTH) | ||
232 | #define SE_CONTEXT_KNOWN_PATTERN_SIZE 16 | ||
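     | /* | ||
     |  * The offsets above pack the LP0 context buffer back to back: | ||
     |  * 16 (random) + 16 (sticky bits) + 512 (keys) + 256 (original IVs) + | ||
     |  * 256 (updated IVs) + 16 (known pattern) = 1072 bytes, i.e. exactly | ||
     |  * SE_CONTEXT_BUFER_SIZE. | ||
     |  */ | ||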
233 | |||
234 | |||
235 | #endif /* _CRYPTO_TEGRA_SE_H */ | ||