author     Gary R Hook <gary.hook@amd.com>            2016-07-26 20:10:21 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>   2016-08-09 06:47:16 -0400
commit     4b394a232df78414442778b02ca4a388d947d059 (patch)
tree       5268deba5299e9c4c9fbd697b6e26daa95ffe531
parent     bb4e89b34d1bf46156b7e880a0f34205fb7ce2a5 (diff)
crypto: ccp - Let a v5 CCP provide the same function as v3
Enable equivalent function on a v5 CCP: add support for a version 5
device, which provides the AES, XTS-AES and SHA services. Also rework
the data structures so that version-specific functionality is
virtualized behind per-device operation tables.
Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--  drivers/crypto/ccp/Makefile          |   1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c  |  18
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c      |  28
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c      | 961
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h         | 164
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c         | 279
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c         |   6
-rw-r--r--  include/linux/ccp.h                  |   3
8 files changed, 1340 insertions(+), 120 deletions(-)
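
At a high level, the patch virtualizes the device interface: the core driver reaches every version-specific routine through the ccp_vdata/ccp_actions tables updated in ccp-dev.h below, never by calling a v3 or v5 function directly. A minimal sketch of that dispatch path (the wrapper function here is hypothetical; ccp->vdata and the action table come from the hunks that follow):

    /* Illustrative only: submit an AES op without knowing whether
     * ccp3_actions or ccp5_actions sits behind the pointer.
     */
    static int ccp_submit_aes(struct ccp_device *ccp, struct ccp_op *op)
    {
            return ccp->vdata->perform->aes(op);
    }

The same indirection covers one-time configuration: ccpv3 leaves .setup NULL, while ccpv5 points it at ccp5_config().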
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index ee4d2741b3ab..346ceb8f17bd 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o | |||
2 | ccp-objs := ccp-dev.o \ | 2 | ccp-objs := ccp-dev.o \ |
3 | ccp-ops.o \ | 3 | ccp-ops.o \ |
4 | ccp-dev-v3.o \ | 4 | ccp-dev-v3.o \ |
5 | ccp-dev-v5.o \ | ||
5 | ccp-platform.o \ | 6 | ccp-platform.o \ |
6 | ccp-dmaengine.o | 7 | ccp-dmaengine.o |
7 | ccp-$(CONFIG_PCI) += ccp-pci.o | 8 | ccp-$(CONFIG_PCI) += ccp-pci.o |
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8f36af62fe95..84a652be4274 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -4,6 +4,7 @@ | |||
4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. | 4 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. |
5 | * | 5 | * |
6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
7 | * Author: Gary R Hook <gary.hook@amd.com> | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
@@ -134,7 +135,22 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, | |||
134 | rctx->cmd.engine = CCP_ENGINE_SHA; | 135 | rctx->cmd.engine = CCP_ENGINE_SHA; |
135 | rctx->cmd.u.sha.type = rctx->type; | 136 | rctx->cmd.u.sha.type = rctx->type; |
136 | rctx->cmd.u.sha.ctx = &rctx->ctx_sg; | 137 | rctx->cmd.u.sha.ctx = &rctx->ctx_sg; |
137 | rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); | 138 | |
139 | switch (rctx->type) { | ||
140 | case CCP_SHA_TYPE_1: | ||
141 | rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE; | ||
142 | break; | ||
143 | case CCP_SHA_TYPE_224: | ||
144 | rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE; | ||
145 | break; | ||
146 | case CCP_SHA_TYPE_256: | ||
147 | rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE; | ||
148 | break; | ||
149 | default: | ||
150 | /* Should never get here */ | ||
151 | break; | ||
152 | } | ||
153 | |||
138 | rctx->cmd.u.sha.src = sg; | 154 | rctx->cmd.u.sha.src = sg; |
139 | rctx->cmd.u.sha.src_len = rctx->hash_cnt; | 155 | rctx->cmd.u.sha.src_len = rctx->hash_cnt; |
140 | rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? | 156 | rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? |
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 02c8c95fdc2d..ff2d2a4de16a 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -405,6 +405,7 @@ static int ccp_init(struct ccp_device *ccp) | |||
405 | init_waitqueue_head(&ccp->sb_queue); | 405 | init_waitqueue_head(&ccp->sb_queue); |
406 | init_waitqueue_head(&ccp->suspend_queue); | 406 | init_waitqueue_head(&ccp->suspend_queue); |
407 | 407 | ||
408 | dev_dbg(dev, "Starting threads...\n"); | ||
408 | /* Create a kthread for each queue */ | 409 | /* Create a kthread for each queue */ |
409 | for (i = 0; i < ccp->cmd_q_count; i++) { | 410 | for (i = 0; i < ccp->cmd_q_count; i++) { |
410 | struct task_struct *kthread; | 411 | struct task_struct *kthread; |
@@ -424,6 +425,13 @@ static int ccp_init(struct ccp_device *ccp) | |||
424 | wake_up_process(kthread); | 425 | wake_up_process(kthread); |
425 | } | 426 | } |
426 | 427 | ||
428 | dev_dbg(dev, "Enabling interrupts...\n"); | ||
429 | /* Enable interrupts */ | ||
430 | iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); | ||
431 | |||
432 | dev_dbg(dev, "Registering device...\n"); | ||
433 | ccp_add_device(ccp); | ||
434 | |||
427 | /* Register the RNG */ | 435 | /* Register the RNG */ |
428 | ccp->hwrng.name = ccp->rngname; | 436 | ccp->hwrng.name = ccp->rngname; |
429 | ccp->hwrng.read = ccp_trng_read; | 437 | ccp->hwrng.read = ccp_trng_read; |
@@ -438,11 +446,6 @@ static int ccp_init(struct ccp_device *ccp) | |||
438 | if (ret) | 446 | if (ret) |
439 | goto e_hwrng; | 447 | goto e_hwrng; |
440 | 448 | ||
441 | ccp_add_device(ccp); | ||
442 | |||
443 | /* Enable interrupts */ | ||
444 | iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); | ||
445 | |||
446 | return 0; | 449 | return 0; |
447 | 450 | ||
448 | e_hwrng: | 451 | e_hwrng: |
@@ -468,7 +471,13 @@ static void ccp_destroy(struct ccp_device *ccp) | |||
468 | struct ccp_cmd *cmd; | 471 | struct ccp_cmd *cmd; |
469 | unsigned int qim, i; | 472 | unsigned int qim, i; |
470 | 473 | ||
471 | /* Remove this device from the list of available units first */ | 474 | /* Unregister the DMA engine */ |
475 | ccp_dmaengine_unregister(ccp); | ||
476 | |||
477 | /* Unregister the RNG */ | ||
478 | hwrng_unregister(&ccp->hwrng); | ||
479 | |||
480 | /* Remove this device from the list of available units */ | ||
472 | ccp_del_device(ccp); | 481 | ccp_del_device(ccp); |
473 | 482 | ||
474 | /* Build queue interrupt mask (two interrupt masks per queue) */ | 483 | /* Build queue interrupt mask (two interrupt masks per queue) */ |
@@ -488,12 +497,6 @@ static void ccp_destroy(struct ccp_device *ccp) | |||
488 | } | 497 | } |
489 | iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); | 498 | iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); |
490 | 499 | ||
491 | /* Unregister the DMA engine */ | ||
492 | ccp_dmaengine_unregister(ccp); | ||
493 | |||
494 | /* Unregister the RNG */ | ||
495 | hwrng_unregister(&ccp->hwrng); | ||
496 | |||
497 | /* Stop the queue kthreads */ | 500 | /* Stop the queue kthreads */ |
498 | for (i = 0; i < ccp->cmd_q_count; i++) | 501 | for (i = 0; i < ccp->cmd_q_count; i++) |
499 | if (ccp->cmd_q[i].kthread) | 502 | if (ccp->cmd_q[i].kthread) |
@@ -570,6 +573,7 @@ static const struct ccp_actions ccp3_actions = { | |||
570 | 573 | ||
571 | struct ccp_vdata ccpv3 = { | 574 | struct ccp_vdata ccpv3 = { |
572 | .version = CCP_VERSION(3, 0), | 575 | .version = CCP_VERSION(3, 0), |
576 | .setup = NULL, | ||
573 | .perform = &ccp3_actions, | 577 | .perform = &ccp3_actions, |
574 | .bar = 2, | 578 | .bar = 2, |
575 | .offset = 0x20000, | 579 | .offset = 0x20000, |
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
new file mode 100644
index 000000000000..16dad9633754
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -0,0 +1,961 @@ | |||
1 | /* | ||
2 | * AMD Cryptographic Coprocessor (CCP) driver | ||
3 | * | ||
4 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | ||
5 | * | ||
6 | * Author: Gary R Hook <gary.hook@amd.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/kthread.h> | ||
17 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/compiler.h> | ||
20 | #include <linux/ccp.h> | ||
21 | |||
22 | #include "ccp-dev.h" | ||
23 | |||
24 | static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) | ||
25 | { | ||
26 | struct ccp_device *ccp; | ||
27 | int start; | ||
28 | |||
29 | /* First look at the map for the queue */ | ||
30 | if (cmd_q->lsb >= 0) { | ||
31 | start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, | ||
32 | LSB_SIZE, | ||
33 | 0, count, 0); | ||
34 | if (start < LSB_SIZE) { | ||
35 | bitmap_set(cmd_q->lsbmap, start, count); | ||
36 | return start + cmd_q->lsb * LSB_SIZE; | ||
37 | } | ||
38 | } | ||
39 | |||
40 | /* No joy; try to get an entry from the shared blocks */ | ||
41 | ccp = cmd_q->ccp; | ||
42 | for (;;) { | ||
43 | mutex_lock(&ccp->sb_mutex); | ||
44 | |||
45 | start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, | ||
46 | MAX_LSB_CNT * LSB_SIZE, | ||
47 | 0, | ||
48 | count, 0); | ||
49 | if (start <= MAX_LSB_CNT * LSB_SIZE) { | ||
50 | bitmap_set(ccp->lsbmap, start, count); | ||
51 | |||
52 | mutex_unlock(&ccp->sb_mutex); | ||
53 | return start * LSB_ITEM_SIZE; | ||
54 | } | ||
55 | |||
56 | ccp->sb_avail = 0; | ||
57 | |||
58 | mutex_unlock(&ccp->sb_mutex); | ||
59 | |||
60 | /* Wait for KSB entries to become available */ | ||
61 | if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) | ||
62 | return 0; | ||
63 | } | ||
64 | } | ||
65 | |||
66 | static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, | ||
67 | unsigned int count) | ||
68 | { | ||
69 | int lsbno = start / LSB_SIZE; | ||
70 | |||
71 | if (!start) | ||
72 | return; | ||
73 | |||
74 | if (cmd_q->lsb == lsbno) { | ||
75 | /* An entry from the private LSB */ | ||
76 | bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count); | ||
77 | } else { | ||
78 | /* From the shared LSBs */ | ||
79 | struct ccp_device *ccp = cmd_q->ccp; | ||
80 | |||
81 | mutex_lock(&ccp->sb_mutex); | ||
82 | bitmap_clear(ccp->lsbmap, start, count); | ||
83 | ccp->sb_avail = 1; | ||
84 | mutex_unlock(&ccp->sb_mutex); | ||
85 | wake_up_interruptible_all(&ccp->sb_queue); | ||
86 | } | ||
87 | } | ||
88 | |||
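The allocator above is two-level: a queue first searches its private LSB bitmap, then falls back to the shared map, sleeping on sb_queue until ccp_lsb_free() marks entries available and wakes it. A minimal usage sketch (the two-slot counts match the pre-allocation done later in ccp5_init(); the free calls are shown only for illustration):

    /* Reserve two contiguous LSB slots each for key and context data */
    cmd_q->sb_key = ccp_lsb_alloc(cmd_q, 2);
    cmd_q->sb_ctx = ccp_lsb_alloc(cmd_q, 2);

    /* Returning shared-region slots sets sb_avail and wakes any
     * allocator blocked in ccp_lsb_alloc()
     */
    ccp_lsb_free(cmd_q, cmd_q->sb_ctx, 2);
    ccp_lsb_free(cmd_q, cmd_q->sb_key, 2);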
89 | /* CCP version 5: Union to define the function field (cmd_reg1/dword0) */ | ||
90 | union ccp_function { | ||
91 | struct { | ||
92 | u16 size:7; | ||
93 | u16 encrypt:1; | ||
94 | u16 mode:5; | ||
95 | u16 type:2; | ||
96 | } aes; | ||
97 | struct { | ||
98 | u16 size:7; | ||
99 | u16 encrypt:1; | ||
100 | u16 rsvd:5; | ||
101 | u16 type:2; | ||
102 | } aes_xts; | ||
103 | struct { | ||
104 | u16 rsvd1:10; | ||
105 | u16 type:4; | ||
106 | u16 rsvd2:1; | ||
107 | } sha; | ||
108 | struct { | ||
109 | u16 mode:3; | ||
110 | u16 size:12; | ||
111 | } rsa; | ||
112 | struct { | ||
113 | u16 byteswap:2; | ||
114 | u16 bitwise:3; | ||
115 | u16 reflect:2; | ||
116 | u16 rsvd:8; | ||
117 | } pt; | ||
118 | struct { | ||
119 | u16 rsvd:13; | ||
120 | } zlib; | ||
121 | struct { | ||
122 | u16 size:10; | ||
123 | u16 type:2; | ||
124 | u16 mode:3; | ||
125 | } ecc; | ||
126 | u16 raw; | ||
127 | }; | ||
128 | |||
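Each per-engine layout packs into the 15-bit function field of command word 0; for AES, 7 (size) + 1 (encrypt) + 5 (mode) + 2 (type) = 15 bits. A short sketch of how the perform routines below populate it (desc is assumed to be a zeroed struct ccp5_desc; the mode/type enums are the existing ones from <linux/ccp.h>):

    union ccp_function function;

    function.raw = 0;                           /* clear all fields     */
    CCP_AES_ENCRYPT(&function) = 1;             /* encrypt, not decrypt */
    CCP_AES_MODE(&function) = CCP_AES_MODE_CBC;
    CCP_AES_TYPE(&function) = CCP_AES_TYPE_128;
    CCP5_CMD_FUNCTION(&desc) = function.raw;    /* one store sets them all */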
129 | #define CCP_AES_SIZE(p) ((p)->aes.size) | ||
130 | #define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt) | ||
131 | #define CCP_AES_MODE(p) ((p)->aes.mode) | ||
132 | #define CCP_AES_TYPE(p) ((p)->aes.type) | ||
133 | #define CCP_XTS_SIZE(p) ((p)->aes_xts.size) | ||
134 | #define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt) | ||
135 | #define CCP_SHA_TYPE(p) ((p)->sha.type) | ||
136 | #define CCP_RSA_SIZE(p) ((p)->rsa.size) | ||
137 | #define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) | ||
138 | #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) | ||
139 | #define CCP_ECC_MODE(p) ((p)->ecc.mode) | ||
140 | #define CCP_ECC_AFFINE(p) ((p)->ecc.one) | ||
141 | |||
142 | /* Word 0 */ | ||
143 | #define CCP5_CMD_DW0(p) ((p)->dw0) | ||
144 | #define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc) | ||
145 | #define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc) | ||
146 | #define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init) | ||
147 | #define CCP5_CMD_EOM(p) (CCP5_CMD_DW0(p).eom) | ||
148 | #define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function) | ||
149 | #define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine) | ||
150 | #define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot) | ||
151 | |||
152 | /* Word 1 */ | ||
153 | #define CCP5_CMD_DW1(p) ((p)->length) | ||
154 | #define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p)) | ||
155 | |||
156 | /* Word 2 */ | ||
157 | #define CCP5_CMD_DW2(p) ((p)->src_lo) | ||
158 | #define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p)) | ||
159 | |||
160 | /* Word 3 */ | ||
161 | #define CCP5_CMD_DW3(p) ((p)->dw3) | ||
162 | #define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem) | ||
163 | #define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi) | ||
164 | #define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) | ||
165 | #define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed) | ||
166 | |||
167 | /* Words 4/5 */ | ||
168 | #define CCP5_CMD_DW4(p) ((p)->dw4) | ||
169 | #define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo) | ||
170 | #define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi) | ||
171 | #define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p)) | ||
172 | #define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) | ||
173 | #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) | ||
174 | #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) | ||
175 | #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) | ||
176 | |||
177 | /* Word 6/7 */ | ||
178 | #define CCP5_CMD_DW6(p) ((p)->key_lo) | ||
179 | #define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p)) | ||
180 | #define CCP5_CMD_DW7(p) ((p)->dw7) | ||
181 | #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) | ||
182 | #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) | ||
183 | |||
184 | static inline u32 low_address(unsigned long addr) | ||
185 | { | ||
186 | return (u64)addr & 0x0ffffffff; | ||
187 | } | ||
188 | |||
189 | static inline u32 high_address(unsigned long addr) | ||
190 | { | ||
191 | return ((u64)addr >> 32) & 0x00000ffff; | ||
192 | } | ||
193 | |||
194 | static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) | ||
195 | { | ||
196 | unsigned int head_idx, n; | ||
197 | u32 head_lo, queue_start; | ||
198 | |||
199 | queue_start = low_address(cmd_q->qdma_tail); | ||
200 | head_lo = ioread32(cmd_q->reg_head_lo); | ||
201 | head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); | ||
202 | |||
203 | n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; | ||
204 | |||
205 | return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ | ||
206 | } | ||
207 | |||
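A worked instance of the free-slot arithmetic, with head and tail positions assumed purely for illustration:

    /* COMMANDS_PER_QUEUE = 16. Suppose the hardware head is at
     * descriptor index 5 and the driver's next write index (qidx) is 9:
     *
     *     n = 5 + 16 - 9 - 1 = 11,  11 % 16 = 11 free descriptors
     *
     * When the ring is idle (head_idx == qidx) this yields 15, never 16:
     * one slot is always left unused so a full ring can be distinguished
     * from an empty one.
     */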
208 | static int ccp5_do_cmd(struct ccp5_desc *desc, | ||
209 | struct ccp_cmd_queue *cmd_q) | ||
210 | { | ||
211 | u32 *mP; | ||
212 | __le32 *dP; | ||
213 | u32 tail; | ||
214 | int i; | ||
215 | int ret = 0; | ||
216 | |||
217 | if (CCP5_CMD_SOC(desc)) { | ||
218 | CCP5_CMD_IOC(desc) = 1; | ||
219 | CCP5_CMD_SOC(desc) = 0; | ||
220 | } | ||
221 | mutex_lock(&cmd_q->q_mutex); | ||
222 | |||
223 | mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; | ||
224 | dP = (__le32 *) desc; | ||
225 | for (i = 0; i < 8; i++) | ||
226 | mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ | ||
227 | |||
228 | cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; | ||
229 | |||
230 | /* The data used by this command must be flushed to memory */ | ||
231 | wmb(); | ||
232 | |||
233 | /* Write the new tail address back to the queue register */ | ||
234 | tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); | ||
235 | iowrite32(tail, cmd_q->reg_tail_lo); | ||
236 | |||
237 | /* Turn the queue back on using our cached control register */ | ||
238 | iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); | ||
239 | mutex_unlock(&cmd_q->q_mutex); | ||
240 | |||
241 | if (CCP5_CMD_IOC(desc)) { | ||
242 | /* Wait for the job to complete */ | ||
243 | ret = wait_event_interruptible(cmd_q->int_queue, | ||
244 | cmd_q->int_rcvd); | ||
245 | if (ret || cmd_q->cmd_error) { | ||
246 | /* A version 5 device doesn't use Job IDs... */ | ||
247 | if (!ret) | ||
248 | ret = -EIO; | ||
249 | } | ||
250 | cmd_q->int_rcvd = 0; | ||
251 | } | ||
252 | |||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | static int ccp5_perform_aes(struct ccp_op *op) | ||
257 | { | ||
258 | struct ccp5_desc desc; | ||
259 | union ccp_function function; | ||
260 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; | ||
261 | |||
262 | /* Zero out all the fields of the command desc */ | ||
263 | memset(&desc, 0, Q_DESC_SIZE); | ||
264 | |||
265 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES; | ||
266 | |||
267 | CCP5_CMD_SOC(&desc) = op->soc; | ||
268 | CCP5_CMD_IOC(&desc) = 1; | ||
269 | CCP5_CMD_INIT(&desc) = op->init; | ||
270 | CCP5_CMD_EOM(&desc) = op->eom; | ||
271 | CCP5_CMD_PROT(&desc) = 0; | ||
272 | |||
273 | function.raw = 0; | ||
274 | CCP_AES_ENCRYPT(&function) = op->u.aes.action; | ||
275 | CCP_AES_MODE(&function) = op->u.aes.mode; | ||
276 | CCP_AES_TYPE(&function) = op->u.aes.type; | ||
277 | if (op->u.aes.mode == CCP_AES_MODE_CFB) | ||
278 | CCP_AES_SIZE(&function) = 0x7f; | ||
279 | |||
280 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
281 | |||
282 | CCP5_CMD_LEN(&desc) = op->src.u.dma.length; | ||
283 | |||
284 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
285 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
286 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
287 | |||
288 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
289 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
290 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
291 | |||
292 | CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); | ||
293 | CCP5_CMD_KEY_HI(&desc) = 0; | ||
294 | CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; | ||
295 | CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; | ||
296 | |||
297 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
298 | } | ||
299 | |||
300 | static int ccp5_perform_xts_aes(struct ccp_op *op) | ||
301 | { | ||
302 | struct ccp5_desc desc; | ||
303 | union ccp_function function; | ||
304 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; | ||
305 | |||
306 | /* Zero out all the fields of the command desc */ | ||
307 | memset(&desc, 0, Q_DESC_SIZE); | ||
308 | |||
309 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128; | ||
310 | |||
311 | CCP5_CMD_SOC(&desc) = op->soc; | ||
312 | CCP5_CMD_IOC(&desc) = 1; | ||
313 | CCP5_CMD_INIT(&desc) = op->init; | ||
314 | CCP5_CMD_EOM(&desc) = op->eom; | ||
315 | CCP5_CMD_PROT(&desc) = 0; | ||
316 | |||
317 | function.raw = 0; | ||
318 | CCP_XTS_ENCRYPT(&function) = op->u.xts.action; | ||
319 | CCP_XTS_SIZE(&function) = op->u.xts.unit_size; | ||
320 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
321 | |||
322 | CCP5_CMD_LEN(&desc) = op->src.u.dma.length; | ||
323 | |||
324 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
325 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
326 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
327 | |||
328 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
329 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
330 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
331 | |||
332 | CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); | ||
333 | CCP5_CMD_KEY_HI(&desc) = 0; | ||
334 | CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; | ||
335 | CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; | ||
336 | |||
337 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
338 | } | ||
339 | |||
340 | static int ccp5_perform_sha(struct ccp_op *op) | ||
341 | { | ||
342 | struct ccp5_desc desc; | ||
343 | union ccp_function function; | ||
344 | |||
345 | /* Zero out all the fields of the command desc */ | ||
346 | memset(&desc, 0, Q_DESC_SIZE); | ||
347 | |||
348 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA; | ||
349 | |||
350 | CCP5_CMD_SOC(&desc) = op->soc; | ||
351 | CCP5_CMD_IOC(&desc) = 1; | ||
352 | CCP5_CMD_INIT(&desc) = 1; | ||
353 | CCP5_CMD_EOM(&desc) = op->eom; | ||
354 | CCP5_CMD_PROT(&desc) = 0; | ||
355 | |||
356 | function.raw = 0; | ||
357 | CCP_SHA_TYPE(&function) = op->u.sha.type; | ||
358 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
359 | |||
360 | CCP5_CMD_LEN(&desc) = op->src.u.dma.length; | ||
361 | |||
362 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
363 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
364 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
365 | |||
366 | CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; | ||
367 | |||
368 | if (op->eom) { | ||
369 | CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits); | ||
370 | CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits); | ||
371 | } else { | ||
372 | CCP5_CMD_SHA_LO(&desc) = 0; | ||
373 | CCP5_CMD_SHA_HI(&desc) = 0; | ||
374 | } | ||
375 | |||
376 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
377 | } | ||
378 | |||
379 | static int ccp5_perform_rsa(struct ccp_op *op) | ||
380 | { | ||
381 | struct ccp5_desc desc; | ||
382 | union ccp_function function; | ||
383 | |||
384 | /* Zero out all the fields of the command desc */ | ||
385 | memset(&desc, 0, Q_DESC_SIZE); | ||
386 | |||
387 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA; | ||
388 | |||
389 | CCP5_CMD_SOC(&desc) = op->soc; | ||
390 | CCP5_CMD_IOC(&desc) = 1; | ||
391 | CCP5_CMD_INIT(&desc) = 0; | ||
392 | CCP5_CMD_EOM(&desc) = 1; | ||
393 | CCP5_CMD_PROT(&desc) = 0; | ||
394 | |||
395 | function.raw = 0; | ||
396 | CCP_RSA_SIZE(&function) = op->u.rsa.mod_size; | ||
397 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
398 | |||
399 | CCP5_CMD_LEN(&desc) = op->u.rsa.input_len; | ||
400 | |||
401 | /* Source is from external memory */ | ||
402 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
403 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
404 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
405 | |||
406 | /* Destination is in external memory */ | ||
407 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
408 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
409 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
410 | |||
411 | /* Key (Exponent) is in external memory */ | ||
412 | CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma); | ||
413 | CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma); | ||
414 | CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
415 | |||
416 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
417 | } | ||
418 | |||
419 | static int ccp5_perform_passthru(struct ccp_op *op) | ||
420 | { | ||
421 | struct ccp5_desc desc; | ||
422 | union ccp_function function; | ||
423 | struct ccp_dma_info *saddr = &op->src.u.dma; | ||
424 | struct ccp_dma_info *daddr = &op->dst.u.dma; | ||
425 | |||
426 | memset(&desc, 0, Q_DESC_SIZE); | ||
427 | |||
428 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; | ||
429 | |||
430 | CCP5_CMD_SOC(&desc) = 0; | ||
431 | CCP5_CMD_IOC(&desc) = 1; | ||
432 | CCP5_CMD_INIT(&desc) = 0; | ||
433 | CCP5_CMD_EOM(&desc) = op->eom; | ||
434 | CCP5_CMD_PROT(&desc) = 0; | ||
435 | |||
436 | function.raw = 0; | ||
437 | CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap; | ||
438 | CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod; | ||
439 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
440 | |||
441 | /* Length of source data is always 256 bytes */ | ||
442 | if (op->src.type == CCP_MEMTYPE_SYSTEM) | ||
443 | CCP5_CMD_LEN(&desc) = saddr->length; | ||
444 | else | ||
445 | CCP5_CMD_LEN(&desc) = daddr->length; | ||
446 | |||
447 | if (op->src.type == CCP_MEMTYPE_SYSTEM) { | ||
448 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
449 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
450 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
451 | |||
452 | if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) | ||
453 | CCP5_CMD_LSB_ID(&desc) = op->sb_key; | ||
454 | } else { | ||
455 | u32 key_addr = op->src.u.sb * CCP_SB_BYTES; | ||
456 | |||
457 | CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr); | ||
458 | CCP5_CMD_SRC_HI(&desc) = 0; | ||
459 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB; | ||
460 | } | ||
461 | |||
462 | if (op->dst.type == CCP_MEMTYPE_SYSTEM) { | ||
463 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
464 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
465 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
466 | } else { | ||
467 | u32 key_addr = op->dst.u.sb * CCP_SB_BYTES; | ||
468 | |||
469 | CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr); | ||
470 | CCP5_CMD_DST_HI(&desc) = 0; | ||
471 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB; | ||
472 | } | ||
473 | |||
474 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
475 | } | ||
476 | |||
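Pass-through is how key and context material moves between system memory and the LSB. A minimal sketch of an op that copies a DMA-mapped buffer into a queue's key slot with 256-bit byte-swapping (all field values are assumptions for illustration; the byteswap/bitwise enums are the existing ones from <linux/ccp.h>):

    struct ccp_op op;

    memset(&op, 0, sizeof(op));
    op.cmd_q = cmd_q;
    op.eom = 1;
    op.src.type = CCP_MEMTYPE_SYSTEM;     /* source: DMA-mapped buffer */
    /* op.src.u.dma filled in by the caller */
    op.dst.type = CCP_MEMTYPE_SB;         /* destination: LSB key slot */
    op.dst.u.sb = cmd_q->sb_key;
    op.u.passthru.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
    op.u.passthru.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
    ccp5_perform_passthru(&op);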
477 | static int ccp5_perform_ecc(struct ccp_op *op) | ||
478 | { | ||
479 | struct ccp5_desc desc; | ||
480 | union ccp_function function; | ||
481 | |||
482 | /* Zero out all the fields of the command desc */ | ||
483 | memset(&desc, 0, Q_DESC_SIZE); | ||
484 | |||
485 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC; | ||
486 | |||
487 | CCP5_CMD_SOC(&desc) = 0; | ||
488 | CCP5_CMD_IOC(&desc) = 1; | ||
489 | CCP5_CMD_INIT(&desc) = 0; | ||
490 | CCP5_CMD_EOM(&desc) = 1; | ||
491 | CCP5_CMD_PROT(&desc) = 0; | ||
492 | |||
493 | function.raw = 0; | ||
494 | function.ecc.mode = op->u.ecc.function; | ||
495 | CCP5_CMD_FUNCTION(&desc) = function.raw; | ||
496 | |||
497 | CCP5_CMD_LEN(&desc) = op->src.u.dma.length; | ||
498 | |||
499 | CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); | ||
500 | CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); | ||
501 | CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
502 | |||
503 | CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); | ||
504 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | ||
505 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | ||
506 | |||
507 | return ccp5_do_cmd(&desc, op->cmd_q); | ||
508 | } | ||
509 | |||
510 | static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) | ||
511 | { | ||
512 | int q_mask = 1 << cmd_q->id; | ||
513 | int queues = 0; | ||
514 | int j; | ||
515 | |||
516 | /* Build a bit mask to know which LSBs this queue has access to. | ||
517 | * Don't bother with segment 0 as it has special privileges. | ||
518 | */ | ||
519 | for (j = 1; j < MAX_LSB_CNT; j++) { | ||
520 | if (status & q_mask) | ||
521 | bitmap_set(cmd_q->lsbmask, j, 1); | ||
522 | status >>= LSB_REGION_WIDTH; | ||
523 | } | ||
524 | queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); | ||
525 | dev_info(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", | ||
526 | cmd_q->id, queues); | ||
527 | |||
528 | return queues ? 0 : -EINVAL; | ||
529 | } | ||
530 | |||
531 | |||
532 | static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, | ||
533 | int lsb_cnt, int n_lsbs, | ||
534 | unsigned long *lsb_pub) | ||
535 | { | ||
536 | DECLARE_BITMAP(qlsb, MAX_LSB_CNT); | ||
537 | int bitno; | ||
538 | int qlsb_wgt; | ||
539 | int i; | ||
540 | |||
541 | /* For each queue: | ||
542 | * If the count of potential LSBs available to a queue matches the | ||
543 | * ordinal given to us in lsb_cnt: | ||
544 | * Copy the mask of possible LSBs for this queue into "qlsb"; | ||
545 | * For each bit in qlsb, see if the corresponding bit in the | ||
546 | * aggregation mask is set; if so, we have a match. | ||
547 | * If we have a match, clear the bit in the aggregation to | ||
548 | * mark it as no longer available. | ||
549 | * If there is no match, clear the bit in qlsb and keep looking. | ||
550 | */ | ||
551 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
552 | struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; | ||
553 | |||
554 | qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); | ||
555 | |||
556 | if (qlsb_wgt == lsb_cnt) { | ||
557 | bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT); | ||
558 | |||
559 | bitno = find_first_bit(qlsb, MAX_LSB_CNT); | ||
560 | while (bitno < MAX_LSB_CNT) { | ||
561 | if (test_bit(bitno, lsb_pub)) { | ||
562 | /* We found an available LSB | ||
563 | * that this queue can access | ||
564 | */ | ||
565 | cmd_q->lsb = bitno; | ||
566 | bitmap_clear(lsb_pub, bitno, 1); | ||
567 | dev_info(ccp->dev, | ||
568 | "Queue %d gets LSB %d\n", | ||
569 | i, bitno); | ||
570 | break; | ||
571 | } | ||
572 | bitmap_clear(qlsb, bitno, 1); | ||
573 | bitno = find_first_bit(qlsb, MAX_LSB_CNT); | ||
574 | } | ||
575 | if (bitno >= MAX_LSB_CNT) | ||
576 | return -EINVAL; | ||
577 | n_lsbs--; | ||
578 | } | ||
579 | } | ||
580 | return n_lsbs; | ||
581 | } | ||
582 | |||
583 | /* For each queue, from the most- to least-constrained: | ||
584 | * find an LSB that can be assigned to the queue. If there are N queues that | ||
585 | * can only use M LSBs, where N > M, fail; otherwise, every queue will get a | ||
586 | * dedicated LSB. Remaining LSB regions become a shared resource. | ||
587 | * If we have fewer LSBs than queues, all LSB regions become shared resources. | ||
588 | */ | ||
589 | static int ccp_assign_lsbs(struct ccp_device *ccp) | ||
590 | { | ||
591 | DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT); | ||
592 | DECLARE_BITMAP(qlsb, MAX_LSB_CNT); | ||
593 | int n_lsbs = 0; | ||
594 | int bitno; | ||
595 | int i, lsb_cnt; | ||
596 | int rc = 0; | ||
597 | |||
598 | bitmap_zero(lsb_pub, MAX_LSB_CNT); | ||
599 | |||
600 | /* Create an aggregate bitmap to get a total count of available LSBs */ | ||
601 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
602 | bitmap_or(lsb_pub, | ||
603 | lsb_pub, ccp->cmd_q[i].lsbmask, | ||
604 | MAX_LSB_CNT); | ||
605 | |||
606 | n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT); | ||
607 | |||
608 | if (n_lsbs >= ccp->cmd_q_count) { | ||
609 | /* We have enough LSBs to give every queue a private LSB. | ||
610 | * Brute force search to start with the queues that are more | ||
611 | * constrained in LSB choice. When an LSB is privately | ||
612 | * assigned, it is removed from the public mask. | ||
613 | * This is an ugly N squared algorithm with some optimization. | ||
614 | */ | ||
615 | for (lsb_cnt = 1; | ||
616 | n_lsbs && (lsb_cnt <= MAX_LSB_CNT); | ||
617 | lsb_cnt++) { | ||
618 | rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, | ||
619 | lsb_pub); | ||
620 | if (rc < 0) | ||
621 | return -EINVAL; | ||
622 | n_lsbs = rc; | ||
623 | } | ||
624 | } | ||
625 | |||
626 | rc = 0; | ||
627 | /* What's left of the LSBs, according to the public mask, now become | ||
628 | * shared. Any zero bits in the lsb_pub mask represent an LSB region | ||
629 | * that can't be used as a shared resource, so mark the LSB slots for | ||
630 | * them as "in use". | ||
631 | */ | ||
632 | bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT); | ||
633 | |||
634 | bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); | ||
635 | while (bitno < MAX_LSB_CNT) { | ||
636 | bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); | ||
637 | bitmap_set(qlsb, bitno, 1); | ||
638 | bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); | ||
639 | } | ||
640 | |||
641 | return rc; | ||
642 | } | ||
643 | |||
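A small assumed scenario shows why the passes run from most- to least-constrained queue:

    /* Three queues with hypothetical reachable-LSB sets:
     *     queue 0 -> {1, 2}       weight 2
     *     queue 1 -> {2}          weight 1
     *     queue 2 -> {1, 2, 3}    weight 3
     *
     * lsb_cnt = 1 pass: queue 1 takes LSB 2 (its only option).
     * lsb_cnt = 2 pass: queue 0 takes LSB 1 (2 is gone from lsb_pub).
     * lsb_cnt = 3 pass: queue 2 takes LSB 3.
     *
     * Served in queue order instead, queue 0 could have taken LSB 2
     * and left queue 1 with nothing, failing the assignment.
     */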
644 | static int ccp5_init(struct ccp_device *ccp) | ||
645 | { | ||
646 | struct device *dev = ccp->dev; | ||
647 | struct ccp_cmd_queue *cmd_q; | ||
648 | struct dma_pool *dma_pool; | ||
649 | char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; | ||
650 | unsigned int qmr, qim, i; | ||
651 | u64 status; | ||
652 | u32 status_lo, status_hi; | ||
653 | int ret; | ||
654 | |||
655 | /* Find available queues */ | ||
656 | qim = 0; | ||
657 | qmr = ioread32(ccp->io_regs + Q_MASK_REG); | ||
658 | for (i = 0; i < MAX_HW_QUEUES; i++) { | ||
659 | |||
660 | if (!(qmr & (1 << i))) | ||
661 | continue; | ||
662 | |||
663 | /* Allocate a dma pool for this queue */ | ||
664 | snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", | ||
665 | ccp->name, i); | ||
666 | dma_pool = dma_pool_create(dma_pool_name, dev, | ||
667 | CCP_DMAPOOL_MAX_SIZE, | ||
668 | CCP_DMAPOOL_ALIGN, 0); | ||
669 | if (!dma_pool) { | ||
670 | dev_err(dev, "unable to allocate dma pool\n"); | ||
671 | ret = -ENOMEM; | ||
672 | } | ||
673 | |||
674 | cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; | ||
675 | ccp->cmd_q_count++; | ||
676 | |||
677 | cmd_q->ccp = ccp; | ||
678 | cmd_q->id = i; | ||
679 | cmd_q->dma_pool = dma_pool; | ||
680 | mutex_init(&cmd_q->q_mutex); | ||
681 | |||
682 | /* Page alignment satisfies our needs for N <= 128 */ | ||
683 | BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); | ||
684 | cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); | ||
685 | cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, | ||
686 | &cmd_q->qbase_dma, | ||
687 | GFP_KERNEL); | ||
688 | if (!cmd_q->qbase) { | ||
689 | dev_err(dev, "unable to allocate command queue\n"); | ||
690 | ret = -ENOMEM; | ||
691 | goto e_pool; | ||
692 | } | ||
693 | |||
694 | cmd_q->qidx = 0; | ||
695 | /* Preset some register values and masks that are queue | ||
696 | * number dependent | ||
697 | */ | ||
698 | cmd_q->reg_control = ccp->io_regs + | ||
699 | CMD5_Q_STATUS_INCR * (i + 1); | ||
700 | cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE; | ||
701 | cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE; | ||
702 | cmd_q->reg_int_enable = cmd_q->reg_control + | ||
703 | CMD5_Q_INT_ENABLE_BASE; | ||
704 | cmd_q->reg_interrupt_status = cmd_q->reg_control + | ||
705 | CMD5_Q_INTERRUPT_STATUS_BASE; | ||
706 | cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE; | ||
707 | cmd_q->reg_int_status = cmd_q->reg_control + | ||
708 | CMD5_Q_INT_STATUS_BASE; | ||
709 | cmd_q->reg_dma_status = cmd_q->reg_control + | ||
710 | CMD5_Q_DMA_STATUS_BASE; | ||
711 | cmd_q->reg_dma_read_status = cmd_q->reg_control + | ||
712 | CMD5_Q_DMA_READ_STATUS_BASE; | ||
713 | cmd_q->reg_dma_write_status = cmd_q->reg_control + | ||
714 | CMD5_Q_DMA_WRITE_STATUS_BASE; | ||
715 | |||
716 | init_waitqueue_head(&cmd_q->int_queue); | ||
717 | |||
718 | dev_dbg(dev, "queue #%u available\n", i); | ||
719 | } | ||
720 | if (ccp->cmd_q_count == 0) { | ||
721 | dev_notice(dev, "no command queues available\n"); | ||
722 | ret = -EIO; | ||
723 | goto e_pool; | ||
724 | } | ||
725 | dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); | ||
726 | |||
727 | /* Turn off the queues and disable interrupts until ready */ | ||
728 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
729 | cmd_q = &ccp->cmd_q[i]; | ||
730 | |||
731 | cmd_q->qcontrol = 0; /* Start with nothing */ | ||
732 | iowrite32(cmd_q->qcontrol, cmd_q->reg_control); | ||
733 | |||
734 | /* Disable the interrupts */ | ||
735 | iowrite32(0x00, cmd_q->reg_int_enable); | ||
736 | ioread32(cmd_q->reg_int_status); | ||
737 | ioread32(cmd_q->reg_status); | ||
738 | |||
739 | /* Clear the interrupts */ | ||
740 | iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status); | ||
741 | } | ||
742 | |||
743 | dev_dbg(dev, "Requesting an IRQ...\n"); | ||
744 | /* Request an irq */ | ||
745 | ret = ccp->get_irq(ccp); | ||
746 | if (ret) { | ||
747 | dev_err(dev, "unable to allocate an IRQ\n"); | ||
748 | goto e_pool; | ||
749 | } | ||
750 | |||
751 | /* Initialize the queue used to suspend */ | ||
752 | init_waitqueue_head(&ccp->suspend_queue); | ||
753 | |||
754 | dev_dbg(dev, "Loading LSB map...\n"); | ||
755 | /* Copy the private LSB mask to the public registers */ | ||
756 | status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); | ||
757 | status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); | ||
758 | iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); | ||
759 | iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); | ||
760 | status = ((u64)status_hi<<30) | (u64)status_lo; | ||
761 | |||
762 | dev_dbg(dev, "Configuring virtual queues...\n"); | ||
763 | /* Configure size of each virtual queue accessible to host */ | ||
764 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
765 | u32 dma_addr_lo; | ||
766 | u32 dma_addr_hi; | ||
767 | |||
768 | cmd_q = &ccp->cmd_q[i]; | ||
769 | |||
770 | cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); | ||
771 | cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; | ||
772 | |||
773 | cmd_q->qdma_tail = cmd_q->qbase_dma; | ||
774 | dma_addr_lo = low_address(cmd_q->qdma_tail); | ||
775 | iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo); | ||
776 | iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo); | ||
777 | |||
778 | dma_addr_hi = high_address(cmd_q->qdma_tail); | ||
779 | cmd_q->qcontrol |= (dma_addr_hi << 16); | ||
780 | iowrite32(cmd_q->qcontrol, cmd_q->reg_control); | ||
781 | |||
782 | /* Find the LSB regions accessible to the queue */ | ||
783 | ccp_find_lsb_regions(cmd_q, status); | ||
784 | cmd_q->lsb = -1; /* Unassigned value */ | ||
785 | } | ||
786 | |||
787 | dev_dbg(dev, "Assigning LSBs...\n"); | ||
788 | ret = ccp_assign_lsbs(ccp); | ||
789 | if (ret) { | ||
790 | dev_err(dev, "Unable to assign LSBs (%d)\n", ret); | ||
791 | goto e_irq; | ||
792 | } | ||
793 | |||
794 | /* Optimization: pre-allocate LSB slots for each queue */ | ||
795 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
796 | ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); | ||
797 | ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); | ||
798 | } | ||
799 | |||
800 | dev_dbg(dev, "Starting threads...\n"); | ||
801 | /* Create a kthread for each queue */ | ||
802 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
803 | struct task_struct *kthread; | ||
804 | |||
805 | cmd_q = &ccp->cmd_q[i]; | ||
806 | |||
807 | kthread = kthread_create(ccp_cmd_queue_thread, cmd_q, | ||
808 | "%s-q%u", ccp->name, cmd_q->id); | ||
809 | if (IS_ERR(kthread)) { | ||
810 | dev_err(dev, "error creating queue thread (%ld)\n", | ||
811 | PTR_ERR(kthread)); | ||
812 | ret = PTR_ERR(kthread); | ||
813 | goto e_kthread; | ||
814 | } | ||
815 | |||
816 | cmd_q->kthread = kthread; | ||
817 | wake_up_process(kthread); | ||
818 | } | ||
819 | |||
820 | dev_dbg(dev, "Enabling interrupts...\n"); | ||
821 | /* Enable interrupts */ | ||
822 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
823 | cmd_q = &ccp->cmd_q[i]; | ||
824 | iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable); | ||
825 | } | ||
826 | |||
827 | dev_dbg(dev, "Registering device...\n"); | ||
828 | /* Put this on the unit list to make it available */ | ||
829 | ccp_add_device(ccp); | ||
830 | |||
831 | return 0; | ||
832 | |||
833 | e_kthread: | ||
834 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
835 | if (ccp->cmd_q[i].kthread) | ||
836 | kthread_stop(ccp->cmd_q[i].kthread); | ||
837 | |||
838 | e_irq: | ||
839 | ccp->free_irq(ccp); | ||
840 | |||
841 | e_pool: | ||
842 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
843 | dma_pool_destroy(ccp->cmd_q[i].dma_pool); | ||
844 | |||
845 | return ret; | ||
846 | } | ||
847 | |||
848 | static void ccp5_destroy(struct ccp_device *ccp) | ||
849 | { | ||
850 | struct device *dev = ccp->dev; | ||
851 | struct ccp_cmd_queue *cmd_q; | ||
852 | struct ccp_cmd *cmd; | ||
853 | unsigned int i; | ||
854 | |||
855 | /* Remove this device from the list of available units first */ | ||
856 | ccp_del_device(ccp); | ||
857 | |||
858 | /* Disable and clear interrupts */ | ||
859 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
860 | cmd_q = &ccp->cmd_q[i]; | ||
861 | |||
862 | /* Turn off the run bit */ | ||
863 | iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); | ||
864 | |||
865 | /* Disable the interrupts */ | ||
866 | iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status); | ||
867 | |||
868 | /* Clear the interrupt status */ | ||
869 | iowrite32(0x00, cmd_q->reg_int_enable); | ||
870 | ioread32(cmd_q->reg_int_status); | ||
871 | ioread32(cmd_q->reg_status); | ||
872 | } | ||
873 | |||
874 | /* Stop the queue kthreads */ | ||
875 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
876 | if (ccp->cmd_q[i].kthread) | ||
877 | kthread_stop(ccp->cmd_q[i].kthread); | ||
878 | |||
879 | ccp->free_irq(ccp); | ||
880 | |||
881 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
882 | cmd_q = &ccp->cmd_q[i]; | ||
883 | dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, | ||
884 | cmd_q->qbase_dma); | ||
885 | } | ||
886 | |||
887 | /* Flush the cmd and backlog queue */ | ||
888 | while (!list_empty(&ccp->cmd)) { | ||
889 | /* Invoke the callback directly with an error code */ | ||
890 | cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); | ||
891 | list_del(&cmd->entry); | ||
892 | cmd->callback(cmd->data, -ENODEV); | ||
893 | } | ||
894 | while (!list_empty(&ccp->backlog)) { | ||
895 | /* Invoke the callback directly with an error code */ | ||
896 | cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); | ||
897 | list_del(&cmd->entry); | ||
898 | cmd->callback(cmd->data, -ENODEV); | ||
899 | } | ||
900 | } | ||
901 | |||
902 | static irqreturn_t ccp5_irq_handler(int irq, void *data) | ||
903 | { | ||
904 | struct device *dev = data; | ||
905 | struct ccp_device *ccp = dev_get_drvdata(dev); | ||
906 | u32 status; | ||
907 | unsigned int i; | ||
908 | |||
909 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
910 | struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; | ||
911 | |||
912 | status = ioread32(cmd_q->reg_interrupt_status); | ||
913 | |||
914 | if (status) { | ||
915 | cmd_q->int_status = status; | ||
916 | cmd_q->q_status = ioread32(cmd_q->reg_status); | ||
917 | cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); | ||
918 | |||
919 | /* On error, only save the first error value */ | ||
920 | if ((status & INT_ERROR) && !cmd_q->cmd_error) | ||
921 | cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); | ||
922 | |||
923 | cmd_q->int_rcvd = 1; | ||
924 | |||
925 | /* Acknowledge the interrupt and wake the kthread */ | ||
926 | iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status); | ||
927 | wake_up_interruptible(&cmd_q->int_queue); | ||
928 | } | ||
929 | } | ||
930 | |||
931 | return IRQ_HANDLED; | ||
932 | } | ||
933 | |||
934 | static void ccp5_config(struct ccp_device *ccp) | ||
935 | { | ||
936 | /* Public side */ | ||
937 | iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); | ||
938 | } | ||
939 | |||
940 | static const struct ccp_actions ccp5_actions = { | ||
941 | .aes = ccp5_perform_aes, | ||
942 | .xts_aes = ccp5_perform_xts_aes, | ||
943 | .sha = ccp5_perform_sha, | ||
944 | .rsa = ccp5_perform_rsa, | ||
945 | .passthru = ccp5_perform_passthru, | ||
946 | .ecc = ccp5_perform_ecc, | ||
947 | .sballoc = ccp_lsb_alloc, | ||
948 | .sbfree = ccp_lsb_free, | ||
949 | .init = ccp5_init, | ||
950 | .destroy = ccp5_destroy, | ||
951 | .get_free_slots = ccp5_get_free_slots, | ||
952 | .irqhandler = ccp5_irq_handler, | ||
953 | }; | ||
954 | |||
955 | struct ccp_vdata ccpv5 = { | ||
956 | .version = CCP_VERSION(5, 0), | ||
957 | .setup = ccp5_config, | ||
958 | .perform = &ccp5_actions, | ||
959 | .bar = 2, | ||
960 | .offset = 0x0, | ||
961 | }; | ||
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index de907029c6ee..5ff4a73e3bd4 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -61,7 +61,62 @@ | |||
61 | #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f) | 61 | #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f) |
62 | #define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f) | 62 | #define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f) |
63 | 63 | ||
64 | /****** REQ0 Related Values ******/ | 64 | /* ------------------------ CCP Version 5 Specifics ------------------------ */ |
65 | #define CMD5_QUEUE_MASK_OFFSET 0x00 | ||
66 | #define CMD5_REQID_CONFIG_OFFSET 0x08 | ||
67 | #define LSB_PUBLIC_MASK_LO_OFFSET 0x18 | ||
68 | #define LSB_PUBLIC_MASK_HI_OFFSET 0x1C | ||
69 | #define LSB_PRIVATE_MASK_LO_OFFSET 0x20 | ||
70 | #define LSB_PRIVATE_MASK_HI_OFFSET 0x24 | ||
71 | |||
72 | #define CMD5_Q_CONTROL_BASE 0x0000 | ||
73 | #define CMD5_Q_TAIL_LO_BASE 0x0004 | ||
74 | #define CMD5_Q_HEAD_LO_BASE 0x0008 | ||
75 | #define CMD5_Q_INT_ENABLE_BASE 0x000C | ||
76 | #define CMD5_Q_INTERRUPT_STATUS_BASE 0x0010 | ||
77 | |||
78 | #define CMD5_Q_STATUS_BASE 0x0100 | ||
79 | #define CMD5_Q_INT_STATUS_BASE 0x0104 | ||
80 | #define CMD5_Q_DMA_STATUS_BASE 0x0108 | ||
81 | #define CMD5_Q_DMA_READ_STATUS_BASE 0x010C | ||
82 | #define CMD5_Q_DMA_WRITE_STATUS_BASE 0x0110 | ||
83 | #define CMD5_Q_ABORT_BASE 0x0114 | ||
84 | #define CMD5_Q_AX_CACHE_BASE 0x0118 | ||
85 | |||
86 | /* Address offset between two virtual queue registers */ | ||
87 | #define CMD5_Q_STATUS_INCR 0x1000 | ||
88 | |||
89 | /* Bit masks */ | ||
90 | #define CMD5_Q_RUN 0x1 | ||
91 | #define CMD5_Q_HALT 0x2 | ||
92 | #define CMD5_Q_MEM_LOCATION 0x4 | ||
93 | #define CMD5_Q_SIZE 0x1F | ||
94 | #define CMD5_Q_SHIFT 3 | ||
95 | #define COMMANDS_PER_QUEUE 16 | ||
96 | #define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ | ||
97 | CMD5_Q_SIZE) | ||
98 | #define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) | ||
99 | #define Q_DESC_SIZE sizeof(struct ccp5_desc) | ||
100 | #define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) | ||
101 | |||
102 | #define INT_COMPLETION 0x1 | ||
103 | #define INT_ERROR 0x2 | ||
104 | #define INT_QUEUE_STOPPED 0x4 | ||
105 | #define ALL_INTERRUPTS (INT_COMPLETION| \ | ||
106 | INT_ERROR| \ | ||
107 | INT_QUEUE_STOPPED) | ||
108 | |||
109 | #define LSB_REGION_WIDTH 5 | ||
110 | #define MAX_LSB_CNT 8 | ||
111 | |||
112 | #define LSB_SIZE 16 | ||
113 | #define LSB_ITEM_SIZE 32 | ||
114 | #define PLSB_MAP_SIZE (LSB_SIZE) | ||
115 | #define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE) | ||
116 | |||
117 | #define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE) | ||
118 | |||
119 | /* ------------------------ CCP Version 3 Specifics ------------------------ */ | ||
65 | #define REQ0_WAIT_FOR_WRITE 0x00000004 | 120 | #define REQ0_WAIT_FOR_WRITE 0x00000004 |
66 | #define REQ0_INT_ON_COMPLETE 0x00000002 | 121 | #define REQ0_INT_ON_COMPLETE 0x00000002 |
67 | #define REQ0_STOP_ON_COMPLETE 0x00000001 | 122 | #define REQ0_STOP_ON_COMPLETE 0x00000001 |
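The queue-size encoding is easiest to see with the numbers plugged in (assuming struct ccp5_desc packs to exactly eight 32-bit words, as its comment further down in this header states):

    /* COMMANDS_PER_QUEUE = 16:
     *     QUEUE_SIZE_VAL      = (ffs(16) - 2) & 0x1F = (5 - 2) & 0x1F = 3
     *     Q_DESC_SIZE         = sizeof(struct ccp5_desc) = 8 * 4 = 32 bytes
     *     Q_SIZE(Q_DESC_SIZE) = 16 * 32 = 512 bytes per descriptor ring
     *
     * which is why page-aligned queue memory satisfies the alignment
     * requirement (see the BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128) in
     * ccp5_init()).
     */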
@@ -115,6 +170,8 @@ | |||
115 | 170 | ||
116 | #define CCP_JOBID_MASK 0x0000003f | 171 | #define CCP_JOBID_MASK 0x0000003f |
117 | 172 | ||
173 | /* ------------------------ General CCP Defines ------------------------ */ | ||
174 | |||
118 | #define CCP_DMAPOOL_MAX_SIZE 64 | 175 | #define CCP_DMAPOOL_MAX_SIZE 64 |
119 | #define CCP_DMAPOOL_ALIGN BIT(5) | 176 | #define CCP_DMAPOOL_ALIGN BIT(5) |
120 | 177 | ||
@@ -149,6 +206,7 @@ | |||
149 | struct ccp_op; | 206 | struct ccp_op; |
150 | struct ccp_device; | 207 | struct ccp_device; |
151 | struct ccp_cmd; | 208 | struct ccp_cmd; |
209 | struct ccp_fns; | ||
152 | 210 | ||
153 | struct ccp_dma_cmd { | 211 | struct ccp_dma_cmd { |
154 | struct list_head entry; | 212 | struct list_head entry; |
@@ -192,10 +250,30 @@ struct ccp_cmd_queue { | |||
192 | /* Queue dma pool */ | 250 | /* Queue dma pool */ |
193 | struct dma_pool *dma_pool; | 251 | struct dma_pool *dma_pool; |
194 | 252 | ||
253 | /* Queue base address (not necessarily aligned) */ | ||
254 | struct ccp5_desc *qbase; | ||
255 | |||
256 | /* Aligned queue start address (per requirement) */ | ||
257 | struct mutex q_mutex ____cacheline_aligned; | ||
258 | unsigned int qidx; | ||
259 | |||
260 | /* Version 5 has different requirements for queue memory */ | ||
261 | unsigned int qsize; | ||
262 | dma_addr_t qbase_dma; | ||
263 | dma_addr_t qdma_tail; | ||
264 | |||
195 | /* Per-queue reserved storage block(s) */ | 265 | /* Per-queue reserved storage block(s) */ |
196 | u32 sb_key; | 266 | u32 sb_key; |
197 | u32 sb_ctx; | 267 | u32 sb_ctx; |
198 | 268 | ||
269 | /* Bitmap of LSBs that can be accessed by this queue */ | ||
270 | DECLARE_BITMAP(lsbmask, MAX_LSB_CNT); | ||
271 | /* Private LSB that is assigned to this queue, or -1 if none. | ||
272 | * Bitmap for my private LSB, unused otherwise | ||
273 | */ | ||
274 | unsigned int lsb; | ||
275 | DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE); | ||
276 | |||
199 | /* Queue processing thread */ | 277 | /* Queue processing thread */ |
200 | struct task_struct *kthread; | 278 | struct task_struct *kthread; |
201 | unsigned int active; | 279 | unsigned int active; |
@@ -209,8 +287,17 @@ struct ccp_cmd_queue { | |||
209 | u32 int_err; | 287 | u32 int_err; |
210 | 288 | ||
211 | /* Register addresses for queue */ | 289 | /* Register addresses for queue */ |
290 | void __iomem *reg_control; | ||
291 | void __iomem *reg_tail_lo; | ||
292 | void __iomem *reg_head_lo; | ||
293 | void __iomem *reg_int_enable; | ||
294 | void __iomem *reg_interrupt_status; | ||
212 | void __iomem *reg_status; | 295 | void __iomem *reg_status; |
213 | void __iomem *reg_int_status; | 296 | void __iomem *reg_int_status; |
297 | void __iomem *reg_dma_status; | ||
298 | void __iomem *reg_dma_read_status; | ||
299 | void __iomem *reg_dma_write_status; | ||
300 | u32 qcontrol; /* Cached control register */ | ||
214 | 301 | ||
215 | /* Status values from job */ | 302 | /* Status values from job */ |
216 | u32 int_status; | 303 | u32 int_status; |
@@ -306,6 +393,9 @@ struct ccp_device { | |||
306 | unsigned int sb_count; | 393 | unsigned int sb_count; |
307 | u32 sb_start; | 394 | u32 sb_start; |
308 | 395 | ||
396 | /* Bitmap of shared LSBs, if any */ | ||
397 | DECLARE_BITMAP(lsbmap, SLSB_MAP_SIZE); | ||
398 | |||
309 | /* Suspend support */ | 399 | /* Suspend support */ |
310 | unsigned int suspending; | 400 | unsigned int suspending; |
311 | wait_queue_head_t suspend_queue; | 401 | wait_queue_head_t suspend_queue; |
@@ -320,6 +410,7 @@ enum ccp_memtype { | |||
320 | CCP_MEMTYPE_LOCAL, | 410 | CCP_MEMTYPE_LOCAL, |
321 | CCP_MEMTYPE__LAST, | 411 | CCP_MEMTYPE__LAST, |
322 | }; | 412 | }; |
413 | #define CCP_MEMTYPE_LSB CCP_MEMTYPE_KSB | ||
323 | 414 | ||
324 | struct ccp_dma_info { | 415 | struct ccp_dma_info { |
325 | dma_addr_t address; | 416 | dma_addr_t address; |
@@ -407,6 +498,7 @@ struct ccp_op { | |||
407 | 498 | ||
408 | struct ccp_mem src; | 499 | struct ccp_mem src; |
409 | struct ccp_mem dst; | 500 | struct ccp_mem dst; |
501 | struct ccp_mem exp; | ||
410 | 502 | ||
411 | union { | 503 | union { |
412 | struct ccp_aes_op aes; | 504 | struct ccp_aes_op aes; |
@@ -416,6 +508,7 @@ struct ccp_op { | |||
416 | struct ccp_passthru_op passthru; | 508 | struct ccp_passthru_op passthru; |
417 | struct ccp_ecc_op ecc; | 509 | struct ccp_ecc_op ecc; |
418 | } u; | 510 | } u; |
511 | struct ccp_mem key; | ||
419 | }; | 512 | }; |
420 | 513 | ||
421 | static inline u32 ccp_addr_lo(struct ccp_dma_info *info) | 514 | static inline u32 ccp_addr_lo(struct ccp_dma_info *info) |
@@ -428,6 +521,70 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info) | |||
428 | return upper_32_bits(info->address + info->offset) & 0x0000ffff; | 521 | return upper_32_bits(info->address + info->offset) & 0x0000ffff; |
429 | } | 522 | } |
430 | 523 | ||
524 | /** | ||
525 | * descriptor for version 5 CCP commands | ||
526 | * 8 32-bit words: | ||
527 | * word 0: function; engine; control bits | ||
528 | * word 1: length of source data | ||
529 | * word 2: low 32 bits of source pointer | ||
530 | * word 3: upper 16 bits of source pointer; source memory type | ||
531 | * word 4: low 32 bits of destination pointer | ||
532 | * word 5: upper 16 bits of destination pointer; destination memory type | ||
533 | * word 6: low 32 bits of key pointer | ||
534 | * word 7: upper 16 bits of key pointer; key memory type | ||
535 | */ | ||
536 | struct dword0 { | ||
537 | __le32 soc:1; | ||
538 | __le32 ioc:1; | ||
539 | __le32 rsvd1:1; | ||
540 | __le32 init:1; | ||
541 | __le32 eom:1; /* AES/SHA only */ | ||
542 | __le32 function:15; | ||
543 | __le32 engine:4; | ||
544 | __le32 prot:1; | ||
545 | __le32 rsvd2:7; | ||
546 | }; | ||
547 | |||
548 | struct dword3 { | ||
549 | __le32 src_hi:16; | ||
550 | __le32 src_mem:2; | ||
551 | __le32 lsb_cxt_id:8; | ||
552 | __le32 rsvd1:5; | ||
553 | __le32 fixed:1; | ||
554 | }; | ||
555 | |||
556 | union dword4 { | ||
557 | __le32 dst_lo; /* NON-SHA */ | ||
558 | __le32 sha_len_lo; /* SHA */ | ||
559 | }; | ||
560 | |||
561 | union dword5 { | ||
562 | struct { | ||
563 | __le32 dst_hi:16; | ||
564 | __le32 dst_mem:2; | ||
565 | __le32 rsvd1:13; | ||
566 | __le32 fixed:1; | ||
567 | } fields; | ||
568 | __le32 sha_len_hi; | ||
569 | }; | ||
570 | |||
571 | struct dword7 { | ||
572 | __le32 key_hi:16; | ||
573 | __le32 key_mem:2; | ||
574 | __le32 rsvd1:14; | ||
575 | }; | ||
576 | |||
577 | struct ccp5_desc { | ||
578 | struct dword0 dw0; | ||
579 | __le32 length; | ||
580 | __le32 src_lo; | ||
581 | struct dword3 dw3; | ||
582 | union dword4 dw4; | ||
583 | union dword5 dw5; | ||
584 | __le32 key_lo; | ||
585 | struct dword7 dw7; | ||
586 | }; | ||
587 | |||
431 | int ccp_pci_init(void); | 588 | int ccp_pci_init(void); |
432 | void ccp_pci_exit(void); | 589 | void ccp_pci_exit(void); |
433 | 590 | ||
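Since the queue arithmetic above depends on the descriptor being exactly eight 32-bit words, a compile-time check along these lines (not part of the patch, shown only as a sketch) would pin the layout:

    BUILD_BUG_ON(sizeof(struct ccp5_desc) != 8 * sizeof(__le32));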
@@ -466,13 +623,14 @@ struct ccp_actions { | |||
466 | 623 | ||
467 | /* Structure to hold CCP version-specific values */ | 624 | /* Structure to hold CCP version-specific values */ |
468 | struct ccp_vdata { | 625 | struct ccp_vdata { |
469 | unsigned int version; | 626 | const unsigned int version; |
470 | int (*init)(struct ccp_device *); | 627 | void (*setup)(struct ccp_device *); |
471 | const struct ccp_actions *perform; | 628 | const struct ccp_actions *perform; |
472 | const unsigned int bar; | 629 | const unsigned int bar; |
473 | const unsigned int offset; | 630 | const unsigned int offset; |
474 | }; | 631 | }; |
475 | 632 | ||
476 | extern struct ccp_vdata ccpv3; | 633 | extern struct ccp_vdata ccpv3; |
634 | extern struct ccp_vdata ccpv5; | ||
477 | 635 | ||
478 | #endif | 636 | #endif |
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index fdab0ae4f7c9..50fae4442801 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -21,26 +21,29 @@ | |||
21 | #include "ccp-dev.h" | 21 | #include "ccp-dev.h" |
22 | 22 | ||
23 | /* SHA initial context values */ | 23 | /* SHA initial context values */ |
24 | static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | 24 | static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = { |
25 | cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), | 25 | cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), |
26 | cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), | 26 | cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), |
27 | cpu_to_be32(SHA1_H4), 0, 0, 0, | 27 | cpu_to_be32(SHA1_H4), |
28 | }; | 28 | }; |
29 | 29 | ||
30 | static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | 30 | static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { |
31 | cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), | 31 | cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), |
32 | cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), | 32 | cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), |
33 | cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), | 33 | cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), |
34 | cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), | 34 | cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | 37 | static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { |
38 | cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), | 38 | cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), |
39 | cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), | 39 | cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), |
40 | cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), | 40 | cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), |
41 | cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), | 41 | cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), |
42 | }; | 42 | }; |
43 | 43 | ||
44 | #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \ | ||
45 | ccp_gen_jobid(ccp) : 0) | ||
46 | |||
44 | static u32 ccp_gen_jobid(struct ccp_device *ccp) | 47 | static u32 ccp_gen_jobid(struct ccp_device *ccp) |
45 | { | 48 | { |
46 | return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; | 49 | return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; |
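On a v3 device every operation gets a rolling six-bit job id; a v5 device does not track job ids at all, so the macro short-circuits to zero. The open-coded equivalent:

    u32 jobid = (ccp->vdata->version == CCP_VERSION(3, 0))
                    ? ccp_gen_jobid(ccp)  /* 0..63, wraps via CCP_JOBID_MASK */
                    : 0;                  /* unused by v5 hardware */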
@@ -487,7 +490,7 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, | |||
487 | ret = -EIO; | 490 | ret = -EIO; |
488 | memset(&op, 0, sizeof(op)); | 491 | memset(&op, 0, sizeof(op)); |
489 | op.cmd_q = cmd_q; | 492 | op.cmd_q = cmd_q; |
490 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 493 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
491 | op.sb_key = cmd_q->sb_key; | 494 | op.sb_key = cmd_q->sb_key; |
492 | op.sb_ctx = cmd_q->sb_ctx; | 495 | op.sb_ctx = cmd_q->sb_ctx; |
493 | op.init = 1; | 496 | op.init = 1; |
@@ -640,7 +643,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
640 | ret = -EIO; | 643 | ret = -EIO; |
641 | memset(&op, 0, sizeof(op)); | 644 | memset(&op, 0, sizeof(op)); |
642 | op.cmd_q = cmd_q; | 645 | op.cmd_q = cmd_q; |
643 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 646 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
644 | op.sb_key = cmd_q->sb_key; | 647 | op.sb_key = cmd_q->sb_key; |
645 | op.sb_ctx = cmd_q->sb_ctx; | 648 | op.sb_ctx = cmd_q->sb_ctx; |
646 | op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; | 649 | op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; |
@@ -679,7 +682,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
679 | goto e_key; | 682 | goto e_key; |
680 | 683 | ||
681 | if (aes->mode != CCP_AES_MODE_ECB) { | 684 | if (aes->mode != CCP_AES_MODE_ECB) { |
682 | /* Load the AES context - conver to LE */ | 685 | /* Load the AES context - convert to LE */ |
683 | dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; | 686 | dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; |
684 | ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); | 687 | ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); |
685 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, | 688 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
@@ -817,7 +820,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, | |||
817 | ret = -EIO; | 820 | ret = -EIO; |
818 | memset(&op, 0, sizeof(op)); | 821 | memset(&op, 0, sizeof(op)); |
819 | op.cmd_q = cmd_q; | 822 | op.cmd_q = cmd_q; |
820 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 823 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
821 | op.sb_key = cmd_q->sb_key; | 824 | op.sb_key = cmd_q->sb_key; |
822 | op.sb_ctx = cmd_q->sb_ctx; | 825 | op.sb_ctx = cmd_q->sb_ctx; |
823 | op.init = 1; | 826 | op.init = 1; |
@@ -936,98 +939,154 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
936 | struct ccp_dm_workarea ctx; | 939 | struct ccp_dm_workarea ctx; |
937 | struct ccp_data src; | 940 | struct ccp_data src; |
938 | struct ccp_op op; | 941 | struct ccp_op op; |
942 | unsigned int ioffset, ooffset; | ||
943 | unsigned int digest_size; | ||
944 | int sb_count; | ||
945 | const void *init; | ||
946 | u64 block_size; | ||
947 | int ctx_size; | ||
939 | int ret; | 948 | int ret; |
940 | 949 | ||
941 | if (sha->ctx_len != CCP_SHA_CTXSIZE) | 950 | switch (sha->type) { |
951 | case CCP_SHA_TYPE_1: | ||
952 | if (sha->ctx_len < SHA1_DIGEST_SIZE) | ||
953 | return -EINVAL; | ||
954 | block_size = SHA1_BLOCK_SIZE; | ||
955 | break; | ||
956 | case CCP_SHA_TYPE_224: | ||
957 | if (sha->ctx_len < SHA224_DIGEST_SIZE) | ||
958 | return -EINVAL; | ||
959 | block_size = SHA224_BLOCK_SIZE; | ||
960 | break; | ||
961 | case CCP_SHA_TYPE_256: | ||
962 | if (sha->ctx_len < SHA256_DIGEST_SIZE) | ||
963 | return -EINVAL; | ||
964 | block_size = SHA256_BLOCK_SIZE; | ||
965 | break; | ||
966 | default: | ||
942 | return -EINVAL; | 967 | return -EINVAL; |
968 | } | ||
943 | 969 | ||
944 | if (!sha->ctx) | 970 | if (!sha->ctx) |
945 | return -EINVAL; | 971 | return -EINVAL; |
946 | 972 | ||
947 | if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1))) | 973 | if (!sha->final && (sha->src_len & (block_size - 1))) |
948 | return -EINVAL; | 974 | return -EINVAL; |
949 | 975 | ||
950 | if (!sha->src_len) { | 976 | /* The version 3 device can't handle zero-length input */ |
951 | const u8 *sha_zero; | 977 | if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { |
952 | 978 | ||
953 | /* Not final, just return */ | 979 | if (!sha->src_len) { |
954 | if (!sha->final) | 980 | unsigned int digest_len; |
955 | return 0; | 981 | const u8 *sha_zero; |
956 | 982 | ||
957 | /* CCP can't do a zero length sha operation so the caller | 983 | /* Not final, just return */ |
958 | * must buffer the data. | 984 | if (!sha->final) |
959 | */ | 985 | return 0; |
960 | if (sha->msg_bits) | ||
961 | return -EINVAL; | ||
962 | 986 | ||
963 | /* The CCP cannot perform zero-length sha operations so the | 987 | /* CCP can't do a zero length sha operation so the |
964 | * caller is required to buffer data for the final operation. | 988 | * caller must buffer the data. |
965 | * However, a sha operation for a message with a total length | 989 | */ |
966 | * of zero is valid so known values are required to supply | 990 | if (sha->msg_bits) |
967 | * the result. | 991 | return -EINVAL; |
968 | */ | ||
969 | switch (sha->type) { | ||
970 | case CCP_SHA_TYPE_1: | ||
971 | sha_zero = sha1_zero_message_hash; | ||
972 | break; | ||
973 | case CCP_SHA_TYPE_224: | ||
974 | sha_zero = sha224_zero_message_hash; | ||
975 | break; | ||
976 | case CCP_SHA_TYPE_256: | ||
977 | sha_zero = sha256_zero_message_hash; | ||
978 | break; | ||
979 | default: | ||
980 | return -EINVAL; | ||
981 | } | ||
982 | 992 | ||
983 | scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, | 993 | /* The CCP cannot perform zero-length sha operations |
984 | sha->ctx_len, 1); | 994 | * so the caller is required to buffer data for the |
995 | * final operation. However, a sha operation for a | ||
996 | * message with a total length of zero is valid so | ||
997 | * known values are required to supply the result. | ||
998 | */ | ||
999 | switch (sha->type) { | ||
1000 | case CCP_SHA_TYPE_1: | ||
1001 | sha_zero = sha1_zero_message_hash; | ||
1002 | digest_len = SHA1_DIGEST_SIZE; | ||
1003 | break; | ||
1004 | case CCP_SHA_TYPE_224: | ||
1005 | sha_zero = sha224_zero_message_hash; | ||
1006 | digest_len = SHA224_DIGEST_SIZE; | ||
1007 | break; | ||
1008 | case CCP_SHA_TYPE_256: | ||
1009 | sha_zero = sha256_zero_message_hash; | ||
1010 | digest_len = SHA256_DIGEST_SIZE; | ||
1011 | break; | ||
1012 | default: | ||
1013 | return -EINVAL; | ||
1014 | } | ||
985 | 1015 | ||
986 | return 0; | 1016 | scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, |
1017 | digest_len, 1); | ||
1018 | |||
1019 | return 0; | ||
1020 | } | ||
987 | } | 1021 | } |
988 | 1022 | ||
989 | if (!sha->src) | 1023 | /* Set variables used throughout */ |
990 | return -EINVAL; | 1024 | switch (sha->type) { |
1025 | case CCP_SHA_TYPE_1: | ||
1026 | digest_size = SHA1_DIGEST_SIZE; | ||
1027 | init = (void *) ccp_sha1_init; | ||
1028 | ctx_size = SHA1_DIGEST_SIZE; | ||
1029 | sb_count = 1; | ||
1030 | if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) | ||
1031 | ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; | ||
1032 | else | ||
1033 | ooffset = ioffset = 0; | ||
1034 | break; | ||
1035 | case CCP_SHA_TYPE_224: | ||
1036 | digest_size = SHA224_DIGEST_SIZE; | ||
1037 | init = (void *) ccp_sha224_init; | ||
1038 | ctx_size = SHA256_DIGEST_SIZE; | ||
1039 | sb_count = 1; | ||
1040 | ioffset = 0; | ||
1041 | if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) | ||
1042 | ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; | ||
1043 | else | ||
1044 | ooffset = 0; | ||
1045 | break; | ||
1046 | case CCP_SHA_TYPE_256: | ||
1047 | digest_size = SHA256_DIGEST_SIZE; | ||
1048 | init = (void *) ccp_sha256_init; | ||
1049 | ctx_size = SHA256_DIGEST_SIZE; | ||
1050 | sb_count = 1; | ||
1051 | ooffset = ioffset = 0; | ||
1052 | break; | ||
1053 | default: | ||
1054 | ret = -EINVAL; | ||
1055 | goto e_data; | ||
1056 | } | ||
991 | 1057 | ||
992 | BUILD_BUG_ON(CCP_SHA_SB_COUNT != 1); | 1058 | /* For zero-length plaintext the src pointer is ignored; |
1059 | * otherwise both parts must be valid | ||
1060 | */ | ||
1061 | if (sha->src_len && !sha->src) | ||
1062 | return -EINVAL; | ||
993 | 1063 | ||
994 | memset(&op, 0, sizeof(op)); | 1064 | memset(&op, 0, sizeof(op)); |
995 | op.cmd_q = cmd_q; | 1065 | op.cmd_q = cmd_q; |
996 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1066 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
997 | op.sb_ctx = cmd_q->sb_ctx; | 1067 | op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ |
998 | op.u.sha.type = sha->type; | 1068 | op.u.sha.type = sha->type; |
999 | op.u.sha.msg_bits = sha->msg_bits; | 1069 | op.u.sha.msg_bits = sha->msg_bits; |
1000 | 1070 | ||
1001 | /* The SHA context fits in a single (32-byte) SB entry and | 1071 | ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES, |
1002 | * must be in little endian format. Use the 256-bit byte swap | ||
1003 | * passthru option to convert from big endian to little endian. | ||
1004 | */ | ||
1005 | ret = ccp_init_dm_workarea(&ctx, cmd_q, | ||
1006 | CCP_SHA_SB_COUNT * CCP_SB_BYTES, | ||
1007 | DMA_BIDIRECTIONAL); | 1072 | DMA_BIDIRECTIONAL); |
1008 | if (ret) | 1073 | if (ret) |
1009 | return ret; | 1074 | return ret; |
1010 | |||
1011 | if (sha->first) { | 1075 | if (sha->first) { |
1012 | const __be32 *init; | ||
1013 | |||
1014 | switch (sha->type) { | 1076 | switch (sha->type) { |
1015 | case CCP_SHA_TYPE_1: | 1077 | case CCP_SHA_TYPE_1: |
1016 | init = ccp_sha1_init; | ||
1017 | break; | ||
1018 | case CCP_SHA_TYPE_224: | 1078 | case CCP_SHA_TYPE_224: |
1019 | init = ccp_sha224_init; | ||
1020 | break; | ||
1021 | case CCP_SHA_TYPE_256: | 1079 | case CCP_SHA_TYPE_256: |
1022 | init = ccp_sha256_init; | 1080 | memcpy(ctx.address + ioffset, init, ctx_size); |
1023 | break; | 1081 | break; |
1024 | default: | 1082 | default: |
1025 | ret = -EINVAL; | 1083 | ret = -EINVAL; |
1026 | goto e_ctx; | 1084 | goto e_ctx; |
1027 | } | 1085 | } |
1028 | memcpy(ctx.address, init, CCP_SHA_CTXSIZE); | ||
1029 | } else { | 1086 | } else { |
1030 | ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); | 1087 | /* Restore the context */ |
1088 | ccp_set_dm_area(&ctx, 0, sha->ctx, 0, | ||
1089 | sb_count * CCP_SB_BYTES); | ||
1031 | } | 1090 | } |
1032 | 1091 | ||
1033 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, | 1092 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
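Editor's note: the heart of the SHA rewrite is the "set variables used throughout" switch above — digest size, stored-context size, and buffer offsets now depend on both the SHA type and the device generation. SHA-224 keeps a full SHA-256-sized context (it runs the SHA-256 compression with a different IV and truncates the output), which is also why ccp_sha224_init was re-dimensioned to SHA256_DIGEST_SIZE. On a v5 part the result is read back right-aligned within the 32-byte storage-block entry; v3 keeps everything at offset 0, and the input offset follows its own per-type rule in the switch. A table-driven sketch of the output-offset rule, with illustrative my_* names and the standard sizes from <crypto/sha.h>:

    #include <stdbool.h>
    #include <stddef.h>

    #define MY_SB_BYTES 32  /* one storage-block entry, as CCP_SB_BYTES */

    enum my_sha_type { MY_SHA1, MY_SHA224, MY_SHA256 };

    struct my_sha_params {
        size_t digest_size;  /* bytes the caller gets back */
        size_t ctx_size;     /* bytes of state kept between passes */
        size_t block_size;   /* input granularity for non-final passes */
    };

    static const struct my_sha_params my_params[] = {
        [MY_SHA1]   = { .digest_size = 20, .ctx_size = 20, .block_size = 64 },
        [MY_SHA224] = { .digest_size = 28, .ctx_size = 32, .block_size = 64 },
        [MY_SHA256] = { .digest_size = 32, .ctx_size = 32, .block_size = 64 },
    };

    /* where the digest starts inside the 32-byte entry when it is read back:
     * right-aligned on v5 (SHA-256 lands at 0 either way), always 0 on v3 */
    static size_t my_out_offset(enum my_sha_type t, bool v3)
    {
        return v3 ? 0 : MY_SB_BYTES - my_params[t].digest_size;
    }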
@@ -1037,24 +1096,33 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1037 | goto e_ctx; | 1096 | goto e_ctx; |
1038 | } | 1097 | } |
1039 | 1098 | ||
1040 | /* Send data to the CCP SHA engine */ | 1099 | if (sha->src) { |
1041 | ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, | 1100 | /* Send data to the CCP SHA engine; block_size is set above */ |
1042 | CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE); | 1101 | ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, |
1043 | if (ret) | 1102 | block_size, DMA_TO_DEVICE); |
1044 | goto e_ctx; | 1103 | if (ret) |
1104 | goto e_ctx; | ||
1045 | 1105 | ||
1046 | while (src.sg_wa.bytes_left) { | 1106 | while (src.sg_wa.bytes_left) { |
1047 | ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false); | 1107 | ccp_prepare_data(&src, NULL, &op, block_size, false); |
1048 | if (sha->final && !src.sg_wa.bytes_left) | 1108 | if (sha->final && !src.sg_wa.bytes_left) |
1049 | op.eom = 1; | 1109 | op.eom = 1; |
1110 | |||
1111 | ret = cmd_q->ccp->vdata->perform->sha(&op); | ||
1112 | if (ret) { | ||
1113 | cmd->engine_error = cmd_q->cmd_error; | ||
1114 | goto e_data; | ||
1115 | } | ||
1050 | 1116 | ||
1117 | ccp_process_data(&src, NULL, &op); | ||
1118 | } | ||
1119 | } else { | ||
1120 | op.eom = 1; | ||
1051 | ret = cmd_q->ccp->vdata->perform->sha(&op); | 1121 | ret = cmd_q->ccp->vdata->perform->sha(&op); |
1052 | if (ret) { | 1122 | if (ret) { |
1053 | cmd->engine_error = cmd_q->cmd_error; | 1123 | cmd->engine_error = cmd_q->cmd_error; |
1054 | goto e_data; | 1124 | goto e_data; |
1055 | } | 1125 | } |
1056 | |||
1057 | ccp_process_data(&src, NULL, &op); | ||
1058 | } | 1126 | } |
1059 | 1127 | ||
1060 | /* Retrieve the SHA context - convert from LE to BE using | 1128 | /* Retrieve the SHA context - convert from LE to BE using |
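Editor's note: with sizing settled, the data path forks. When input is present it is fed to the engine one block_size chunk at a time, with end-of-message raised on the last chunk of a final pass; a final command with no data at all — which v5 accepts directly — becomes a single EOM-only operation. (v3 instead returns the precomputed empty-message digest earlier in the function, e.g. e3b0c442…7852b855 for SHA-256.) A simplified, self-contained sketch of that control flow; my_submit() merely stands in for vdata->perform->sha():

    #include <stddef.h>

    struct my_sha_job {
        size_t src_len;  /* bytes of input supplied by the caller */
        int    final;    /* last pass of the message? */
        int    eom;      /* tell the engine the message ends here */
    };

    /* stub standing in for the hardware submit call */
    static int my_submit(struct my_sha_job *job, size_t chunk)
    {
        (void)job; (void)chunk;
        return 0;
    }

    static int my_run_sha(struct my_sha_job *job, size_t block_size)
    {
        int ret;

        if (job->src_len) {
            size_t left = job->src_len;

            /* feed the engine one block-sized chunk at a time */
            while (left) {
                size_t chunk = left < block_size ? left : block_size;

                left -= chunk;
                /* raise EOM only on the last chunk of a final pass */
                job->eom = job->final && !left;
                ret = my_submit(job, chunk);
                if (ret)
                    return ret;
            }
        } else {
            /* final pass with no data at all: one EOM-only operation */
            job->eom = 1;
            ret = my_submit(job, 0);
            if (ret)
                return ret;
        }
        return 0;
    }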
@@ -1067,32 +1135,31 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1067 | goto e_data; | 1135 | goto e_data; |
1068 | } | 1136 | } |
1069 | 1137 | ||
1070 | ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); | 1138 | if (sha->final) { |
1071 | 1139 | /* Finishing up, so get the digest */ | |
1072 | if (sha->final && sha->opad) { | ||
1073 | /* HMAC operation, recursively perform final SHA */ | ||
1074 | struct ccp_cmd hmac_cmd; | ||
1075 | struct scatterlist sg; | ||
1076 | u64 block_size, digest_size; | ||
1077 | u8 *hmac_buf; | ||
1078 | |||
1079 | switch (sha->type) { | 1140 | switch (sha->type) { |
1080 | case CCP_SHA_TYPE_1: | 1141 | case CCP_SHA_TYPE_1: |
1081 | block_size = SHA1_BLOCK_SIZE; | ||
1082 | digest_size = SHA1_DIGEST_SIZE; | ||
1083 | break; | ||
1084 | case CCP_SHA_TYPE_224: | 1142 | case CCP_SHA_TYPE_224: |
1085 | block_size = SHA224_BLOCK_SIZE; | ||
1086 | digest_size = SHA224_DIGEST_SIZE; | ||
1087 | break; | ||
1088 | case CCP_SHA_TYPE_256: | 1143 | case CCP_SHA_TYPE_256: |
1089 | block_size = SHA256_BLOCK_SIZE; | 1144 | ccp_get_dm_area(&ctx, ooffset, |
1090 | digest_size = SHA256_DIGEST_SIZE; | 1145 | sha->ctx, 0, |
1146 | digest_size); | ||
1091 | break; | 1147 | break; |
1092 | default: | 1148 | default: |
1093 | ret = -EINVAL; | 1149 | ret = -EINVAL; |
1094 | goto e_data; | 1150 | goto e_ctx; |
1095 | } | 1151 | } |
1152 | } else { | ||
1153 | /* Stash the context */ | ||
1154 | ccp_get_dm_area(&ctx, 0, sha->ctx, 0, | ||
1155 | sb_count * CCP_SB_BYTES); | ||
1156 | } | ||
1157 | |||
1158 | if (sha->final && sha->opad) { | ||
1159 | /* HMAC operation, recursively perform final SHA */ | ||
1160 | struct ccp_cmd hmac_cmd; | ||
1161 | struct scatterlist sg; | ||
1162 | u8 *hmac_buf; | ||
1096 | 1163 | ||
1097 | if (sha->opad_len != block_size) { | 1164 | if (sha->opad_len != block_size) { |
1098 | ret = -EINVAL; | 1165 | ret = -EINVAL; |
@@ -1107,7 +1174,18 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1107 | sg_init_one(&sg, hmac_buf, block_size + digest_size); | 1174 | sg_init_one(&sg, hmac_buf, block_size + digest_size); |
1108 | 1175 | ||
1109 | scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); | 1176 | scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); |
1110 | memcpy(hmac_buf + block_size, ctx.address, digest_size); | 1177 | switch (sha->type) { |
1178 | case CCP_SHA_TYPE_1: | ||
1179 | case CCP_SHA_TYPE_224: | ||
1180 | case CCP_SHA_TYPE_256: | ||
1181 | memcpy(hmac_buf + block_size, | ||
1182 | ctx.address + ooffset, | ||
1183 | digest_size); | ||
1184 | break; | ||
1185 | default: | ||
1186 | ret = -EINVAL; | ||
1187 | goto e_ctx; | ||
1188 | } | ||
1111 | 1189 | ||
1112 | memset(&hmac_cmd, 0, sizeof(hmac_cmd)); | 1190 | memset(&hmac_cmd, 0, sizeof(hmac_cmd)); |
1113 | hmac_cmd.engine = CCP_ENGINE_SHA; | 1191 | hmac_cmd.engine = CCP_ENGINE_SHA; |
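Editor's note: the HMAC finalization is unchanged in spirit — the outer hash is H(opad-block || inner digest), so the driver concatenates the caller's opad with the digest it just produced and recursively issues one more SHA command. The new switch only accounts for where that digest now sits in the workarea (ctx.address + ooffset). A hedged sketch of the buffer construction, using SHA-256 sizes and hypothetical names:

    #include <stdlib.h>
    #include <string.h>

    #define MY_BLOCK_SIZE  64   /* SHA-256 block */
    #define MY_DIGEST_SIZE 32   /* SHA-256 digest */

    /* Build the outer-hash input: the opad-XORed key block followed by the
     * inner digest. The caller then hashes these 96 bytes one more time. */
    static unsigned char *my_hmac_outer_input(const unsigned char *opad,
                                              const unsigned char *inner_digest)
    {
        unsigned char *buf = malloc(MY_BLOCK_SIZE + MY_DIGEST_SIZE);

        if (!buf)
            return NULL;
        memcpy(buf, opad, MY_BLOCK_SIZE);
        memcpy(buf + MY_BLOCK_SIZE, inner_digest, MY_DIGEST_SIZE);
        return buf;
    }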
@@ -1130,7 +1208,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1130 | } | 1208 | } |
1131 | 1209 | ||
1132 | e_data: | 1210 | e_data: |
1133 | ccp_free_data(&src, cmd_q); | 1211 | if (sha->src) |
1212 | ccp_free_data(&src, cmd_q); | ||
1134 | 1213 | ||
1135 | e_ctx: | 1214 | e_ctx: |
1136 | ccp_dm_free(&ctx); | 1215 | ccp_dm_free(&ctx); |
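Editor's note: the exit ladder also had to learn about the zero-length case — src is only initialized when sha->src was non-NULL, so e_data frees it conditionally, and several earlier failures now jump to e_ctx because src has not been set up yet at that point. The idiom in miniature, with stubs standing in for the real setup/teardown calls; success deliberately falls through both labels, as in the driver:

    static int  my_alloc_ctx(void) { return 0; }  /* ~ ccp_init_dm_workarea() */
    static int  my_alloc_src(void) { return 0; }  /* ~ ccp_init_data()        */
    static int  my_do_work(void)   { return 0; }  /* ~ perform->sha()         */
    static void my_free_src(void)  { }
    static void my_free_ctx(void)  { }

    static int my_operation(int with_src)
    {
        int ret;

        ret = my_alloc_ctx();
        if (ret)
            return ret;

        if (with_src) {
            ret = my_alloc_src();
            if (ret)
                goto e_ctx;    /* src never set up: skip its teardown */
        }

        ret = my_do_work();
        if (ret)
            goto e_data;

        /* ... copy results back on success ... */

    e_data:
        if (with_src)          /* src exists only when there was input */
            my_free_src();
    e_ctx:
        my_free_ctx();
        return ret;
    }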
@@ -1261,7 +1340,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, | |||
1261 | struct ccp_op op; | 1340 | struct ccp_op op; |
1262 | bool in_place = false; | 1341 | bool in_place = false; |
1263 | unsigned int i; | 1342 | unsigned int i; |
1264 | int ret; | 1343 | int ret = 0; |
1265 | 1344 | ||
1266 | if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) | 1345 | if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) |
1267 | return -EINVAL; | 1346 | return -EINVAL; |
@@ -1280,7 +1359,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, | |||
1280 | 1359 | ||
1281 | memset(&op, 0, sizeof(op)); | 1360 | memset(&op, 0, sizeof(op)); |
1282 | op.cmd_q = cmd_q; | 1361 | op.cmd_q = cmd_q; |
1283 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1362 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
1284 | 1363 | ||
1285 | if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { | 1364 | if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { |
1286 | /* Load the mask */ | 1365 | /* Load the mask */ |
@@ -1469,7 +1548,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1469 | 1548 | ||
1470 | memset(&op, 0, sizeof(op)); | 1549 | memset(&op, 0, sizeof(op)); |
1471 | op.cmd_q = cmd_q; | 1550 | op.cmd_q = cmd_q; |
1472 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1551 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
1473 | 1552 | ||
1474 | /* Concatenate the modulus and the operands. Both the modulus and | 1553 | /* Concatenate the modulus and the operands. Both the modulus and |
1475 | * the operands must be in little endian format. Since the input | 1554 | * the operands must be in little endian format. Since the input |
@@ -1594,7 +1673,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1594 | 1673 | ||
1595 | memset(&op, 0, sizeof(op)); | 1674 | memset(&op, 0, sizeof(op)); |
1596 | op.cmd_q = cmd_q; | 1675 | op.cmd_q = cmd_q; |
1597 | op.jobid = ccp_gen_jobid(cmd_q->ccp); | 1676 | op.jobid = CCP_NEW_JOBID(cmd_q->ccp); |
1598 | 1677 | ||
1599 | /* Concatenate the modulus and the operands. Both the modulus and | 1678 | /* Concatenate the modulus and the operands. Both the modulus and |
1600 | * the operands must be in little endian format. Since the input | 1679 | * the operands must be in little endian format. Since the input |
@@ -1632,7 +1711,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1632 | goto e_src; | 1711 | goto e_src; |
1633 | src.address += CCP_ECC_OPERAND_SIZE; | 1712 | src.address += CCP_ECC_OPERAND_SIZE; |
1634 | 1713 | ||
1635 | /* Set the first point Z coordianate to 1 */ | 1714 | /* Set the first point Z coordinate to 1 */ |
1636 | *src.address = 0x01; | 1715 | *src.address = 0x01; |
1637 | src.address += CCP_ECC_OPERAND_SIZE; | 1716 | src.address += CCP_ECC_OPERAND_SIZE; |
1638 | 1717 | ||
@@ -1651,7 +1730,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1651 | goto e_src; | 1730 | goto e_src; |
1652 | src.address += CCP_ECC_OPERAND_SIZE; | 1731 | src.address += CCP_ECC_OPERAND_SIZE; |
1653 | 1732 | ||
1654 | /* Set the second point Z coordianate to 1 */ | 1733 | /* Set the second point Z coordinate to 1 */ |
1655 | *src.address = 0x01; | 1734 | *src.address = 0x01; |
1656 | src.address += CCP_ECC_OPERAND_SIZE; | 1735 | src.address += CCP_ECC_OPERAND_SIZE; |
1657 | } else { | 1736 | } else { |
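Editor's note: both comment fixes sit in the ECC point-multiply setup, where each affine input point (x, y) is loaded with an explicit Z coordinate of 1. In projective form a point (X, Y, Z) with Z = 1 denotes exactly the affine point (X, Y), hence the 0x01 written into the little-endian Z operand slot. A sketch of the conversion with illustrative types:

    #include <string.h>

    #define MY_OPERAND_SIZE 32  /* stand-in for CCP_ECC_OPERAND_SIZE */

    struct my_affine_pt {
        unsigned char x[MY_OPERAND_SIZE], y[MY_OPERAND_SIZE];
    };

    struct my_proj_pt {
        unsigned char x[MY_OPERAND_SIZE], y[MY_OPERAND_SIZE],
                      z[MY_OPERAND_SIZE];
    };

    /* affine -> projective: copy x and y, set Z = 1 (little-endian operand) */
    static void my_to_projective(struct my_proj_pt *p,
                                 const struct my_affine_pt *a)
    {
        memcpy(p->x, a->x, MY_OPERAND_SIZE);
        memcpy(p->y, a->y, MY_OPERAND_SIZE);
        memset(p->z, 0, MY_OPERAND_SIZE);
        p->z[0] = 0x01;  /* LE: least significant byte first */
    }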
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 072bcedef386..064e20f78b10 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c | |||
@@ -141,10 +141,11 @@ static void ccp_free_irqs(struct ccp_device *ccp) | |||
141 | free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, | 141 | free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, |
142 | dev); | 142 | dev); |
143 | pci_disable_msix(pdev); | 143 | pci_disable_msix(pdev); |
144 | } else { | 144 | } else if (ccp->irq) { |
145 | free_irq(ccp->irq, dev); | 145 | free_irq(ccp->irq, dev); |
146 | pci_disable_msi(pdev); | 146 | pci_disable_msi(pdev); |
147 | } | 147 | } |
148 | ccp->irq = 0; | ||
148 | } | 149 | } |
149 | 150 | ||
150 | static int ccp_find_mmio_area(struct ccp_device *ccp) | 151 | static int ccp_find_mmio_area(struct ccp_device *ccp) |
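Editor's note: the ccp_free_irqs() fix makes IRQ teardown idempotent — the MSI leg runs only if a vector was actually claimed, and ccp->irq is zeroed afterwards so a repeated call (for example from a probe-failure path) cannot free the same vector twice. The pattern, reduced to its essentials with hypothetical names:

    struct my_dev { int irq; };

    /* stub standing in for the platform's free_irq() */
    static void my_free_irq(int irq) { (void)irq; }

    static void my_release_irqs(struct my_dev *dev)
    {
        if (dev->irq)              /* free only if one was actually claimed */
            my_free_irq(dev->irq);
        dev->irq = 0;              /* a second call is now a harmless no-op */
    }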
@@ -229,6 +230,8 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
229 | 230 | ||
230 | dev_set_drvdata(dev, ccp); | 231 | dev_set_drvdata(dev, ccp); |
231 | 232 | ||
233 | if (ccp->vdata->setup) | ||
234 | ccp->vdata->setup(ccp); | ||
232 | ret = ccp->vdata->perform->init(ccp); | 235 | ret = ccp->vdata->perform->init(ccp); |
233 | if (ret) | 236 | if (ret) |
234 | goto e_iomap; | 237 | goto e_iomap; |
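Editor's note: probe now offers the version data an optional setup() hook, invoked before the mandatory perform->init(); a v5 device can prepare generation-specific state there while v3 simply leaves the pointer NULL. The guarded-callback pattern in miniature (names illustrative):

    #include <stddef.h>

    struct my_device;

    struct my_vdata {
        void (*setup)(struct my_device *dev);  /* optional, may be NULL */
        int  (*init)(struct my_device *dev);   /* required */
    };

    struct my_device { const struct my_vdata *vdata; };

    static int my_probe(struct my_device *dev)
    {
        if (dev->vdata->setup)         /* only newer generations provide this */
            dev->vdata->setup(dev);
        return dev->vdata->init(dev);
    }

This mirrors the usual kernel convention for function tables: required operations are called unconditionally, optional ones are NULL-checked at the call site.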
@@ -321,6 +324,7 @@ static int ccp_pci_resume(struct pci_dev *pdev) | |||
321 | 324 | ||
322 | static const struct pci_device_id ccp_pci_table[] = { | 325 | static const struct pci_device_id ccp_pci_table[] = { |
323 | { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, | 326 | { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, |
327 | { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5 }, | ||
324 | /* Last entry must be zero */ | 328 | /* Last entry must be zero */ |
325 | { 0, } | 329 | { 0, } |
326 | }; | 330 | }; |
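Editor's note: the new table entry binds PCI device 0x1456 to the v5 version data exactly as 0x1537 binds to v3 — the vdata pointer travels in driver_data, so probe recovers the right function table without version if-ladders. The idiom sketched outside the kernel (0x1022 is AMD's vendor ID; everything else here is made up):

    struct my_id {
        unsigned int vendor, device;
        const void *driver_data;   /* per-device version data */
    };

    struct my_vdata2 { int version; };

    static const struct my_vdata2 my_v3 = { .version = 3 };
    static const struct my_vdata2 my_v5 = { .version = 5 };

    static const struct my_id my_table[] = {
        { 0x1022, 0x1537, &my_v3 },  /* AMD, v3 CCP */
        { 0x1022, 0x1456, &my_v5 },  /* AMD, v5 CCP */
        { 0, }                       /* terminator, as in the kernel table */
    };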
diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 7c2bb27c067c..a7653339fedb 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h | |||
@@ -238,9 +238,6 @@ struct ccp_xts_aes_engine { | |||
238 | }; | 238 | }; |
239 | 239 | ||
240 | /***** SHA engine *****/ | 240 | /***** SHA engine *****/ |
241 | #define CCP_SHA_BLOCKSIZE SHA256_BLOCK_SIZE | ||
242 | #define CCP_SHA_CTXSIZE SHA256_DIGEST_SIZE | ||
243 | |||
244 | /** | 241 | /** |
245 | * ccp_sha_type - type of SHA operation | 242 | * ccp_sha_type - type of SHA operation |
246 | * | 243 | * |
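Editor's note: dropping CCP_SHA_BLOCKSIZE and CCP_SHA_CTXSIZE from the public header completes the move to per-type sizing — a single SHA-256-sized constant cannot describe a 20-byte SHA-1 state once contexts live at type-dependent offsets. The offsets implied by the ccp-ops.c switch, checked at compile time (sizes are the standard ones; MY_* names are illustrative):

    #define MY_SHA1_DIGEST_SIZE   20
    #define MY_SHA224_DIGEST_SIZE 28
    #define MY_SHA256_DIGEST_SIZE 32
    #define MY_SB_BYTES           32  /* one storage-block entry */

    /* every supported digest still fits a single 32-byte SB entry */
    _Static_assert(MY_SHA256_DIGEST_SIZE <= MY_SB_BYTES, "fits one SB entry");

    /* the v5 right-aligned output offsets implied by the ccp-ops.c switch */
    _Static_assert(MY_SB_BYTES - MY_SHA1_DIGEST_SIZE   == 12, "SHA-1 at offset 12");
    _Static_assert(MY_SB_BYTES - MY_SHA224_DIGEST_SIZE ==  4, "SHA-224 at offset 4");
    _Static_assert(MY_SB_BYTES - MY_SHA256_DIGEST_SIZE ==  0, "SHA-256 at offset 0");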