author		Gary R Hook <gary.hook@amd.com>	2016-03-01 14:49:25 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2016-03-11 08:19:17 -0500
commit		ea0375afa17281e9e0190034215d0404dbad7449 (patch)
tree		9b27b39689db3c66f9d6440aa2e3a255cfb659dc /drivers/crypto/ccp
parent		c7019c4d739e79d7baaa13c86dcaaedec8113d70 (diff)

crypto: ccp - Add abstraction for device-specific calls

Support for different generations of the coprocessor requires that an
abstraction layer be implemented for interacting with the hardware.
This patch splits out version-specific functions to a separate file and
populates the version structure (acting as a driver) with function
pointers.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
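In outline, the abstraction works like this: each hardware generation supplies a
table of function pointers (struct ccp_actions), and struct ccp_vdata binds that
table to a version number; common code then calls through the per-device vdata
instead of linking against version-specific functions directly. The fragment
below is a trimmed sketch of that pattern, not part of the patch itself (only
three of the nine function pointers are shown; the complete declarations are in
the ccp-dev.h hunk further down):

    /* Trimmed sketch of the dispatch pattern this patch introduces;
     * see the ccp-dev.h hunk below for the full ccp_actions table. */
    struct ccp_op;
    struct ccp_device;

    struct ccp_actions {
            int (*perform_aes)(struct ccp_op *);
            int (*init)(struct ccp_device *);
            void (*destroy)(struct ccp_device *);
    };

    struct ccp_vdata {
            unsigned int version;
            struct ccp_actions *perform;
    };

    /* Callers now dispatch through the device's version data: a probe
     * routine runs ccp->vdata->perform->init(ccp) rather than calling
     * ccp_init(ccp) directly, so a later v5 device only has to supply
     * its own ccp_actions table. */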
Diffstat (limited to 'drivers/crypto/ccp')
-rw-r--r--	drivers/crypto/ccp/Makefile		|   2
-rw-r--r--	drivers/crypto/ccp/ccp-dev-v3.c		| 533
-rw-r--r--	drivers/crypto/ccp/ccp-dev.c		| 306
-rw-r--r--	drivers/crypto/ccp/ccp-dev.h		| 138
-rw-r--r--	drivers/crypto/ccp/ccp-ops.c		| 381
-rw-r--r--	drivers/crypto/ccp/ccp-pci.c		|  10
-rw-r--r--	drivers/crypto/ccp/ccp-platform.c	|   7
7 files changed, 710 insertions(+), 667 deletions(-)
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 55a1f3951578..b750592cc936 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,5 +1,5 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o ccp-platform.o
+ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o
 ccp-$(CONFIG_PCI) += ccp-pci.o
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
new file mode 100644
index 000000000000..7d5eab49179e
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -0,0 +1,533 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
+{
+        struct ccp_cmd_queue *cmd_q = op->cmd_q;
+        struct ccp_device *ccp = cmd_q->ccp;
+        void __iomem *cr_addr;
+        u32 cr0, cmd;
+        unsigned int i;
+        int ret = 0;
+
+        /* We could read a status register to see how many free slots
+         * are actually available, but reading that register resets it
+         * and you could lose some error information.
+         */
+        cmd_q->free_slots--;
+
+        cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
+              | (op->jobid << REQ0_JOBID_SHIFT)
+              | REQ0_WAIT_FOR_WRITE;
+
+        if (op->soc)
+                cr0 |= REQ0_STOP_ON_COMPLETE
+                       | REQ0_INT_ON_COMPLETE;
+
+        if (op->ioc || !cmd_q->free_slots)
+                cr0 |= REQ0_INT_ON_COMPLETE;
+
+        /* Start at CMD_REQ1 */
+        cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
+
+        mutex_lock(&ccp->req_mutex);
+
+        /* Write CMD_REQ1 through CMD_REQx first */
+        for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
+                iowrite32(*(cr + i), cr_addr);
+
+        /* Tell the CCP to start */
+        wmb();
+        iowrite32(cr0, ccp->io_regs + CMD_REQ0);
+
+        mutex_unlock(&ccp->req_mutex);
+
+        if (cr0 & REQ0_INT_ON_COMPLETE) {
+                /* Wait for the job to complete */
+                ret = wait_event_interruptible(cmd_q->int_queue,
+                                               cmd_q->int_rcvd);
+                if (ret || cmd_q->cmd_error) {
+                        /* On error delete all related jobs from the queue */
+                        cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
+                              | op->jobid;
+
+                        iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+
+                        if (!ret)
+                                ret = -EIO;
+                } else if (op->soc) {
+                        /* Delete just head job from the queue on SoC */
+                        cmd = DEL_Q_ACTIVE
+                              | (cmd_q->id << DEL_Q_ID_SHIFT)
+                              | op->jobid;
+
+                        iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+                }
+
+                cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
+
+                cmd_q->int_rcvd = 0;
+        }
+
+        return ret;
+}
+
+static int ccp_perform_aes(struct ccp_op *op)
+{
+        u32 cr[6];
+
+        /* Fill out the register contents for REQ1 through REQ6 */
+        cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
+                | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
+                | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
+                | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
+                | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+        cr[1] = op->src.u.dma.length - 1;
+        cr[2] = ccp_addr_lo(&op->src.u.dma);
+        cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+                | ccp_addr_hi(&op->src.u.dma);
+        cr[4] = ccp_addr_lo(&op->dst.u.dma);
+        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+                | ccp_addr_hi(&op->dst.u.dma);
+
+        if (op->u.aes.mode == CCP_AES_MODE_CFB)
+                cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
+
+        if (op->eom)
+                cr[0] |= REQ1_EOM;
+
+        if (op->init)
+                cr[0] |= REQ1_INIT;
+
+        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_xts_aes(struct ccp_op *op)
+{
+        u32 cr[6];
+
+        /* Fill out the register contents for REQ1 through REQ6 */
+        cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
+                | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
+                | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
+                | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+        cr[1] = op->src.u.dma.length - 1;
+        cr[2] = ccp_addr_lo(&op->src.u.dma);
+        cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+                | ccp_addr_hi(&op->src.u.dma);
+        cr[4] = ccp_addr_lo(&op->dst.u.dma);
+        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+                | ccp_addr_hi(&op->dst.u.dma);
+
+        if (op->eom)
+                cr[0] |= REQ1_EOM;
+
+        if (op->init)
+                cr[0] |= REQ1_INIT;
+
+        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_sha(struct ccp_op *op)
+{
+        u32 cr[6];
+
+        /* Fill out the register contents for REQ1 through REQ6 */
+        cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
+                | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
+                | REQ1_INIT;
+        cr[1] = op->src.u.dma.length - 1;
+        cr[2] = ccp_addr_lo(&op->src.u.dma);
+        cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+                | ccp_addr_hi(&op->src.u.dma);
+
+        if (op->eom) {
+                cr[0] |= REQ1_EOM;
+                cr[4] = lower_32_bits(op->u.sha.msg_bits);
+                cr[5] = upper_32_bits(op->u.sha.msg_bits);
+        } else {
+                cr[4] = 0;
+                cr[5] = 0;
+        }
+
+        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_rsa(struct ccp_op *op)
+{
+        u32 cr[6];
+
+        /* Fill out the register contents for REQ1 through REQ6 */
+        cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
+                | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
+                | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+                | REQ1_EOM;
+        cr[1] = op->u.rsa.input_len - 1;
+        cr[2] = ccp_addr_lo(&op->src.u.dma);
+        cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+                | ccp_addr_hi(&op->src.u.dma);
+        cr[4] = ccp_addr_lo(&op->dst.u.dma);
+        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+                | ccp_addr_hi(&op->dst.u.dma);
+
+        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_passthru(struct ccp_op *op)
+{
+        u32 cr[6];
+
+        /* Fill out the register contents for REQ1 through REQ6 */
+        cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
+                | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
+                | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
+
+        if (op->src.type == CCP_MEMTYPE_SYSTEM)
+                cr[1] = op->src.u.dma.length - 1;
+        else
+                cr[1] = op->dst.u.dma.length - 1;
+
+        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+                cr[2] = ccp_addr_lo(&op->src.u.dma);
+                cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+                        | ccp_addr_hi(&op->src.u.dma);
+
+                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+                        cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+        } else {
+                cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
+                cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+        }
+
+        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+                cr[4] = ccp_addr_lo(&op->dst.u.dma);
+                cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+                        | ccp_addr_hi(&op->dst.u.dma);
+        } else {
+                cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
+                cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+        }
+
+        if (op->eom)
+                cr[0] |= REQ1_EOM;
+
+        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_ecc(struct ccp_op *op)
+{
+        u32 cr[6];
+
+        /* Fill out the register contents for REQ1 through REQ6 */
+        cr[0] = REQ1_ECC_AFFINE_CONVERT
+                | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
+                | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
+                | REQ1_EOM;
+        cr[1] = op->src.u.dma.length - 1;
+        cr[2] = ccp_addr_lo(&op->src.u.dma);
+        cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+                | ccp_addr_hi(&op->src.u.dma);
+        cr[4] = ccp_addr_lo(&op->dst.u.dma);
+        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+                | ccp_addr_hi(&op->dst.u.dma);
+
+        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+        u32 trng_value;
+        int len = min_t(int, sizeof(trng_value), max);
+
+        /*
+         * Locking is provided by the caller so we can update device
+         * hwrng-related fields safely
+         */
+        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+        if (!trng_value) {
+                /* Zero is returned if not data is available or if a
+                 * bad-entropy error is present. Assume an error if
+                 * we exceed TRNG_RETRIES reads of zero.
+                 */
+                if (ccp->hwrng_retries++ > TRNG_RETRIES)
+                        return -EIO;
+
+                return 0;
+        }
+
+        /* Reset the counter and save the rng value */
+        ccp->hwrng_retries = 0;
+        memcpy(data, &trng_value, len);
+
+        return len;
+}
+
+static int ccp_init(struct ccp_device *ccp)
+{
+        struct device *dev = ccp->dev;
+        struct ccp_cmd_queue *cmd_q;
+        struct dma_pool *dma_pool;
+        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+        unsigned int qmr, qim, i;
+        int ret;
+
+        /* Find available queues */
+        qim = 0;
+        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+        for (i = 0; i < MAX_HW_QUEUES; i++) {
+                if (!(qmr & (1 << i)))
+                        continue;
+
+                /* Allocate a dma pool for this queue */
+                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
+                         ccp->name, i);
+                dma_pool = dma_pool_create(dma_pool_name, dev,
+                                           CCP_DMAPOOL_MAX_SIZE,
+                                           CCP_DMAPOOL_ALIGN, 0);
+                if (!dma_pool) {
+                        dev_err(dev, "unable to allocate dma pool\n");
+                        ret = -ENOMEM;
+                        goto e_pool;
+                }
+
+                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+                ccp->cmd_q_count++;
+
+                cmd_q->ccp = ccp;
+                cmd_q->id = i;
+                cmd_q->dma_pool = dma_pool;
+
+                /* Reserve 2 KSB regions for the queue */
+                cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
+                cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
+                ccp->ksb_count -= 2;
+
+                /* Preset some register values and masks that are queue
+                 * number dependent
+                 */
+                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
+                                    (CMD_Q_STATUS_INCR * i);
+                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
+                                        (CMD_Q_STATUS_INCR * i);
+                cmd_q->int_ok = 1 << (i * 2);
+                cmd_q->int_err = 1 << ((i * 2) + 1);
+
+                cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+
+                init_waitqueue_head(&cmd_q->int_queue);
+
+                /* Build queue interrupt mask (two interrupts per queue) */
+                qim |= cmd_q->int_ok | cmd_q->int_err;
+
+#ifdef CONFIG_ARM64
+                /* For arm64 set the recommended queue cache settings */
+                iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
+                          (CMD_Q_CACHE_INC * i));
+#endif
+
+                dev_dbg(dev, "queue #%u available\n", i);
+        }
+        if (ccp->cmd_q_count == 0) {
+                dev_notice(dev, "no command queues available\n");
+                ret = -EIO;
+                goto e_pool;
+        }
+        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+        /* Disable and clear interrupts until ready */
+        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+        for (i = 0; i < ccp->cmd_q_count; i++) {
+                cmd_q = &ccp->cmd_q[i];
+
+                ioread32(cmd_q->reg_int_status);
+                ioread32(cmd_q->reg_status);
+        }
+        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+        /* Request an irq */
+        ret = ccp->get_irq(ccp);
+        if (ret) {
+                dev_err(dev, "unable to allocate an IRQ\n");
+                goto e_pool;
+        }
+
+        /* Initialize the queues used to wait for KSB space and suspend */
+        init_waitqueue_head(&ccp->ksb_queue);
+        init_waitqueue_head(&ccp->suspend_queue);
+
+        /* Create a kthread for each queue */
+        for (i = 0; i < ccp->cmd_q_count; i++) {
+                struct task_struct *kthread;
+
+                cmd_q = &ccp->cmd_q[i];
+
+                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+                                         "%s-q%u", ccp->name, cmd_q->id);
+                if (IS_ERR(kthread)) {
+                        dev_err(dev, "error creating queue thread (%ld)\n",
+                                PTR_ERR(kthread));
+                        ret = PTR_ERR(kthread);
+                        goto e_kthread;
+                }
+
+                cmd_q->kthread = kthread;
+                wake_up_process(kthread);
+        }
+
+        /* Register the RNG */
+        ccp->hwrng.name = ccp->rngname;
+        ccp->hwrng.read = ccp_trng_read;
+        ret = hwrng_register(&ccp->hwrng);
+        if (ret) {
+                dev_err(dev, "error registering hwrng (%d)\n", ret);
+                goto e_kthread;
+        }
+
+        ccp_add_device(ccp);
+
+        /* Enable interrupts */
+        iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+        return 0;
+
+e_kthread:
+        for (i = 0; i < ccp->cmd_q_count; i++)
+                if (ccp->cmd_q[i].kthread)
+                        kthread_stop(ccp->cmd_q[i].kthread);
+
+        ccp->free_irq(ccp);
+
+e_pool:
+        for (i = 0; i < ccp->cmd_q_count; i++)
+                dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+        return ret;
+}
+
+static void ccp_destroy(struct ccp_device *ccp)
+{
+        struct ccp_cmd_queue *cmd_q;
+        struct ccp_cmd *cmd;
+        unsigned int qim, i;
+
+        /* Remove this device from the list of available units first */
+        ccp_del_device(ccp);
+
+        /* Unregister the RNG */
+        hwrng_unregister(&ccp->hwrng);
+
+        /* Stop the queue kthreads */
+        for (i = 0; i < ccp->cmd_q_count; i++)
+                if (ccp->cmd_q[i].kthread)
+                        kthread_stop(ccp->cmd_q[i].kthread);
+
+        /* Build queue interrupt mask (two interrupt masks per queue) */
+        qim = 0;
+        for (i = 0; i < ccp->cmd_q_count; i++) {
+                cmd_q = &ccp->cmd_q[i];
+                qim |= cmd_q->int_ok | cmd_q->int_err;
+        }
+
+        /* Disable and clear interrupts */
+        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+        for (i = 0; i < ccp->cmd_q_count; i++) {
+                cmd_q = &ccp->cmd_q[i];
+
+                ioread32(cmd_q->reg_int_status);
+                ioread32(cmd_q->reg_status);
+        }
+        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+        ccp->free_irq(ccp);
+
+        for (i = 0; i < ccp->cmd_q_count; i++)
+                dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+        /* Flush the cmd and backlog queue */
+        while (!list_empty(&ccp->cmd)) {
+                /* Invoke the callback directly with an error code */
+                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+                list_del(&cmd->entry);
+                cmd->callback(cmd->data, -ENODEV);
+        }
+        while (!list_empty(&ccp->backlog)) {
+                /* Invoke the callback directly with an error code */
+                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+                list_del(&cmd->entry);
+                cmd->callback(cmd->data, -ENODEV);
+        }
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+        struct device *dev = data;
+        struct ccp_device *ccp = dev_get_drvdata(dev);
+        struct ccp_cmd_queue *cmd_q;
+        u32 q_int, status;
+        unsigned int i;
+
+        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+        for (i = 0; i < ccp->cmd_q_count; i++) {
+                cmd_q = &ccp->cmd_q[i];
+
+                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+                if (q_int) {
+                        cmd_q->int_status = status;
+                        cmd_q->q_status = ioread32(cmd_q->reg_status);
+                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+                        /* On error, only save the first error value */
+                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+                        cmd_q->int_rcvd = 1;
+
+                        /* Acknowledge the interrupt and wake the kthread */
+                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+                        wake_up_interruptible(&cmd_q->int_queue);
+                }
+        }
+
+        return IRQ_HANDLED;
+}
+
+static struct ccp_actions ccp3_actions = {
+        .perform_aes = ccp_perform_aes,
+        .perform_xts_aes = ccp_perform_xts_aes,
+        .perform_sha = ccp_perform_sha,
+        .perform_rsa = ccp_perform_rsa,
+        .perform_passthru = ccp_perform_passthru,
+        .perform_ecc = ccp_perform_ecc,
+        .init = ccp_init,
+        .destroy = ccp_destroy,
+        .irqhandler = ccp_irq_handler,
+};
+
+struct ccp_vdata ccpv3 = {
+        .version = CCP_VERSION(3, 0),
+        .perform = &ccp3_actions,
+};
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 5348512da643..336e5b780fcb 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -63,11 +63,17 @@ unsigned int ccp_increment_unit_ordinal(void)
 	return atomic_inc_return(&ccp_unit_ordinal);
 }
 
-/*
+/**
+ * ccp_add_device - add a CCP device to the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
  * Put this CCP on the unit list, which makes it available
  * for use.
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
  */
-static inline void ccp_add_device(struct ccp_device *ccp)
+void ccp_add_device(struct ccp_device *ccp)
 {
 	unsigned long flags;
 
@@ -81,11 +87,16 @@ static inline void ccp_add_device(struct ccp_device *ccp)
 	write_unlock_irqrestore(&ccp_unit_lock, flags);
 }
 
-/* Remove this unit from the list of devices. If the next device
+/**
+ * ccp_del_device - remove a CCP device from the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Remove this unit from the list of devices. If the next device
  * up for use is this one, adjust the pointer. If this is the last
  * device, NULL the pointer.
  */
-static inline void ccp_del_device(struct ccp_device *ccp)
+void ccp_del_device(struct ccp_device *ccp)
 {
 	unsigned long flags;
 
@@ -326,7 +337,12 @@ static void ccp_do_cmd_complete(unsigned long data)
 	complete(&tdata->completion);
 }
 
-static int ccp_cmd_queue_thread(void *data)
+/**
+ * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
+ *
+ * @data: thread-specific data
+ */
+int ccp_cmd_queue_thread(void *data)
 {
 	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
 	struct ccp_cmd *cmd;
@@ -362,35 +378,6 @@ static int ccp_cmd_queue_thread(void *data)
 	return 0;
 }
 
-static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
-	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
-	u32 trng_value;
-	int len = min_t(int, sizeof(trng_value), max);
-
-	/*
-	 * Locking is provided by the caller so we can update device
-	 * hwrng-related fields safely
-	 */
-	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
-	if (!trng_value) {
-		/* Zero is returned if not data is available or if a
-		 * bad-entropy error is present. Assume an error if
-		 * we exceed TRNG_RETRIES reads of zero.
-		 */
-		if (ccp->hwrng_retries++ > TRNG_RETRIES)
-			return -EIO;
-
-		return 0;
-	}
-
-	/* Reset the counter and save the rng value */
-	ccp->hwrng_retries = 0;
-	memcpy(data, &trng_value, len);
-
-	return len;
-}
-
 /**
  * ccp_alloc_struct - allocate and initialize the ccp_device struct
  *
@@ -421,253 +408,6 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
 	return ccp;
 }
 
-/**
- * ccp_init - initialize the CCP device
- *
- * @ccp: ccp_device struct
- */
-int ccp_init(struct ccp_device *ccp)
-{
-	struct device *dev = ccp->dev;
-	struct ccp_cmd_queue *cmd_q;
-	struct dma_pool *dma_pool;
-	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
-	unsigned int qmr, qim, i;
-	int ret;
-
-	/* Find available queues */
-	qim = 0;
-	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
-	for (i = 0; i < MAX_HW_QUEUES; i++) {
-		if (!(qmr & (1 << i)))
-			continue;
-
-		/* Allocate a dma pool for this queue */
-		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
-			 ccp->name, i);
-		dma_pool = dma_pool_create(dma_pool_name, dev,
-					   CCP_DMAPOOL_MAX_SIZE,
-					   CCP_DMAPOOL_ALIGN, 0);
-		if (!dma_pool) {
-			dev_err(dev, "unable to allocate dma pool\n");
-			ret = -ENOMEM;
-			goto e_pool;
-		}
-
-		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
-		ccp->cmd_q_count++;
-
-		cmd_q->ccp = ccp;
-		cmd_q->id = i;
-		cmd_q->dma_pool = dma_pool;
-
-		/* Reserve 2 KSB regions for the queue */
-		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
-		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
-		ccp->ksb_count -= 2;
-
-		/* Preset some register values and masks that are queue
-		 * number dependent
-		 */
-		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
-				    (CMD_Q_STATUS_INCR * i);
-		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
-					(CMD_Q_STATUS_INCR * i);
-		cmd_q->int_ok = 1 << (i * 2);
-		cmd_q->int_err = 1 << ((i * 2) + 1);
-
-		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
-
-		init_waitqueue_head(&cmd_q->int_queue);
-
-		/* Build queue interrupt mask (two interrupts per queue) */
-		qim |= cmd_q->int_ok | cmd_q->int_err;
-
-#ifdef CONFIG_ARM64
-		/* For arm64 set the recommended queue cache settings */
-		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
-			  (CMD_Q_CACHE_INC * i));
-#endif
-
-		dev_dbg(dev, "queue #%u available\n", i);
-	}
-	if (ccp->cmd_q_count == 0) {
-		dev_notice(dev, "no command queues available\n");
-		ret = -EIO;
-		goto e_pool;
-	}
-	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
-
-	/* Disable and clear interrupts until ready */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-
-		ioread32(cmd_q->reg_int_status);
-		ioread32(cmd_q->reg_status);
-	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
-	/* Request an irq */
-	ret = ccp->get_irq(ccp);
-	if (ret) {
-		dev_err(dev, "unable to allocate an IRQ\n");
-		goto e_pool;
-	}
-
-	/* Initialize the queues used to wait for KSB space and suspend */
-	init_waitqueue_head(&ccp->ksb_queue);
-	init_waitqueue_head(&ccp->suspend_queue);
-
-	/* Create a kthread for each queue */
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		struct task_struct *kthread;
-
-		cmd_q = &ccp->cmd_q[i];
-
-		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
-					 "%s-q%u", ccp->name, cmd_q->id);
-		if (IS_ERR(kthread)) {
-			dev_err(dev, "error creating queue thread (%ld)\n",
-				PTR_ERR(kthread));
-			ret = PTR_ERR(kthread);
-			goto e_kthread;
-		}
-
-		cmd_q->kthread = kthread;
-		wake_up_process(kthread);
-	}
-
-	/* Register the RNG */
-	ccp->hwrng.name = ccp->rngname;
-	ccp->hwrng.read = ccp_trng_read;
-	ret = hwrng_register(&ccp->hwrng);
-	if (ret) {
-		dev_err(dev, "error registering hwrng (%d)\n", ret);
-		goto e_kthread;
-	}
-
-	/* Make the device struct available before enabling interrupts */
-	ccp_add_device(ccp);
-
-	/* Enable interrupts */
-	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
-
-	return 0;
-
-e_kthread:
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		if (ccp->cmd_q[i].kthread)
-			kthread_stop(ccp->cmd_q[i].kthread);
-
-	ccp->free_irq(ccp);
-
-e_pool:
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
-	return ret;
-}
-
-/**
- * ccp_destroy - tear down the CCP device
- *
- * @ccp: ccp_device struct
- */
-void ccp_destroy(struct ccp_device *ccp)
-{
-	struct ccp_cmd_queue *cmd_q;
-	struct ccp_cmd *cmd;
-	unsigned int qim, i;
-
-	/* Remove general access to the device struct */
-	ccp_del_device(ccp);
-
-	/* Unregister the RNG */
-	hwrng_unregister(&ccp->hwrng);
-
-	/* Stop the queue kthreads */
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		if (ccp->cmd_q[i].kthread)
-			kthread_stop(ccp->cmd_q[i].kthread);
-
-	/* Build queue interrupt mask (two interrupt masks per queue) */
-	qim = 0;
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-		qim |= cmd_q->int_ok | cmd_q->int_err;
-	}
-
-	/* Disable and clear interrupts */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-
-		ioread32(cmd_q->reg_int_status);
-		ioread32(cmd_q->reg_status);
-	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
-	ccp->free_irq(ccp);
-
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
-	/* Flush the cmd and backlog queue */
-	while (!list_empty(&ccp->cmd)) {
-		/* Invoke the callback directly with an error code */
-		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
-		list_del(&cmd->entry);
-		cmd->callback(cmd->data, -ENODEV);
-	}
-	while (!list_empty(&ccp->backlog)) {
-		/* Invoke the callback directly with an error code */
-		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
-		list_del(&cmd->entry);
-		cmd->callback(cmd->data, -ENODEV);
-	}
-}
-
-/**
- * ccp_irq_handler - handle interrupts generated by the CCP device
- *
- * @irq: the irq associated with the interrupt
- * @data: the data value supplied when the irq was created
- */
-irqreturn_t ccp_irq_handler(int irq, void *data)
-{
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
-	struct ccp_cmd_queue *cmd_q;
-	u32 q_int, status;
-	unsigned int i;
-
-	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-
-		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
-		if (q_int) {
-			cmd_q->int_status = status;
-			cmd_q->q_status = ioread32(cmd_q->reg_status);
-			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-			/* On error, only save the first error value */
-			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
-				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-			cmd_q->int_rcvd = 1;
-
-			/* Acknowledge the interrupt and wake the kthread */
-			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
-			wake_up_interruptible(&cmd_q->int_queue);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 #ifdef CONFIG_PM
 bool ccp_queues_suspended(struct ccp_device *ccp)
 {
@@ -687,10 +427,6 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 }
 #endif
 
-struct ccp_vdata ccpv3 = {
-	.version = CCP_VERSION(3, 0),
-};
-
 static int __init ccp_mod_init(void)
 {
 #ifdef CONFIG_X86
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 90a8cc8c7d46..7745d0be491d 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -141,9 +141,25 @@
 #define CCP_ECC_RESULT_OFFSET		60
 #define CCP_ECC_RESULT_SUCCESS		0x0001
 
+struct ccp_op;
+
+/* Structure for computation functions that are device-specific */
+struct ccp_actions {
+	int (*perform_aes)(struct ccp_op *);
+	int (*perform_xts_aes)(struct ccp_op *);
+	int (*perform_sha)(struct ccp_op *);
+	int (*perform_rsa)(struct ccp_op *);
+	int (*perform_passthru)(struct ccp_op *);
+	int (*perform_ecc)(struct ccp_op *);
+	int (*init)(struct ccp_device *);
+	void (*destroy)(struct ccp_device *);
+	irqreturn_t (*irqhandler)(int, void *);
+};
+
 /* Structure to hold CCP version-specific values */
 struct ccp_vdata {
 	unsigned int version;
+	struct ccp_actions *perform;
 };
 
 extern struct ccp_vdata ccpv3;
@@ -273,18 +289,132 @@ struct ccp_device {
 	unsigned int axcache;
 };
 
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_KSB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE__LAST,
+};
+
+struct ccp_dma_info {
+	dma_addr_t address;
+	unsigned int offset;
+	unsigned int length;
+	enum dma_data_direction dir;
+};
+
+struct ccp_dm_workarea {
+	struct device *dev;
+	struct dma_pool *dma_pool;
+	unsigned int length;
+
+	u8 *address;
+	struct ccp_dma_info dma;
+};
+
+struct ccp_sg_workarea {
+	struct scatterlist *sg;
+	int nents;
+
+	struct scatterlist *dma_sg;
+	struct device *dma_dev;
+	unsigned int dma_count;
+	enum dma_data_direction dma_dir;
+
+	unsigned int sg_used;
+
+	u64 bytes_left;
+};
+
+struct ccp_data {
+	struct ccp_sg_workarea sg_wa;
+	struct ccp_dm_workarea dm_wa;
+};
+
+struct ccp_mem {
+	enum ccp_memtype type;
+	union {
+		struct ccp_dma_info dma;
+		u32 ksb;
+	} u;
+};
+
+struct ccp_aes_op {
+	enum ccp_aes_type type;
+	enum ccp_aes_mode mode;
+	enum ccp_aes_action action;
+};
+
+struct ccp_xts_aes_op {
+	enum ccp_aes_action action;
+	enum ccp_xts_aes_unit_size unit_size;
+};
+
+struct ccp_sha_op {
+	enum ccp_sha_type type;
+	u64 msg_bits;
+};
+
+struct ccp_rsa_op {
+	u32 mod_size;
+	u32 input_len;
+};
+
+struct ccp_passthru_op {
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+};
+
+struct ccp_ecc_op {
+	enum ccp_ecc_function function;
+};
+
+struct ccp_op {
+	struct ccp_cmd_queue *cmd_q;
+
+	u32 jobid;
+	u32 ioc;
+	u32 soc;
+	u32 ksb_key;
+	u32 ksb_ctx;
+	u32 init;
+	u32 eom;
+
+	struct ccp_mem src;
+	struct ccp_mem dst;
+
+	union {
+		struct ccp_aes_op aes;
+		struct ccp_xts_aes_op xts;
+		struct ccp_sha_op sha;
+		struct ccp_rsa_op rsa;
+		struct ccp_passthru_op passthru;
+		struct ccp_ecc_op ecc;
+	} u;
+};
+
+static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
+{
+	return lower_32_bits(info->address + info->offset);
+}
+
+static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
+{
+	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
+}
+
 int ccp_pci_init(void);
 void ccp_pci_exit(void);
 
 int ccp_platform_init(void);
 void ccp_platform_exit(void);
 
+void ccp_add_device(struct ccp_device *ccp);
+void ccp_del_device(struct ccp_device *ccp);
+
 struct ccp_device *ccp_alloc_struct(struct device *dev);
-int ccp_init(struct ccp_device *ccp);
-void ccp_destroy(struct ccp_device *ccp);
 bool ccp_queues_suspended(struct ccp_device *ccp);
-
-irqreturn_t ccp_irq_handler(int irq, void *data);
+int ccp_cmd_queue_thread(void *data);
 
 int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
 
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 6613aee79b87..eefdf595f758 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
 /*
  * AMD Cryptographic Coprocessor (CCP) driver
  *
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  *
@@ -13,124 +13,12 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
 #include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-#include <linux/scatterlist.h>
 #include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
+#include <linux/ccp.h>
 
 #include "ccp-dev.h"
 
-enum ccp_memtype {
-	CCP_MEMTYPE_SYSTEM = 0,
-	CCP_MEMTYPE_KSB,
-	CCP_MEMTYPE_LOCAL,
-	CCP_MEMTYPE__LAST,
-};
-
-struct ccp_dma_info {
-	dma_addr_t address;
-	unsigned int offset;
-	unsigned int length;
-	enum dma_data_direction dir;
-};
-
-struct ccp_dm_workarea {
-	struct device *dev;
-	struct dma_pool *dma_pool;
-	unsigned int length;
-
-	u8 *address;
-	struct ccp_dma_info dma;
-};
-
-struct ccp_sg_workarea {
-	struct scatterlist *sg;
-	int nents;
-
-	struct scatterlist *dma_sg;
-	struct device *dma_dev;
-	unsigned int dma_count;
-	enum dma_data_direction dma_dir;
-
-	unsigned int sg_used;
-
-	u64 bytes_left;
-};
-
-struct ccp_data {
-	struct ccp_sg_workarea sg_wa;
-	struct ccp_dm_workarea dm_wa;
-};
-
-struct ccp_mem {
-	enum ccp_memtype type;
-	union {
-		struct ccp_dma_info dma;
-		u32 ksb;
-	} u;
-};
-
-struct ccp_aes_op {
-	enum ccp_aes_type type;
-	enum ccp_aes_mode mode;
-	enum ccp_aes_action action;
-};
-
-struct ccp_xts_aes_op {
-	enum ccp_aes_action action;
-	enum ccp_xts_aes_unit_size unit_size;
-};
-
-struct ccp_sha_op {
-	enum ccp_sha_type type;
-	u64 msg_bits;
-};
-
-struct ccp_rsa_op {
-	u32 mod_size;
-	u32 input_len;
-};
-
-struct ccp_passthru_op {
-	enum ccp_passthru_bitwise bit_mod;
-	enum ccp_passthru_byteswap byte_swap;
-};
-
-struct ccp_ecc_op {
-	enum ccp_ecc_function function;
-};
-
-struct ccp_op {
-	struct ccp_cmd_queue *cmd_q;
-
-	u32 jobid;
-	u32 ioc;
-	u32 soc;
-	u32 ksb_key;
-	u32 ksb_ctx;
-	u32 init;
-	u32 eom;
-
-	struct ccp_mem src;
-	struct ccp_mem dst;
-
-	union {
-		struct ccp_aes_op aes;
-		struct ccp_xts_aes_op xts;
-		struct ccp_sha_op sha;
-		struct ccp_rsa_op rsa;
-		struct ccp_passthru_op passthru;
-		struct ccp_ecc_op ecc;
-	} u;
-};
-
 /* SHA initial context values */
 static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
@@ -152,253 +40,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
 };
 
-static u32 ccp_addr_lo(struct ccp_dma_info *info)
-{
-	return lower_32_bits(info->address + info->offset);
-}
-
-static u32 ccp_addr_hi(struct ccp_dma_info *info)
-{
-	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
-}
-
-static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
-{
-	struct ccp_cmd_queue *cmd_q = op->cmd_q;
-	struct ccp_device *ccp = cmd_q->ccp;
-	void __iomem *cr_addr;
-	u32 cr0, cmd;
-	unsigned int i;
-	int ret = 0;
-
-	/* We could read a status register to see how many free slots
-	 * are actually available, but reading that register resets it
-	 * and you could lose some error information.
-	 */
-	cmd_q->free_slots--;
-
-	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
-	      | (op->jobid << REQ0_JOBID_SHIFT)
-	      | REQ0_WAIT_FOR_WRITE;
-
-	if (op->soc)
-		cr0 |= REQ0_STOP_ON_COMPLETE
-		       | REQ0_INT_ON_COMPLETE;
-
-	if (op->ioc || !cmd_q->free_slots)
-		cr0 |= REQ0_INT_ON_COMPLETE;
-
-	/* Start at CMD_REQ1 */
-	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
-
-	mutex_lock(&ccp->req_mutex);
-
-	/* Write CMD_REQ1 through CMD_REQx first */
-	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
-		iowrite32(*(cr + i), cr_addr);
-
-	/* Tell the CCP to start */
-	wmb();
-	iowrite32(cr0, ccp->io_regs + CMD_REQ0);
-
-	mutex_unlock(&ccp->req_mutex);
-
-	if (cr0 & REQ0_INT_ON_COMPLETE) {
-		/* Wait for the job to complete */
-		ret = wait_event_interruptible(cmd_q->int_queue,
-					       cmd_q->int_rcvd);
-		if (ret || cmd_q->cmd_error) {
-			/* On error delete all related jobs from the queue */
-			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
-			      | op->jobid;
-
-			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
-
-			if (!ret)
-				ret = -EIO;
-		} else if (op->soc) {
-			/* Delete just head job from the queue on SoC */
-			cmd = DEL_Q_ACTIVE
-			      | (cmd_q->id << DEL_Q_ID_SHIFT)
-			      | op->jobid;
-
-			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
-		}
-
-		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
-
-		cmd_q->int_rcvd = 0;
-	}
-
-	return ret;
-}
-
-static int ccp_perform_aes(struct ccp_op *op)
-{
-	u32 cr[6];
-
-	/* Fill out the register contents for REQ1 through REQ6 */
-	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
-		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
-		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
-		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
-	cr[1] = op->src.u.dma.length - 1;
-	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
-		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
-		| ccp_addr_hi(&op->src.u.dma);
-	cr[4] = ccp_addr_lo(&op->dst.u.dma);
-	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
-		| ccp_addr_hi(&op->dst.u.dma);
-
-	if (op->u.aes.mode == CCP_AES_MODE_CFB)
-		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
-
-	if (op->eom)
-		cr[0] |= REQ1_EOM;
-
-	if (op->init)
-		cr[0] |= REQ1_INIT;
-
-	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_xts_aes(struct ccp_op *op)
-{
-	u32 cr[6];
-
-	/* Fill out the register contents for REQ1 through REQ6 */
-	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
-		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
-		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
-	cr[1] = op->src.u.dma.length - 1;
-	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
-		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
-		| ccp_addr_hi(&op->src.u.dma);
-	cr[4] = ccp_addr_lo(&op->dst.u.dma);
-	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
-		| ccp_addr_hi(&op->dst.u.dma);
-
-	if (op->eom)
-		cr[0] |= REQ1_EOM;
-
-	if (op->init)
-		cr[0] |= REQ1_INIT;
-
-	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_sha(struct ccp_op *op)
-{
-	u32 cr[6];
-
-	/* Fill out the register contents for REQ1 through REQ6 */
-	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
-		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
-		| REQ1_INIT;
-	cr[1] = op->src.u.dma.length - 1;
-	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
-		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
-		| ccp_addr_hi(&op->src.u.dma);
-
-	if (op->eom) {
-		cr[0] |= REQ1_EOM;
-		cr[4] = lower_32_bits(op->u.sha.msg_bits);
-		cr[5] = upper_32_bits(op->u.sha.msg_bits);
-	} else {
-		cr[4] = 0;
-		cr[5] = 0;
-	}
-
-	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_rsa(struct ccp_op *op)
-{
-	u32 cr[6];
-
-	/* Fill out the register contents for REQ1 through REQ6 */
-	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
-		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT)
-		| REQ1_EOM;
-	cr[1] = op->u.rsa.input_len - 1;
-	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
-		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
-		| ccp_addr_hi(&op->src.u.dma);
-	cr[4] = ccp_addr_lo(&op->dst.u.dma);
-	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
-		| ccp_addr_hi(&op->dst.u.dma);
-
-	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_passthru(struct ccp_op *op)
-{
-	u32 cr[6];
-
-	/* Fill out the register contents for REQ1 through REQ6 */
-	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
-		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
-		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
-
-	if (op->src.type == CCP_MEMTYPE_SYSTEM)
-		cr[1] = op->src.u.dma.length - 1;
-	else
-		cr[1] = op->dst.u.dma.length - 1;
-
-	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
-		cr[2] = ccp_addr_lo(&op->src.u.dma);
-		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
-			| ccp_addr_hi(&op->src.u.dma);
-
-		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
-			cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
-	} else {
-		cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
-		cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
-	}
-
-	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
-		cr[4] = ccp_addr_lo(&op->dst.u.dma);
-		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
-			| ccp_addr_hi(&op->dst.u.dma);
-	} else {
-		cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
-		cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
-	}
-
-	if (op->eom)
-		cr[0] |= REQ1_EOM;
-
-	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_ecc(struct ccp_op *op)
-{
-	u32 cr[6];
-
-	/* Fill out the register contents for REQ1 through REQ6 */
-	cr[0] = REQ1_ECC_AFFINE_CONVERT
-		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
-		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
-		| REQ1_EOM;
-	cr[1] = op->src.u.dma.length - 1;
-	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
-		| ccp_addr_hi(&op->src.u.dma);
-	cr[4] = ccp_addr_lo(&op->dst.u.dma);
-	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
-		| ccp_addr_hi(&op->dst.u.dma);
-
-	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
 static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
 {
 	int start;
@@ -837,7 +478,7 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
 
 	op.u.passthru.byte_swap = byte_swap;
 
-	return ccp_perform_passthru(&op);
+	return cmd_q->ccp->vdata->perform->perform_passthru(&op);
 }
 
 static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
@@ -969,7 +610,7 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 		}
 	}
 
-	ret = ccp_perform_aes(&op);
+	ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_src;
@@ -1131,7 +772,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		op.soc = 1;
 	}
 
-	ret = ccp_perform_aes(&op);
+	ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
@@ -1296,7 +937,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 		if (!src.sg_wa.bytes_left)
 			op.eom = 1;
 
-		ret = ccp_perform_xts_aes(&op);
+		ret = cmd_q->ccp->vdata->perform->perform_xts_aes(&op);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_dst;
@@ -1453,7 +1094,7 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		if (sha->final && !src.sg_wa.bytes_left)
 			op.eom = 1;
 
-		ret = ccp_perform_sha(&op);
+		ret = cmd_q->ccp->vdata->perform->perform_sha(&op);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_data;
@@ -1633,7 +1274,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	op.u.rsa.mod_size = rsa->key_size;
 	op.u.rsa.input_len = i_len;
 
-	ret = ccp_perform_rsa(&op);
+	ret = cmd_q->ccp->vdata->perform->perform_rsa(&op);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
@@ -1758,7 +1399,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 		op.dst.u.dma.offset = dst.sg_wa.sg_used;
 		op.dst.u.dma.length = op.src.u.dma.length;
 
-		ret = ccp_perform_passthru(&op);
+		ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_dst;
@@ -1870,7 +1511,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	op.u.ecc.function = cmd->u.ecc.function;
 
-	ret = ccp_perform_ecc(&op);
+	ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
@@ -2034,7 +1675,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	op.u.ecc.function = cmd->u.ecc.function;
 
-	ret = ccp_perform_ecc(&op);
+	ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index d1a36af44012..0bf262e36b6b 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -62,7 +62,8 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 		snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
 			 ccp->name, v);
 		ccp_pci->msix[v].vector = msix_entry[v].vector;
-		ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
+		ret = request_irq(ccp_pci->msix[v].vector,
+				  ccp->vdata->perform->irqhandler,
 				  0, ccp_pci->msix[v].name, dev);
 		if (ret) {
 			dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
@@ -95,7 +96,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
 		return ret;
 
 	ccp->irq = pdev->irq;
-	ret = request_irq(ccp->irq, ccp_irq_handler, 0, ccp->name, dev);
+	ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
+			  ccp->name, dev);
 	if (ret) {
 		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
 		goto e_msi;
@@ -228,7 +230,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	dev_set_drvdata(dev, ccp);
 
-	ret = ccp_init(ccp);
+	ret = ccp->vdata->perform->init(ccp);
 	if (ret)
 		goto e_iomap;
 
@@ -258,7 +260,7 @@ static void ccp_pci_remove(struct pci_dev *pdev)
 	if (!ccp)
 		return;
 
-	ccp_destroy(ccp);
+	ccp->vdata->perform->destroy(ccp);
 
 	pci_iounmap(pdev, ccp->io_map);
 
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 6e1cf228c7c0..351f28d8c336 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -70,7 +70,8 @@ static int ccp_get_irq(struct ccp_device *ccp)
 		return ret;
 
 	ccp->irq = ret;
-	ret = request_irq(ccp->irq, ccp_irq_handler, 0, ccp->name, dev);
+	ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
+			  ccp->name, dev);
 	if (ret) {
 		dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
 		return ret;
@@ -171,7 +172,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
 
 	dev_set_drvdata(dev, ccp);
 
-	ret = ccp_init(ccp);
+	ret = ccp->vdata->perform->init(ccp);
 	if (ret)
 		goto e_err;
 
@@ -189,7 +190,7 @@ static int ccp_platform_remove(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
 
-	ccp_destroy(ccp);
+	ccp->vdata->perform->destroy(ccp);
 
 	dev_notice(dev, "disabled\n");
 