author		Kim Phillips <kim.phillips@freescale.com>	2011-03-13 04:54:26 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2011-03-26 22:45:16 -0400
commit		8e8ec596e6c0144e2dd500a57ee23dde9684df46 (patch)
tree		6ca9b342f37b3dc7a62621e544c8861920668117 /drivers/crypto/caam/jr.c
parent		60af520cf264ea26b2af3a6871bbd71850522aea (diff)
crypto: caam - Add support for the Freescale SEC4/CAAM
The SEC4 supersedes the SEC2.x/3.x as Freescale's Integrated Security Engine. Its programming model is incompatible with all prior versions of the SEC (talitos). The SEC4 is also known as the Cryptographic Accelerator and Assurance Module (CAAM); this driver is named caam.

This initial submission does not include support for Data Path mode operation - AEAD descriptors are submitted via the job ring interface, while the Queue Interface (QI) is enabled for use by others. Only AEAD algorithms are implemented at this time, for use with IPsec.

Many thanks to the Freescale STC team for their contributions to this driver.

Signed-off-by: Steve Cornelius <sec@pobox.com>
Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/caam/jr.c')
-rw-r--r--	drivers/crypto/caam/jr.c	523
1 file changed, 523 insertions(+), 0 deletions(-)
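For orientation before the diff itself: a client of this backend reserves a ring with caam_jr_register(), submits a DMA-able descriptor with caam_jr_enqueue(), receives the result in a completion callback, and releases the ring with caam_jr_deregister(). Below is a minimal sketch of that flow, assuming the declarations exported by jr.h; struct my_req, my_done() and my_submit() are illustrative names only and are not part of this patch. The descriptor is assumed to have been constructed elsewhere.

#include <linux/completion.h>
#include <linux/device.h>
#include "jr.h"

/* Hypothetical per-request context -- not part of this patch */
struct my_req {
	struct completion done;
	u32 status;
};

/* Completion callback matching the caam_jr_enqueue() contract */
static void my_done(struct device *jrdev, u32 *desc, u32 status, void *areq)
{
	struct my_req *req = areq;

	req->status = status;	/* untranslated CAAM status word */
	complete(&req->done);
}

static int my_submit(struct device *ctrldev, u32 *desc)
{
	struct device *jrdev;
	struct my_req req;
	int ring, ret;

	/* reserve a ring; returns its ordinal, or -ENODEV if none free */
	ring = caam_jr_register(ctrldev, &jrdev);
	if (ring < 0)
		return ring;

	init_completion(&req.done);

	/* desc must already describe a complete CAAM job */
	ret = caam_jr_enqueue(jrdev, desc, my_done, &req);
	if (!ret) {
		wait_for_completion(&req.done);
		ret = req.status ? -EIO : 0;
	}

	caam_jr_deregister(jrdev);	/* -EBUSY if jobs still queued */
	return ret;
}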
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
new file mode 100644
index 000000000000..68cb9af4d1a3
--- /dev/null
+++ b/drivers/crypto/caam/jr.c
@@ -0,0 +1,523 @@
/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 */

#include "compat.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"

/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If a JobR error occurred, we have more development work to do.
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
	preempt_enable();

	return IRQ_HANDLED;
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;
	unsigned long flags;

	spin_lock_irqsave(&jrp->outlock, flags);

	head = ACCESS_ONCE(jrp->head);
	sw_idx = tail = jrp->tail;

	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
	       rd_reg32(&jrp->rregs->outring_used)) {

		hw_idx = jrp->out_ring_read_index;
		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			smp_read_barrier_depends();

			if (jrp->outring[hw_idx].desc ==
			    jrp->entinfo[sw_idx].desc_addr_dma)
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = jrp->outring[hw_idx].jrstatus;

		smp_mb();

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail. Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
				smp_read_barrier_depends();
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		spin_unlock_irqrestore(&jrp->outlock, flags);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);

		spin_lock_irqsave(&jrp->outlock, flags);

		head = ACCESS_ONCE(jrp->head);
		sw_idx = tail = jrp->tail;
	}

	spin_unlock_irqrestore(&jrp->outlock, flags);

	/* reenable / unmask IRQs */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}

/**
 * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
 * the ordinal of the ring allocated, else -ENODEV if no rings
 * are available.
 * @ctrldev: points to the controller level dev (parent) that
 *           owns rings available for use.
 * @rdev: points to where a pointer to the newly allocated queue's
 *        dev can be written to if successful.
 **/
int caam_jr_register(struct device *ctrldev, struct device **rdev)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_drv_private_jr *jrpriv = NULL;
	unsigned long flags;
	int ring;

	/* Lock, if free ring - assign, unlock */
	spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
		if (jrpriv->assign == JOBR_UNASSIGNED) {
			jrpriv->assign = JOBR_ASSIGNED;
			*rdev = ctrlpriv->jrdev[ring];
			spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
			return ring;
		}
	}

	/* No free ring - clear the caller's dev pointer and give up */
	spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
	*rdev = NULL;

	return -ENODEV;
}
EXPORT_SYMBOL(caam_jr_register);

/**
 * caam_jr_deregister() - Deregister an API and release the queue.
 * Returns 0 if OK, -EBUSY if the queue still contains pending entries
 * or unprocessed results at the time of the call.
 * @rdev: points to the dev that identifies the queue to
 *        be released.
 **/
int caam_jr_deregister(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
	struct caam_drv_private *ctrlpriv;
	unsigned long flags;

	/* Get the owning controller's private space */
	ctrlpriv = dev_get_drvdata(jrpriv->parentdev);

	/*
	 * Make sure ring empty before release
	 */
	if (rd_reg32(&jrpriv->rregs->outring_used) ||
	    (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
		return -EBUSY;

	/* Release ring */
	spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
	jrpriv->assign = JOBR_UNASSIGNED;
	spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);

	return 0;
}
EXPORT_SYMBOL(caam_jr_deregister);

/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as
 *                 the "desc" passed to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file.
 *        @areq:   optional pointer to the argument passed with the
 *                 original request.
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	unsigned long flags;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_irqsave(&jrp->inplock, flags);

	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_irqrestore(&jrp->inplock, flags);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_irqrestore(&jrp->inplock, flags);

	return 0;
}
EXPORT_SYMBOL(caam_jr_enqueue);

static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * FIXME: disabling IRQs here inhibits proper job completion
	 * and error propagation
	 */
	disable_irq(jrp->irq);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	enable_irq(jrp->irq);

	return 0;
}

/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	error = caam_reset_hw_jr(dev);
	if (error)
		return error;

	jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
			       GFP_KERNEL | GFP_DMA);
	jrp->outring = kzalloc(sizeof(struct jr_outentry) *
			       JOBR_DEPTH, GFP_KERNEL | GFP_DMA);

	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
			       GFP_KERNEL);

	if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
	    (jrp->entinfo == NULL)) {
		dev_err(dev, "can't allocate job rings for %d\n",
			jrp->ridx);
		return -ENOMEM;
	}

	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	inpbusaddr = dma_map_single(dev, jrp->inpring,
				    sizeof(u32 *) * JOBR_DEPTH,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, inpbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map input ring\n");
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	outbusaddr = dma_map_single(dev, jrp->outring,
				    sizeof(struct jr_outentry) * JOBR_DEPTH,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, outbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map output ring\n");
		dma_unmap_single(dev, inpbusaddr,
				 sizeof(u32 *) * JOBR_DEPTH,
				 DMA_BIDIRECTIONAL);
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	/* Connect job ring interrupt handler. */
	for_each_possible_cpu(i)
		tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
			     (unsigned long)dev);

	error = request_irq(jrp->irq, caam_jr_interrupt, 0,
			    "caam-jobr", dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		irq_dispose_mapping(jrp->irq);
		jrp->irq = 0;
		dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(dev, outbusaddr,
				 sizeof(struct jr_outentry) * JOBR_DEPTH,
				 DMA_BIDIRECTIONAL);
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EINVAL;
	}

	jrp->assign = JOBR_UNASSIGNED;
	return 0;
}

/*
 * Shutdown JobR independent of platform property code
 */
int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	dma_addr_t inpbusaddr, outbusaddr;
	int ret, i;

	ret = caam_reset_hw_jr(dev);

	for_each_possible_cpu(i)
		tasklet_kill(&jrp->irqtask[i]);

	/* Release interrupt */
	free_irq(jrp->irq, dev);

	/* Free rings */
	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_unmap_single(dev, outbusaddr,
			 sizeof(struct jr_outentry) * JOBR_DEPTH,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
			 DMA_BIDIRECTIONAL);
	kfree(jrp->outring);
	kfree(jrp->inpring);
	kfree(jrp->entinfo);

	return ret;
}

/*
 * Probe routine for each detected JobR subsystem. It assumes that
 * property detection was picked up externally.
 */
int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
		  int ring)
{
	struct device *ctrldev, *jrdev;
	struct platform_device *jr_pdev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	u32 *jroffset;
	int error;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);

	jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
			 GFP_KERNEL);
	if (jrpriv == NULL) {
		dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
			ring);
		return -ENOMEM;
	}
	jrpriv->parentdev = ctrldev; /* point back to parent */
	jrpriv->ridx = ring; /* save ring identity relative to detection */

	/*
	 * Derive a pointer to the detected JobRs regs
	 * Driver has already iomapped the entire space, we just
	 * need to add in the offset to this JobR. Don't know if I
	 * like this long-term, but it'll run
	 */
	jroffset = (u32 *)of_get_property(np, "reg", NULL);
	jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
							 + *jroffset);

	/* Build a local dev for each detected queue */
	jr_pdev = of_platform_device_create(np, NULL, ctrldev);
	if (jr_pdev == NULL) {
		kfree(jrpriv);
		return -EINVAL;
	}
	jrdev = &jr_pdev->dev;
	dev_set_drvdata(jrdev, jrpriv);
	ctrlpriv->jrdev[ring] = jrdev;

	/* Identify the interrupt */
	jrpriv->irq = of_irq_to_resource(np, 0, NULL);

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		kfree(jrpriv);
		return error;
	}

	return error;
}
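caam_jr_probe() leaves device-tree walking to the controller driver: it receives one job ring node plus its detection ordinal per call. A minimal sketch of such a caller, under stated assumptions: caam_probe_rings() is a hypothetical helper, not part of this patch, and the "fsl,sec-v4.0-job-ring" compatible string is assumed here for illustration rather than taken from this file.

#include <linux/of.h>
#include <linux/of_platform.h>

/* Hypothetical controller-side caller -- not part of this patch */
static int caam_probe_rings(struct platform_device *pdev)
{
	struct device_node *np;
	int ring = 0, err;

	for_each_child_of_node(pdev->dev.of_node, np) {
		/* compatible string assumed for illustration */
		if (!of_device_is_compatible(np, "fsl,sec-v4.0-job-ring"))
			continue;

		/* hand one detected ring node to the JobR backend */
		err = caam_jr_probe(pdev, np, ring);
		if (err) {
			of_node_put(np);	/* drop iterator reference */
			return err;
		}
		ring++;
	}

	return 0;
}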