path: root/drivers/crypto/caam/jr.c
Diffstat (limited to 'drivers/crypto/caam/jr.c')
-rw-r--r--  drivers/crypto/caam/jr.c | 340
1 file changed, 232 insertions(+), 108 deletions(-)
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index bdb786d5a5e5..1d80bd3636c5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/of_irq.h>
+#include <linux/of_address.h>
 
 #include "compat.h"
 #include "regs.h"
@@ -13,6 +14,113 @@
13#include "desc.h" 14#include "desc.h"
14#include "intern.h" 15#include "intern.h"
15 16
17struct jr_driver_data {
18 /* List of Physical JobR's with the Driver */
19 struct list_head jr_list;
20 spinlock_t jr_alloc_lock; /* jr_list lock */
21} ____cacheline_aligned;
22
23static struct jr_driver_data driver_data;
24
25static int caam_reset_hw_jr(struct device *dev)
26{
27 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
28 unsigned int timeout = 100000;
29
30 /*
31 * mask interrupts since we are going to poll
32 * for reset completion status
33 */
34 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
35
36 /* initiate flush (required prior to reset) */
37 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
38 while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
39 JRINT_ERR_HALT_INPROGRESS) && --timeout)
40 cpu_relax();
41
42 if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
43 JRINT_ERR_HALT_COMPLETE || timeout == 0) {
44 dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
45 return -EIO;
46 }
47
48 /* initiate reset */
49 timeout = 100000;
50 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
51 while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
52 cpu_relax();
53
54 if (timeout == 0) {
55 dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
56 return -EIO;
57 }
58
59 /* unmask interrupts */
60 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
61
62 return 0;
63}
64
65/*
66 * Shutdown JobR independent of platform property code
67 */
68int caam_jr_shutdown(struct device *dev)
69{
70 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
71 dma_addr_t inpbusaddr, outbusaddr;
72 int ret;
73
74 ret = caam_reset_hw_jr(dev);
75
76 tasklet_kill(&jrp->irqtask);
77
78 /* Release interrupt */
79 free_irq(jrp->irq, dev);
80
81 /* Free rings */
82 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
83 outbusaddr = rd_reg64(&jrp->rregs->outring_base);
84 dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
85 jrp->inpring, inpbusaddr);
86 dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
87 jrp->outring, outbusaddr);
88 kfree(jrp->entinfo);
89
90 return ret;
91}
92
93static int caam_jr_remove(struct platform_device *pdev)
94{
95 int ret;
96 struct device *jrdev;
97 struct caam_drv_private_jr *jrpriv;
98
99 jrdev = &pdev->dev;
100 jrpriv = dev_get_drvdata(jrdev);
101
102 /*
103 * Return EBUSY if job ring already allocated.
104 */
105 if (atomic_read(&jrpriv->tfm_count)) {
106 dev_err(jrdev, "Device is busy\n");
107 return -EBUSY;
108 }
109
110 /* Remove the node from Physical JobR list maintained by driver */
111 spin_lock(&driver_data.jr_alloc_lock);
112 list_del(&jrpriv->list_node);
113 spin_unlock(&driver_data.jr_alloc_lock);
114
115 /* Release ring */
116 ret = caam_jr_shutdown(jrdev);
117 if (ret)
118 dev_err(jrdev, "Failed to shut down job ring\n");
119 irq_dispose_mapping(jrpriv->irq);
120
121 return ret;
122}
123
16/* Main per-ring interrupt handler */ 124/* Main per-ring interrupt handler */
17static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) 125static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
18{ 126{
@@ -128,6 +236,59 @@ static void caam_jr_dequeue(unsigned long devarg)
 }
 
 /**
+ * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
+ *
+ * returns :  pointer to the newly allocated physical
+ *	      JobR dev can be written to if successful.
+ **/
+struct device *caam_jr_alloc(void)
+{
+	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+	struct device *dev = NULL;
+	int min_tfm_cnt	= INT_MAX;
+	int tfm_cnt;
+
+	spin_lock(&driver_data.jr_alloc_lock);
+
+	if (list_empty(&driver_data.jr_list)) {
+		spin_unlock(&driver_data.jr_alloc_lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+		tfm_cnt = atomic_read(&jrpriv->tfm_count);
+		if (tfm_cnt < min_tfm_cnt) {
+			min_tfm_cnt = tfm_cnt;
+			min_jrpriv = jrpriv;
+		}
+		if (!min_tfm_cnt)
+			break;
+	}
+
+	if (min_jrpriv) {
+		atomic_inc(&min_jrpriv->tfm_count);
+		dev = min_jrpriv->dev;
+	}
+	spin_unlock(&driver_data.jr_alloc_lock);
+
+	return dev;
+}
+EXPORT_SYMBOL(caam_jr_alloc);
+
+/**
+ * caam_jr_free() - Free the Job Ring
+ * @rdev     - points to the dev that identifies the Job ring to
+ *             be released.
+ **/
+void caam_jr_free(struct device *rdev)
+{
+	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+
+	atomic_dec(&jrpriv->tfm_count);
+}
+EXPORT_SYMBOL(caam_jr_free);
+
+/**
  * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
  * -EBUSY if the queue is full, -EIO if it cannot map the caller's
  * descriptor.
@@ -207,46 +368,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 }
 EXPORT_SYMBOL(caam_jr_enqueue);
 
-static int caam_reset_hw_jr(struct device *dev)
-{
-	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
-	unsigned int timeout = 100000;
-
-	/*
-	 * mask interrupts since we are going to poll
-	 * for reset completion status
-	 */
-	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
-
-	/* initiate flush (required prior to reset) */
-	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
-	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
-		JRINT_ERR_HALT_INPROGRESS) && --timeout)
-		cpu_relax();
-
-	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
-	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
-		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
-		return -EIO;
-	}
-
-	/* initiate reset */
-	timeout = 100000;
-	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
-	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
-		cpu_relax();
-
-	if (timeout == 0) {
-		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
-		return -EIO;
-	}
-
-	/* unmask interrupts */
-	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
-
-	return 0;
-}
-
 /*
  * Init JobR independent of platform property detection
  */
@@ -262,7 +383,7 @@ static int caam_jr_init(struct device *dev)
 
 	/* Connect job ring interrupt handler. */
 	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
-			    "caam-jobr", dev);
+			    dev_name(dev), dev);
 	if (error) {
 		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
 			jrp->ridx, jrp->irq);
@@ -318,86 +439,43 @@ static int caam_jr_init(struct device *dev)
 	return 0;
 }
 
-/*
- * Shutdown JobR independent of platform property code
- */
-int caam_jr_shutdown(struct device *dev)
-{
-	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
-	dma_addr_t inpbusaddr, outbusaddr;
-	int ret;
-
-	ret = caam_reset_hw_jr(dev);
-
-	tasklet_kill(&jrp->irqtask);
-
-	/* Release interrupt */
-	free_irq(jrp->irq, dev);
-
-	/* Free rings */
-	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
-	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
-	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-			  jrp->inpring, inpbusaddr);
-	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
-			  jrp->outring, outbusaddr);
-	kfree(jrp->entinfo);
-	of_device_unregister(jrp->jr_pdev);
-
-	return ret;
-}
-
 /*
- * Probe routine for each detected JobR subsystem. It assumes that
- * property detection was picked up externally.
+ * Probe routine for each detected JobR subsystem.
  */
-int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
-		  int ring)
+static int caam_jr_probe(struct platform_device *pdev)
 {
-	struct device *ctrldev, *jrdev;
-	struct platform_device *jr_pdev;
-	struct caam_drv_private *ctrlpriv;
+	struct device *jrdev;
+	struct device_node *nprop;
+	struct caam_job_ring __iomem *ctrl;
 	struct caam_drv_private_jr *jrpriv;
-	u32 *jroffset;
+	static int total_jobrs;
 	int error;
 
-	ctrldev = &pdev->dev;
-	ctrlpriv = dev_get_drvdata(ctrldev);
-
+	jrdev = &pdev->dev;
 	jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
 			 GFP_KERNEL);
-	if (jrpriv == NULL) {
-		dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
-			ring);
+	if (!jrpriv)
 		return -ENOMEM;
-	}
-	jrpriv->parentdev = ctrldev; /* point back to parent */
-	jrpriv->ridx = ring; /* save ring identity relative to detection */
 
-	/*
-	 * Derive a pointer to the detected JobRs regs
-	 * Driver has already iomapped the entire space, we just
-	 * need to add in the offset to this JobR. Don't know if I
-	 * like this long-term, but it'll run
-	 */
-	jroffset = (u32 *)of_get_property(np, "reg", NULL);
-	jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
-							 + *jroffset);
+	dev_set_drvdata(jrdev, jrpriv);
 
-	/* Build a local dev for each detected queue */
-	jr_pdev = of_platform_device_create(np, NULL, ctrldev);
-	if (jr_pdev == NULL) {
-		kfree(jrpriv);
-		return -EINVAL;
+	/* save ring identity relative to detection */
+	jrpriv->ridx = total_jobrs++;
+
+	nprop = pdev->dev.of_node;
+	/* Get configuration properties from device tree */
+	/* First, get register page */
+	ctrl = of_iomap(nprop, 0);
+	if (!ctrl) {
+		dev_err(jrdev, "of_iomap() failed\n");
+		return -ENOMEM;
 	}
 
-	jrpriv->jr_pdev = jr_pdev;
-	jrdev = &jr_pdev->dev;
-	dev_set_drvdata(jrdev, jrpriv);
-	ctrlpriv->jrdev[ring] = jrdev;
+	jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
 
 	if (sizeof(dma_addr_t) == sizeof(u64))
-		if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
+		if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
 			dma_set_mask(jrdev, DMA_BIT_MASK(40));
 		else
 			dma_set_mask(jrdev, DMA_BIT_MASK(36));
@@ -405,15 +483,61 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
 		dma_set_mask(jrdev, DMA_BIT_MASK(32));
 
 	/* Identify the interrupt */
-	jrpriv->irq = irq_of_parse_and_map(np, 0);
+	jrpriv->irq = irq_of_parse_and_map(nprop, 0);
 
 	/* Now do the platform independent part */
 	error = caam_jr_init(jrdev); /* now turn on hardware */
 	if (error) {
-		of_device_unregister(jr_pdev);
 		kfree(jrpriv);
 		return error;
 	}
 
-	return error;
+	jrpriv->dev = jrdev;
+	spin_lock(&driver_data.jr_alloc_lock);
+	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+	spin_unlock(&driver_data.jr_alloc_lock);
+
+	atomic_set(&jrpriv->tfm_count, 0);
+
+	return 0;
+}
+
+static struct of_device_id caam_jr_match[] = {
+	{
+		.compatible = "fsl,sec-v4.0-job-ring",
+	},
+	{
+		.compatible = "fsl,sec4.0-job-ring",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, caam_jr_match);
+
+static struct platform_driver caam_jr_driver = {
+	.driver = {
+		.name = "caam_jr",
+		.owner = THIS_MODULE,
+		.of_match_table = caam_jr_match,
+	},
+	.probe       = caam_jr_probe,
+	.remove      = caam_jr_remove,
+};
+
+static int __init jr_driver_init(void)
+{
+	spin_lock_init(&driver_data.jr_alloc_lock);
+	INIT_LIST_HEAD(&driver_data.jr_list);
+	return platform_driver_register(&caam_jr_driver);
+}
+
+static void __exit jr_driver_exit(void)
+{
+	platform_driver_unregister(&caam_jr_driver);
 }
+
+module_init(jr_driver_init);
+module_exit(jr_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM JR request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");