author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/crypto/caam/jr.c
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/crypto/caam/jr.c')
-rw-r--r--  drivers/crypto/caam/jr.c  115
1 file changed, 76 insertions(+), 39 deletions(-)
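The hunks below take the job ring driver back to the older Freescale layout: per-CPU dequeue tasklets instead of a single tasklet, irq-saving spinlocks around the ring bookkeeping, kzalloc()/dma_map_single() ring buffers in place of dma_alloc_coherent(), and removal of the per-ring dma_set_mask() calls. As a reading aid, a minimal sketch of the per-CPU tasklet pattern the patched code relies on follows; the jr_sketch wrapper and the NR_CPUS-sized irqtask[] array are assumptions for illustration, since the real struct caam_drv_private_jr definition is outside this diff.

/* Sketch only: the per-CPU dequeue tasklet pattern used by the patched jr.c.
 * The jr_sketch wrapper and the NR_CPUS-sized irqtask[] array are assumptions;
 * the real field lives in struct caam_drv_private_jr, outside this diff.
 */
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

struct jr_sketch {
        struct tasklet_struct irqtask[NR_CPUS];  /* one tasklet per CPU */
};

static void jr_sketch_dequeue(unsigned long devarg)
{
        /* drain the output ring, as caam_jr_dequeue() does in the hunks below */
}

static void jr_sketch_init(struct jr_sketch *jr, unsigned long devarg)
{
        int i;

        /* one tasklet per possible CPU, mirroring caam_jr_init() */
        for_each_possible_cpu(i)
                tasklet_init(&jr->irqtask[i], jr_sketch_dequeue, devarg);
}

static void jr_sketch_irq(struct jr_sketch *jr)
{
        /* schedule the tasklet of the CPU that took the interrupt,
         * mirroring caam_jr_interrupt()
         */
        preempt_disable();
        tasklet_schedule(&jr->irqtask[smp_processor_id()]);
        preempt_enable();
}

static void jr_sketch_exit(struct jr_sketch *jr)
{
        int i;

        /* kill every per-CPU tasklet, mirroring caam_jr_shutdown() */
        for_each_possible_cpu(i)
                tasklet_kill(&jr->irqtask[i]);
}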
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 93d14070141..340fa322c0f 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -2,7 +2,7 @@
  * CAAM/SEC 4.x transport/backend driver
  * JobR backend functionality
  *
- * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
  */
 
 #include "compat.h"
@@ -43,7 +43,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
         wr_reg32(&jrp->rregs->jrintstatus, irqstate);
 
         preempt_disable();
-        tasklet_schedule(&jrp->irqtask);
+        tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
         preempt_enable();
 
         return IRQ_HANDLED;
@@ -58,16 +58,17 @@ static void caam_jr_dequeue(unsigned long devarg)
         void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
         u32 *userdesc, userstatus;
         void *userarg;
+        unsigned long flags;
 
-        while (rd_reg32(&jrp->rregs->outring_used)) {
+        spin_lock_irqsave(&jrp->outlock, flags);
 
-                head = ACCESS_ONCE(jrp->head);
+        head = ACCESS_ONCE(jrp->head);
+        sw_idx = tail = jrp->tail;
 
-                spin_lock(&jrp->outlock);
+        while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+               rd_reg32(&jrp->rregs->outring_used)) {
 
-                sw_idx = tail = jrp->tail;
                 hw_idx = jrp->out_ring_read_index;
-
                 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
                         sw_idx = (tail + i) & (JOBR_DEPTH - 1);
 
@@ -94,8 +95,7 @@ static void caam_jr_dequeue(unsigned long devarg)
                 userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
                 userstatus = jrp->outring[hw_idx].jrstatus;
 
-                /* set done */
-                wr_reg32(&jrp->rregs->outring_rmvd, 1);
+                smp_mb();
 
                 jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
                                            (JOBR_DEPTH - 1);
@@ -115,12 +115,22 @@ static void caam_jr_dequeue(unsigned long devarg)
                         jrp->tail = tail;
                 }
 
-                spin_unlock(&jrp->outlock);
+                /* set done */
+                wr_reg32(&jrp->rregs->outring_rmvd, 1);
+
+                spin_unlock_irqrestore(&jrp->outlock, flags);
 
                 /* Finally, execute user's callback */
                 usercall(dev, userdesc, userstatus, userarg);
+
+                spin_lock_irqsave(&jrp->outlock, flags);
+
+                head = ACCESS_ONCE(jrp->head);
+                sw_idx = tail = jrp->tail;
         }
 
+        spin_unlock_irqrestore(&jrp->outlock, flags);
+
         /* reenable / unmask IRQs */
         clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
 }
@@ -138,22 +148,23 @@ int caam_jr_register(struct device *ctrldev, struct device **rdev)
 {
         struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
         struct caam_drv_private_jr *jrpriv = NULL;
+        unsigned long flags;
         int ring;
 
         /* Lock, if free ring - assign, unlock */
-        spin_lock(&ctrlpriv->jr_alloc_lock);
+        spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
         for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
                 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
                 if (jrpriv->assign == JOBR_UNASSIGNED) {
                         jrpriv->assign = JOBR_ASSIGNED;
                         *rdev = ctrlpriv->jrdev[ring];
-                        spin_unlock(&ctrlpriv->jr_alloc_lock);
+                        spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
                         return ring;
                 }
         }
 
         /* If assigned, write dev where caller needs it */
-        spin_unlock(&ctrlpriv->jr_alloc_lock);
+        spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
         *rdev = NULL;
 
         return -ENODEV;
@@ -171,6 +182,7 @@ int caam_jr_deregister(struct device *rdev)
 {
         struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
         struct caam_drv_private *ctrlpriv;
+        unsigned long flags;
 
         /* Get the owning controller's private space */
         ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
@@ -183,9 +195,9 @@ int caam_jr_deregister(struct device *rdev)
                 return -EBUSY;
 
         /* Release ring */
-        spin_lock(&ctrlpriv->jr_alloc_lock);
+        spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
         jrpriv->assign = JOBR_UNASSIGNED;
-        spin_unlock(&ctrlpriv->jr_alloc_lock);
+        spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
 
         return 0;
 }
@@ -226,6 +238,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 {
         struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
         struct caam_jrentry_info *head_entry;
+        unsigned long flags;
         int head, tail, desc_size;
         dma_addr_t desc_dma;
 
@@ -236,14 +249,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
                 return -EIO;
         }
 
-        spin_lock_bh(&jrp->inplock);
+        spin_lock_irqsave(&jrp->inplock, flags);
 
         head = jrp->head;
         tail = ACCESS_ONCE(jrp->tail);
 
         if (!rd_reg32(&jrp->rregs->inpring_avail) ||
             CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
-                spin_unlock_bh(&jrp->inplock);
+                spin_unlock_irqrestore(&jrp->inplock, flags);
                 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
                 return -EBUSY;
         }
@@ -263,9 +276,11 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
                                                  (JOBR_DEPTH - 1);
         jrp->head = (head + 1) & (JOBR_DEPTH - 1);
 
+        wmb();
+
         wr_reg32(&jrp->rregs->inpring_jobadd, 1);
 
-        spin_unlock_bh(&jrp->inplock);
+        spin_unlock_irqrestore(&jrp->inplock, flags);
 
         return 0;
 }
@@ -322,9 +337,11 @@ static int caam_jr_init(struct device *dev)
 
         jrp = dev_get_drvdata(dev);
 
-        tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-
         /* Connect job ring interrupt handler. */
+        for_each_possible_cpu(i)
+                tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
+                             (unsigned long)dev);
+
         error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
                             "caam-jobr", dev);
         if (error) {
@@ -339,11 +356,10 @@ static int caam_jr_init(struct device *dev)
         if (error)
                 return error;
 
-        jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-                                          &inpbusaddr, GFP_KERNEL);
-
-        jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
-                                          JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+        jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
+                               GFP_KERNEL | GFP_DMA);
+        jrp->outring = kzalloc(sizeof(struct jr_outentry) *
+                               JOBR_DEPTH, GFP_KERNEL | GFP_DMA);
 
         jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
                                GFP_KERNEL);
@@ -359,6 +375,31 @@ static int caam_jr_init(struct device *dev)
                 jrp->entinfo[i].desc_addr_dma = !0;
 
         /* Setup rings */
+        inpbusaddr = dma_map_single(dev, jrp->inpring,
+                                    sizeof(u32 *) * JOBR_DEPTH,
+                                    DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(dev, inpbusaddr)) {
+                dev_err(dev, "caam_jr_init(): can't map input ring\n");
+                kfree(jrp->inpring);
+                kfree(jrp->outring);
+                kfree(jrp->entinfo);
+                return -EIO;
+        }
+
+        outbusaddr = dma_map_single(dev, jrp->outring,
+                                    sizeof(struct jr_outentry) * JOBR_DEPTH,
+                                    DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(dev, outbusaddr)) {
+                dev_err(dev, "caam_jr_init(): can't map output ring\n");
+                dma_unmap_single(dev, inpbusaddr,
+                                 sizeof(u32 *) * JOBR_DEPTH,
+                                 DMA_BIDIRECTIONAL);
+                kfree(jrp->inpring);
+                kfree(jrp->outring);
+                kfree(jrp->entinfo);
+                return -EIO;
+        }
+
         jrp->inp_ring_write_index = 0;
         jrp->out_ring_read_index = 0;
         jrp->head = 0;
@@ -390,11 +431,12 @@ int caam_jr_shutdown(struct device *dev)
 {
         struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
         dma_addr_t inpbusaddr, outbusaddr;
-        int ret;
+        int ret, i;
 
         ret = caam_reset_hw_jr(dev);
 
-        tasklet_kill(&jrp->irqtask);
+        for_each_possible_cpu(i)
+                tasklet_kill(&jrp->irqtask[i]);
 
         /* Release interrupt */
         free_irq(jrp->irq, dev);
@@ -402,10 +444,13 @@ int caam_jr_shutdown(struct device *dev)
         /* Free rings */
         inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
         outbusaddr = rd_reg64(&jrp->rregs->outring_base);
-        dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-                          jrp->inpring, inpbusaddr);
-        dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
-                          jrp->outring, outbusaddr);
+        dma_unmap_single(dev, outbusaddr,
+                         sizeof(struct jr_outentry) * JOBR_DEPTH,
+                         DMA_BIDIRECTIONAL);
+        dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
+                         DMA_BIDIRECTIONAL);
+        kfree(jrp->outring);
+        kfree(jrp->inpring);
         kfree(jrp->entinfo);
 
         return ret;
@@ -458,14 +503,6 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
         dev_set_drvdata(jrdev, jrpriv);
         ctrlpriv->jrdev[ring] = jrdev;
 
-        if (sizeof(dma_addr_t) == sizeof(u64))
-                if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
-                        dma_set_mask(jrdev, DMA_BIT_MASK(40));
-                else
-                        dma_set_mask(jrdev, DMA_BIT_MASK(36));
-        else
-                dma_set_mask(jrdev, DMA_BIT_MASK(32));
-
         /* Identify the interrupt */
         jrpriv->irq = of_irq_to_resource(np, 0, NULL);
 
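Porting note: the init/shutdown hunks above replace coherent DMA allocations with streaming mappings, so each kzalloc() ring must be paired with dma_map_single()/dma_unmap_single() and checked via dma_mapping_error(). Note also that the input ring is allocated as sizeof(dma_addr_t) * JOBR_DEPTH but mapped as sizeof(u32 *) * JOBR_DEPTH, which only match when dma_addr_t has pointer width. The condensed sketch below uses one size for both; RING_DEPTH and the ring_map()/ring_unmap() helpers are stand-ins for illustration, since JOBR_DEPTH is defined outside this diff.

/* Sketch only: streaming-DMA ring setup/teardown as done by the patched
 * caam_jr_init()/caam_jr_shutdown(). RING_DEPTH stands in for JOBR_DEPTH.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define RING_DEPTH 512          /* stand-in; the real JOBR_DEPTH lives in the driver headers */

static int ring_map(struct device *dev, dma_addr_t **ring, dma_addr_t *busaddr)
{
        /* GFP_DMA keeps the ring in a DMA-able zone, as in the patch */
        *ring = kzalloc(sizeof(dma_addr_t) * RING_DEPTH, GFP_KERNEL | GFP_DMA);
        if (!*ring)
                return -ENOMEM;

        *busaddr = dma_map_single(dev, *ring, sizeof(dma_addr_t) * RING_DEPTH,
                                  DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *busaddr)) {
                kfree(*ring);
                return -EIO;
        }
        return 0;
}

static void ring_unmap(struct device *dev, dma_addr_t *ring, dma_addr_t busaddr)
{
        /* unmap before freeing, mirroring caam_jr_shutdown() */
        dma_unmap_single(dev, busaddr, sizeof(dma_addr_t) * RING_DEPTH,
                         DMA_BIDIRECTIONAL);
        kfree(ring);
}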