author	Gary R Hook <gary.hook@amd.com>	2017-04-21 11:50:05 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-05-20 08:28:37 -0400
commit	a0a232489c0fd8f41016f221cfca6644a34b7b9a (patch)
tree	a4c27ba76642e1fc66b986eca6c43555d4079a44
parent	93424b2b63e0765c32e723173689a2d2b78d19ba (diff)
crypto: ccp - Change ISR handler method for a v3 CCP
commit 7b537b24e76a1e8e6d7ea91483a45d5b1426809b upstream.

The CCP has the ability to perform several operations simultaneously,
but only one interrupt. When implemented as a PCI device and using
MSI-X/MSI interrupts, use a tasklet model to service interrupts. By
disabling and enabling interrupts from the CCP, coupled with the
queuing that tasklets provide, we can ensure that all events
(occurring on the device) are recognized and serviced.

This change fixes a problem wherein 2 or more busy queues can cause
notification bits to change state while a (CCP) interrupt is being
serviced, but after the queue state has been evaluated. This results
in the event being 'lost' and the queue hanging, waiting to be
serviced. Since the status bits are never fully de-asserted, the CCP
never generates another interrupt (all bits zero -> one or more bits
one), and no further CCP operations will be executed.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	drivers/crypto/ccp/ccp-dev-v3.c	120
-rw-r--r--	drivers/crypto/ccp/ccp-dev.h	3
-rw-r--r--	drivers/crypto/ccp/ccp-pci.c	2
3 files changed, 75 insertions, 50 deletions
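Before the patch body, a note on the pattern it adopts: this is the classic Linux top-half/bottom-half split. The hard-IRQ handler masks the device's queue interrupts and defers servicing to a tasklet; the tasklet drains every asserted status bit and unmasks only when it is done, so an event that arrives mid-service latches in the status register instead of being lost. The sketch below shows only the shape of that pattern; the device struct, register offsets (MY_IRQ_MASK, MY_IRQ_STATUS) and function names are hypothetical stand-ins, not CCP driver code.

/* Minimal sketch of the masked-IRQ + tasklet pattern. All names and
 * register offsets here are illustrative, not taken from the driver.
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

#define MY_IRQ_MASK	0x10	/* hypothetical interrupt-mask register */
#define MY_IRQ_STATUS	0x14	/* hypothetical interrupt-status register */

struct my_dev {
	void __iomem *regs;
	u32 irq_mask;			/* enable bits for all active queues */
	struct tasklet_struct bh;
};

/* Bottom half: runs with device interrupts masked, so a queue that
 * raises an event while we work sets a status bit we will still see,
 * rather than a level interrupt that never re-asserts.
 */
static void my_irq_bh(unsigned long data)
{
	struct my_dev *md = (struct my_dev *)data;
	u32 status = ioread32(md->regs + MY_IRQ_STATUS);

	/* ... inspect and service each asserted queue bit ... */

	/* Acknowledge everything we handled, then unmask. */
	iowrite32(status & md->irq_mask, md->regs + MY_IRQ_STATUS);
	iowrite32(md->irq_mask, md->regs + MY_IRQ_MASK);
}

/* Top half: mask the device and defer. Keeping this path short also
 * lets the tasklet queue back-to-back runs if events keep arriving.
 */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *md = data;

	iowrite32(0x00, md->regs + MY_IRQ_MASK);
	tasklet_schedule(&md->bh);
	return IRQ_HANDLED;
}

One-time setup would be tasklet_init(&md->bh, my_irq_bh, (unsigned long)md) at probe time. In the patch itself the corresponding pieces are ccp_irq_handler() (top half), ccp_irq_bh() (bottom half), and ccp_disable_queue_interrupts()/ccp_enable_queue_interrupts(); the tasklet is initialized in ccp_init() and used only when ccp->use_tasklet has been set by the MSI-X/MSI setup paths.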
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbacc6161..e68966bbfa58 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
 	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
 }
 
+static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	struct ccp_cmd_queue *cmd_q;
+	u32 q_int, status;
+	unsigned int i;
+
+	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+		if (q_int) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+
+	ccp_disable_queue_interrupts(ccp);
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp_irq_bh((unsigned long)ccp);
+
+	return IRQ_HANDLED;
+}
+
 static int ccp_init(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
 	struct ccp_cmd_queue *cmd_q;
 	struct dma_pool *dma_pool;
 	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
-	unsigned int qmr, qim, i;
+	unsigned int qmr, i;
 	int ret;
 
 	/* Find available queues */
-	qim = 0;
+	ccp->qim = 0;
 	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
 	for (i = 0; i < MAX_HW_QUEUES; i++) {
 		if (!(qmr & (1 << i)))
@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
 		init_waitqueue_head(&cmd_q->int_queue);
 
 		/* Build queue interrupt mask (two interrupts per queue) */
-		qim |= cmd_q->int_ok | cmd_q->int_err;
+		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
 
 #ifdef CONFIG_ARM64
 		/* For arm64 set the recommended queue cache settings */
@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
 	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
 
 	/* Disable and clear interrupts until ready */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Request an irq */
 	ret = ccp->get_irq(ccp);
@@ -408,6 +464,11 @@ static int ccp_init(struct ccp_device *ccp)
 	init_waitqueue_head(&ccp->sb_queue);
 	init_waitqueue_head(&ccp->suspend_queue);
 
+	/* Initialize the ISR tasklet? */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
+			     (unsigned long)ccp);
+
 	dev_dbg(dev, "Starting threads...\n");
 	/* Create a kthread for each queue */
 	for (i = 0; i < ccp->cmd_q_count; i++) {
@@ -430,7 +491,7 @@ static int ccp_init(struct ccp_device *ccp)
 
 	dev_dbg(dev, "Enabling interrupts...\n");
 	/* Enable interrupts */
-	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+	ccp_enable_queue_interrupts(ccp);
 
 	dev_dbg(dev, "Registering device...\n");
 	ccp_add_device(ccp);
@@ -467,7 +528,7 @@ static void ccp_destroy(struct ccp_device *ccp)
 {
 	struct ccp_cmd_queue *cmd_q;
 	struct ccp_cmd *cmd;
-	unsigned int qim, i;
+	unsigned int i;
 
 	/* Unregister the DMA engine */
 	ccp_dmaengine_unregister(ccp);
@@ -478,22 +539,15 @@ static void ccp_destroy(struct ccp_device *ccp)
 	/* Remove this device from the list of available units */
 	ccp_del_device(ccp);
 
-	/* Build queue interrupt mask (two interrupt masks per queue) */
-	qim = 0;
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-		qim |= cmd_q->int_ok | cmd_q->int_err;
-	}
-
 	/* Disable and clear interrupts */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Stop the queue kthreads */
 	for (i = 0; i < ccp->cmd_q_count; i++)
@@ -520,40 +574,6 @@ static void ccp_destroy(struct ccp_device *ccp)
 	}
 }
 
-static irqreturn_t ccp_irq_handler(int irq, void *data)
-{
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
-	struct ccp_cmd_queue *cmd_q;
-	u32 q_int, status;
-	unsigned int i;
-
-	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-
-		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
-		if (q_int) {
-			cmd_q->int_status = status;
-			cmd_q->q_status = ioread32(cmd_q->reg_status);
-			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-			/* On error, only save the first error value */
-			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
-				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-			cmd_q->int_rcvd = 1;
-
-			/* Acknowledge the interrupt and wake the kthread */
-			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
-			wake_up_interruptible(&cmd_q->int_queue);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static const struct ccp_actions ccp3_actions = {
 	.aes = ccp_perform_aes,
 	.xts_aes = ccp_perform_xts_aes,
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index a0d7979836a5..8ac7ae17e1f4 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -332,7 +332,10 @@ struct ccp_device {
 	void *dev_specific;
 	int (*get_irq)(struct ccp_device *ccp);
 	void (*free_irq)(struct ccp_device *ccp);
+	unsigned int qim;
 	unsigned int irq;
+	bool use_tasklet;
+	struct tasklet_struct irq_tasklet;
 
 	/* I/O area used for device communication. The register mapping
 	 * starts at an offset into the mapped bar.
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 28a9996c1085..e880d4cf4ada 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 			goto e_irq;
 		}
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 
@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
 		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
 		goto e_msi;
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 