aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGary R Hook <gary.hook@amd.com>2017-04-21 11:50:14 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-05-20 08:28:37 -0400
commitf8d05099ec72a943a71b2c95959d37dbef13b7f1 (patch)
tree0dd0a2fd8aaa612a7d1bed0a79babcc31a0479db
parenta0a232489c0fd8f41016f221cfca6644a34b7b9a (diff)
crypto: ccp - Change ISR handler method for a v5 CCP
commit 6263b51eb3190d30351360fd168959af7e3a49a9 upstream. The CCP has the ability to perform several operations simultaneously, but only one interrupt. When implemented as a PCI device and using MSI-X/MSI interrupts, use a tasklet model to service interrupts. By disabling and enabling interrupts from the CCP, coupled with the queuing that tasklets provide, we can ensure that all events (occurring on the device) are recognized and serviced. This change fixes a problem wherein 2 or more busy queues can cause notification bits to change state while a (CCP) interrupt is being serviced, but after the queue state has been evaluated. This results in the event being 'lost' and the queue hanging, waiting to be serviced. Since the status bits are never fully de-asserted, the CCP never generates another interrupt (all bits zero -> one or more bits one), and no further CCP operations will be executed. Signed-off-by: Gary R Hook <gary.hook@amd.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c111
1 file changed, 67 insertions, 44 deletions
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index a6cb07810d6a..2c0ce5f605b3 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -644,6 +644,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
644 return rc; 644 return rc;
645} 645}
646 646
647static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
648{
649 unsigned int i;
650
651 for (i = 0; i < ccp->cmd_q_count; i++)
652 iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
653}
654
655static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
656{
657 unsigned int i;
658
659 for (i = 0; i < ccp->cmd_q_count; i++)
660 iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
661}
662
663static void ccp5_irq_bh(unsigned long data)
664{
665 struct ccp_device *ccp = (struct ccp_device *)data;
666 u32 status;
667 unsigned int i;
668
669 for (i = 0; i < ccp->cmd_q_count; i++) {
670 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
671
672 status = ioread32(cmd_q->reg_interrupt_status);
673
674 if (status) {
675 cmd_q->int_status = status;
676 cmd_q->q_status = ioread32(cmd_q->reg_status);
677 cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
678
679 /* On error, only save the first error value */
680 if ((status & INT_ERROR) && !cmd_q->cmd_error)
681 cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
682
683 cmd_q->int_rcvd = 1;
684
685 /* Acknowledge the interrupt and wake the kthread */
686 iowrite32(status, cmd_q->reg_interrupt_status);
687 wake_up_interruptible(&cmd_q->int_queue);
688 }
689 }
690 ccp5_enable_queue_interrupts(ccp);
691}
692
693static irqreturn_t ccp5_irq_handler(int irq, void *data)
694{
695 struct device *dev = data;
696 struct ccp_device *ccp = dev_get_drvdata(dev);
697
698 ccp5_disable_queue_interrupts(ccp);
699 if (ccp->use_tasklet)
700 tasklet_schedule(&ccp->irq_tasklet);
701 else
702 ccp5_irq_bh((unsigned long)ccp);
703 return IRQ_HANDLED;
704}
705
647static int ccp5_init(struct ccp_device *ccp) 706static int ccp5_init(struct ccp_device *ccp)
648{ 707{
649 struct device *dev = ccp->dev; 708 struct device *dev = ccp->dev;
@@ -728,18 +787,17 @@ static int ccp5_init(struct ccp_device *ccp)
728 dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); 787 dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
729 788
730 /* Turn off the queues and disable interrupts until ready */ 789 /* Turn off the queues and disable interrupts until ready */
790 ccp5_disable_queue_interrupts(ccp);
731 for (i = 0; i < ccp->cmd_q_count; i++) { 791 for (i = 0; i < ccp->cmd_q_count; i++) {
732 cmd_q = &ccp->cmd_q[i]; 792 cmd_q = &ccp->cmd_q[i];
733 793
734 cmd_q->qcontrol = 0; /* Start with nothing */ 794 cmd_q->qcontrol = 0; /* Start with nothing */
735 iowrite32(cmd_q->qcontrol, cmd_q->reg_control); 795 iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
736 796
737 /* Disable the interrupts */
738 iowrite32(0x00, cmd_q->reg_int_enable);
739 ioread32(cmd_q->reg_int_status); 797 ioread32(cmd_q->reg_int_status);
740 ioread32(cmd_q->reg_status); 798 ioread32(cmd_q->reg_status);
741 799
742 /* Clear the interrupts */ 800 /* Clear the interrupt status */
743 iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); 801 iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
744 } 802 }
745 803
@@ -750,6 +808,10 @@ static int ccp5_init(struct ccp_device *ccp)
750 dev_err(dev, "unable to allocate an IRQ\n"); 808 dev_err(dev, "unable to allocate an IRQ\n");
751 goto e_pool; 809 goto e_pool;
752 } 810 }
811 /* Initialize the ISR tasklet */
812 if (ccp->use_tasklet)
813 tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
814 (unsigned long)ccp);
753 815
754 /* Initialize the queue used to suspend */ 816 /* Initialize the queue used to suspend */
755 init_waitqueue_head(&ccp->suspend_queue); 817 init_waitqueue_head(&ccp->suspend_queue);
@@ -821,11 +883,7 @@ static int ccp5_init(struct ccp_device *ccp)
821 } 883 }
822 884
823 dev_dbg(dev, "Enabling interrupts...\n"); 885 dev_dbg(dev, "Enabling interrupts...\n");
824 /* Enable interrupts */ 886 ccp5_enable_queue_interrupts(ccp);
825 for (i = 0; i < ccp->cmd_q_count; i++) {
826 cmd_q = &ccp->cmd_q[i];
827 iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_int_enable);
828 }
829 887
830 dev_dbg(dev, "Registering device...\n"); 888 dev_dbg(dev, "Registering device...\n");
831 /* Put this on the unit list to make it available */ 889 /* Put this on the unit list to make it available */
@@ -877,15 +935,13 @@ static void ccp5_destroy(struct ccp_device *ccp)
877 ccp_del_device(ccp); 935 ccp_del_device(ccp);
878 936
879 /* Disable and clear interrupts */ 937 /* Disable and clear interrupts */
938 ccp5_disable_queue_interrupts(ccp);
880 for (i = 0; i < ccp->cmd_q_count; i++) { 939 for (i = 0; i < ccp->cmd_q_count; i++) {
881 cmd_q = &ccp->cmd_q[i]; 940 cmd_q = &ccp->cmd_q[i];
882 941
883 /* Turn off the run bit */ 942 /* Turn off the run bit */
884 iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); 943 iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
885 944
886 /* Disable the interrupts */
887 iowrite32(0x00, cmd_q->reg_int_enable);
888
889 /* Clear the interrupt status */ 945 /* Clear the interrupt status */
890 iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); 946 iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
891 ioread32(cmd_q->reg_int_status); 947 ioread32(cmd_q->reg_int_status);
@@ -920,39 +976,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
920 } 976 }
921} 977}
922 978
923static irqreturn_t ccp5_irq_handler(int irq, void *data)
924{
925 struct device *dev = data;
926 struct ccp_device *ccp = dev_get_drvdata(dev);
927 u32 status;
928 unsigned int i;
929
930 for (i = 0; i < ccp->cmd_q_count; i++) {
931 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
932
933 status = ioread32(cmd_q->reg_interrupt_status);
934
935 if (status) {
936 cmd_q->int_status = status;
937 cmd_q->q_status = ioread32(cmd_q->reg_status);
938 cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
939
940 /* On error, only save the first error value */
941 if ((status & INT_ERROR) && !cmd_q->cmd_error)
942 cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
943
944 cmd_q->int_rcvd = 1;
945
946 /* Acknowledge the interrupt and wake the kthread */
947 iowrite32(SUPPORTED_INTERRUPTS,
948 cmd_q->reg_interrupt_status);
949 wake_up_interruptible(&cmd_q->int_queue);
950 }
951 }
952
953 return IRQ_HANDLED;
954}
955
956static void ccp5_config(struct ccp_device *ccp) 979static void ccp5_config(struct ccp_device *ccp)
957{ 980{
958 /* Public side */ 981 /* Public side */