path: root/drivers/crypto/ccp/ccp-dev-v5.c
author     Gary R Hook <gary.hook@amd.com>          2017-04-21 11:50:14 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au> 2017-04-24 06:11:07 -0400
commit     6263b51eb3190d30351360fd168959af7e3a49a9 (patch)
tree       170a9134598f2967204d8e861f92f950f6422100 /drivers/crypto/ccp/ccp-dev-v5.c
parent     7b537b24e76a1e8e6d7ea91483a45d5b1426809b (diff)
crypto: ccp - Change ISR handler method for a v5 CCP
The CCP has the ability to perform several operations simultaneously, but only one interrupt. When implemented as a PCI device and using MSI-X/MSI interrupts, use a tasklet model to service interrupts. By disabling and enabling interrupts from the CCP, coupled with the queuing that tasklets provide, we can ensure that all events (occurring on the device) are recognized and serviced.

This change fixes a problem wherein 2 or more busy queues can cause notification bits to change state while a (CCP) interrupt is being serviced, but after the queue state has been evaluated. This results in the event being 'lost' and the queue hanging, waiting to be serviced. Since the status bits are never fully de-asserted, the CCP never generates another interrupt (all bits zero -> one or more bits one), and no further CCP operations will be executed.

Cc: <stable@vger.kernel.org> # 4.9.x+
Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
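Seen in isolation, the model the patch moves to is: the hard IRQ handler only masks the device's interrupt sources and defers the real work to a tasklet; the tasklet drains status, acknowledges exactly the bits it observed, and unmasks again, so a bit that asserts mid-service stays latched and is picked up on a later pass instead of being lost. The sketch below shows that shape only; struct my_dev, my_irq_handler, and the register names are illustrative stand-ins, not the CCP driver's own code.

	#include <linux/interrupt.h>
	#include <linux/io.h>

	struct my_dev {
		void __iomem *int_enable;	/* interrupt mask register */
		void __iomem *int_status;	/* write-1-to-clear status */
		struct tasklet_struct bh;
	};

	/* Bottom half: drain status, ack exactly what was seen, then unmask. */
	static void my_irq_bh(unsigned long data)
	{
		struct my_dev *d = (struct my_dev *)data;
		u32 status = ioread32(d->int_status);

		if (status)
			iowrite32(status, d->int_status);
		/* Unmask; bits that asserted meanwhile are still latched. */
		iowrite32(~0u, d->int_enable);
	}

	/* Top half: mask the device so nothing races the evaluation, then defer. */
	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		struct my_dev *d = data;

		iowrite32(0, d->int_enable);
		tasklet_schedule(&d->bh);
		return IRQ_HANDLED;
	}

	/* At probe time: tasklet_init(&d->bh, my_irq_bh, (unsigned long)d); */

Acknowledging only the observed bits matters here: the old handler blanket-cleared SUPPORTED_INTERRUPTS, which could wipe a bit that asserted after the status read, leaving the device with status set but no zero-to-nonzero transition to raise a new interrupt.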
Diffstat (limited to 'drivers/crypto/ccp/ccp-dev-v5.c')
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c  111
1 file changed, 67 insertions(+), 44 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 13b81a1c1184..ccbe32d5dd1c 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -705,6 +705,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
 	return rc;
 }
 
+static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	u32 status;
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+		status = ioread32(cmd_q->reg_interrupt_status);
+
+		if (status) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((status & INT_ERROR) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(status, cmd_q->reg_interrupt_status);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp5_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp5_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+
+	ccp5_disable_queue_interrupts(ccp);
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp5_irq_bh((unsigned long)ccp);
+	return IRQ_HANDLED;
+}
+
 static int ccp5_init(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
@@ -789,18 +848,17 @@ static int ccp5_init(struct ccp_device *ccp)
 	}
 
 	/* Turn off the queues and disable interrupts until ready */
+	ccp5_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		cmd_q->qcontrol = 0; /* Start with nothing */
 		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
 
-		/* Disable the interrupts */
-		iowrite32(0x00, cmd_q->reg_int_enable);
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 
-		/* Clear the interrupts */
+		/* Clear the interrupt status */
 		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
 	}
 
@@ -811,6 +869,10 @@ static int ccp5_init(struct ccp_device *ccp)
 		dev_err(dev, "unable to allocate an IRQ\n");
 		goto e_pool;
 	}
+	/* Initialize the ISR tasklet */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
+			     (unsigned long)ccp);
 
 	dev_dbg(dev, "Loading LSB map...\n");
 	/* Copy the private LSB mask to the public registers */
@@ -879,11 +941,7 @@ static int ccp5_init(struct ccp_device *ccp)
 	}
 
 	dev_dbg(dev, "Enabling interrupts...\n");
-	/* Enable interrupts */
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_int_enable);
-	}
+	ccp5_enable_queue_interrupts(ccp);
 
 	dev_dbg(dev, "Registering device...\n");
 	/* Put this on the unit list to make it available */
@@ -935,15 +993,13 @@ static void ccp5_destroy(struct ccp_device *ccp)
 	ccp_del_device(ccp);
 
 	/* Disable and clear interrupts */
+	ccp5_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		/* Turn off the run bit */
 		iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
 
-		/* Disable the interrupts */
-		iowrite32(0x00, cmd_q->reg_int_enable);
-
 		/* Clear the interrupt status */
 		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
 		ioread32(cmd_q->reg_int_status);
@@ -978,39 +1034,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
 	}
 }
 
-static irqreturn_t ccp5_irq_handler(int irq, void *data)
-{
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
-	u32 status;
-	unsigned int i;
-
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
-
-		status = ioread32(cmd_q->reg_interrupt_status);
-
-		if (status) {
-			cmd_q->int_status = status;
-			cmd_q->q_status = ioread32(cmd_q->reg_status);
-			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-			/* On error, only save the first error value */
-			if ((status & INT_ERROR) && !cmd_q->cmd_error)
-				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-			cmd_q->int_rcvd = 1;
-
-			/* Acknowledge the interrupt and wake the kthread */
-			iowrite32(SUPPORTED_INTERRUPTS,
-				  cmd_q->reg_interrupt_status);
-			wake_up_interruptible(&cmd_q->int_queue);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static void ccp5_config(struct ccp_device *ccp)
 {
 	/* Public side */
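
Note that this diff only tests ccp->use_tasklet; the flag is set outside this file, in the code that knows which interrupt type the device obtained. Assuming probe-time knowledge of the vector type, the intent per the commit message is roughly the following (using_msix/using_msi are hypothetical names, not the literal driver code):

	/* Hypothetical sketch: tasklet servicing only for MSI-X/MSI;
	 * legacy INTx keeps servicing inline in the handler. */
	ccp->use_tasklet = using_msix || using_msi;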