author		Christoph Hellwig <hch@lst.de>	2017-02-15 02:18:43 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-02-23 07:11:02 -0500
commit		15c0b9edcc41fe8fddcd07d6b58ee15e6554d17e (patch)
tree		f5ba8165be0802d5464be110018f04254cb68078 /drivers
parent		613844e811a87ddbc646bd30e724c34472540296 (diff)
crypto: cavium - switch to pci_alloc_irq_vectors
pci_enable_msix has long been deprecated, but this driver adds a new instance of it. Convert it to pci_alloc_irq_vectors, which greatly simplifies the code, and make sure the probe code properly unwinds on errors.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
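For context, a minimal sketch of the pattern this patch moves to, not taken from this driver: the handler name, device name, and N_VECTORS are made-up placeholders. pci_alloc_irq_vectors() with min == max requests exactly that many MSI-X vectors, pci_irq_vector() maps a vector index to a Linux IRQ number for request_irq()/free_irq(), and pci_free_irq_vectors() releases the vectors on the unwind path, which removes the need for driver-local msix_entries[]/irq_allocated[] bookkeeping like the fields dropped from struct cpt_vf below.

/*
 * Illustrative sketch only -- not this driver's code.  N_VECTORS,
 * "example-dev" and my_handler are hypothetical placeholders.
 */
#include <linux/pci.h>
#include <linux/interrupt.h>

#define N_VECTORS 2	/* hypothetical vector count */

static int example_setup_irqs(struct pci_dev *pdev, irq_handler_t my_handler,
			      void *priv)
{
	int i, err;

	/* min == max: succeed only if all N_VECTORS MSI-X vectors are granted */
	err = pci_alloc_irq_vectors(pdev, N_VECTORS, N_VECTORS, PCI_IRQ_MSIX);
	if (err < 0)
		return err;

	for (i = 0; i < N_VECTORS; i++) {
		/* pci_irq_vector() maps a vector index to a Linux IRQ number */
		err = request_irq(pci_irq_vector(pdev, i), my_handler, 0,
				  "example-dev", priv);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), priv);
	pci_free_irq_vectors(pdev);
	return err;
}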
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/crypto/cavium/cpt/cptvf.h	3
-rw-r--r--	drivers/crypto/cavium/cpt/cptvf_main.c	203
2 files changed, 65 insertions(+), 141 deletions(-)
diff --git a/drivers/crypto/cavium/cpt/cptvf.h b/drivers/crypto/cavium/cpt/cptvf.h
index 1cc04aa611e4..0a835a07d4f2 100644
--- a/drivers/crypto/cavium/cpt/cptvf.h
+++ b/drivers/crypto/cavium/cpt/cptvf.h
@@ -107,9 +107,6 @@ struct cpt_vf {
 	void __iomem *reg_base; /* Register start address */
 	void *wqe_info;	/* BH worker info */
 	/* MSI-X */
-	bool msix_enabled;
-	struct msix_entry msix_entries[CPT_VF_MSIX_VECTORS];
-	bool irq_allocated[CPT_VF_MSIX_VECTORS];
 	cpumask_var_t affinity_mask[CPT_VF_MSIX_VECTORS];
 	/* Command and Pending queues */
 	u32 qsize;
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 527bdc3c2969..aac2966ff8d9 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -357,48 +357,10 @@ setup_pqfail:
 	return ret;
 }
 
-static void cptvf_disable_msix(struct cpt_vf *cptvf)
+static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
 {
-	if (cptvf->msix_enabled) {
-		pci_disable_msix(cptvf->pdev);
-		cptvf->msix_enabled = 0;
-	}
-}
-
-static int cptvf_enable_msix(struct cpt_vf *cptvf)
-{
-	int i, ret;
-
-	for (i = 0; i < CPT_VF_MSIX_VECTORS; i++)
-		cptvf->msix_entries[i].entry = i;
-
-	ret = pci_enable_msix(cptvf->pdev, cptvf->msix_entries,
-			      CPT_VF_MSIX_VECTORS);
-	if (ret) {
-		dev_err(&cptvf->pdev->dev, "Request for #%d msix vectors failed\n",
-			CPT_VF_MSIX_VECTORS);
-		return ret;
-	}
-
-	cptvf->msix_enabled = 1;
-	/* Mark MSIX enabled */
-	cptvf->flags |= CPT_FLAG_MSIX_ENABLED;
-
-	return 0;
-}
-
-static void cptvf_free_all_interrupts(struct cpt_vf *cptvf)
-{
-	int irq;
-
-	for (irq = 0; irq < CPT_VF_MSIX_VECTORS; irq++) {
-		if (cptvf->irq_allocated[irq])
-			irq_set_affinity_hint(cptvf->msix_entries[irq].vector,
-					      NULL);
-		free_cpumask_var(cptvf->affinity_mask[irq]);
-		free_irq(cptvf->msix_entries[irq].vector, cptvf);
-		cptvf->irq_allocated[irq] = false;
-	}
+	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
+	free_cpumask_var(cptvf->affinity_mask[vec]);
 }
 
 static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
@@ -650,85 +612,23 @@ static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
 	return IRQ_HANDLED;
 }
 
-static int cptvf_register_misc_intr(struct cpt_vf *cptvf)
-{
-	struct pci_dev *pdev = cptvf->pdev;
-	int ret;
-
-	/* Register misc interrupt handlers */
-	ret = request_irq(cptvf->msix_entries[CPT_VF_INT_VEC_E_MISC].vector,
-			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
-			  cptvf);
-	if (ret)
-		goto fail;
-
-	cptvf->irq_allocated[CPT_VF_INT_VEC_E_MISC] = true;
-
-	/* Enable mailbox interrupt */
-	cptvf_enable_mbox_interrupts(cptvf);
-	cptvf_enable_swerr_interrupts(cptvf);
-
-	return 0;
-
-fail:
-	dev_err(&pdev->dev, "Request misc irq failed");
-	cptvf_free_all_interrupts(cptvf);
-	return ret;
-}
-
-static int cptvf_register_done_intr(struct cpt_vf *cptvf)
-{
-	struct pci_dev *pdev = cptvf->pdev;
-	int ret;
-
-	/* Register DONE interrupt handlers */
-	ret = request_irq(cptvf->msix_entries[CPT_VF_INT_VEC_E_DONE].vector,
-			  cptvf_done_intr_handler, 0, "CPT VF done intr",
-			  cptvf);
-	if (ret)
-		goto fail;
-
-	cptvf->irq_allocated[CPT_VF_INT_VEC_E_DONE] = true;
-
-	/* Enable mailbox interrupt */
-	cptvf_enable_done_interrupts(cptvf);
-	return 0;
-
-fail:
-	dev_err(&pdev->dev, "Request done irq failed\n");
-	cptvf_free_all_interrupts(cptvf);
-	return ret;
-}
-
-static void cptvf_unregister_interrupts(struct cpt_vf *cptvf)
-{
-	cptvf_free_all_interrupts(cptvf);
-	cptvf_disable_msix(cptvf);
-}
-
-static void cptvf_set_irq_affinity(struct cpt_vf *cptvf)
+static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
 {
 	struct pci_dev *pdev = cptvf->pdev;
-	int vec, cpu;
-	int irqnum;
-
-	for (vec = 0; vec < CPT_VF_MSIX_VECTORS; vec++) {
-		if (!cptvf->irq_allocated[vec])
-			continue;
-
-		if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
-					GFP_KERNEL)) {
-			dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d",
-				cptvf->vfid);
-			return;
-		}
+	int cpu;
 
-		cpu = cptvf->vfid % num_online_cpus();
-		cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
-				cptvf->affinity_mask[vec]);
-		irqnum = cptvf->msix_entries[vec].vector;
-		irq_set_affinity_hint(irqnum, cptvf->affinity_mask[vec]);
-	}
+	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
+				GFP_KERNEL)) {
+		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d",
+			cptvf->vfid);
+		return;
+	}
+
+	cpu = cptvf->vfid % num_online_cpus();
+	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
+			cptvf->affinity_mask[vec]);
+	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
+			      cptvf->affinity_mask[vec]);
 }
 
 static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
@@ -809,22 +709,32 @@ static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	cptvf->node = dev_to_node(&pdev->dev);
-	/* Enable MSI-X */
-	err = cptvf_enable_msix(cptvf);
-	if (err) {
-		dev_err(dev, "cptvf_enable_msix() failed");
+	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
+			CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
+	if (err < 0) {
+		dev_err(dev, "Request for #%d msix vectors failed\n",
+			CPT_VF_MSIX_VECTORS);
 		goto cptvf_err_release_regions;
 	}
 
-	/* Register mailbox interrupts */
-	cptvf_register_misc_intr(cptvf);
+	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
+			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
+			  cptvf);
+	if (err) {
+		dev_err(dev, "Request misc irq failed");
+		goto cptvf_free_vectors;
+	}
+
+	/* Enable mailbox interrupt */
+	cptvf_enable_mbox_interrupts(cptvf);
+	cptvf_enable_swerr_interrupts(cptvf);
 
 	/* Check ready with PF */
 	/* Gets chip ID / device Id from PF if ready */
 	err = cptvf_check_pf_ready(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to READY msg");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
 	}
 
 	/* CPT VF software resources initialization */
@@ -832,13 +742,13 @@ static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
 	if (err) {
 		dev_err(dev, "cptvf_sw_init() failed");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
 	}
 	/* Convey VQ LEN to PF */
 	err = cptvf_send_vq_size_msg(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to QLEN msg");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
 	}
 
 	/* CPT VF device initialization */
@@ -848,37 +758,50 @@ static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = cptvf_send_vf_to_grp_msg(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to VF_GRP msg");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
 	}
 
 	cptvf->priority = 1;
 	err = cptvf_send_vf_priority_msg(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to VF_PRIO msg");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
+	}
+
+	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
+			  cptvf_done_intr_handler, 0, "CPT VF done intr",
+			  cptvf);
+	if (err) {
+		dev_err(dev, "Request done irq failed\n");
+		goto cptvf_free_misc_irq;
 	}
-	/* Register DONE interrupts */
-	err = cptvf_register_done_intr(cptvf);
-	if (err)
-		goto cptvf_err_release_regions;
+
+	/* Enable mailbox interrupt */
+	cptvf_enable_done_interrupts(cptvf);
 
 	/* Set irq affinity masks */
-	cptvf_set_irq_affinity(cptvf);
-	/* Convey UP to PF */
+	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+
 	err = cptvf_send_vf_up(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to UP msg");
-		goto cptvf_up_fail;
+		goto cptvf_free_irq_affinity;
 	}
 	err = cvm_crypto_init(cptvf);
 	if (err) {
 		dev_err(dev, "Algorithm register failed\n");
-		goto cptvf_up_fail;
+		goto cptvf_free_irq_affinity;
 	}
 	return 0;
 
-cptvf_up_fail:
-	cptvf_unregister_interrupts(cptvf);
+cptvf_free_irq_affinity:
+	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+cptvf_free_misc_irq:
+	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+cptvf_free_vectors:
+	pci_free_irq_vectors(cptvf->pdev);
 cptvf_err_release_regions:
 	pci_release_regions(pdev);
 cptvf_err_disable_device:
@@ -899,7 +822,11 @@ static void cptvf_remove(struct pci_dev *pdev)
 	if (cptvf_send_vf_down(cptvf)) {
 		dev_err(&pdev->dev, "PF not responding to DOWN msg");
 	} else {
-		cptvf_unregister_interrupts(cptvf);
+		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
+		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+		pci_free_irq_vectors(cptvf->pdev);
 		cptvf_sw_cleanup(cptvf);
 		pci_set_drvdata(pdev, NULL);
 		pci_release_regions(pdev);