author     Joerg Roedel <joerg.roedel@amd.com>    2008-09-11 10:51:41 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-09-19 06:59:15 -0400
commit     a80dc3e0e0dc8393158de317d66ae0f345dc58f9 (patch)
tree       d7e18a5d25a6dc1101fe699966a9b59b9b88a2f2 /arch/x86
parent     3eaf28a1cd2686aaa185b54d5a5e18e91b41f7f2 (diff)
AMD IOMMU: add MSI interrupt support
The AMD IOMMU can generate interrupts for various reasons. This patch
adds the basic interrupt enabling infrastructure to the driver.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                  |  1
-rw-r--r--  arch/x86/kernel/amd_iommu.c       | 11
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c  | 99
3 files changed, 110 insertions(+), 1 deletion(-)
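In outline, the code added below probes the IOMMU's PCI function for an MSI-X or MSI capability, enables whichever is present, and registers amd_iommu_int_handler() on the resulting vector. The same probe-and-fall-back shape in isolation, against the PCI MSI API of this era (my_handler() and my_setup_irq() are hypothetical stand-ins, not the driver's code):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t my_handler(int irq, void *data)
{
	return IRQ_NONE;	/* stub, like the handler the patch adds */
}

/* Prefer MSI-X, fall back to plain MSI; mirrors iommu_init_msi() in shape. */
static int my_setup_irq(struct pci_dev *pdev)
{
	int err;

	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
		struct msix_entry entry = { .entry = 0 };

		err = pci_enable_msix(pdev, &entry, 1);
		if (err)
			return err;
		/* pci_enable_msix() filled in entry.vector on success */
		return request_irq(entry.vector, my_handler, 0,
				   "my-device", NULL);
	}

	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
		err = pci_enable_msi(pdev);
		if (err)
			return err;
		/* with plain MSI the assigned vector lands in pdev->irq */
		return request_irq(pdev->irq, my_handler, 0,
				   "my-device", NULL);
	}

	return -ENODEV;		/* neither capability present */
}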
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ed92864d1325..39fd3f42696d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -553,6 +553,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 config AMD_IOMMU
 	bool "AMD IOMMU support"
 	select SWIOTLB
+	select PCI_MSI
 	depends on X86_64 && PCI && ACPI
 	help
 	  With this option you can enable support for AMD IOMMU hardware in
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 15792ed082e0..0e494b9d5f20 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -51,6 +51,17 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
 
 /****************************************************************************
  *
+ * Interrupt handling functions
+ *
+ ****************************************************************************/
+
+irqreturn_t amd_iommu_int_handler(int irq, void *data)
+{
+	return IRQ_NONE;
+}
+
+/****************************************************************************
+ *
  * IOMMU command queuing functions
  *
  ****************************************************************************/
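The handler registered by this patch is deliberately a stub: returning IRQ_NONE tells the IRQ core the device did not raise this interrupt, which keeps shared-vector accounting honest until real event processing exists. For contrast, a filled-in handler usually checks a status register and claims only its own interrupts; a minimal sketch under assumed names (my_dev, MY_STATUS_REG, and MY_IRQ_PENDING are made up, not AMD IOMMU definitions):

#include <linux/interrupt.h>
#include <linux/io.h>

struct my_dev {				/* hypothetical per-device state */
	void __iomem *mmio;		/* mapped register space */
};

#define MY_STATUS_REG	0x20		/* assumed status register offset */
#define MY_IRQ_PENDING	0x01		/* assumed "interrupt pending" bit */

static irqreturn_t my_int_handler(int irq, void *data)
{
	struct my_dev *dev = data;
	u32 status = readl(dev->mmio + MY_STATUS_REG);

	if (!(status & MY_IRQ_PENDING))
		return IRQ_NONE;	/* not ours; let other handlers run */

	/* write-one-to-clear acknowledge, a common hardware convention */
	writel(MY_IRQ_PENDING, dev->mmio + MY_STATUS_REG);

	return IRQ_HANDLED;
}

Presumably follow-up patches fill the AMD stub in with event-log handling along these lines.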
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index a7eb89d8923d..14a06464a694 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -22,6 +22,8 @@
 #include <linux/gfp.h>
 #include <linux/list.h>
 #include <linux/sysdev.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
 #include <asm/pci-direct.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
@@ -515,17 +517,20 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
-	u32 range;
+	u32 range, misc;
 
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
 			      &iommu->cap);
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
 			      &range);
+	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
+			      &misc);
 
 	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
 					 MMIO_GET_FD(range));
 	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
 					MMIO_GET_LD(range));
+	iommu->evt_msi_num = MMIO_MSI_NUM(misc);
 }
 
 /*
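The new misc read feeds MMIO_MSI_NUM(), which masks out the MSI message number the hardware assigns to the event-log interrupt; iommu_setup_msix() below stores it in msix_entry.entry. If memory serves, the field is the low five bits of the misc dword, but take the exact mask here as an assumption (the authoritative definition is MMIO_MSI_NUM in amd_iommu_types.h). A standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: MSI message number in bits 4:0 of the misc dword. */
#define MMIO_MSI_NUM(x)	((x) & 0x1f)

int main(void)
{
	uint32_t misc = 0x00000018;	/* made-up register value */

	printf("evt_msi_num = %u\n", MMIO_MSI_NUM(misc));	/* prints 24 */
	return 0;
}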
@@ -696,6 +701,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	if (!iommu->evt_buf)
 		return -ENOMEM;
 
+	iommu->int_enabled = false;
+
 	init_iommu_from_pci(iommu);
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);
@@ -743,6 +750,95 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 
 /****************************************************************************
  *
+ * The following functions initialize the MSI interrupts for all IOMMUs
+ * in the system. It's a bit challenging because there could be multiple
+ * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
+ * pci_dev.
+ *
+ ****************************************************************************/
+
+static int __init iommu_setup_msix(struct amd_iommu *iommu)
+{
+	struct amd_iommu *curr;
+	struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
+	int nvec = 0, i;
+
+	list_for_each_entry(curr, &amd_iommu_list, list) {
+		if (curr->dev == iommu->dev) {
+			entries[nvec].entry = curr->evt_msi_num;
+			entries[nvec].vector = 0;
+			curr->int_enabled = true;
+			nvec++;
+		}
+	}
+
+	if (pci_enable_msix(iommu->dev, entries, nvec)) {
+		pci_disable_msix(iommu->dev);
+		return 1;
+	}
+
+	for (i = 0; i < nvec; ++i) {
+		int r = request_irq(entries[i].vector, amd_iommu_int_handler,
+				    IRQF_SAMPLE_RANDOM,
+				    "AMD IOMMU",
+				    NULL);
+		if (r)
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	for (i -= 1; i >= 0; --i)
+		free_irq(entries[i].vector, NULL);
+
+	pci_disable_msix(iommu->dev);
+
+	return 1;
+}
+
+static int __init iommu_setup_msi(struct amd_iommu *iommu)
+{
+	int r;
+	struct amd_iommu *curr;
+
+	list_for_each_entry(curr, &amd_iommu_list, list) {
+		if (curr->dev == iommu->dev)
+			curr->int_enabled = true;
+	}
+
+
+	if (pci_enable_msi(iommu->dev))
+		return 1;
+
+	r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
+			IRQF_SAMPLE_RANDOM,
+			"AMD IOMMU",
+			NULL);
+
+	if (r) {
+		pci_disable_msi(iommu->dev);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int __init iommu_init_msi(struct amd_iommu *iommu)
+{
+	if (iommu->int_enabled)
+		return 0;
+
+	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSIX))
+		return iommu_setup_msix(iommu);
+	else if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+		return iommu_setup_msi(iommu);
+
+	return 1;
+}
+
+/****************************************************************************
+ *
  * The next functions belong to the third pass of parsing the ACPI
  * table. In this last pass the memory mapping requirements are
  * gathered (like exclusion and unity mapping ranges).
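The comment above the new functions names the awkward part: several IOMMU units can sit behind one PCI BDF, while pci_enable_msix() may only be called once per pci_dev, so iommu_setup_msix() gathers one msix_entry per unit before a single enable call. After a successful pci_enable_msix(), the kernel has written the assigned Linux IRQ into each entries[i].vector, which is why the request_irq() loop indexes the array. The same gather-then-enable pattern in isolation (struct my_unit, my_units, and MY_MAX_VECS are hypothetical):

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/list.h>

#define MY_MAX_VECS 32			/* cap mirrors the driver's entries[32] */

struct my_unit {			/* hypothetical per-unit bookkeeping */
	struct list_head list;
	struct pci_dev *pdev;
	u16 msi_num;			/* hardware-assigned message number */
};

static LIST_HEAD(my_units);

/* Enable one vector per unit behind @pdev with a single MSI-X enable. */
static int my_enable_units(struct pci_dev *pdev, irq_handler_t handler)
{
	struct msix_entry entries[MY_MAX_VECS];
	struct my_unit *u;
	int nvec = 0, i, err;

	list_for_each_entry(u, &my_units, list) {
		if (u->pdev == pdev && nvec < MY_MAX_VECS) {
			entries[nvec].entry = u->msi_num;
			entries[nvec].vector = 0;
			nvec++;
		}
	}

	err = pci_enable_msix(pdev, entries, nvec);
	if (err)
		return err;	/* >0: fewer vectors free; <0: hard error */

	for (i = 0; i < nvec; i++) {
		err = request_irq(entries[i].vector, handler, 0,
				  "my-device", NULL);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		free_irq(entries[i].vector, NULL);
	pci_disable_msix(pdev);
	return err;
}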
@@ -862,6 +958,7 @@ static void __init enable_iommus(void)
 
 	list_for_each_entry(iommu, &amd_iommu_list, list) {
 		iommu_set_exclusion_range(iommu);
+		iommu_init_msi(iommu);
 		iommu_enable(iommu);
 	}
 }