-rw-r--r--  arch/x86/Kconfig                   |  1
-rw-r--r--  arch/x86/kernel/amd_iommu.c        | 11
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c   | 99
-rw-r--r--  include/asm-x86/amd_iommu.h        |  3
-rw-r--r--  include/asm-x86/amd_iommu_types.h  |  7
5 files changed, 120 insertions(+), 1 deletion(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ed92864d1325..39fd3f42696d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -553,6 +553,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 config AMD_IOMMU
 	bool "AMD IOMMU support"
 	select SWIOTLB
+	select PCI_MSI
 	depends on X86_64 && PCI && ACPI
 	help
 	  With this option you can enable support for AMD IOMMU hardware in
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 15792ed082e0..0e494b9d5f20 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -51,6 +51,17 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
 
 /****************************************************************************
  *
+ * Interrupt handling functions
+ *
+ ****************************************************************************/
+
+irqreturn_t amd_iommu_int_handler(int irq, void *data)
+{
+	return IRQ_NONE;
+}
+
+/****************************************************************************
+ *
  * IOMMU command queuing functions
  *
  ****************************************************************************/
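
The handler added above is deliberately a stub: returning IRQ_NONE tells the IRQ core that no work was done, which is the safe answer until event-log processing is implemented. A sketch of the contract a fleshed-out handler would follow, with iommu_has_pending_events() as a purely hypothetical placeholder (and note that using the dev_id argument would require passing the iommu instead of NULL to request_irq() in amd_iommu_init.c below):

/* Illustrative sketch only, not part of this patch. */
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	struct amd_iommu *iommu = data;		/* assumes dev_id was set in request_irq() */

	if (!iommu_has_pending_events(iommu))	/* hypothetical status check */
		return IRQ_NONE;		/* nothing pending, not our interrupt */

	/* ... drain and report event log entries here ... */

	return IRQ_HANDLED;			/* interrupt was serviced */
}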
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index a7eb89d8923d..14a06464a694 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -22,6 +22,8 @@
 #include <linux/gfp.h>
 #include <linux/list.h>
 #include <linux/sysdev.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
 #include <asm/pci-direct.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
@@ -515,17 +517,20 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
-	u32 range;
+	u32 range, misc;
 
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
 			      &iommu->cap);
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
 			      &range);
+	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
+			      &misc);
 
 	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
 					 MMIO_GET_FD(range));
 	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
 					 MMIO_GET_LD(range));
+	iommu->evt_msi_num = MMIO_MSI_NUM(misc);
 }
 
 /*
@@ -696,6 +701,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	if (!iommu->evt_buf)
 		return -ENOMEM;
 
+	iommu->int_enabled = false;
+
 	init_iommu_from_pci(iommu);
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);
@@ -743,6 +750,95 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 
 /****************************************************************************
  *
+ * The following functions initialize the MSI interrupts for all IOMMUs
+ * in the system. It's a bit challenging because there could be multiple
+ * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
+ * pci_dev.
+ *
+ ****************************************************************************/
+
+static int __init iommu_setup_msix(struct amd_iommu *iommu)
+{
+	struct amd_iommu *curr;
+	struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
+	int nvec = 0, i;
+
+	list_for_each_entry(curr, &amd_iommu_list, list) {
+		if (curr->dev == iommu->dev) {
+			entries[nvec].entry = curr->evt_msi_num;
+			entries[nvec].vector = 0;
+			curr->int_enabled = true;
+			nvec++;
+		}
+	}
+
+	if (pci_enable_msix(iommu->dev, entries, nvec)) {
+		pci_disable_msix(iommu->dev);
+		return 1;
+	}
+
+	for (i = 0; i < nvec; ++i) {
+		int r = request_irq(entries->vector, amd_iommu_int_handler,
+				    IRQF_SAMPLE_RANDOM,
+				    "AMD IOMMU",
+				    NULL);
+		if (r)
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	for (i -= 1; i >= 0; --i)
+		free_irq(entries->vector, NULL);
+
+	pci_disable_msix(iommu->dev);
+
+	return 1;
+}
+
+static int __init iommu_setup_msi(struct amd_iommu *iommu)
+{
+	int r;
+	struct amd_iommu *curr;
+
+	list_for_each_entry(curr, &amd_iommu_list, list) {
+		if (curr->dev == iommu->dev)
+			curr->int_enabled = true;
+	}
+
+
+	if (pci_enable_msi(iommu->dev))
+		return 1;
+
+	r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
+			IRQF_SAMPLE_RANDOM,
+			"AMD IOMMU",
+			NULL);
+
+	if (r) {
+		pci_disable_msi(iommu->dev);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int __init iommu_init_msi(struct amd_iommu *iommu)
+{
+	if (iommu->int_enabled)
+		return 0;
+
+	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSIX))
+		return iommu_setup_msix(iommu);
+	else if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+		return iommu_setup_msi(iommu);
+
+	return 1;
+}
+
+/****************************************************************************
+ *
  * The next functions belong to the third pass of parsing the ACPI
  * table. In this last pass the memory mapping requirements are
  * gathered (like exclusion and unity mapping reanges).
@@ -862,6 +958,7 @@ static void __init enable_iommus(void)
 
 	list_for_each_entry(iommu, &amd_iommu_list, list) {
 		iommu_set_exclusion_range(iommu);
+		iommu_init_msi(iommu);
 		iommu_enable(iommu);
 	}
 }
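
One detail worth noting in the MSI-X path: on success, pci_enable_msix() fills in entries[i].vector with the allocated IRQ numbers, while the request_irq() and free_irq() loops above pass entries->vector, which is always entries[0].vector. Purely as an illustration (this is not what the patch does), per-entry registration would index the array:

	for (i = 0; i < nvec; ++i) {
		int r = request_irq(entries[i].vector, amd_iommu_int_handler,
				    IRQF_SAMPLE_RANDOM, "AMD IOMMU", NULL);
		if (r)
			goto out_free;
	}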
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
index 30a12049353b..2fd97cb250c7 100644
--- a/include/asm-x86/amd_iommu.h
+++ b/include/asm-x86/amd_iommu.h
@@ -20,10 +20,13 @@
 #ifndef _ASM_X86_AMD_IOMMU_H
 #define _ASM_X86_AMD_IOMMU_H
 
+#include <linux/irqreturn.h>
+
 #ifdef CONFIG_AMD_IOMMU
 extern int amd_iommu_init(void);
 extern int amd_iommu_init_dma_ops(void);
 extern void amd_iommu_detect(void);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 #else
 static inline int amd_iommu_init(void) { return -ENODEV; }
 static inline void amd_iommu_detect(void) { }
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index a5629a21557c..8533f09b34b7 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -37,6 +37,7 @@
 /* Capability offsets used by the driver */
 #define MMIO_CAP_HDR_OFFSET	0x00
 #define MMIO_RANGE_OFFSET	0x0c
+#define MMIO_MISC_OFFSET	0x10
 
 /* Masks, shifts and macros to parse the device range capability */
 #define MMIO_RANGE_LD_MASK	0xff000000
@@ -48,6 +49,7 @@
 #define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
 #define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
 #define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+#define MMIO_MSI_NUM(x) ((x) & 0x1f)
 
 /* Flag masks for the AMD IOMMU exclusion range */
 #define MMIO_EXCL_ENABLE_MASK 0x01ULL
@@ -255,10 +257,15 @@ struct amd_iommu {
 	u8 *evt_buf;
 	/* size of event buffer */
 	u32 evt_buf_size;
+	/* MSI number for event interrupt */
+	u16 evt_msi_num;
 
 	/* if one, we need to send a completion wait command */
 	int need_sync;
 
+	/* true if interrupts for this IOMMU are already enabled */
+	bool int_enabled;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
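
To make the new capability parsing concrete: init_iommu_from_pci() reads the miscellaneous information dword at cap_ptr + MMIO_MISC_OFFSET, and MMIO_MSI_NUM() keeps only its low five bits, so evt_msi_num ranges from 0 to 31, consistent with the 32-entry array in iommu_setup_msix(). A tiny worked example with a made-up register value:

	u32 misc = 0x00000843;			/* hypothetical value read from the capability */
	u16 num  = MMIO_MSI_NUM(misc);		/* 0x843 & 0x1f == 0x03 */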