diff options
Diffstat (limited to 'drivers/iommu/intel_irq_remapping.c')
-rw-r--r-- | drivers/iommu/intel_irq_remapping.c | 108 |
1 file changed, 72 insertions, 36 deletions
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index ef5f65dbafe9..9b174893f0f5 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
@@ -38,6 +38,17 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | |||
38 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; | 38 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; |
39 | static int ir_ioapic_num, ir_hpet_num; | 39 | static int ir_ioapic_num, ir_hpet_num; |
40 | 40 | ||
41 | /* | ||
42 | * Lock ordering: | ||
43 | * ->dmar_global_lock | ||
44 | * ->irq_2_ir_lock | ||
45 | * ->qi->q_lock | ||
46 | * ->iommu->register_lock | ||
47 | * Note: | ||
48 | * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called | ||
49 | * in single-threaded environment with interrupt disabled, so no need to take | ||
50 | * the dmar_global_lock. | ||
51 | */ | ||
41 | static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); | 52 | static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); |
42 | 53 | ||
43 | static int __init parse_ioapics_under_ir(void); | 54 | static int __init parse_ioapics_under_ir(void); |
@@ -307,12 +318,14 @@ static int set_ioapic_sid(struct irte *irte, int apic) | |||
307 | if (!irte) | 318 | if (!irte) |
308 | return -1; | 319 | return -1; |
309 | 320 | ||
321 | down_read(&dmar_global_lock); | ||
310 | for (i = 0; i < MAX_IO_APICS; i++) { | 322 | for (i = 0; i < MAX_IO_APICS; i++) { |
311 | if (ir_ioapic[i].id == apic) { | 323 | if (ir_ioapic[i].id == apic) { |
312 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; | 324 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; |
313 | break; | 325 | break; |
314 | } | 326 | } |
315 | } | 327 | } |
328 | up_read(&dmar_global_lock); | ||
316 | 329 | ||
317 | if (sid == 0) { | 330 | if (sid == 0) { |
318 | pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); | 331 | pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); |
@@ -332,12 +345,14 @@ static int set_hpet_sid(struct irte *irte, u8 id) | |||
332 | if (!irte) | 345 | if (!irte) |
333 | return -1; | 346 | return -1; |
334 | 347 | ||
348 | down_read(&dmar_global_lock); | ||
335 | for (i = 0; i < MAX_HPET_TBS; i++) { | 349 | for (i = 0; i < MAX_HPET_TBS; i++) { |
336 | if (ir_hpet[i].id == id) { | 350 | if (ir_hpet[i].id == id) { |
337 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; | 351 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; |
338 | break; | 352 | break; |
339 | } | 353 | } |
340 | } | 354 | } |
355 | up_read(&dmar_global_lock); | ||
341 | 356 | ||
342 | if (sid == 0) { | 357 | if (sid == 0) { |
343 | pr_warning("Failed to set source-id of HPET block (%d)\n", id); | 358 | pr_warning("Failed to set source-id of HPET block (%d)\n", id); |
@@ -794,10 +809,16 @@ static int __init parse_ioapics_under_ir(void) | |||
794 | 809 | ||
795 | static int __init ir_dev_scope_init(void) | 810 | static int __init ir_dev_scope_init(void) |
796 | { | 811 | { |
812 | int ret; | ||
813 | |||
797 | if (!irq_remapping_enabled) | 814 | if (!irq_remapping_enabled) |
798 | return 0; | 815 | return 0; |
799 | 816 | ||
800 | return dmar_dev_scope_init(); | 817 | down_write(&dmar_global_lock); |
818 | ret = dmar_dev_scope_init(); | ||
819 | up_write(&dmar_global_lock); | ||
820 | |||
821 | return ret; | ||
801 | } | 822 | } |
802 | rootfs_initcall(ir_dev_scope_init); | 823 | rootfs_initcall(ir_dev_scope_init); |
803 | 824 | ||
@@ -878,23 +899,27 @@ static int intel_setup_ioapic_entry(int irq, | |||
878 | struct io_apic_irq_attr *attr) | 899 | struct io_apic_irq_attr *attr) |
879 | { | 900 | { |
880 | int ioapic_id = mpc_ioapic_id(attr->ioapic); | 901 | int ioapic_id = mpc_ioapic_id(attr->ioapic); |
881 | struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id); | 902 | struct intel_iommu *iommu; |
882 | struct IR_IO_APIC_route_entry *entry; | 903 | struct IR_IO_APIC_route_entry *entry; |
883 | struct irte irte; | 904 | struct irte irte; |
884 | int index; | 905 | int index; |
885 | 906 | ||
907 | down_read(&dmar_global_lock); | ||
908 | iommu = map_ioapic_to_ir(ioapic_id); | ||
886 | if (!iommu) { | 909 | if (!iommu) { |
887 | pr_warn("No mapping iommu for ioapic %d\n", ioapic_id); | 910 | pr_warn("No mapping iommu for ioapic %d\n", ioapic_id); |
888 | return -ENODEV; | 911 | index = -ENODEV; |
889 | } | 912 | } else { |
890 | 913 | index = alloc_irte(iommu, irq, 1); | |
891 | entry = (struct IR_IO_APIC_route_entry *)route_entry; | 914 | if (index < 0) { |
892 | 915 | pr_warn("Failed to allocate IRTE for ioapic %d\n", | |
893 | index = alloc_irte(iommu, irq, 1); | 916 | ioapic_id); |
894 | if (index < 0) { | 917 | index = -ENOMEM; |
895 | pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id); | 918 | } |
896 | return -ENOMEM; | ||
897 | } | 919 | } |
920 | up_read(&dmar_global_lock); | ||
921 | if (index < 0) | ||
922 | return index; | ||
898 | 923 | ||
899 | prepare_irte(&irte, vector, destination); | 924 | prepare_irte(&irte, vector, destination); |
900 | 925 | ||
@@ -913,6 +938,7 @@ static int intel_setup_ioapic_entry(int irq, | |||
913 | irte.avail, irte.vector, irte.dest_id, | 938 | irte.avail, irte.vector, irte.dest_id, |
914 | irte.sid, irte.sq, irte.svt); | 939 | irte.sid, irte.sq, irte.svt); |
915 | 940 | ||
941 | entry = (struct IR_IO_APIC_route_entry *)route_entry; | ||
916 | memset(entry, 0, sizeof(*entry)); | 942 | memset(entry, 0, sizeof(*entry)); |
917 | 943 | ||
918 | entry->index2 = (index >> 15) & 0x1; | 944 | entry->index2 = (index >> 15) & 0x1; |
@@ -1043,20 +1069,23 @@ static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec) | |||
1043 | struct intel_iommu *iommu; | 1069 | struct intel_iommu *iommu; |
1044 | int index; | 1070 | int index; |
1045 | 1071 | ||
1072 | down_read(&dmar_global_lock); | ||
1046 | iommu = map_dev_to_ir(dev); | 1073 | iommu = map_dev_to_ir(dev); |
1047 | if (!iommu) { | 1074 | if (!iommu) { |
1048 | printk(KERN_ERR | 1075 | printk(KERN_ERR |
1049 | "Unable to map PCI %s to iommu\n", pci_name(dev)); | 1076 | "Unable to map PCI %s to iommu\n", pci_name(dev)); |
1050 | return -ENOENT; | 1077 | index = -ENOENT; |
1078 | } else { | ||
1079 | index = alloc_irte(iommu, irq, nvec); | ||
1080 | if (index < 0) { | ||
1081 | printk(KERN_ERR | ||
1082 | "Unable to allocate %d IRTE for PCI %s\n", | ||
1083 | nvec, pci_name(dev)); | ||
1084 | index = -ENOSPC; | ||
1085 | } | ||
1051 | } | 1086 | } |
1087 | up_read(&dmar_global_lock); | ||
1052 | 1088 | ||
1053 | index = alloc_irte(iommu, irq, nvec); | ||
1054 | if (index < 0) { | ||
1055 | printk(KERN_ERR | ||
1056 | "Unable to allocate %d IRTE for PCI %s\n", nvec, | ||
1057 | pci_name(dev)); | ||
1058 | return -ENOSPC; | ||
1059 | } | ||
1060 | return index; | 1089 | return index; |
1061 | } | 1090 | } |
1062 | 1091 | ||
@@ -1064,33 +1093,40 @@ static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq, | |||
1064 | int index, int sub_handle) | 1093 | int index, int sub_handle) |
1065 | { | 1094 | { |
1066 | struct intel_iommu *iommu; | 1095 | struct intel_iommu *iommu; |
1096 | int ret = -ENOENT; | ||
1067 | 1097 | ||
1098 | down_read(&dmar_global_lock); | ||
1068 | iommu = map_dev_to_ir(pdev); | 1099 | iommu = map_dev_to_ir(pdev); |
1069 | if (!iommu) | 1100 | if (iommu) { |
1070 | return -ENOENT; | 1101 | /* |
1071 | /* | 1102 | * setup the mapping between the irq and the IRTE |
1072 | * setup the mapping between the irq and the IRTE | 1103 | * base index, the sub_handle pointing to the |
1073 | * base index, the sub_handle pointing to the | 1104 | * appropriate interrupt remap table entry. |
1074 | * appropriate interrupt remap table entry. | 1105 | */ |
1075 | */ | 1106 | set_irte_irq(irq, iommu, index, sub_handle); |
1076 | set_irte_irq(irq, iommu, index, sub_handle); | 1107 | ret = 0; |
1108 | } | ||
1109 | up_read(&dmar_global_lock); | ||
1077 | 1110 | ||
1078 | return 0; | 1111 | return ret; |
1079 | } | 1112 | } |
1080 | 1113 | ||
1081 | static int intel_setup_hpet_msi(unsigned int irq, unsigned int id) | 1114 | static int intel_setup_hpet_msi(unsigned int irq, unsigned int id) |
1082 | { | 1115 | { |
1083 | struct intel_iommu *iommu = map_hpet_to_ir(id); | 1116 | int ret = -1; |
1117 | struct intel_iommu *iommu; | ||
1084 | int index; | 1118 | int index; |
1085 | 1119 | ||
1086 | if (!iommu) | 1120 | down_read(&dmar_global_lock); |
1087 | return -1; | 1121 | iommu = map_hpet_to_ir(id); |
1088 | 1122 | if (iommu) { | |
1089 | index = alloc_irte(iommu, irq, 1); | 1123 | index = alloc_irte(iommu, irq, 1); |
1090 | if (index < 0) | 1124 | if (index >= 0) |
1091 | return -1; | 1125 | ret = 0; |
1126 | } | ||
1127 | up_read(&dmar_global_lock); | ||
1092 | 1128 | ||
1093 | return 0; | 1129 | return ret; |
1094 | } | 1130 | } |
1095 | 1131 | ||
1096 | struct irq_remap_ops intel_irq_remap_ops = { | 1132 | struct irq_remap_ops intel_irq_remap_ops = { |