about summary refs log tree commit diff stats
path: root/drivers/iommu/intel_irq_remapping.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/iommu/intel_irq_remapping.c')
-rw-r--r-- drivers/iommu/intel_irq_remapping.c | 249
1 files changed, 187 insertions, 62 deletions
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 7c80661b35c1..27541d440849 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -36,7 +36,6 @@ struct hpet_scope {
36 36
37static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; 37static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
38static struct hpet_scope ir_hpet[MAX_HPET_TBS]; 38static struct hpet_scope ir_hpet[MAX_HPET_TBS];
39static int ir_ioapic_num, ir_hpet_num;
40 39
41/* 40/*
42 * Lock ordering: 41 * Lock ordering:
@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
206 int i; 205 int i;
207 206
208 for (i = 0; i < MAX_HPET_TBS; i++) 207 for (i = 0; i < MAX_HPET_TBS; i++)
209 if (ir_hpet[i].id == hpet_id) 208 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
210 return ir_hpet[i].iommu; 209 return ir_hpet[i].iommu;
211 return NULL; 210 return NULL;
212} 211}
@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic)
216 int i; 215 int i;
217 216
218 for (i = 0; i < MAX_IO_APICS; i++) 217 for (i = 0; i < MAX_IO_APICS; i++)
219 if (ir_ioapic[i].id == apic) 218 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
220 return ir_ioapic[i].iommu; 219 return ir_ioapic[i].iommu;
221 return NULL; 220 return NULL;
222} 221}
@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
325 324
326 down_read(&dmar_global_lock); 325 down_read(&dmar_global_lock);
327 for (i = 0; i < MAX_IO_APICS; i++) { 326 for (i = 0; i < MAX_IO_APICS; i++) {
328 if (ir_ioapic[i].id == apic) { 327 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
329 sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; 328 sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
330 break; 329 break;
331 } 330 }
@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
352 351
353 down_read(&dmar_global_lock); 352 down_read(&dmar_global_lock);
354 for (i = 0; i < MAX_HPET_TBS; i++) { 353 for (i = 0; i < MAX_HPET_TBS; i++) {
355 if (ir_hpet[i].id == id) { 354 if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
356 sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; 355 sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
357 break; 356 break;
358 } 357 }
@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
473 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 472 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
474} 473}
475 474
476 475static int intel_setup_irq_remapping(struct intel_iommu *iommu)
477static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
478{ 476{
479 struct ir_table *ir_table; 477 struct ir_table *ir_table;
480 struct page *pages; 478 struct page *pages;
481 unsigned long *bitmap; 479 unsigned long *bitmap;
482 480
483 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), 481 if (iommu->ir_table)
484 GFP_ATOMIC); 482 return 0;
485 483
486 if (!iommu->ir_table) 484 ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
485 if (!ir_table)
487 return -ENOMEM; 486 return -ENOMEM;
488 487
489 pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 488 pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
492 if (!pages) { 491 if (!pages) {
493 pr_err("IR%d: failed to allocate pages of order %d\n", 492 pr_err("IR%d: failed to allocate pages of order %d\n",
494 iommu->seq_id, INTR_REMAP_PAGE_ORDER); 493 iommu->seq_id, INTR_REMAP_PAGE_ORDER);
495 kfree(iommu->ir_table); 494 goto out_free_table;
496 return -ENOMEM;
497 } 495 }
498 496
499 bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), 497 bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
500 sizeof(long), GFP_ATOMIC); 498 sizeof(long), GFP_ATOMIC);
501 if (bitmap == NULL) { 499 if (bitmap == NULL) {
502 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); 500 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
503 __free_pages(pages, INTR_REMAP_PAGE_ORDER); 501 goto out_free_pages;
504 kfree(ir_table);
505 return -ENOMEM;
506 } 502 }
507 503
508 ir_table->base = page_address(pages); 504 ir_table->base = page_address(pages);
509 ir_table->bitmap = bitmap; 505 ir_table->bitmap = bitmap;
510 506 iommu->ir_table = ir_table;
511 iommu_set_irq_remapping(iommu, mode);
512 return 0; 507 return 0;
508
509out_free_pages:
510 __free_pages(pages, INTR_REMAP_PAGE_ORDER);
511out_free_table:
512 kfree(ir_table);
513 return -ENOMEM;
514}
515
516static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
517{
518 if (iommu && iommu->ir_table) {
519 free_pages((unsigned long)iommu->ir_table->base,
520 INTR_REMAP_PAGE_ORDER);
521 kfree(iommu->ir_table->bitmap);
522 kfree(iommu->ir_table);
523 iommu->ir_table = NULL;
524 }
513} 525}
514 526
515/* 527/*
@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void)
666 if (!ecap_ir_support(iommu->ecap)) 678 if (!ecap_ir_support(iommu->ecap))
667 continue; 679 continue;
668 680
669 if (intel_setup_irq_remapping(iommu, eim)) 681 if (intel_setup_irq_remapping(iommu))
670 goto error; 682 goto error;
671 683
684 iommu_set_irq_remapping(iommu, eim);
672 setup = 1; 685 setup = 1;
673 } 686 }
674 687
@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void)
689 return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; 702 return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
690 703
691error: 704error:
692 /* 705 for_each_iommu(iommu, drhd)
693 * handle error condition gracefully here! 706 if (ecap_ir_support(iommu->ecap)) {
694 */ 707 iommu_disable_irq_remapping(iommu);
708 intel_teardown_irq_remapping(iommu);
709 }
695 710
696 if (x2apic_present) 711 if (x2apic_present)
697 pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); 712 pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
@@ -699,12 +714,13 @@ error:
699 return -1; 714 return -1;
700} 715}
701 716
702static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, 717static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
703 struct intel_iommu *iommu) 718 struct intel_iommu *iommu,
719 struct acpi_dmar_hardware_unit *drhd)
704{ 720{
705 struct acpi_dmar_pci_path *path; 721 struct acpi_dmar_pci_path *path;
706 u8 bus; 722 u8 bus;
707 int count; 723 int count, free = -1;
708 724
709 bus = scope->bus; 725 bus = scope->bus;
710 path = (struct acpi_dmar_pci_path *)(scope + 1); 726 path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
720 PCI_SECONDARY_BUS); 736 PCI_SECONDARY_BUS);
721 path++; 737 path++;
722 } 738 }
723 ir_hpet[ir_hpet_num].bus = bus; 739
724 ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function); 740 for (count = 0; count < MAX_HPET_TBS; count++) {
725 ir_hpet[ir_hpet_num].iommu = iommu; 741 if (ir_hpet[count].iommu == iommu &&
726 ir_hpet[ir_hpet_num].id = scope->enumeration_id; 742 ir_hpet[count].id == scope->enumeration_id)
727 ir_hpet_num++; 743 return 0;
744 else if (ir_hpet[count].iommu == NULL && free == -1)
745 free = count;
746 }
747 if (free == -1) {
748 pr_warn("Exceeded Max HPET blocks\n");
749 return -ENOSPC;
750 }
751
752 ir_hpet[free].iommu = iommu;
753 ir_hpet[free].id = scope->enumeration_id;
754 ir_hpet[free].bus = bus;
755 ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
756 pr_info("HPET id %d under DRHD base 0x%Lx\n",
757 scope->enumeration_id, drhd->address);
758
759 return 0;
728} 760}
729 761
730static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, 762static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
731 struct intel_iommu *iommu) 763 struct intel_iommu *iommu,
764 struct acpi_dmar_hardware_unit *drhd)
732{ 765{
733 struct acpi_dmar_pci_path *path; 766 struct acpi_dmar_pci_path *path;
734 u8 bus; 767 u8 bus;
735 int count; 768 int count, free = -1;
736 769
737 bus = scope->bus; 770 bus = scope->bus;
738 path = (struct acpi_dmar_pci_path *)(scope + 1); 771 path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
749 path++; 782 path++;
750 } 783 }
751 784
752 ir_ioapic[ir_ioapic_num].bus = bus; 785 for (count = 0; count < MAX_IO_APICS; count++) {
753 ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function); 786 if (ir_ioapic[count].iommu == iommu &&
754 ir_ioapic[ir_ioapic_num].iommu = iommu; 787 ir_ioapic[count].id == scope->enumeration_id)
755 ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; 788 return 0;
756 ir_ioapic_num++; 789 else if (ir_ioapic[count].iommu == NULL && free == -1)
790 free = count;
791 }
792 if (free == -1) {
793 pr_warn("Exceeded Max IO APICS\n");
794 return -ENOSPC;
795 }
796
797 ir_ioapic[free].bus = bus;
798 ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
799 ir_ioapic[free].iommu = iommu;
800 ir_ioapic[free].id = scope->enumeration_id;
801 pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
802 scope->enumeration_id, drhd->address, iommu->seq_id);
803
804 return 0;
757} 805}
758 806
759static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, 807static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
760 struct intel_iommu *iommu) 808 struct intel_iommu *iommu)
761{ 809{
810 int ret = 0;
762 struct acpi_dmar_hardware_unit *drhd; 811 struct acpi_dmar_hardware_unit *drhd;
763 struct acpi_dmar_device_scope *scope; 812 struct acpi_dmar_device_scope *scope;
764 void *start, *end; 813 void *start, *end;
765 814
766 drhd = (struct acpi_dmar_hardware_unit *)header; 815 drhd = (struct acpi_dmar_hardware_unit *)header;
767
768 start = (void *)(drhd + 1); 816 start = (void *)(drhd + 1);
769 end = ((void *)drhd) + header->length; 817 end = ((void *)drhd) + header->length;
770 818
771 while (start < end) { 819 while (start < end && ret == 0) {
772 scope = start; 820 scope = start;
773 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { 821 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
774 if (ir_ioapic_num == MAX_IO_APICS) { 822 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
775 printk(KERN_WARNING "Exceeded Max IO APICS\n"); 823 else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
776 return -1; 824 ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
777 } 825 start += scope->length;
778 826 }
779 printk(KERN_INFO "IOAPIC id %d under DRHD base "
780 " 0x%Lx IOMMU %d\n", scope->enumeration_id,
781 drhd->address, iommu->seq_id);
782 827
783 ir_parse_one_ioapic_scope(scope, iommu); 828 return ret;
784 } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { 829}
785 if (ir_hpet_num == MAX_HPET_TBS) {
786 printk(KERN_WARNING "Exceeded Max HPET blocks\n");
787 return -1;
788 }
789 830
790 printk(KERN_INFO "HPET id %d under DRHD base" 831static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
791 " 0x%Lx\n", scope->enumeration_id, 832{
792 drhd->address); 833 int i;
793 834
794 ir_parse_one_hpet_scope(scope, iommu); 835 for (i = 0; i < MAX_HPET_TBS; i++)
795 } 836 if (ir_hpet[i].iommu == iommu)
796 start += scope->length; 837 ir_hpet[i].iommu = NULL;
797 }
798 838
799 return 0; 839 for (i = 0; i < MAX_IO_APICS; i++)
840 if (ir_ioapic[i].iommu == iommu)
841 ir_ioapic[i].iommu = NULL;
800} 842}
801 843
802/* 844/*
@@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = {
1171 .msi_setup_irq = intel_msi_setup_irq, 1213 .msi_setup_irq = intel_msi_setup_irq,
1172 .alloc_hpet_msi = intel_alloc_hpet_msi, 1214 .alloc_hpet_msi = intel_alloc_hpet_msi,
1173}; 1215};
1216
1217/*
1218 * Support of Interrupt Remapping Unit Hotplug
1219 */
1220static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
1221{
1222 int ret;
1223 int eim = x2apic_enabled();
1224
1225 if (eim && !ecap_eim_support(iommu->ecap)) {
1226 pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
1227 iommu->reg_phys, iommu->ecap);
1228 return -ENODEV;
1229 }
1230
1231 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
1232 pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
1233 iommu->reg_phys);
1234 return -ENODEV;
1235 }
1236
1237 /* TODO: check all IOAPICs are covered by IOMMU */
1238
1239 /* Setup Interrupt-remapping now. */
1240 ret = intel_setup_irq_remapping(iommu);
1241 if (ret) {
1242 pr_err("DRHD %Lx: failed to allocate resource\n",
1243 iommu->reg_phys);
1244 ir_remove_ioapic_hpet_scope(iommu);
1245 return ret;
1246 }
1247
1248 if (!iommu->qi) {
1249 /* Clear previous faults. */
1250 dmar_fault(-1, iommu);
1251 iommu_disable_irq_remapping(iommu);
1252 dmar_disable_qi(iommu);
1253 }
1254
1255 /* Enable queued invalidation */
1256 ret = dmar_enable_qi(iommu);
1257 if (!ret) {
1258 iommu_set_irq_remapping(iommu, eim);
1259 } else {
1260 pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
1261 iommu->reg_phys, iommu->ecap, ret);
1262 intel_teardown_irq_remapping(iommu);
1263 ir_remove_ioapic_hpet_scope(iommu);
1264 }
1265
1266 return ret;
1267}
1268
1269int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
1270{
1271 int ret = 0;
1272 struct intel_iommu *iommu = dmaru->iommu;
1273
1274 if (!irq_remapping_enabled)
1275 return 0;
1276 if (iommu == NULL)
1277 return -EINVAL;
1278 if (!ecap_ir_support(iommu->ecap))
1279 return 0;
1280
1281 if (insert) {
1282 if (!iommu->ir_table)
1283 ret = dmar_ir_add(dmaru, iommu);
1284 } else {
1285 if (iommu->ir_table) {
1286 if (!bitmap_empty(iommu->ir_table->bitmap,
1287 INTR_REMAP_TABLE_ENTRIES)) {
1288 ret = -EBUSY;
1289 } else {
1290 iommu_disable_irq_remapping(iommu);
1291 intel_teardown_irq_remapping(iommu);
1292 ir_remove_ioapic_hpet_scope(iommu);
1293 }
1294 }
1295 }
1296
1297 return ret;
1298}