author     Jiang Liu <jiang.liu@linux.intel.com>   2014-11-09 09:48:00 -0500
committer  Joerg Roedel <jroedel@suse.de>          2014-11-18 05:18:36 -0500
commit     a7a3dad944344caf034699b0c0e8dc51b469cf20 (patch)
tree       cabb9e9275c447df1167c11870b3f7b96d1b220b
parent     d35165a955f095095cdb8512cb7cd8f63101649a (diff)
iommu/vt-d: Enhance intel_irq_remapping driver to support DMAR unit hotplug
Implement the callback functions required for the intel_irq_remapping driver
to support DMAR unit hotplug.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--  drivers/iommu/intel_irq_remapping.c  238
1 file changed, 178 insertions(+), 60 deletions(-)
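For orientation, the hotplug entry point added by this patch behaves roughly as sketched below. This is a condensed, illustrative restatement of the dmar_ir_hotplug()/dmar_ir_add() logic that appears in the diff that follows; it is not the patched code itself.

/*
 * Illustrative sketch only -- a condensed restatement of the hotplug
 * flow added by this patch (see dmar_ir_hotplug()/dmar_ir_add() below).
 */
int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;		/* interrupt remapping is globally off */
	if (!iommu)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;		/* this unit cannot remap interrupts */

	if (insert)
		/* parse IOAPIC/HPET scopes, allocate the ir_table,
		 * enable queued invalidation and interrupt remapping */
		return iommu->ir_table ? 0 : dmar_ir_add(dmaru, iommu);

	/* removal is refused while any remapping entry is still in use */
	if (iommu->ir_table) {
		if (!bitmap_empty(iommu->ir_table->bitmap,
				  INTR_REMAP_TABLE_ENTRIES))
			return -EBUSY;
		iommu_disable_irq_remapping(iommu);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	}
	return 0;
}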
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 1cbdb509bc7b..7af0b56dc2d1 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -36,7 +36,6 @@ struct hpet_scope {
 
 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
-static int ir_ioapic_num, ir_hpet_num;
 
 /*
  * Lock ordering:
@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
 	int i;
 
 	for (i = 0; i < MAX_HPET_TBS; i++)
-		if (ir_hpet[i].id == hpet_id)
+		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
 			return ir_hpet[i].iommu;
 	return NULL;
 }
@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic)
 	int i;
 
 	for (i = 0; i < MAX_IO_APICS; i++)
-		if (ir_ioapic[i].id == apic)
+		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
 			return ir_ioapic[i].iommu;
 	return NULL;
 }
@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
 
 	down_read(&dmar_global_lock);
 	for (i = 0; i < MAX_IO_APICS; i++) {
-		if (ir_ioapic[i].id == apic) {
+		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
 			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
 			break;
 		}
@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
 
 	down_read(&dmar_global_lock);
 	for (i = 0; i < MAX_HPET_TBS; i++) {
-		if (ir_hpet[i].id == id) {
+		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
 			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
 			break;
 		}
@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
 	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
-
-static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
+static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 {
 	struct ir_table *ir_table;
 	struct page *pages;
 	unsigned long *bitmap;
 
-	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
-					     GFP_ATOMIC);
-
-	if (!iommu->ir_table)
+	if (iommu->ir_table)
+		return 0;
+
+	ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+	if (!ir_table)
 		return -ENOMEM;
 
 	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 	if (!pages) {
 		pr_err("IR%d: failed to allocate pages of order %d\n",
 		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
-		kfree(iommu->ir_table);
-		return -ENOMEM;
+		goto out_free_table;
 	}
 
 	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
 			 sizeof(long), GFP_ATOMIC);
 	if (bitmap == NULL) {
 		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
-		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
-		kfree(ir_table);
-		return -ENOMEM;
+		goto out_free_pages;
 	}
 
 	ir_table->base = page_address(pages);
 	ir_table->bitmap = bitmap;
-
-	iommu_set_irq_remapping(iommu, mode);
+	iommu->ir_table = ir_table;
 	return 0;
+
+out_free_pages:
+	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
+out_free_table:
+	kfree(ir_table);
+	return -ENOMEM;
+}
+
+static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
+{
+	if (iommu && iommu->ir_table) {
+		free_pages((unsigned long)iommu->ir_table->base,
+			   INTR_REMAP_PAGE_ORDER);
+		kfree(iommu->ir_table->bitmap);
+		kfree(iommu->ir_table);
+		iommu->ir_table = NULL;
+	}
 }
 
 /*
@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void)
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
-		if (intel_setup_irq_remapping(iommu, eim))
+		if (intel_setup_irq_remapping(iommu))
 			goto error;
 
+		iommu_set_irq_remapping(iommu, eim);
 		setup = 1;
 	}
 
@@ -699,12 +712,13 @@ error:
 	return -1;
 }
 
-static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
-				    struct intel_iommu *iommu)
+static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
+				   struct intel_iommu *iommu,
+				   struct acpi_dmar_hardware_unit *drhd)
 {
 	struct acpi_dmar_pci_path *path;
 	u8 bus;
-	int count;
+	int count, free = -1;
 
 	bus = scope->bus;
 	path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -720,19 +734,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
 					   PCI_SECONDARY_BUS);
 		path++;
 	}
-	ir_hpet[ir_hpet_num].bus = bus;
-	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
-	ir_hpet[ir_hpet_num].iommu = iommu;
-	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
-	ir_hpet_num++;
+
+	for (count = 0; count < MAX_HPET_TBS; count++) {
+		if (ir_hpet[count].iommu == iommu &&
+		    ir_hpet[count].id == scope->enumeration_id)
+			return 0;
+		else if (ir_hpet[count].iommu == NULL && free == -1)
+			free = count;
+	}
+	if (free == -1) {
+		pr_warn("Exceeded Max HPET blocks\n");
+		return -ENOSPC;
+	}
+
+	ir_hpet[free].iommu = iommu;
+	ir_hpet[free].id = scope->enumeration_id;
+	ir_hpet[free].bus = bus;
+	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
+	pr_info("HPET id %d under DRHD base 0x%Lx\n",
+		scope->enumeration_id, drhd->address);
+
+	return 0;
 }
 
-static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
-				      struct intel_iommu *iommu)
+static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+				     struct intel_iommu *iommu,
+				     struct acpi_dmar_hardware_unit *drhd)
 {
 	struct acpi_dmar_pci_path *path;
 	u8 bus;
-	int count;
+	int count, free = -1;
 
 	bus = scope->bus;
 	path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -749,54 +780,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
 		path++;
 	}
 
-	ir_ioapic[ir_ioapic_num].bus = bus;
-	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
-	ir_ioapic[ir_ioapic_num].iommu = iommu;
-	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
-	ir_ioapic_num++;
+	for (count = 0; count < MAX_IO_APICS; count++) {
+		if (ir_ioapic[count].iommu == iommu &&
+		    ir_ioapic[count].id == scope->enumeration_id)
+			return 0;
+		else if (ir_ioapic[count].iommu == NULL && free == -1)
+			free = count;
+	}
+	if (free == -1) {
+		pr_warn("Exceeded Max IO APICS\n");
+		return -ENOSPC;
+	}
+
+	ir_ioapic[free].bus = bus;
+	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
+	ir_ioapic[free].iommu = iommu;
+	ir_ioapic[free].id = scope->enumeration_id;
+	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
+		scope->enumeration_id, drhd->address, iommu->seq_id);
+
+	return 0;
 }
 
 static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
 				      struct intel_iommu *iommu)
 {
+	int ret = 0;
 	struct acpi_dmar_hardware_unit *drhd;
 	struct acpi_dmar_device_scope *scope;
 	void *start, *end;
 
 	drhd = (struct acpi_dmar_hardware_unit *)header;
-
 	start = (void *)(drhd + 1);
 	end = ((void *)drhd) + header->length;
 
-	while (start < end) {
+	while (start < end && ret == 0) {
 		scope = start;
-		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
-			if (ir_ioapic_num == MAX_IO_APICS) {
-				printk(KERN_WARNING "Exceeded Max IO APICS\n");
-				return -1;
-			}
-
-			printk(KERN_INFO "IOAPIC id %d under DRHD base "
-			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
-			       drhd->address, iommu->seq_id);
+		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
+			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
+		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
+			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
+		start += scope->length;
+	}
 
-			ir_parse_one_ioapic_scope(scope, iommu);
-		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
-			if (ir_hpet_num == MAX_HPET_TBS) {
-				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
-				return -1;
-			}
+	return ret;
+}
 
-			printk(KERN_INFO "HPET id %d under DRHD base"
-			       " 0x%Lx\n", scope->enumeration_id,
-			       drhd->address);
+static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
+{
+	int i;
 
-			ir_parse_one_hpet_scope(scope, iommu);
-		}
-		start += scope->length;
-	}
+	for (i = 0; i < MAX_HPET_TBS; i++)
+		if (ir_hpet[i].iommu == iommu)
+			ir_hpet[i].iommu = NULL;
 
-	return 0;
+	for (i = 0; i < MAX_IO_APICS; i++)
+		if (ir_ioapic[i].iommu == iommu)
+			ir_ioapic[i].iommu = NULL;
 }
 
 /*
@@ -1172,7 +1212,85 @@ struct irq_remap_ops intel_irq_remap_ops = {
 	.alloc_hpet_msi = intel_alloc_hpet_msi,
 };
 
+/*
+ * Support of Interrupt Remapping Unit Hotplug
+ */
+static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
+{
+	int ret;
+	int eim = x2apic_enabled();
+
+	if (eim && !ecap_eim_support(iommu->ecap)) {
+		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
+			iommu->reg_phys, iommu->ecap);
+		return -ENODEV;
+	}
+
+	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
+		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
+			iommu->reg_phys);
+		return -ENODEV;
+	}
+
+	/* TODO: check all IOAPICs are covered by IOMMU */
+
+	/* Setup Interrupt-remapping now. */
+	ret = intel_setup_irq_remapping(iommu);
+	if (ret) {
+		pr_err("DRHD %Lx: failed to allocate resource\n",
+		       iommu->reg_phys);
+		ir_remove_ioapic_hpet_scope(iommu);
+		return ret;
+	}
+
+	if (!iommu->qi) {
+		/* Clear previous faults. */
+		dmar_fault(-1, iommu);
+		iommu_disable_irq_remapping(iommu);
+		dmar_disable_qi(iommu);
+	}
+
+	/* Enable queued invalidation */
+	ret = dmar_enable_qi(iommu);
+	if (!ret) {
+		iommu_set_irq_remapping(iommu, eim);
+	} else {
+		pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
+		       iommu->reg_phys, iommu->ecap, ret);
+		intel_teardown_irq_remapping(iommu);
+		ir_remove_ioapic_hpet_scope(iommu);
+	}
+
+	return ret;
+}
+
 int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
 {
-	return irq_remapping_enabled ? -ENOSYS : 0;
+	int ret = 0;
+	struct intel_iommu *iommu = dmaru->iommu;
+
+	if (!irq_remapping_enabled)
+		return 0;
+	if (iommu == NULL)
+		return -EINVAL;
+	if (!ecap_ir_support(iommu->ecap))
+		return 0;
+
+	if (insert) {
+		if (!iommu->ir_table)
+			ret = dmar_ir_add(dmaru, iommu);
+	} else {
+		if (iommu->ir_table) {
+			if (!bitmap_empty(iommu->ir_table->bitmap,
+					  INTR_REMAP_TABLE_ENTRIES)) {
+				ret = -EBUSY;
+			} else {
+				iommu_disable_irq_remapping(iommu);
+				intel_teardown_irq_remapping(iommu);
+				ir_remove_ioapic_hpet_scope(iommu);
+			}
+		}
+	}
+
+	return ret;
 }