about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/pci/dmar.c82
-rw-r--r--drivers/pci/intel-iommu.c129
-rw-r--r--drivers/pci/intr_remapping.c6
3 files changed, 99 insertions, 118 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 33ead97f0c4b..0a19708074c2 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -131,9 +131,10 @@ static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
131 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || 131 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
132 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) 132 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
133 (*cnt)++; 133 (*cnt)++;
134 else 134 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
135 printk(KERN_WARNING PREFIX 135 printk(KERN_WARNING PREFIX
136 "Unsupported device scope\n"); 136 "Unsupported device scope\n");
137 }
137 start += scope->length; 138 start += scope->length;
138 } 139 }
139 if (*cnt == 0) 140 if (*cnt == 0)
@@ -309,6 +310,8 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
309 struct acpi_dmar_atsr *atsr; 310 struct acpi_dmar_atsr *atsr;
310 struct dmar_atsr_unit *atsru; 311 struct dmar_atsr_unit *atsru;
311 312
313 dev = pci_physfn(dev);
314
312 list_for_each_entry(atsru, &dmar_atsr_units, list) { 315 list_for_each_entry(atsru, &dmar_atsr_units, list) {
313 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); 316 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
314 if (atsr->segment == pci_domain_nr(dev->bus)) 317 if (atsr->segment == pci_domain_nr(dev->bus))
@@ -358,12 +361,14 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
358 return 0; 361 return 0;
359 } 362 }
360 } 363 }
361 WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" 364 WARN_TAINT(
362 "BIOS vendor: %s; Ver: %s; Product Version: %s\n", 365 1, TAINT_FIRMWARE_WORKAROUND,
363 drhd->reg_base_addr, 366 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
364 dmi_get_system_info(DMI_BIOS_VENDOR), 367 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
365 dmi_get_system_info(DMI_BIOS_VERSION), 368 drhd->reg_base_addr,
366 dmi_get_system_info(DMI_PRODUCT_VERSION)); 369 dmi_get_system_info(DMI_BIOS_VENDOR),
370 dmi_get_system_info(DMI_BIOS_VERSION),
371 dmi_get_system_info(DMI_PRODUCT_VERSION));
367 372
368 return 0; 373 return 0;
369} 374}
@@ -507,7 +512,7 @@ parse_dmar_table(void)
507 return ret; 512 return ret;
508} 513}
509 514
510int dmar_pci_device_match(struct pci_dev *devices[], int cnt, 515static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
511 struct pci_dev *dev) 516 struct pci_dev *dev)
512{ 517{
513 int index; 518 int index;
@@ -530,6 +535,8 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
530 struct dmar_drhd_unit *dmaru = NULL; 535 struct dmar_drhd_unit *dmaru = NULL;
531 struct acpi_dmar_hardware_unit *drhd; 536 struct acpi_dmar_hardware_unit *drhd;
532 537
538 dev = pci_physfn(dev);
539
533 list_for_each_entry(dmaru, &dmar_drhd_units, list) { 540 list_for_each_entry(dmaru, &dmar_drhd_units, list) {
534 drhd = container_of(dmaru->hdr, 541 drhd = container_of(dmaru->hdr,
535 struct acpi_dmar_hardware_unit, 542 struct acpi_dmar_hardware_unit,
@@ -614,7 +621,17 @@ int __init dmar_table_init(void)
614 return 0; 621 return 0;
615} 622}
616 623
617static int bios_warned; 624static void warn_invalid_dmar(u64 addr, const char *message)
625{
626 WARN_TAINT_ONCE(
627 1, TAINT_FIRMWARE_WORKAROUND,
628 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
629 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
630 addr, message,
631 dmi_get_system_info(DMI_BIOS_VENDOR),
632 dmi_get_system_info(DMI_BIOS_VERSION),
633 dmi_get_system_info(DMI_PRODUCT_VERSION));
634}
618 635
619int __init check_zero_address(void) 636int __init check_zero_address(void)
620{ 637{
@@ -640,13 +657,7 @@ int __init check_zero_address(void)
640 657
641 drhd = (void *)entry_header; 658 drhd = (void *)entry_header;
642 if (!drhd->address) { 659 if (!drhd->address) {
643 /* Promote an attitude of violence to a BIOS engineer today */ 660 warn_invalid_dmar(0, "");
644 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
645 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
646 dmi_get_system_info(DMI_BIOS_VENDOR),
647 dmi_get_system_info(DMI_BIOS_VERSION),
648 dmi_get_system_info(DMI_PRODUCT_VERSION));
649 bios_warned = 1;
650 goto failed; 661 goto failed;
651 } 662 }
652 663
@@ -659,14 +670,8 @@ int __init check_zero_address(void)
659 ecap = dmar_readq(addr + DMAR_ECAP_REG); 670 ecap = dmar_readq(addr + DMAR_ECAP_REG);
660 early_iounmap(addr, VTD_PAGE_SIZE); 671 early_iounmap(addr, VTD_PAGE_SIZE);
661 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { 672 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
662 /* Promote an attitude of violence to a BIOS engineer today */ 673 warn_invalid_dmar(drhd->address,
663 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" 674 " returns all ones");
664 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
665 drhd->address,
666 dmi_get_system_info(DMI_BIOS_VENDOR),
667 dmi_get_system_info(DMI_BIOS_VERSION),
668 dmi_get_system_info(DMI_PRODUCT_VERSION));
669 bios_warned = 1;
670 goto failed; 675 goto failed;
671 } 676 }
672 } 677 }
@@ -731,14 +736,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
731 int msagaw = 0; 736 int msagaw = 0;
732 737
733 if (!drhd->reg_base_addr) { 738 if (!drhd->reg_base_addr) {
734 if (!bios_warned) { 739 warn_invalid_dmar(0, "");
735 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
736 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
737 dmi_get_system_info(DMI_BIOS_VENDOR),
738 dmi_get_system_info(DMI_BIOS_VERSION),
739 dmi_get_system_info(DMI_PRODUCT_VERSION));
740 bios_warned = 1;
741 }
742 return -EINVAL; 740 return -EINVAL;
743 } 741 }
744 742
@@ -758,16 +756,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
758 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); 756 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
759 757
760 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { 758 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
761 if (!bios_warned) { 759 warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
762 /* Promote an attitude of violence to a BIOS engineer today */
763 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
764 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
765 drhd->reg_base_addr,
766 dmi_get_system_info(DMI_BIOS_VENDOR),
767 dmi_get_system_info(DMI_BIOS_VERSION),
768 dmi_get_system_info(DMI_PRODUCT_VERSION));
769 bios_warned = 1;
770 }
771 goto err_unmap; 760 goto err_unmap;
772 } 761 }
773 762
@@ -806,7 +795,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
806 } 795 }
807 796
808 ver = readl(iommu->reg + DMAR_VER_REG); 797 ver = readl(iommu->reg + DMAR_VER_REG);
809 pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", 798 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
799 iommu->seq_id,
810 (unsigned long long)drhd->reg_base_addr, 800 (unsigned long long)drhd->reg_base_addr,
811 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), 801 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
812 (unsigned long long)iommu->cap, 802 (unsigned long long)iommu->cap,
@@ -1457,9 +1447,11 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
1457/* 1447/*
1458 * Check interrupt remapping support in DMAR table description. 1448 * Check interrupt remapping support in DMAR table description.
1459 */ 1449 */
1460int dmar_ir_support(void) 1450int __init dmar_ir_support(void)
1461{ 1451{
1462 struct acpi_table_dmar *dmar; 1452 struct acpi_table_dmar *dmar;
1463 dmar = (struct acpi_table_dmar *)dmar_tbl; 1453 dmar = (struct acpi_table_dmar *)dmar_tbl;
1454 if (!dmar)
1455 return 0;
1464 return dmar->flags & 0x1; 1456 return dmar->flags & 0x1;
1465} 1457}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 371dc564e2e4..796828fce34c 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -491,13 +491,11 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
491 491
492 domain->iommu_coherency = 1; 492 domain->iommu_coherency = 1;
493 493
494 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); 494 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
495 for (; i < g_num_of_iommus; ) {
496 if (!ecap_coherent(g_iommus[i]->ecap)) { 495 if (!ecap_coherent(g_iommus[i]->ecap)) {
497 domain->iommu_coherency = 0; 496 domain->iommu_coherency = 0;
498 break; 497 break;
499 } 498 }
500 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
501 } 499 }
502} 500}
503 501
@@ -507,13 +505,11 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
507 505
508 domain->iommu_snooping = 1; 506 domain->iommu_snooping = 1;
509 507
510 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); 508 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
511 for (; i < g_num_of_iommus; ) {
512 if (!ecap_sc_support(g_iommus[i]->ecap)) { 509 if (!ecap_sc_support(g_iommus[i]->ecap)) {
513 domain->iommu_snooping = 0; 510 domain->iommu_snooping = 0;
514 break; 511 break;
515 } 512 }
516 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
517 } 513 }
518} 514}
519 515
@@ -1068,7 +1064,7 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1068} 1064}
1069 1065
1070static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 1066static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1071 unsigned long pfn, unsigned int pages) 1067 unsigned long pfn, unsigned int pages, int map)
1072{ 1068{
1073 unsigned int mask = ilog2(__roundup_pow_of_two(pages)); 1069 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1074 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; 1070 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
@@ -1089,10 +1085,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1089 DMA_TLB_PSI_FLUSH); 1085 DMA_TLB_PSI_FLUSH);
1090 1086
1091 /* 1087 /*
1092 * In caching mode, domain ID 0 is reserved for non-present to present 1088 * In caching mode, changes of pages from non-present to present require
1093 * mapping flush. Device IOTLB doesn't need to be flushed in this case. 1089 * flush. However, device IOTLB doesn't need to be flushed in this case.
1094 */ 1090 */
1095 if (!cap_caching_mode(iommu->cap) || did) 1091 if (!cap_caching_mode(iommu->cap) || !map)
1096 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); 1092 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1097} 1093}
1098 1094
@@ -1154,7 +1150,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
1154 unsigned long nlongs; 1150 unsigned long nlongs;
1155 1151
1156 ndomains = cap_ndoms(iommu->cap); 1152 ndomains = cap_ndoms(iommu->cap);
1157 pr_debug("Number of Domains supportd <%ld>\n", ndomains); 1153 pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id,
1154 ndomains);
1158 nlongs = BITS_TO_LONGS(ndomains); 1155 nlongs = BITS_TO_LONGS(ndomains);
1159 1156
1160 spin_lock_init(&iommu->lock); 1157 spin_lock_init(&iommu->lock);
@@ -1194,8 +1191,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
1194 unsigned long flags; 1191 unsigned long flags;
1195 1192
1196 if ((iommu->domains) && (iommu->domain_ids)) { 1193 if ((iommu->domains) && (iommu->domain_ids)) {
1197 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); 1194 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1198 for (; i < cap_ndoms(iommu->cap); ) {
1199 domain = iommu->domains[i]; 1195 domain = iommu->domains[i];
1200 clear_bit(i, iommu->domain_ids); 1196 clear_bit(i, iommu->domain_ids);
1201 1197
@@ -1207,9 +1203,6 @@ void free_dmar_iommu(struct intel_iommu *iommu)
1207 domain_exit(domain); 1203 domain_exit(domain);
1208 } 1204 }
1209 spin_unlock_irqrestore(&domain->iommu_lock, flags); 1205 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1210
1211 i = find_next_bit(iommu->domain_ids,
1212 cap_ndoms(iommu->cap), i+1);
1213 } 1206 }
1214 } 1207 }
1215 1208
@@ -1292,14 +1285,11 @@ static void iommu_detach_domain(struct dmar_domain *domain,
1292 1285
1293 spin_lock_irqsave(&iommu->lock, flags); 1286 spin_lock_irqsave(&iommu->lock, flags);
1294 ndomains = cap_ndoms(iommu->cap); 1287 ndomains = cap_ndoms(iommu->cap);
1295 num = find_first_bit(iommu->domain_ids, ndomains); 1288 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1296 for (; num < ndomains; ) {
1297 if (iommu->domains[num] == domain) { 1289 if (iommu->domains[num] == domain) {
1298 found = 1; 1290 found = 1;
1299 break; 1291 break;
1300 } 1292 }
1301 num = find_next_bit(iommu->domain_ids,
1302 cap_ndoms(iommu->cap), num+1);
1303 } 1293 }
1304 1294
1305 if (found) { 1295 if (found) {
@@ -1485,15 +1475,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1485 1475
1486 /* find an available domain id for this device in iommu */ 1476 /* find an available domain id for this device in iommu */
1487 ndomains = cap_ndoms(iommu->cap); 1477 ndomains = cap_ndoms(iommu->cap);
1488 num = find_first_bit(iommu->domain_ids, ndomains); 1478 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1489 for (; num < ndomains; ) {
1490 if (iommu->domains[num] == domain) { 1479 if (iommu->domains[num] == domain) {
1491 id = num; 1480 id = num;
1492 found = 1; 1481 found = 1;
1493 break; 1482 break;
1494 } 1483 }
1495 num = find_next_bit(iommu->domain_ids,
1496 cap_ndoms(iommu->cap), num+1);
1497 } 1484 }
1498 1485
1499 if (found == 0) { 1486 if (found == 0) {
@@ -1558,7 +1545,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1558 (((u16)bus) << 8) | devfn, 1545 (((u16)bus) << 8) | devfn,
1559 DMA_CCMD_MASK_NOBIT, 1546 DMA_CCMD_MASK_NOBIT,
1560 DMA_CCMD_DEVICE_INVL); 1547 DMA_CCMD_DEVICE_INVL);
1561 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH); 1548 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1562 } else { 1549 } else {
1563 iommu_flush_write_buffer(iommu); 1550 iommu_flush_write_buffer(iommu);
1564 } 1551 }
@@ -2333,14 +2320,16 @@ int __init init_dmars(void)
2333 */ 2320 */
2334 iommu->flush.flush_context = __iommu_flush_context; 2321 iommu->flush.flush_context = __iommu_flush_context;
2335 iommu->flush.flush_iotlb = __iommu_flush_iotlb; 2322 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2336 printk(KERN_INFO "IOMMU 0x%Lx: using Register based " 2323 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2337 "invalidation\n", 2324 "invalidation\n",
2325 iommu->seq_id,
2338 (unsigned long long)drhd->reg_base_addr); 2326 (unsigned long long)drhd->reg_base_addr);
2339 } else { 2327 } else {
2340 iommu->flush.flush_context = qi_flush_context; 2328 iommu->flush.flush_context = qi_flush_context;
2341 iommu->flush.flush_iotlb = qi_flush_iotlb; 2329 iommu->flush.flush_iotlb = qi_flush_iotlb;
2342 printk(KERN_INFO "IOMMU 0x%Lx: using Queued " 2330 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2343 "invalidation\n", 2331 "invalidation\n",
2332 iommu->seq_id,
2344 (unsigned long long)drhd->reg_base_addr); 2333 (unsigned long long)drhd->reg_base_addr);
2345 } 2334 }
2346 } 2335 }
@@ -2621,7 +2610,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2621 2610
2622 /* it's a non-present to present mapping. Only flush if caching mode */ 2611 /* it's a non-present to present mapping. Only flush if caching mode */
2623 if (cap_caching_mode(iommu->cap)) 2612 if (cap_caching_mode(iommu->cap))
2624 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size); 2613 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2625 else 2614 else
2626 iommu_flush_write_buffer(iommu); 2615 iommu_flush_write_buffer(iommu);
2627 2616
@@ -2661,15 +2650,24 @@ static void flush_unmaps(void)
2661 if (!deferred_flush[i].next) 2650 if (!deferred_flush[i].next)
2662 continue; 2651 continue;
2663 2652
2664 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 2653 /* In caching mode, global flushes turn emulation expensive */
2654 if (!cap_caching_mode(iommu->cap))
2655 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2665 DMA_TLB_GLOBAL_FLUSH); 2656 DMA_TLB_GLOBAL_FLUSH);
2666 for (j = 0; j < deferred_flush[i].next; j++) { 2657 for (j = 0; j < deferred_flush[i].next; j++) {
2667 unsigned long mask; 2658 unsigned long mask;
2668 struct iova *iova = deferred_flush[i].iova[j]; 2659 struct iova *iova = deferred_flush[i].iova[j];
2669 2660 struct dmar_domain *domain = deferred_flush[i].domain[j];
2670 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); 2661
2671 iommu_flush_dev_iotlb(deferred_flush[i].domain[j], 2662 /* On real hardware multiple invalidations are expensive */
2672 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); 2663 if (cap_caching_mode(iommu->cap))
2664 iommu_flush_iotlb_psi(iommu, domain->id,
2665 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2666 else {
2667 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2668 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2669 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2670 }
2673 __free_iova(&deferred_flush[i].domain[j]->iovad, iova); 2671 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2674 } 2672 }
2675 deferred_flush[i].next = 0; 2673 deferred_flush[i].next = 0;
@@ -2750,7 +2748,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2750 2748
2751 if (intel_iommu_strict) { 2749 if (intel_iommu_strict) {
2752 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, 2750 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2753 last_pfn - start_pfn + 1); 2751 last_pfn - start_pfn + 1, 0);
2754 /* free iova */ 2752 /* free iova */
2755 __free_iova(&domain->iovad, iova); 2753 __free_iova(&domain->iovad, iova);
2756 } else { 2754 } else {
@@ -2840,7 +2838,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2840 2838
2841 if (intel_iommu_strict) { 2839 if (intel_iommu_strict) {
2842 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, 2840 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2843 last_pfn - start_pfn + 1); 2841 last_pfn - start_pfn + 1, 0);
2844 /* free iova */ 2842 /* free iova */
2845 __free_iova(&domain->iovad, iova); 2843 __free_iova(&domain->iovad, iova);
2846 } else { 2844 } else {
@@ -2874,7 +2872,6 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2874 struct dmar_domain *domain; 2872 struct dmar_domain *domain;
2875 size_t size = 0; 2873 size_t size = 0;
2876 int prot = 0; 2874 int prot = 0;
2877 size_t offset_pfn = 0;
2878 struct iova *iova = NULL; 2875 struct iova *iova = NULL;
2879 int ret; 2876 int ret;
2880 struct scatterlist *sg; 2877 struct scatterlist *sg;
@@ -2928,7 +2925,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
2928 2925
2929 /* it's a non-present to present mapping. Only flush if caching mode */ 2926 /* it's a non-present to present mapping. Only flush if caching mode */
2930 if (cap_caching_mode(iommu->cap)) 2927 if (cap_caching_mode(iommu->cap))
2931 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn); 2928 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
2932 else 2929 else
2933 iommu_flush_write_buffer(iommu); 2930 iommu_flush_write_buffer(iommu);
2934 2931
@@ -3436,22 +3433,6 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3436/* domain id for virtual machine, it won't be set in context */ 3433/* domain id for virtual machine, it won't be set in context */
3437static unsigned long vm_domid; 3434static unsigned long vm_domid;
3438 3435
3439static int vm_domain_min_agaw(struct dmar_domain *domain)
3440{
3441 int i;
3442 int min_agaw = domain->agaw;
3443
3444 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3445 for (; i < g_num_of_iommus; ) {
3446 if (min_agaw > g_iommus[i]->agaw)
3447 min_agaw = g_iommus[i]->agaw;
3448
3449 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3450 }
3451
3452 return min_agaw;
3453}
3454
3455static struct dmar_domain *iommu_alloc_vm_domain(void) 3436static struct dmar_domain *iommu_alloc_vm_domain(void)
3456{ 3437{
3457 struct dmar_domain *domain; 3438 struct dmar_domain *domain;
@@ -3512,8 +3493,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
3512 iommu = drhd->iommu; 3493 iommu = drhd->iommu;
3513 3494
3514 ndomains = cap_ndoms(iommu->cap); 3495 ndomains = cap_ndoms(iommu->cap);
3515 i = find_first_bit(iommu->domain_ids, ndomains); 3496 for_each_set_bit(i, iommu->domain_ids, ndomains) {
3516 for (; i < ndomains; ) {
3517 if (iommu->domains[i] == domain) { 3497 if (iommu->domains[i] == domain) {
3518 spin_lock_irqsave(&iommu->lock, flags); 3498 spin_lock_irqsave(&iommu->lock, flags);
3519 clear_bit(i, iommu->domain_ids); 3499 clear_bit(i, iommu->domain_ids);
@@ -3521,7 +3501,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
3521 spin_unlock_irqrestore(&iommu->lock, flags); 3501 spin_unlock_irqrestore(&iommu->lock, flags);
3522 break; 3502 break;
3523 } 3503 }
3524 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3525 } 3504 }
3526 } 3505 }
3527} 3506}
@@ -3582,7 +3561,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3582 struct pci_dev *pdev = to_pci_dev(dev); 3561 struct pci_dev *pdev = to_pci_dev(dev);
3583 struct intel_iommu *iommu; 3562 struct intel_iommu *iommu;
3584 int addr_width; 3563 int addr_width;
3585 u64 end;
3586 3564
3587 /* normally pdev is not mapped */ 3565 /* normally pdev is not mapped */
3588 if (unlikely(domain_context_mapped(pdev))) { 3566 if (unlikely(domain_context_mapped(pdev))) {
@@ -3605,14 +3583,30 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3605 3583
3606 /* check if this iommu agaw is sufficient for max mapped address */ 3584 /* check if this iommu agaw is sufficient for max mapped address */
3607 addr_width = agaw_to_width(iommu->agaw); 3585 addr_width = agaw_to_width(iommu->agaw);
3608 end = DOMAIN_MAX_ADDR(addr_width); 3586 if (addr_width > cap_mgaw(iommu->cap))
3609 end = end & VTD_PAGE_MASK; 3587 addr_width = cap_mgaw(iommu->cap);
3610 if (end < dmar_domain->max_addr) { 3588
3611 printk(KERN_ERR "%s: iommu agaw (%d) is not " 3589 if (dmar_domain->max_addr > (1LL << addr_width)) {
3590 printk(KERN_ERR "%s: iommu width (%d) is not "
3612 "sufficient for the mapped address (%llx)\n", 3591 "sufficient for the mapped address (%llx)\n",
3613 __func__, iommu->agaw, dmar_domain->max_addr); 3592 __func__, addr_width, dmar_domain->max_addr);
3614 return -EFAULT; 3593 return -EFAULT;
3615 } 3594 }
3595 dmar_domain->gaw = addr_width;
3596
3597 /*
3598 * Knock out extra levels of page tables if necessary
3599 */
3600 while (iommu->agaw < dmar_domain->agaw) {
3601 struct dma_pte *pte;
3602
3603 pte = dmar_domain->pgd;
3604 if (dma_pte_present(pte)) {
3605 free_pgtable_page(dmar_domain->pgd);
3606 dmar_domain->pgd = (struct dma_pte *)dma_pte_addr(pte);
3607 }
3608 dmar_domain->agaw--;
3609 }
3616 3610
3617 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); 3611 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3618} 3612}
@@ -3632,7 +3626,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
3632{ 3626{
3633 struct dmar_domain *dmar_domain = domain->priv; 3627 struct dmar_domain *dmar_domain = domain->priv;
3634 u64 max_addr; 3628 u64 max_addr;
3635 int addr_width;
3636 int prot = 0; 3629 int prot = 0;
3637 size_t size; 3630 size_t size;
3638 int ret; 3631 int ret;
@@ -3647,18 +3640,14 @@ static int intel_iommu_map(struct iommu_domain *domain,
3647 size = PAGE_SIZE << gfp_order; 3640 size = PAGE_SIZE << gfp_order;
3648 max_addr = iova + size; 3641 max_addr = iova + size;
3649 if (dmar_domain->max_addr < max_addr) { 3642 if (dmar_domain->max_addr < max_addr) {
3650 int min_agaw;
3651 u64 end; 3643 u64 end;
3652 3644
3653 /* check if minimum agaw is sufficient for mapped address */ 3645 /* check if minimum agaw is sufficient for mapped address */
3654 min_agaw = vm_domain_min_agaw(dmar_domain); 3646 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
3655 addr_width = agaw_to_width(min_agaw);
3656 end = DOMAIN_MAX_ADDR(addr_width);
3657 end = end & VTD_PAGE_MASK;
3658 if (end < max_addr) { 3647 if (end < max_addr) {
3659 printk(KERN_ERR "%s: iommu agaw (%d) is not " 3648 printk(KERN_ERR "%s: iommu width (%d) is not "
3660 "sufficient for the mapped address (%llx)\n", 3649 "sufficient for the mapped address (%llx)\n",
3661 __func__, min_agaw, max_addr); 3650 __func__, dmar_domain->gaw, max_addr);
3662 return -EFAULT; 3651 return -EFAULT;
3663 } 3652 }
3664 dmar_domain->max_addr = max_addr; 3653 dmar_domain->max_addr = max_addr;
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 6ee98a56946f..1315ac688aa2 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -832,9 +832,9 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
832 return -1; 832 return -1;
833 } 833 }
834 834
835 printk(KERN_INFO "IOAPIC id %d under DRHD base" 835 printk(KERN_INFO "IOAPIC id %d under DRHD base "
836 " 0x%Lx\n", scope->enumeration_id, 836 " 0x%Lx IOMMU %d\n", scope->enumeration_id,
837 drhd->address); 837 drhd->address, iommu->seq_id);
838 838
839 ir_parse_one_ioapic_scope(scope, iommu); 839 ir_parse_one_ioapic_scope(scope, iommu);
840 } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { 840 } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {