path: root/drivers/pci/intel-iommu.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 20:25:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 20:25:01 -0400
commit		0961d6581c870850342ad6ea25263763433d666f (patch)
tree		371c61fd7f621397907983031003e784a040402e /drivers/pci/intel-iommu.c
parent		1756ac3d3c41341297ea25b818b7fce505bb2a9a (diff)
parent		fd0c8894893cba722bdea12de25b49f980795d06 (diff)
Merge git://git.infradead.org/iommu-2.6
* git://git.infradead.org/iommu-2.6:
  intel-iommu: Set a more specific taint flag for invalid BIOS DMAR tables
  intel-iommu: Combine the BIOS DMAR table warning messages
  panic: Add taint flag TAINT_FIRMWARE_WORKAROUND ('I')
  panic: Allow warnings to set different taint flags
  intel-iommu: intel_iommu_map_range failed at very end of address space
  intel-iommu: errors with smaller iommu widths
  intel-iommu: Fix boot inside 64bit virtualbox with io-apic disabled
  intel-iommu: use physfn to search drhd for VF
  intel-iommu: Print out iommu seq_id
  intel-iommu: Don't complain that ACPI_DMAR_SCOPE_TYPE_IOAPIC is not supported
  intel-iommu: Avoid global flushes with caching mode.
  intel-iommu: Use correct domain ID when caching mode is enabled
  intel-iommu mistakenly uses offset_pfn when caching mode is enabled
  intel-iommu: use for_each_set_bit()
  intel-iommu: Fix section mismatch dmar_ir_support() uses dmar_tbl.
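Several of the patches listed above ("intel-iommu: use for_each_set_bit()") replace open-coded find_first_bit()/find_next_bit() loops with the for_each_set_bit() iterator from <linux/bitops.h>. As a rough sketch of that conversion pattern (bitmap, nbits and use_bit() are illustrative placeholders here, not names from the driver):

	/* before: open-coded walk over the set bits of a bitmap */
	i = find_first_bit(bitmap, nbits);
	for (; i < nbits; ) {
		use_bit(i);
		i = find_next_bit(bitmap, nbits, i + 1);
	}

	/* after: the same walk using the for_each_set_bit() helper */
	for_each_set_bit(i, bitmap, nbits)
		use_bit(i);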
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	129
1 file changed, 59 insertions(+), 70 deletions(-)
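The caching-mode patches in this pull change iommu_flush_iotlb_psi() to take the real domain ID plus a trailing map flag, so callers can distinguish flushes for newly created mappings (map = 1, where no device-IOTLB flush is needed) from unmap flushes (map = 0). A sketch of the resulting call-site pattern, paraphrasing the hunks below with placeholder start_pfn/npages values:

	/* pages went from non-present to present: only caching mode needs a flush */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, npages, 1);
	else
		iommu_flush_write_buffer(iommu);

	/* pages are being torn down: flush with map == 0 */
	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, npages, 0);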
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 371dc564e2e4..796828fce34c 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -491,13 +491,11 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 
 	domain->iommu_coherency = 1;
 
-	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
-	for (; i < g_num_of_iommus; ) {
+	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
 		if (!ecap_coherent(g_iommus[i]->ecap)) {
 			domain->iommu_coherency = 0;
 			break;
 		}
-		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
 	}
 }
 
@@ -507,13 +505,11 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
 
 	domain->iommu_snooping = 1;
 
-	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
-	for (; i < g_num_of_iommus; ) {
+	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
 		if (!ecap_sc_support(g_iommus[i]->ecap)) {
 			domain->iommu_snooping = 0;
 			break;
 		}
-		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
 	}
 }
 
@@ -1068,7 +1064,7 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 }
 
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				  unsigned long pfn, unsigned int pages)
+				  unsigned long pfn, unsigned int pages, int map)
 {
 	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
@@ -1089,10 +1085,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 						DMA_TLB_PSI_FLUSH);
 
 	/*
-	 * In caching mode, domain ID 0 is reserved for non-present to present
-	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
+	 * In caching mode, changes of pages from non-present to present require
+	 * flush. However, device IOTLB doesn't need to be flushed in this case.
 	 */
-	if (!cap_caching_mode(iommu->cap) || did)
+	if (!cap_caching_mode(iommu->cap) || !map)
 		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
 }
 
@@ -1154,7 +1150,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	unsigned long nlongs;
 
 	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("Number of Domains supportd <%ld>\n", ndomains);
+	pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id,
+			ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
 	spin_lock_init(&iommu->lock);
@@ -1194,8 +1191,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 	unsigned long flags;
 
 	if ((iommu->domains) && (iommu->domain_ids)) {
-		i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
-		for (; i < cap_ndoms(iommu->cap); ) {
+		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
 			domain = iommu->domains[i];
 			clear_bit(i, iommu->domain_ids);
 
@@ -1207,9 +1203,6 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 					domain_exit(domain);
 			}
 			spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
-			i = find_next_bit(iommu->domain_ids,
-				cap_ndoms(iommu->cap), i+1);
 		}
 	}
 
@@ -1292,14 +1285,11 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ndomains = cap_ndoms(iommu->cap);
-	num = find_first_bit(iommu->domain_ids, ndomains);
-	for (; num < ndomains; ) {
+	for_each_set_bit(num, iommu->domain_ids, ndomains) {
 		if (iommu->domains[num] == domain) {
 			found = 1;
 			break;
 		}
-		num = find_next_bit(iommu->domain_ids,
-					cap_ndoms(iommu->cap), num+1);
 	}
 
 	if (found) {
@@ -1485,15 +1475,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 
 	/* find an available domain id for this device in iommu */
 	ndomains = cap_ndoms(iommu->cap);
-	num = find_first_bit(iommu->domain_ids, ndomains);
-	for (; num < ndomains; ) {
+	for_each_set_bit(num, iommu->domain_ids, ndomains) {
 		if (iommu->domains[num] == domain) {
 			id = num;
 			found = 1;
 			break;
 		}
-		num = find_next_bit(iommu->domain_ids,
-					cap_ndoms(iommu->cap), num+1);
 	}
 
 	if (found == 0) {
@@ -1558,7 +1545,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 					   (((u16)bus) << 8) | devfn,
 					   DMA_CCMD_MASK_NOBIT,
 					   DMA_CCMD_DEVICE_INVL);
-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
 	} else {
 		iommu_flush_write_buffer(iommu);
 	}
@@ -2333,14 +2320,16 @@ int __init init_dmars(void)
 			 */
 			iommu->flush.flush_context = __iommu_flush_context;
 			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
-			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
 			       "invalidation\n",
+				iommu->seq_id,
 			       (unsigned long long)drhd->reg_base_addr);
 		} else {
 			iommu->flush.flush_context = qi_flush_context;
 			iommu->flush.flush_iotlb = qi_flush_iotlb;
-			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
 			       "invalidation\n",
+				iommu->seq_id,
 			       (unsigned long long)drhd->reg_base_addr);
 		}
 	}
@@ -2621,7 +2610,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
+		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -2661,15 +2650,24 @@ static void flush_unmaps(void)
 		if (!deferred_flush[i].next)
 			continue;
 
-		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+		/* In caching mode, global flushes turn emulation expensive */
+		if (!cap_caching_mode(iommu->cap))
+			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH);
 		for (j = 0; j < deferred_flush[i].next; j++) {
 			unsigned long mask;
 			struct iova *iova = deferred_flush[i].iova[j];
-
-			mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
-			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
-				(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+			struct dmar_domain *domain = deferred_flush[i].domain[j];
+
+			/* On real hardware multiple invalidations are expensive */
+			if (cap_caching_mode(iommu->cap))
+				iommu_flush_iotlb_psi(iommu, domain->id,
+				iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
+			else {
+				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
+				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+					(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+			}
 			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
 		}
 		deferred_flush[i].next = 0;
@@ -2750,7 +2748,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 
 	if (intel_iommu_strict) {
 		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-				      last_pfn - start_pfn + 1);
+				      last_pfn - start_pfn + 1, 0);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
 	} else {
@@ -2840,7 +2838,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 
 	if (intel_iommu_strict) {
 		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-				      last_pfn - start_pfn + 1);
+				      last_pfn - start_pfn + 1, 0);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
 	} else {
@@ -2874,7 +2872,6 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	struct dmar_domain *domain;
 	size_t size = 0;
 	int prot = 0;
-	size_t offset_pfn = 0;
 	struct iova *iova = NULL;
 	int ret;
 	struct scatterlist *sg;
@@ -2928,7 +2925,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
+		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -3436,22 +3433,6 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 /* domain id for virtual machine, it won't be set in context */
 static unsigned long vm_domid;
 
-static int vm_domain_min_agaw(struct dmar_domain *domain)
-{
-	int i;
-	int min_agaw = domain->agaw;
-
-	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
-	for (; i < g_num_of_iommus; ) {
-		if (min_agaw > g_iommus[i]->agaw)
-			min_agaw = g_iommus[i]->agaw;
-
-		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
-	}
-
-	return min_agaw;
-}
-
 static struct dmar_domain *iommu_alloc_vm_domain(void)
 {
 	struct dmar_domain *domain;
@@ -3512,8 +3493,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
 		iommu = drhd->iommu;
 
 		ndomains = cap_ndoms(iommu->cap);
-		i = find_first_bit(iommu->domain_ids, ndomains);
-		for (; i < ndomains; ) {
+		for_each_set_bit(i, iommu->domain_ids, ndomains) {
 			if (iommu->domains[i] == domain) {
 				spin_lock_irqsave(&iommu->lock, flags);
 				clear_bit(i, iommu->domain_ids);
@@ -3521,7 +3501,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
 				spin_unlock_irqrestore(&iommu->lock, flags);
 				break;
 			}
-			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
 		}
 	}
 }
@@ -3582,7 +3561,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct intel_iommu *iommu;
 	int addr_width;
-	u64 end;
 
 	/* normally pdev is not mapped */
 	if (unlikely(domain_context_mapped(pdev))) {
@@ -3605,14 +3583,30 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 
 	/* check if this iommu agaw is sufficient for max mapped address */
 	addr_width = agaw_to_width(iommu->agaw);
-	end = DOMAIN_MAX_ADDR(addr_width);
-	end = end & VTD_PAGE_MASK;
-	if (end < dmar_domain->max_addr) {
-		printk(KERN_ERR "%s: iommu agaw (%d) is not "
+	if (addr_width > cap_mgaw(iommu->cap))
+		addr_width = cap_mgaw(iommu->cap);
+
+	if (dmar_domain->max_addr > (1LL << addr_width)) {
+		printk(KERN_ERR "%s: iommu width (%d) is not "
 		       "sufficient for the mapped address (%llx)\n",
-		       __func__, iommu->agaw, dmar_domain->max_addr);
+		       __func__, addr_width, dmar_domain->max_addr);
 		return -EFAULT;
 	}
+	dmar_domain->gaw = addr_width;
+
+	/*
+	 * Knock out extra levels of page tables if necessary
+	 */
+	while (iommu->agaw < dmar_domain->agaw) {
+		struct dma_pte *pte;
+
+		pte = dmar_domain->pgd;
+		if (dma_pte_present(pte)) {
+			free_pgtable_page(dmar_domain->pgd);
+			dmar_domain->pgd = (struct dma_pte *)dma_pte_addr(pte);
+		}
+		dmar_domain->agaw--;
+	}
 
 	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 }
@@ -3632,7 +3626,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
-	int addr_width;
 	int prot = 0;
 	size_t size;
 	int ret;
@@ -3647,18 +3640,14 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	size = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
-		int min_agaw;
 		u64 end;
 
 		/* check if minimum agaw is sufficient for mapped address */
-		min_agaw = vm_domain_min_agaw(dmar_domain);
-		addr_width = agaw_to_width(min_agaw);
-		end = DOMAIN_MAX_ADDR(addr_width);
-		end = end & VTD_PAGE_MASK;
+		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
 		if (end < max_addr) {
-			printk(KERN_ERR "%s: iommu agaw (%d) is not "
+			printk(KERN_ERR "%s: iommu width (%d) is not "
 			       "sufficient for the mapped address (%llx)\n",
-			       __func__, min_agaw, max_addr);
+			       __func__, dmar_domain->gaw, max_addr);
 			return -EFAULT;
 		}
 		dmar_domain->max_addr = max_addr;