Diffstat (limited to 'drivers/pci')

 -rw-r--r--   drivers/pci/intel-iommu.c | 97
 1 file changed, 67 insertions(+), 30 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 86b9f58a645e..9dca689215eb 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -208,7 +208,7 @@ static inline bool dma_pte_present(struct dma_pte *pte)
 
 struct dmar_domain {
 	int id;				/* domain id */
-	struct intel_iommu *iommu;	/* back pointer to owning iommu */
+	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/
 
 	struct list_head devices; 	/* all devices' list */
 	struct iova_domain iovad;	/* iova's that belong to this domain */
@@ -362,6 +362,18 @@ void free_iova_mem(struct iova *iova)
 	kmem_cache_free(iommu_iova_cache, iova);
 }
 
+/* in native case, each domain is related to only one iommu */
+static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
+{
+	int iommu_id;
+
+	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
+		return NULL;
+
+	return g_iommus[iommu_id];
+}
+
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 		u8 bus, u8 devfn)
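
The helper introduced above is the heart of this change: instead of chasing a one-to-one domain->iommu back pointer, callers resolve the owning unit from the iommu_bmp bitmap, which leaves room for a domain to span several IOMMUs later. A rough userspace sketch of the lookup follows; first_set_bit() stands in for the kernel's find_first_bit() (which returns the bitmap size, not a negative value, when no bit is set), and the g_iommus/g_num_of_iommus globals are illustrative stand-ins for the driver's tables.

	#include <stdio.h>

	struct intel_iommu { int seq_id; };

	static struct intel_iommu iommus[4] = { {0}, {1}, {2}, {3} };
	static struct intel_iommu *g_iommus[] =
		{ &iommus[0], &iommus[1], &iommus[2], &iommus[3] };
	static int g_num_of_iommus = 4;

	/* mimics find_first_bit(): returns size when no bit is set */
	static int first_set_bit(unsigned long bmp, int size)
	{
		for (int i = 0; i < size; i++)
			if (bmp & (1UL << i))
				return i;
		return size;
	}

	static struct intel_iommu *lookup_iommu(unsigned long iommu_bmp)
	{
		int id = first_set_bit(iommu_bmp, g_num_of_iommus);

		if (id < 0 || id >= g_num_of_iommus)	/* no unit attached */
			return NULL;
		return g_iommus[id];
	}

	int main(void)
	{
		unsigned long iommu_bmp = 0;

		iommu_bmp |= 1UL << 2;		/* set_bit(2, &iommu_bmp) */
		printf("domain uses iommu %d\n", lookup_iommu(iommu_bmp)->seq_id);
		return 0;
	}
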
@@ -502,6 +514,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 	int level = agaw_to_level(domain->agaw);
 	int offset;
 	unsigned long flags;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	BUG_ON(!domain->pgd);
 
@@ -525,7 +538,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 					flags);
 				return NULL;
 			}
-			__iommu_flush_cache(domain->iommu, tmp_page,
+			__iommu_flush_cache(iommu, tmp_page,
 					PAGE_SIZE);
 			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
 			/*
@@ -534,7 +547,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 			 */
 			dma_set_pte_readable(pte);
 			dma_set_pte_writable(pte);
-			__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+			__iommu_flush_cache(iommu, pte, sizeof(*pte));
 		}
 		parent = phys_to_virt(dma_pte_addr(pte));
 		level--;
@@ -571,13 +584,14 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
 static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 {
 	struct dma_pte *pte = NULL;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	/* get last level pte */
 	pte = dma_addr_level_pte(domain, addr, 1);
 
 	if (pte) {
 		dma_clear_pte(pte);
-		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+		__iommu_flush_cache(iommu, pte, sizeof(*pte));
 	}
 }
 
@@ -608,6 +622,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 	int total = agaw_to_level(domain->agaw);
 	int level;
 	u64 tmp;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
@@ -625,7 +640,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 				free_pgtable_page(
 					phys_to_virt(dma_pte_addr(pte)));
 				dma_clear_pte(pte);
-				__iommu_flush_cache(domain->iommu,
+				__iommu_flush_cache(iommu,
 						pte, sizeof(*pte));
 			}
 			tmp += level_size(level);
@@ -1195,7 +1210,8 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 
 	set_bit(num, iommu->domain_ids);
 	domain->id = num;
-	domain->iommu = iommu;
+	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+	set_bit(iommu->seq_id, &domain->iommu_bmp);
 	domain->flags = 0;
 	iommu->domains[num] = domain;
 	spin_unlock_irqrestore(&iommu->lock, flags);
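
Domain allocation now records membership by seq_id in the bitmap rather than storing a pointer; with a single unsigned long, at most BITS_PER_LONG units can be represented. A minimal sketch of the scheme, with plain C bit operations standing in for the kernel's set_bit()/test_bit():

	#include <stdio.h>

	int main(void)
	{
		unsigned long iommu_bmp;
		int seq_id = 3;			/* hypothetical iommu->seq_id */

		iommu_bmp = 0;			/* memset(&bmp, 0, sizeof(unsigned long)) */
		iommu_bmp |= 1UL << seq_id;	/* set_bit(seq_id, &iommu_bmp) */

		printf("attached to iommu %d: %s\n", seq_id,
		       (iommu_bmp & (1UL << seq_id)) ? "yes" : "no");

		/* a domain spanning a second unit would simply set another bit */
		iommu_bmp |= 1UL << 0;
		return 0;
	}
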
@@ -1206,10 +1222,13 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 static void iommu_free_domain(struct dmar_domain *domain)
 {
 	unsigned long flags;
+	struct intel_iommu *iommu;
+
+	iommu = domain_get_iommu(domain);
 
-	spin_lock_irqsave(&domain->iommu->lock, flags);
-	clear_bit(domain->id, domain->iommu->domain_ids);
-	spin_unlock_irqrestore(&domain->iommu->lock, flags);
+	spin_lock_irqsave(&iommu->lock, flags);
+	clear_bit(domain->id, iommu->domain_ids);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static struct iova_domain reserved_iova_list;
@@ -1288,7 +1307,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
-	iommu = domain->iommu;
+	iommu = domain_get_iommu(domain);
 	if (guest_width > cap_mgaw(iommu->cap))
 		guest_width = cap_mgaw(iommu->cap);
 	domain->gaw = guest_width;
@@ -1341,7 +1360,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 		u8 bus, u8 devfn)
 {
 	struct context_entry *context;
-	struct intel_iommu *iommu = domain->iommu;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 	unsigned long flags;
 
 	pr_debug("Set context mapping for %02x:%02x.%d\n",
@@ -1413,8 +1432,9 @@ static int domain_context_mapped(struct dmar_domain *domain,
 {
 	int ret;
 	struct pci_dev *tmp, *parent;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
-	ret = device_context_mapped(domain->iommu,
+	ret = device_context_mapped(iommu,
 		pdev->bus->number, pdev->devfn);
 	if (!ret)
 		return ret;
@@ -1425,17 +1445,17 @@ static int domain_context_mapped(struct dmar_domain *domain,
 	/* Secondary interface's bus number and devfn 0 */
 	parent = pdev->bus->self;
 	while (parent != tmp) {
-		ret = device_context_mapped(domain->iommu, parent->bus->number,
+		ret = device_context_mapped(iommu, parent->bus->number,
 			parent->devfn);
 		if (!ret)
 			return ret;
 		parent = parent->bus->self;
 	}
 	if (tmp->is_pcie)
-		return device_context_mapped(domain->iommu,
+		return device_context_mapped(iommu,
 			tmp->subordinate->number, 0);
 	else
-		return device_context_mapped(domain->iommu,
+		return device_context_mapped(iommu,
 			tmp->bus->number, tmp->devfn);
 }
 
@@ -1447,6 +1467,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 	struct dma_pte *pte;
 	int index;
 	int addr_width = agaw_to_width(domain->agaw);
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	hpa &= (((u64)1) << addr_width) - 1;
 
@@ -1466,7 +1487,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 		BUG_ON(dma_pte_addr(pte));
 		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
 		dma_set_pte_prot(pte, prot);
-		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+		__iommu_flush_cache(iommu, pte, sizeof(*pte));
 		start_pfn++;
 		index++;
 	}
@@ -1475,10 +1496,12 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 
 static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
 {
-	clear_context_table(domain->iommu, bus, devfn);
-	domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+	struct intel_iommu *iommu = domain_get_iommu(domain);
+
+	clear_context_table(iommu, bus, devfn);
+	iommu->flush.flush_context(iommu, 0, 0, 0,
 		DMA_CCMD_GLOBAL_INVL, 0);
-	domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 		DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
@@ -2033,6 +2056,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	struct iova *iova;
 	int prot = 0;
 	int ret;
+	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -2042,6 +2066,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	if (!domain)
 		return 0;
 
+	iommu = domain_get_iommu(domain);
 	size = aligned_size((u64)paddr, size);
 
 	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
@@ -2055,7 +2080,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * mappings..
 	 */
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-			!cap_zlr(domain->iommu->cap))
+			!cap_zlr(iommu->cap))
 		prot |= DMA_PTE_READ;
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
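
The protection bits here derive from the DMA direction, with one wrinkle: unless the unit advertises zero-length-read support (cap_zlr), read permission is forced even on write-only mappings, since PCIe devices may issue zero-length reads against them. A self-contained sketch of that mapping, with stand-in constants for the kernel's DMA_PTE_* flags and dma_data_direction values:

	#include <stdbool.h>
	#include <stdio.h>

	enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };
	#define DMA_PTE_READ	(1 << 0)
	#define DMA_PTE_WRITE	(1 << 1)

	static unsigned compute_prot(enum dma_data_direction dir, bool zlr)
	{
		unsigned prot = 0;

		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || !zlr)
			prot |= DMA_PTE_READ;	/* read forced without ZLR */
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			prot |= DMA_PTE_WRITE;
		return prot;
	}

	int main(void)
	{
		/* write-only mapping, no ZLR: read still granted -> 0x3 */
		printf("prot=0x%x\n", compute_prot(DMA_FROM_DEVICE, false));
		/* write-only mapping, ZLR capable: write only -> 0x2 */
		printf("prot=0x%x\n", compute_prot(DMA_FROM_DEVICE, true));
		return 0;
	}
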
@@ -2071,10 +2096,10 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 		goto error;
 
 	/* it's a non-present to present mapping */
-	ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
+	ret = iommu_flush_iotlb_psi(iommu, domain->id,
 		start_paddr, size >> VTD_PAGE_SHIFT, 1);
 	if (ret)
-		iommu_flush_write_buffer(domain->iommu);
+		iommu_flush_write_buffer(iommu);
 
 	return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
@@ -2132,12 +2157,14 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 {
 	unsigned long flags;
 	int next, iommu_id;
+	struct intel_iommu *iommu;
 
 	spin_lock_irqsave(&async_umap_flush_lock, flags);
 	if (list_size == HIGH_WATER_MARK)
 		flush_unmaps();
 
-	iommu_id = dom->iommu->seq_id;
+	iommu = domain_get_iommu(dom);
+	iommu_id = iommu->seq_id;
 
 	next = deferred_flush[iommu_id].next;
 	deferred_flush[iommu_id].domain[next] = dom;
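
add_unmap() keeps batching deferred unmaps per IOMMU, now resolving the unit through the helper before indexing by seq_id. A simplified sketch of the per-unit queues follows; the sizes, the missing lock, and the flush step are illustrative (the real driver stores struct iova * and domain pointers and flushes each unit's IOTLB once per batch):

	#include <stdio.h>

	#define NUM_IOMMUS	4	/* stand-in for g_num_of_iommus */
	#define HIGH_WATER_MARK	8	/* illustrative; the driver's is larger */

	struct deferred_flush_table {
		int next;
		unsigned long iova[HIGH_WATER_MARK];
	};

	static struct deferred_flush_table deferred_flush[NUM_IOMMUS];
	static int list_size;

	static void flush_unmaps(void)
	{
		/* one IOTLB flush per unit would happen here */
		for (int i = 0; i < NUM_IOMMUS; i++)
			deferred_flush[i].next = 0;
		list_size = 0;
	}

	static void add_unmap(int seq_id, unsigned long iova)
	{
		if (list_size == HIGH_WATER_MARK)
			flush_unmaps();

		int next = deferred_flush[seq_id].next;	/* per-unit slot */
		deferred_flush[seq_id].iova[next] = iova;
		deferred_flush[seq_id].next++;
		list_size++;
	}

	int main(void)
	{
		add_unmap(2, 0x1000);	/* queued on unit 2's table */
		printf("iommu 2 pending unmaps: %d\n", deferred_flush[2].next);
		return 0;
	}
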
@@ -2159,12 +2186,15 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 	struct dmar_domain *domain;
 	unsigned long start_addr;
 	struct iova *iova;
+	struct intel_iommu *iommu;
 
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
 		return;
 	domain = find_domain(pdev);
 	BUG_ON(!domain);
 
+	iommu = domain_get_iommu(domain);
+
 	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
 	if (!iova)
 		return;
@@ -2180,9 +2210,9 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 	/* free page tables */
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 	if (intel_iommu_strict) {
-		if (iommu_flush_iotlb_psi(domain->iommu,
+		if (iommu_flush_iotlb_psi(iommu,
 			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
-			iommu_flush_write_buffer(domain->iommu);
+			iommu_flush_write_buffer(iommu);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
 	} else {
@@ -2243,11 +2273,15 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	size_t size = 0;
 	void *addr;
 	struct scatterlist *sg;
+	struct intel_iommu *iommu;
 
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
 		return;
 
 	domain = find_domain(pdev);
+	BUG_ON(!domain);
+
+	iommu = domain_get_iommu(domain);
 
 	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
 	if (!iova)
@@ -2264,9 +2298,9 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	/* free page tables */
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
-	if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
+	if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
 		size >> VTD_PAGE_SHIFT, 0))
-		iommu_flush_write_buffer(domain->iommu);
+		iommu_flush_write_buffer(iommu);
 
 	/* free iova */
 	__free_iova(&domain->iovad, iova);
@@ -2300,6 +2334,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	int ret;
 	struct scatterlist *sg;
 	unsigned long start_addr;
+	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -2309,6 +2344,8 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	if (!domain)
 		return 0;
 
+	iommu = domain_get_iommu(domain);
+
 	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
@@ -2326,7 +2363,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	 * mappings..
 	 */
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-			!cap_zlr(domain->iommu->cap))
+			!cap_zlr(iommu->cap))
 		prot |= DMA_PTE_READ;
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
@@ -2358,9 +2395,9 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	}
 
 	/* it's a non-present to present mapping */
-	if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
+	if (iommu_flush_iotlb_psi(iommu, domain->id,
 		start_addr, offset >> VTD_PAGE_SHIFT, 1))
-		iommu_flush_write_buffer(domain->iommu);
+		iommu_flush_write_buffer(iommu);
 	return nelems;
 }
 