path: root/drivers/pci/intel-iommu.c
author	Yu Zhao <yu.zhao@intel.com>	2009-05-18 01:51:36 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-05-18 09:46:00 -0400
commit	9dd2fe89062c90a964d122b8be5615d6f2203bbe (patch)
tree	73907c1df5822e9a0ab8460782e8503b24842690 /drivers/pci/intel-iommu.c
parent	6ba6c3a4cacfd68bf970e3e04e2ff0d66fa0f695 (diff)
VT-d: cleanup iommu_flush_iotlb_psi and flush_unmaps
Make iommu_flush_iotlb_psi() and flush_unmaps() more readable.

Signed-off-by: Yu Zhao <yu.zhao@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	38
1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index bc99b1e47fbc..6d7cb84c63ea 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -948,28 +948,23 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 				  u64 addr, unsigned int pages)
 {
-	unsigned int mask;
+	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 
 	BUG_ON(addr & (~VTD_PAGE_MASK));
 	BUG_ON(pages == 0);
 
-	/* Fallback to domain selective flush if no PSI support */
-	if (!cap_pgsel_inv(iommu->cap))
-		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
-						DMA_TLB_DSI_FLUSH);
-
 	/*
+	 * Fallback to domain selective flush if no PSI support or the size is
+	 * too big.
 	 * PSI requires page size to be 2 ^ x, and the base address is naturally
 	 * aligned to the size
 	 */
-	mask = ilog2(__roundup_pow_of_two(pages));
-	/* Fallback to domain selective flush if size is too big */
-	if (mask > cap_max_amask_val(iommu->cap))
-		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+		iommu->flush.flush_iotlb(iommu, did, 0, 0,
 						DMA_TLB_DSI_FLUSH);
-
-	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
+	else
+		iommu->flush.flush_iotlb(iommu, did, addr, mask,
 					DMA_TLB_PSI_FLUSH);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
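
The hoisted initializer is the heart of this hunk: ilog2(__roundup_pow_of_two(pages)) turns an arbitrary page count into the power-of-two order (the address-mask value) that page-selective invalidation (PSI) encodes, and computing it up front lets one combined test cover both fallback cases. Below is a minimal userspace sketch of that mapping, assuming nothing beyond the C standard library; roundup_pow_of_two() and ilog2() here are simplified stand-ins for the kernel helpers of the same names, not the kernel implementations.

/*
 * Standalone sketch (not kernel code): models how
 * ilog2(__roundup_pow_of_two(pages)) converts a page count into the
 * mask order PSI expects.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)		/* smallest power of two >= n */
		p <<= 1;
	return p;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)		/* floor(log2(n)) */
		log++;
	return log;
}

int main(void)
{
	unsigned int pages;

	for (pages = 1; pages <= 9; pages++)
		printf("pages=%u -> mask=%u (flushes %u pages)\n",
		       pages, ilog2(roundup_pow_of_two(pages)),
		       roundup_pow_of_two(pages));
	return 0;
}

For instance, pages = 9 rounds up to 16, so mask = 4 and the hardware flushes a naturally aligned 16-page region.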
@@ -2260,15 +2255,16 @@ static void flush_unmaps(void)
 		if (!iommu)
 			continue;
 
-		if (deferred_flush[i].next) {
-			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-						 DMA_TLB_GLOBAL_FLUSH);
-			for (j = 0; j < deferred_flush[i].next; j++) {
-				__free_iova(&deferred_flush[i].domain[j]->iovad,
-					    deferred_flush[i].iova[j]);
-			}
-			deferred_flush[i].next = 0;
-		}
+		if (!deferred_flush[i].next)
+			continue;
+
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH);
+		for (j = 0; j < deferred_flush[i].next; j++) {
+			__free_iova(&deferred_flush[i].domain[j]->iovad,
+				    deferred_flush[i].iova[j]);
+		}
+		deferred_flush[i].next = 0;
 	}
 
 	list_size = 0;
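
The flush_unmaps() hunk is a pure guard-clause cleanup: testing !deferred_flush[i].next and continuing early drops one nesting level from the loop body while leaving the flush-then-free logic untouched. A minimal standalone sketch of the pattern follows; the queue structure and flush_all() below are hypothetical, for illustration only.

/*
 * Standalone sketch of the guard-clause pattern applied above:
 * inverting the condition and continuing early removes one nesting
 * level from the loop body without changing behavior.
 */
#include <stdio.h>

#define NR_QUEUES 4

struct queue {
	int next;	/* number of pending entries, 0 if idle */
};

static void flush_all(struct queue *queues)
{
	int i;

	for (i = 0; i < NR_QUEUES; i++) {
		/* guard clause: skip idle queues instead of nesting */
		if (!queues[i].next)
			continue;

		printf("flushing queue %d (%d entries)\n",
		       i, queues[i].next);
		queues[i].next = 0;
	}
}

int main(void)
{
	struct queue queues[NR_QUEUES] = { {2}, {0}, {5}, {0} };

	flush_all(queues);
	return 0;
}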