path: root/arch/x86/kernel/amd_iommu.c
author	Joerg Roedel <joerg.roedel@amd.com>	2009-11-20 09:30:58 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-11-27 08:16:26 -0500
commit	dcd1e92e405449ecc5e8bd8fcfebf3b2a13d3d37 (patch)
tree	8c40ea4593985366369de1e9255a8baf478d07b1 /arch/x86/kernel/amd_iommu.c
parent	6de8ad9b9ee0ec5b52ec8ec41401833e5e89186f (diff)
x86/amd-iommu: Use __iommu_flush_pages for tlb flushes
This patch re-implements the iommu_flush_tlb functions to use the __iommu_flush_pages logic.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
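For illustration, a condensed sketch of why passing CMD_INV_IOMMU_ALL_PAGES_ADDRESS as the size argument yields a full flush: __iommu_flush_pages (reconstructed below from the first hunk and the parent commit's multi-IOMMU logic; the amd_iommus[] array, amd_iommus_present, and the dev_iommu field are assumptions here, not quoted kernel code) escalates any multi-page request to a flush of all TLB entries on every IOMMU the domain is attached to:

static void __iommu_flush_pages(struct protection_domain *domain,
				u64 address, size_t size, int pde)
{
	int s = 0, i;
	unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);

	address &= PAGE_MASK;

	if (pages > 1) {
		/* More than one page to flush: invalidate every TLB
		 * entry of the domain instead. A size of
		 * CMD_INV_IOMMU_ALL_PAGES_ADDRESS always takes this path. */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	/* Queue the invalidation on each IOMMU serving this domain
	 * (assumed iteration scheme, following the parent commit). */
	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;
		iommu_queue_inv_iommu_pages(amd_iommus[i], address,
					    domain->id, pde, s);
	}
}

This is why the re-implemented wrappers in the diff below no longer take an iommu argument: the protection domain itself is enough to reach all relevant IOMMUs.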
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	22
1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 7c06e574008f..c55aa079ded3 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -451,7 +451,7 @@ static void __iommu_flush_pages(struct protection_domain *domain,
 			       u64 address, size_t size, int pde)
 {
 	int s = 0, i;
-	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
+	unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);
 
 	address &= PAGE_MASK;
 
@@ -487,23 +487,15 @@ static void iommu_flush_pages(struct protection_domain *domain,
 }
 
 /* Flush the whole IO/TLB for a given protection domain */
-static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
+static void iommu_flush_tlb(struct protection_domain *domain)
 {
-	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-
-	INC_STATS_COUNTER(domain_flush_single);
-
-	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
+	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
+static void iommu_flush_tlb_pde(struct protection_domain *domain)
 {
-	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-
-	INC_STATS_COUNTER(domain_flush_single);
-
-	iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
+	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
 }
 
 /*
@@ -1236,7 +1228,7 @@ static void attach_device(struct amd_iommu *iommu,
 	 * here to evict all dirty stuff.
 	 */
 	iommu_queue_inv_dev_entry(iommu, devid);
-	iommu_flush_tlb_pde(iommu, domain->id);
+	iommu_flush_tlb_pde(domain);
 }
 
 /*
@@ -1697,7 +1689,7 @@ retry:
 	ADD_STATS_COUNTER(alloced_io_mem, size);
 
 	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
-		iommu_flush_tlb(iommu, dma_dom->domain.id);
+		iommu_flush_tlb(&dma_dom->domain);
 		dma_dom->need_flush = false;
 	} else if (unlikely(iommu_has_npcache(iommu)))
 		iommu_flush_pages(&dma_dom->domain, address, size);
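As a usage note: after this change the only difference between the two wrappers is the pde argument forwarded to __iommu_flush_pages; pde = 1 additionally invalidates cached page-directory entries. A minimal, hypothetical before/after sketch of a call site (illustration only, not kernel code):

	/* Before: the caller picked one IOMMU and passed the raw domain id. */
	iommu_flush_tlb_pde(iommu, domain->id);

	/* After: the protection domain is enough; the IOMMU(s) behind it
	 * are resolved internally, as in the attach_device hunk above. */
	iommu_flush_tlb_pde(domain);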