author    Joerg Roedel <joerg.roedel@amd.com>    2011-04-06 12:01:35 -0400
committer Joerg Roedel <joerg.roedel@amd.com>    2011-04-07 04:46:06 -0400
commit    17b124bf1463582005d662d4dd95f037ad863c57 (patch)
tree      d28bb759019eeb9363c6041432316764365a545f /arch/x86/kernel/amd_iommu.c
parent    61985a040f17c03b09a2772508ee02729571365b (diff)
x86/amd-iommu: Rename iommu_flush* to domain_flush*
These functions all operate on protection domains and not on single IOMMUs. Represent that in their names.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
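For orientation, the pattern behind the rename is simple: helpers that take a struct protection_domain * walk the set of IOMMUs recorded in domain->dev_iommu[] and issue the per-IOMMU primitive on each, so they now carry a domain_ prefix, while the per-IOMMU primitives keep the iommu_ prefix. Below is a minimal, self-contained C sketch of that walk; it is not kernel code, and toy_iommu, toy_domain, MAX_IOMMUS and the iommus[] array are stand-ins invented here for struct amd_iommu, struct protection_domain, amd_iommus_present and amd_iommus[].

/*
 * Standalone illustration only -- not the kernel implementation.
 * The types and names below are invented stand-ins.
 */
#include <stdio.h>

#define MAX_IOMMUS 4

struct toy_iommu {
	int id;
};

struct toy_domain {
	/* dev_iommu[i] != 0 means devices of this domain sit behind IOMMU i */
	int dev_iommu[MAX_IOMMUS];
};

static struct toy_iommu iommus[MAX_IOMMUS] = {
	{ .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 },
};

/* Per-IOMMU primitive: keeps its iommu_ prefix after the rename. */
static void iommu_completion_wait(struct toy_iommu *iommu)
{
	printf("waiting for completion on IOMMU %d\n", iommu->id);
}

/* Per-domain helper: operates on the whole domain, hence the domain_ prefix. */
static void domain_flush_complete(struct toy_domain *domain)
{
	int i;

	for (i = 0; i < MAX_IOMMUS; ++i) {
		if (!domain->dev_iommu[i])
			continue;
		/* Only IOMMUs that actually serve this domain are touched. */
		iommu_completion_wait(&iommus[i]);
	}
}

int main(void)
{
	struct toy_domain d = { .dev_iommu = { 1, 0, 1, 0 } };

	domain_flush_complete(&d);	/* waits on IOMMU 0 and IOMMU 2 */
	return 0;
}

Compiled on its own, the sketch waits only on the IOMMUs that actually serve the domain, which is the behaviour the renamed domain_flush_complete() in the patch below implements inside the driver.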
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	87
1 file changed, 44 insertions(+), 43 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0147c5c87aa8..9d66b2092ae1 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -488,22 +488,6 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	return 0;
 }
 
-static void iommu_flush_complete(struct protection_domain *domain)
-{
-	int i;
-
-	for (i = 0; i < amd_iommus_present; ++i) {
-		if (!domain->dev_iommu[i])
-			continue;
-
-		/*
-		 * Devices of this domain are behind this IOMMU
-		 * We need to wait for completion of all commands.
-		 */
-		iommu_completion_wait(amd_iommus[i]);
-	}
-}
-
 /*
  * Command send function for invalidating a device table entry
  */
@@ -526,8 +510,8 @@ static int iommu_flush_device(struct device *dev)
  * It invalidates a single PTE if the range to flush is within a single
  * page. Otherwise it flushes the whole TLB of the IOMMU.
  */
-static void __iommu_flush_pages(struct protection_domain *domain,
-				u64 address, size_t size, int pde)
+static void __domain_flush_pages(struct protection_domain *domain,
+				 u64 address, size_t size, int pde)
 {
 	struct iommu_cmd cmd;
 	int ret = 0, i;
@@ -548,29 +532,45 @@ static void __iommu_flush_pages(struct protection_domain *domain,
 	WARN_ON(ret);
 }
 
-static void iommu_flush_pages(struct protection_domain *domain,
-			      u64 address, size_t size)
+static void domain_flush_pages(struct protection_domain *domain,
+			       u64 address, size_t size)
 {
-	__iommu_flush_pages(domain, address, size, 0);
+	__domain_flush_pages(domain, address, size, 0);
 }
 
 /* Flush the whole IO/TLB for a given protection domain */
-static void iommu_flush_tlb(struct protection_domain *domain)
+static void domain_flush_tlb(struct protection_domain *domain)
 {
-	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
+	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void iommu_flush_tlb_pde(struct protection_domain *domain)
+static void domain_flush_tlb_pde(struct protection_domain *domain)
 {
-	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+}
+
+static void domain_flush_complete(struct protection_domain *domain)
+{
+	int i;
+
+	for (i = 0; i < amd_iommus_present; ++i) {
+		if (!domain->dev_iommu[i])
+			continue;
+
+		/*
+		 * Devices of this domain are behind this IOMMU
+		 * We need to wait for completion of all commands.
+		 */
+		iommu_completion_wait(amd_iommus[i]);
+	}
 }
 
 
 /*
  * This function flushes the DTEs for all devices in domain
  */
-static void iommu_flush_domain_devices(struct protection_domain *domain)
+static void domain_flush_devices(struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
 	unsigned long flags;
@@ -591,8 +591,8 @@ static void iommu_flush_all_domain_devices(void)
 	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
 	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
-		iommu_flush_domain_devices(domain);
-		iommu_flush_complete(domain);
+		domain_flush_devices(domain);
+		domain_flush_complete(domain);
 	}
 
 	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
@@ -616,8 +616,8 @@ void amd_iommu_flush_all_domains(void)
 
 	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
 		spin_lock(&domain->lock);
-		iommu_flush_tlb_pde(domain);
-		iommu_flush_complete(domain);
+		domain_flush_tlb_pde(domain);
+		domain_flush_complete(domain);
 		spin_unlock(&domain->lock);
 	}
 
@@ -1480,7 +1480,7 @@ static int attach_device(struct device *dev,
 	 * left the caches in the IOMMU dirty. So we have to flush
 	 * here to evict all dirty stuff.
 	 */
-	iommu_flush_tlb_pde(domain);
+	domain_flush_tlb_pde(domain);
 
 	return ret;
 }
@@ -1693,8 +1693,9 @@ static void update_domain(struct protection_domain *domain)
 		return;
 
 	update_device_table(domain);
-	iommu_flush_domain_devices(domain);
-	iommu_flush_tlb_pde(domain);
+
+	domain_flush_devices(domain);
+	domain_flush_tlb_pde(domain);
 
 	domain->updated = false;
 }
@@ -1853,10 +1854,10 @@ retry:
 	ADD_STATS_COUNTER(alloced_io_mem, size);
 
 	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
-		iommu_flush_tlb(&dma_dom->domain);
+		domain_flush_tlb(&dma_dom->domain);
 		dma_dom->need_flush = false;
 	} else if (unlikely(amd_iommu_np_cache))
-		iommu_flush_pages(&dma_dom->domain, address, size);
+		domain_flush_pages(&dma_dom->domain, address, size);
 
 out:
 	return address;
@@ -1905,7 +1906,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
 	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		iommu_flush_pages(&dma_dom->domain, flush_addr, size);
+		domain_flush_pages(&dma_dom->domain, flush_addr, size);
 		dma_dom->need_flush = false;
 	}
 }
@@ -1941,7 +1942,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	if (addr == DMA_ERROR_CODE)
 		goto out;
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1968,7 +1969,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	__unmap_single(domain->priv, dma_addr, size, dir);
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -2033,7 +2034,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 			goto unmap;
 	}
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -2079,7 +2080,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -2129,7 +2130,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out_free;
 	}
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
@@ -2161,7 +2162,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	iommu_flush_complete(domain);
+	domain_flush_complete(domain);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
@@ -2471,7 +2472,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
-	iommu_flush_tlb_pde(domain);
+	domain_flush_tlb_pde(domain);
 
 	return get_order(unmap_size);
 }