Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 26
1 files changed, 22 insertions, 4 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 691e023695ad..679f2a8e22ee 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -203,6 +203,14 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 	return 0;
 }
 
+/* Flush the whole IO/TLB for a given protection domain */
+static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
+{
+	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+
+	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
+}
+
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
@@ -386,14 +394,18 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 			PAGE_SIZE) >> PAGE_SHIFT;
 	limit = limit < size ? limit : size;
 
-	if (dom->next_bit >= limit)
+	if (dom->next_bit >= limit) {
 		dom->next_bit = 0;
+		dom->need_flush = true;
+	}
 
 	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
 			0 , boundary_size, 0);
-	if (address == -1)
+	if (address == -1) {
 		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
 				0, boundary_size, 0);
+		dom->need_flush = true;
+	}
 
 	if (likely(address != -1)) {
 		dom->next_bit = address + pages;
@@ -553,6 +565,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	dma_dom->bitmap[0] = 1;
 	dma_dom->next_bit = 0;
 
+	dma_dom->need_flush = false;
+
 	/* Intialize the exclusion range if necessary */
 	if (iommu->exclusion_start &&
 	    iommu->exclusion_start < dma_dom->aperture_size) {
@@ -795,7 +809,10 @@ static dma_addr_t __map_single(struct device *dev,
 	}
 	address += offset;
 
-	if (unlikely(iommu_has_npcache(iommu)))
+	if (unlikely(dma_dom->need_flush && !iommu_fullflush)) {
+		iommu_flush_tlb(iommu, dma_dom->domain.id);
+		dma_dom->need_flush = false;
+	} else if (unlikely(iommu_has_npcache(iommu)))
 		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
 
 out:
@@ -829,7 +846,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
-	iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+	if (iommu_fullflush)
+		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
 }
 
 /*
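
For reference, a minimal standalone sketch of the lazy-flush pattern this patch introduces (not kernel code; the names lazy_dom, bitmap_alloc_range, tlb_flush_domain, lazy_alloc and lazy_map_flush are made up for illustration): the allocator marks the domain dirty only when addresses can be handed out again (wrap-around of next_bit, or the fallback scan from 0), the whole-domain flush is deferred until the next mapping is set up, and per-range flushes on unmap happen only when full flushing is explicitly requested.

#include <stdbool.h>

struct lazy_dom {
	unsigned long next_bit;		/* allocation hint into the bitmap */
	unsigned long limit;		/* number of usable bitmap bits */
	bool need_flush;		/* stale IO/TLB entries may exist */
};

/* Stand-ins for the real bitmap allocator and the hardware flush. */
extern long bitmap_alloc_range(struct lazy_dom *dom, unsigned long start,
			       unsigned long pages);
extern void tlb_flush_domain(struct lazy_dom *dom);

long lazy_alloc(struct lazy_dom *dom, unsigned long pages)
{
	long addr;

	if (dom->next_bit >= dom->limit) {
		/* Wrap-around: previously freed addresses may be reused. */
		dom->next_bit = 0;
		dom->need_flush = true;
	}

	addr = bitmap_alloc_range(dom, dom->next_bit, pages);
	if (addr == -1) {
		/* Fallback scan from 0 can also hand out reused addresses. */
		addr = bitmap_alloc_range(dom, 0, pages);
		dom->need_flush = true;
	}

	if (addr != -1)
		dom->next_bit = addr + pages;

	return addr;
}

void lazy_map_flush(struct lazy_dom *dom, bool fullflush)
{
	/*
	 * Flush the whole domain once, right before the new mapping is
	 * used, instead of flushing every range at unmap time.  With
	 * fullflush enabled the unmap path flushes eagerly, so nothing
	 * needs to happen here.
	 */
	if (dom->need_flush && !fullflush) {
		tlb_flush_domain(dom);
		dom->need_flush = false;
	}
}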