Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  74
1 file changed, 74 insertions, 0 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f4747fe70aaa..aab9125ac0b2 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -798,3 +798,77 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
+static void *alloc_coherent(struct device *dev, size_t size,
+			    dma_addr_t *dma_addr, gfp_t flag)
+{
+	unsigned long flags;
+	void *virt_addr;
+	struct amd_iommu *iommu;
+	struct protection_domain *domain;
+	u16 devid;
+	phys_addr_t paddr;
+
+	virt_addr = (void *)__get_free_pages(flag, get_order(size));
+	if (!virt_addr)
+		return 0;
+
+	memset(virt_addr, 0, size);
+	paddr = virt_to_phys(virt_addr);
+
+	get_device_resources(dev, &iommu, &domain, &devid);
+
+	if (!iommu || !domain) {
+		*dma_addr = (dma_addr_t)paddr;
+		return virt_addr;
+	}
+
+	spin_lock_irqsave(&domain->lock, flags);
+
+	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
+				 size, DMA_BIDIRECTIONAL);
+
+	if (*dma_addr == bad_dma_address) {
+		free_pages((unsigned long)virt_addr, get_order(size));
+		virt_addr = NULL;
+		goto out;
+	}
+
+	if (iommu_has_npcache(iommu))
+		iommu_flush_pages(iommu, domain->id, *dma_addr, size);
+
+	if (iommu->need_sync)
+		iommu_completion_wait(iommu);
+
+out:
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return virt_addr;
+}
+
+static void free_coherent(struct device *dev, size_t size,
+			  void *virt_addr, dma_addr_t dma_addr)
+{
+	unsigned long flags;
+	struct amd_iommu *iommu;
+	struct protection_domain *domain;
+	u16 devid;
+
+	get_device_resources(dev, &iommu, &domain, &devid);
+
+	if (!iommu || !domain)
+		goto free_mem;
+
+	spin_lock_irqsave(&domain->lock, flags);
+
+	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
+	iommu_flush_pages(iommu, domain->id, dma_addr, size);
+
+	if (iommu->need_sync)
+		iommu_completion_wait(iommu);
+
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+free_mem:
+	free_pages((unsigned long)virt_addr, get_order(size));
+}
+
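
For context, alloc_coherent() and free_coherent() are the coherent-allocation callbacks of the x86 DMA-mapping operations of that era. The sketch below shows how such callbacks are typically exposed through struct dma_mapping_ops and reached from a driver via the generic DMA API; the amd_iommu_dma_ops instance and the map_single/unmap_single/map_sg/unmap_sg names are assumptions about the surrounding driver (that wiring happens elsewhere in the patch series, not in this diff), and the PCI driver function is purely illustrative.

/*
 * Sketch only, not part of this diff: how the callbacks above would
 * typically be hooked into the (2.6.26-era) struct dma_mapping_ops.
 * The ops instance name and the other callback names are assumed from
 * the rest of the AMD IOMMU driver, not taken from this hunk.
 */
static struct dma_mapping_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent  = free_coherent,
	.map_single     = map_single,
	.unmap_single   = unmap_single,
	.map_sg         = map_sg,
	.unmap_sg       = unmap_sg,
};

/*
 * Driver-side view (hypothetical example): once these ops are installed
 * for a device, a plain dma_alloc_coherent()/dma_free_coherent() pair
 * ends up in alloc_coherent()/free_coherent() above.
 */
static int example_setup_buffer(struct pci_dev *pdev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dma_handle,
				      GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, use cpu_addr from the CPU ... */

	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}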