about summary refs log tree commit diff stats
path: root/arch/x86/kernel/amd_iommu.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2008-06-26 15:28:03 -0400
committerIngo Molnar <mingo@elte.hu>2008-06-27 04:12:19 -0400
commit5d8b53cf3f8762b2230fb3d5b4e2ff78c5e701d8 (patch)
tree03f98a3264f90cdd1977785818c28f622b4814c3 /arch/x86/kernel/amd_iommu.c
parent65b050adbfd9481ec20514cfc06fa596a92cb3b5 (diff)
x86, AMD IOMMU: add mapping functions for coherent mappings
This patch adds the dma_ops functions for coherent mappings.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--arch/x86/kernel/amd_iommu.c74
1 files changed, 74 insertions, 0 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f4747fe70aaa..aab9125ac0b2 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -798,3 +798,77 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
798 spin_unlock_irqrestore(&domain->lock, flags); 798 spin_unlock_irqrestore(&domain->lock, flags);
799} 799}
800 800
801static void *alloc_coherent(struct device *dev, size_t size,
802 dma_addr_t *dma_addr, gfp_t flag)
803{
804 unsigned long flags;
805 void *virt_addr;
806 struct amd_iommu *iommu;
807 struct protection_domain *domain;
808 u16 devid;
809 phys_addr_t paddr;
810
811 virt_addr = (void *)__get_free_pages(flag, get_order(size));
812 if (!virt_addr)
813 return 0;
814
815 memset(virt_addr, 0, size);
816 paddr = virt_to_phys(virt_addr);
817
818 get_device_resources(dev, &iommu, &domain, &devid);
819
820 if (!iommu || !domain) {
821 *dma_addr = (dma_addr_t)paddr;
822 return virt_addr;
823 }
824
825 spin_lock_irqsave(&domain->lock, flags);
826
827 *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
828 size, DMA_BIDIRECTIONAL);
829
830 if (*dma_addr == bad_dma_address) {
831 free_pages((unsigned long)virt_addr, get_order(size));
832 virt_addr = NULL;
833 goto out;
834 }
835
836 if (iommu_has_npcache(iommu))
837 iommu_flush_pages(iommu, domain->id, *dma_addr, size);
838
839 if (iommu->need_sync)
840 iommu_completion_wait(iommu);
841
842out:
843 spin_unlock_irqrestore(&domain->lock, flags);
844
845 return virt_addr;
846}
847
848static void free_coherent(struct device *dev, size_t size,
849 void *virt_addr, dma_addr_t dma_addr)
850{
851 unsigned long flags;
852 struct amd_iommu *iommu;
853 struct protection_domain *domain;
854 u16 devid;
855
856 get_device_resources(dev, &iommu, &domain, &devid);
857
858 if (!iommu || !domain)
859 goto free_mem;
860
861 spin_lock_irqsave(&domain->lock, flags);
862
863 __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
864 iommu_flush_pages(iommu, domain->id, dma_addr, size);
865
866 if (iommu->need_sync)
867 iommu_completion_wait(iommu);
868
869 spin_unlock_irqrestore(&domain->lock, flags);
870
871free_mem:
872 free_pages((unsigned long)virt_addr, get_order(size));
873}
874