author     Joerg Roedel <joerg.roedel@amd.com>    2008-06-26 15:28:01 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-06-27 04:12:18 -0400
commit     4da70b9e4f8576ec906dba9240c5b6bc6584f91d (patch)
tree       2dec55824ec43c8b1f8a9a5971295ceff7422b55
parent     cb76c3229725c6dcae31da65e9ca57f434628c05 (diff)
x86, AMD IOMMU: add dma_ops mapping functions for single mappings
This patch adds the dma_ops specific mapping functions for single mappings.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/kernel/amd_iommu.c	59
1 file changed, 59 insertions, 0 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e00a3e7ba356..b4079f6bbd74 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -40,6 +40,11 @@ struct command {
 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);
 
+static int iommu_has_npcache(struct amd_iommu *iommu)
+{
+	return iommu->cap & IOMMU_CAP_NPCACHE;
+}
+
 static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
 {
 	u32 tail, head;
@@ -641,3 +646,57 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 }
 
+static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
+			     size_t size, int dir)
+{
+	unsigned long flags;
+	struct amd_iommu *iommu;
+	struct protection_domain *domain;
+	u16 devid;
+	dma_addr_t addr;
+
+	get_device_resources(dev, &iommu, &domain, &devid);
+
+	if (iommu == NULL || domain == NULL)
+		return (dma_addr_t)paddr;
+
+	spin_lock_irqsave(&domain->lock, flags);
+	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir);
+	if (addr == bad_dma_address)
+		goto out;
+
+	if (iommu_has_npcache(iommu))
+		iommu_flush_pages(iommu, domain->id, addr, size);
+
+	if (iommu->need_sync)
+		iommu_completion_wait(iommu);
+
+out:
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return addr;
+}
+
+static void unmap_single(struct device *dev, dma_addr_t dma_addr,
+			 size_t size, int dir)
+{
+	unsigned long flags;
+	struct amd_iommu *iommu;
+	struct protection_domain *domain;
+	u16 devid;
+
+	if (!get_device_resources(dev, &iommu, &domain, &devid))
+		return;
+
+	spin_lock_irqsave(&domain->lock, flags);
+
+	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
+
+	iommu_flush_pages(iommu, domain->id, dma_addr, size);
+
+	if (iommu->need_sync)
+		iommu_completion_wait(iommu);
+
+	spin_unlock_irqrestore(&domain->lock, flags);
+}
+