diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2008-06-26 15:28:02 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-06-27 04:12:19 -0400 |
commit | 65b050adbfd9481ec20514cfc06fa596a92cb3b5 (patch) | |
tree | 5ab439cbde1e41b07bcb9baca78585ab705d0359 | |
parent | 4da70b9e4f8576ec906dba9240c5b6bc6584f91d (diff) |
x86, AMD IOMMU: add mapping functions for scatter gather lists
This patch adds the dma_ops functions for mapping and unmapping scatter gather
lists.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 98 |
1 files changed, 98 insertions, 0 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index b4079f6bbd74..f4747fe70aaa 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -700,3 +700,101 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
700 | spin_unlock_irqrestore(&domain->lock, flags); | 700 | spin_unlock_irqrestore(&domain->lock, flags); |
701 | } | 701 | } |
702 | 702 | ||
703 | static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist, | ||
704 | int nelems, int dir) | ||
705 | { | ||
706 | struct scatterlist *s; | ||
707 | int i; | ||
708 | |||
709 | for_each_sg(sglist, s, nelems, i) { | ||
710 | s->dma_address = (dma_addr_t)sg_phys(s); | ||
711 | s->dma_length = s->length; | ||
712 | } | ||
713 | |||
714 | return nelems; | ||
715 | } | ||
716 | |||
/*
 * Map a scatter-gather list for DMA through the AMD IOMMU.
 *
 * Falls back to map_sg_no_iommu() when get_device_resources() finds no
 * IOMMU/protection domain for @dev.  On success returns the number of
 * elements mapped; if any element fails to map, all elements mapped so
 * far are unmapped again and 0 is returned.
 *
 * The domain lock is held for the whole mapping loop, including the
 * error-rollback path.
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;

	get_device_resources(dev, &iommu, &domain, &devid);

	/* Device not behind an IOMMU: use the 1:1 physical mapping */
	if (!iommu || !domain)
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir);

		/* NOTE(review): 0 is used as the failure sentinel here —
		 * assumes __map_single() never returns dma address 0 for a
		 * valid mapping; confirm against __map_single(). */
		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
		/* Flush IO/TLB pages if this IOMMU caches non-present
		 * entries (npcache), so the new mapping becomes visible. */
		if (iommu_has_npcache(iommu))
			iommu_flush_pages(iommu, domain->id, s->dma_address,
					  s->dma_length);
	}

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	/* Error path: undo every element mapped before the failure, then
	 * report 0 mapped elements via the common unlock/return path. */
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(iommu, domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}
771 | |||
/*
 * Unmap a scatter-gather list previously mapped by map_sg().
 *
 * Does nothing when the device is not managed by an IOMMU (the no-iommu
 * map path performed no translation that would need tearing down).
 * For each element the translation is removed, the IO/TLB pages are
 * flushed, and dma_address/dma_length are cleared, all under the
 * domain lock.
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	/* No IOMMU resources for this device: nothing was mapped */
	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(iommu, domain->priv, s->dma_address,
			       s->dma_length, dir);
		iommu_flush_pages(iommu, domain->id, s->dma_address,
				  s->dma_length);
		s->dma_address = s->dma_length = 0;
	}

	/* Wait for the IOMMU to finish pending invalidations if required */
	if (iommu->need_sync)
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}
800 | |||