about summary refs log tree commit diff stats
path: root/drivers/pci
diff options
context:
space:
mode:
authorFUJITA Tomonori <tomof@acm.org>2007-10-21 19:42:00 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-22 11:13:19 -0400
commitc03ab37cbe1db0ec9186d8de04dd3801c0af0fba (patch)
tree947ed07c3d4130475376fe663d87027637014dec /drivers/pci
parent358dd8ac53a3bdafe62e3319e30627f3fef3a7b0 (diff)
intel-iommu sg chaining support
x86_64 defines ARCH_HAS_SG_CHAIN. So if IOMMU implementations don't support sg chaining, we will get data corruption. Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: Jens Axboe <jens.axboe@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/pci')
-rw-r--r--drivers/pci/intel-iommu.c34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8f372c8f3d87..b3d70310af49 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1963,7 +1963,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
1963} 1963}
1964 1964
1965#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) 1965#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
1966static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg, 1966static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
1967 int nelems, int dir) 1967 int nelems, int dir)
1968{ 1968{
1969 int i; 1969 int i;
@@ -1973,16 +1973,17 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
1973 struct iova *iova; 1973 struct iova *iova;
1974 size_t size = 0; 1974 size_t size = 0;
1975 void *addr; 1975 void *addr;
1976 struct scatterlist *sg;
1976 1977
1977 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 1978 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1978 return; 1979 return;
1979 1980
1980 domain = find_domain(pdev); 1981 domain = find_domain(pdev);
1981 1982
1982 iova = find_iova(&domain->iovad, IOVA_PFN(sg[0].dma_address)); 1983 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
1983 if (!iova) 1984 if (!iova)
1984 return; 1985 return;
1985 for (i = 0; i < nelems; i++, sg++) { 1986 for_each_sg(sglist, sg, nelems, i) {
1986 addr = SG_ENT_VIRT_ADDRESS(sg); 1987 addr = SG_ENT_VIRT_ADDRESS(sg);
1987 size += aligned_size((u64)addr, sg->length); 1988 size += aligned_size((u64)addr, sg->length);
1988 } 1989 }
@@ -2003,21 +2004,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
2003} 2004}
2004 2005
2005static int intel_nontranslate_map_sg(struct device *hddev, 2006static int intel_nontranslate_map_sg(struct device *hddev,
2006 struct scatterlist *sg, int nelems, int dir) 2007 struct scatterlist *sglist, int nelems, int dir)
2007{ 2008{
2008 int i; 2009 int i;
2010 struct scatterlist *sg;
2009 2011
2010 for (i = 0; i < nelems; i++) { 2012 for_each_sg(sglist, sg, nelems, i) {
2011 struct scatterlist *s = &sg[i]; 2013 BUG_ON(!sg->page);
2012 BUG_ON(!s->page); 2014 sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
2013 s->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(s)); 2015 sg->dma_length = sg->length;
2014 s->dma_length = s->length;
2015 } 2016 }
2016 return nelems; 2017 return nelems;
2017} 2018}
2018 2019
2019static int intel_map_sg(struct device *hwdev, struct scatterlist *sg, 2020static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2020 int nelems, int dir) 2021 int nelems, int dir)
2021{ 2022{
2022 void *addr; 2023 void *addr;
2023 int i; 2024 int i;
@@ -2028,18 +2029,18 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
2028 size_t offset = 0; 2029 size_t offset = 0;
2029 struct iova *iova = NULL; 2030 struct iova *iova = NULL;
2030 int ret; 2031 int ret;
2031 struct scatterlist *orig_sg = sg; 2032 struct scatterlist *sg;
2032 unsigned long start_addr; 2033 unsigned long start_addr;
2033 2034
2034 BUG_ON(dir == DMA_NONE); 2035 BUG_ON(dir == DMA_NONE);
2035 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2036 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2036 return intel_nontranslate_map_sg(hwdev, sg, nelems, dir); 2037 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2037 2038
2038 domain = get_valid_domain_for_dev(pdev); 2039 domain = get_valid_domain_for_dev(pdev);
2039 if (!domain) 2040 if (!domain)
2040 return 0; 2041 return 0;
2041 2042
2042 for (i = 0; i < nelems; i++, sg++) { 2043 for_each_sg(sglist, sg, nelems, i) {
2043 addr = SG_ENT_VIRT_ADDRESS(sg); 2044 addr = SG_ENT_VIRT_ADDRESS(sg);
2044 addr = (void *)virt_to_phys(addr); 2045 addr = (void *)virt_to_phys(addr);
2045 size += aligned_size((u64)addr, sg->length); 2046 size += aligned_size((u64)addr, sg->length);
@@ -2047,7 +2048,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
2047 2048
2048 iova = __intel_alloc_iova(hwdev, domain, size); 2049 iova = __intel_alloc_iova(hwdev, domain, size);
2049 if (!iova) { 2050 if (!iova) {
2050 orig_sg->dma_length = 0; 2051 sglist->dma_length = 0;
2051 return 0; 2052 return 0;
2052 } 2053 }
2053 2054
@@ -2063,8 +2064,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
2063 2064
2064 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 2065 start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
2065 offset = 0; 2066 offset = 0;
2066 sg = orig_sg; 2067 for_each_sg(sglist, sg, nelems, i) {
2067 for (i = 0; i < nelems; i++, sg++) {
2068 addr = SG_ENT_VIRT_ADDRESS(sg); 2068 addr = SG_ENT_VIRT_ADDRESS(sg);
2069 addr = (void *)virt_to_phys(addr); 2069 addr = (void *)virt_to_phys(addr);
2070 size = aligned_size((u64)addr, sg->length); 2070 size = aligned_size((u64)addr, sg->length);