diff options
author | Dan Williams <dan.j.williams@intel.com> | 2015-08-17 10:13:26 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2015-08-17 10:13:26 -0400 |
commit | db0fa0cb015794dd19f664933d49c6ce902ec1e1 (patch) | |
tree | 3c3dddb796ddb24b24de8081df7851495cfafc11 | |
parent | 89e2a8404e4415da1edbac6ca4f7332b4a74fae2 (diff) |
scatterlist: use sg_phys()
Coccinelle cleanup to replace open-coded scatterlist-to-physical-address
translations. This is in preparation for introducing scatterlists that
reference __pfn_t.
// sg_phys.cocci: convert usage of page_to_phys(sg_page(sg)) to sg_phys(sg)
// usage: make coccicheck COCCI=sg_phys.cocci MODE=patch
virtual patch
@@
struct scatterlist *sg;
@@
- page_to_phys(sg_page(sg)) + sg->offset
+ sg_phys(sg)
@@
struct scatterlist *sg;
@@
- page_to_phys(sg_page(sg))
+ sg_phys(sg) & PAGE_MASK
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 2 | ||||
-rw-r--r-- | arch/microblaze/kernel/dma.c | 3 | ||||
-rw-r--r-- | drivers/iommu/intel-iommu.c | 4 | ||||
-rw-r--r-- | drivers/iommu/iommu.c | 2 | ||||
-rw-r--r-- | drivers/staging/android/ion/ion_chunk_heap.c | 4 |
5 files changed, 7 insertions, 8 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1ced8a0f7a52..4efaefd61c1c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -1520,7 +1520,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | |||
1520 | return -ENOMEM; | 1520 | return -ENOMEM; |
1521 | 1521 | ||
1522 | for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { | 1522 | for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { |
1523 | phys_addr_t phys = page_to_phys(sg_page(s)); | 1523 | phys_addr_t phys = sg_phys(s) & PAGE_MASK; |
1524 | unsigned int len = PAGE_ALIGN(s->offset + s->length); | 1524 | unsigned int len = PAGE_ALIGN(s->offset + s->length); |
1525 | 1525 | ||
1526 | if (!is_coherent && | 1526 | if (!is_coherent && |
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index bf4dec229437..c89da6312954 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
@@ -61,8 +61,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | |||
61 | /* FIXME this part of code is untested */ | 61 | /* FIXME this part of code is untested */ |
62 | for_each_sg(sgl, sg, nents, i) { | 62 | for_each_sg(sgl, sg, nents, i) { |
63 | sg->dma_address = sg_phys(sg); | 63 | sg->dma_address = sg_phys(sg); |
64 | __dma_sync(page_to_phys(sg_page(sg)) + sg->offset, | 64 | __dma_sync(sg_phys(sg), sg->length, direction); |
65 | sg->length, direction); | ||
66 | } | 65 | } |
67 | 66 | ||
68 | return nents; | 67 | return nents; |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a98a7b27aca1..b261850a7694 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -2094,7 +2094,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | |||
2094 | sg_res = aligned_nrpages(sg->offset, sg->length); | 2094 | sg_res = aligned_nrpages(sg->offset, sg->length); |
2095 | sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; | 2095 | sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; |
2096 | sg->dma_length = sg->length; | 2096 | sg->dma_length = sg->length; |
2097 | pteval = page_to_phys(sg_page(sg)) | prot; | 2097 | pteval = (sg_phys(sg) & PAGE_MASK) | prot; |
2098 | phys_pfn = pteval >> VTD_PAGE_SHIFT; | 2098 | phys_pfn = pteval >> VTD_PAGE_SHIFT; |
2099 | } | 2099 | } |
2100 | 2100 | ||
@@ -3620,7 +3620,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
3620 | 3620 | ||
3621 | for_each_sg(sglist, sg, nelems, i) { | 3621 | for_each_sg(sglist, sg, nelems, i) { |
3622 | BUG_ON(!sg_page(sg)); | 3622 | BUG_ON(!sg_page(sg)); |
3623 | sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; | 3623 | sg->dma_address = sg_phys(sg); |
3624 | sg->dma_length = sg->length; | 3624 | sg->dma_length = sg->length; |
3625 | } | 3625 | } |
3626 | return nelems; | 3626 | return nelems; |
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index f286090931cc..049df495c274 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -1408,7 +1408,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, | |||
1408 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); | 1408 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); |
1409 | 1409 | ||
1410 | for_each_sg(sg, s, nents, i) { | 1410 | for_each_sg(sg, s, nents, i) { |
1411 | phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; | 1411 | phys_addr_t phys = sg_phys(s); |
1412 | 1412 | ||
1413 | /* | 1413 | /* |
1414 | * We are mapping on IOMMU page boundaries, so offset within | 1414 | * We are mapping on IOMMU page boundaries, so offset within |
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c index 54746157d799..f7b6ef991cd0 100644 --- a/drivers/staging/android/ion/ion_chunk_heap.c +++ b/drivers/staging/android/ion/ion_chunk_heap.c | |||
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap, | |||
81 | err: | 81 | err: |
82 | sg = table->sgl; | 82 | sg = table->sgl; |
83 | for (i -= 1; i >= 0; i--) { | 83 | for (i -= 1; i >= 0; i--) { |
84 | gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), | 84 | gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, |
85 | sg->length); | 85 | sg->length); |
86 | sg = sg_next(sg); | 86 | sg = sg_next(sg); |
87 | } | 87 | } |
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer) | |||
109 | DMA_BIDIRECTIONAL); | 109 | DMA_BIDIRECTIONAL); |
110 | 110 | ||
111 | for_each_sg(table->sgl, sg, table->nents, i) { | 111 | for_each_sg(table->sgl, sg, table->nents, i) { |
112 | gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), | 112 | gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, |
113 | sg->length); | 113 | sg->length); |
114 | } | 114 | } |
115 | chunk_heap->allocated -= allocated_size; | 115 | chunk_heap->allocated -= allocated_size; |