author     Alexander Duyck <alexander.h.duyck@intel.com>   2016-12-14 18:04:41 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-14 19:04:07 -0500
commit     64c596b59c726514f548e15f13a87010efbff40d (patch)
tree       22131f32f19ea044111641b7211ee6242fed8c8b /arch/c6x/kernel
parent     8c16a2e209d563a1909451c769417a6f953e5b2c (diff)
arch/c6x: add option to skip sync on DMA map and unmap
This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC, which lets us avoid
invoking cache line invalidation if the driver will just handle it later via
a sync_for_cpu or sync_for_device call.
Link: http://lkml.kernel.org/r/20161110113442.76501.7673.stgit@ahduyck-blue-test.jf.intel.com
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/c6x/kernel')
 arch/c6x/kernel/dma.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
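For context, the caller-visible effect of this attribute can be sketched as
follows. This is a hypothetical driver fragment, not part of the patch: the
function name example_map_deferred_sync and the my_dev/buf/len parameters are
placeholders. It shows a buffer being mapped with DMA_ATTR_SKIP_CPU_SYNC and
synced explicitly just before device access, using the generic
dma_map_single_attrs()/dma_sync_single_for_device() helpers. With this patch,
the c6x map and unmap paths (see the diff below) return without calling
c6x_dma_sync(), so the cache maintenance happens only once, at the explicit
sync chosen by the driver.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_deferred_sync(struct device *my_dev, void *buf,
                                     size_t len)
{
        dma_addr_t handle;

        /* Map without cache maintenance; on c6x this now skips c6x_dma_sync(). */
        handle = dma_map_single_attrs(my_dev, buf, len, DMA_TO_DEVICE,
                                      DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(my_dev, handle))
                return -ENOMEM;

        /* Do the writeback/invalidate only when handing the buffer to the device. */
        dma_sync_single_for_device(my_dev, handle, len, DMA_TO_DEVICE);

        /* ... device does its work ... */

        /* The unmap may also skip the sync, since the caller manages it. */
        dma_unmap_single_attrs(my_dev, handle, len, DMA_TO_DEVICE,
                               DMA_ATTR_SKIP_CPU_SYNC);
        return 0;
}

The benefit is that the expensive cache operation is deferred to the driver,
which knows exactly which region needs maintenance and when, for example when
a mapping is reused across several transfers.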
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
index db4a6a301f5e..6752df32ef06 100644
--- a/arch/c6x/kernel/dma.c
+++ b/arch/c6x/kernel/dma.c
@@ -42,14 +42,17 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
 {
         dma_addr_t handle = virt_to_phys(page_address(page) + offset);
 
-        c6x_dma_sync(handle, size, dir);
+        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                c6x_dma_sync(handle, size, dir);
+
         return handle;
 }
 
 static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-        c6x_dma_sync(handle, size, dir);
+        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                c6x_dma_sync(handle, size, dir);
 }
 
 static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -60,7 +63,8 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
         for_each_sg(sglist, sg, nents, i) {
                 sg->dma_address = sg_phys(sg);
-                c6x_dma_sync(sg->dma_address, sg->length, dir);
+                if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                        c6x_dma_sync(sg->dma_address, sg->length, dir);
         }
 
         return nents;
@@ -72,9 +76,11 @@ static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
         struct scatterlist *sg;
         int i;
 
+        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+                return;
+
         for_each_sg(sglist, sg, nents, i)
                 c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
-
 }
 
 static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
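The scatter-gather paths get the same treatment. A hypothetical caller, again
only a sketch with placeholder names (example_map_sg_deferred_sync, my_dev,
sgl, nents), could map a whole list without per-entry cache maintenance and
then perform it in one pass with dma_sync_sg_for_device():

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_map_sg_deferred_sync(struct device *my_dev,
                                        struct scatterlist *sgl, int nents)
{
        int mapped;

        /* Map the list without per-entry cache maintenance. */
        mapped = dma_map_sg_attrs(my_dev, sgl, nents, DMA_FROM_DEVICE,
                                  DMA_ATTR_SKIP_CPU_SYNC);
        if (!mapped)
                return -EIO;

        /*
         * Invalidate all entries in one pass right before the device writes.
         * Per the DMA-API documentation, the sync takes the original nents,
         * not the value returned by dma_map_sg_attrs().
         */
        dma_sync_sg_for_device(my_dev, sgl, nents, DMA_FROM_DEVICE);

        return mapped;
}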