author    Christoph Hellwig <hch@lst.de>  2018-08-20 07:54:29 -0400
committer Christoph Hellwig <hch@lst.de>  2018-09-08 05:17:33 -0400
commit    e0a9317d900426233621fd88d7ef2ce83c9987aa
tree      20f448115376bdc024c061f520c75a789714fc8e
parent    f406f222d4b21e63b0ad24d4ddcb2b9745177b55
hexagon: use generic dma_noncoherent_ops
Switch to the generic noncoherent direct mapping implementation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
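Below, Hexagon stops registering its own struct dma_map_ops and instead implements the small set of hooks that the common noncoherent code calls. As a rough sketch (not part of the patch), the contract looks like this, with prototypes taken from the implementations further down; the authoritative declarations live in <linux/dma-noncoherent.h>:

/*
 * Sketch of the arch hooks used by the generic noncoherent DMA code.
 * Prototypes mirror the implementations added in arch/hexagon/kernel/dma.c
 * below; the real declarations are provided by <linux/dma-noncoherent.h>.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_addr,
		gfp_t flag, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_addr, unsigned long attrs);
/* Only required because ARCH_HAS_SYNC_DMA_FOR_DEVICE is selected. */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);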
-rw-r--r--  arch/hexagon/Kconfig                      2
-rw-r--r--  arch/hexagon/include/asm/Kbuild           1
-rw-r--r--  arch/hexagon/include/asm/dma-mapping.h   40
-rw-r--r--  arch/hexagon/kernel/dma.c               148
4 files changed, 11 insertions(+), 180 deletions(-)
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 89a4b22f34d9..6cee842a9b44 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -4,6 +4,7 @@ comment "Linux Kernel Configuration for Hexagon"
 
 config HEXAGON
 	def_bool y
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_NO_PREEMPT
 	select HAVE_OPROFILE
 	# Other pending projects/to-do items.
@@ -29,6 +30,7 @@ config HEXAGON
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select MODULES_USE_ELF_RELA
 	select GENERIC_CPU_DEVICES
+	select DMA_NONCOHERENT_OPS
 	---help---
 	  Qualcomm Hexagon is a processor architecture designed for high
 	  performance and low power across a wide variety of applications.
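The two new selects split the work: DMA_NONCOHERENT_OPS builds the common noncoherent dma_map_ops (kernel/dma/noncoherent.c in this kernel generation), and ARCH_HAS_SYNC_DMA_FOR_DEVICE tells that code the architecture supplies arch_sync_dma_for_device() for streaming mappings. An illustrative, simplified sketch of that call path (not the actual generic implementation; sketch_noncoherent_map_page is a made-up name):

/*
 * Illustrative sketch only, not the generic kernel/dma code itself.
 * Shape of the streaming-map path: compute the bus address, then let the
 * architecture clean/flush its caches via the hook advertised by
 * ARCH_HAS_SYNC_DMA_FOR_DEVICE, unless the caller skips the CPU sync.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <asm/page.h>

static dma_addr_t sketch_noncoherent_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, paddr, size, dir);
	return phys_to_dma(dev, paddr);
}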
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index dd2fd9c0d292..47c4da3d64a4 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += compat.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += extable.h
 generic-y += fb.h
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
deleted file mode 100644
index 263f6acbfb0f..000000000000
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * DMA operations for the Hexagon architecture
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <asm/io.h>
-
-struct device;
-
-extern const struct dma_map_ops *dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	return dma_ops;
-}
-
-#endif
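With the generic-y line added to the Kbuild file above, this deleted header is replaced by the shared asm-generic/dma-mapping.h, so the per-arch dma_ops pointer and the custom get_arch_dma_ops() disappear. As an assumption about that generic header (its exact contents vary by kernel version), the replacement behaves roughly like:

/* Sketch of the asm-generic replacement, not a verbatim quote of the file. */
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_DMA_NONCOHERENT_OPS
	return &dma_noncoherent_ops;	/* common ops built by DMA_NONCOHERENT_OPS */
#else
	return &dma_direct_ops;
#endif
}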
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 9e46556a227d..ffc4ae8e126f 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -18,32 +18,19 @@
  * 02110-1301, USA.
  */
 
-#include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/bootmem.h>
 #include <linux/genalloc.h>
-#include <asm/dma-mapping.h>
 #include <linux/module.h>
 #include <asm/page.h>
 
-#define HEXAGON_MAPPING_ERROR	0
-
-const struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
-static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
-{
-	return phys_to_virt((unsigned long) dma_addr);
-}
-
 static struct gen_pool *coherent_pool;
 
 
 /* Allocates from a pool of uncached memory that was reserved at boot time */
 
-static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_addr, gfp_t flag,
-		unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_addr,
+		gfp_t flag, unsigned long attrs)
 {
 	void *ret;
 
@@ -75,58 +62,17 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_addr, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_addr, unsigned long attrs)
 {
 	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
 }
 
-static int check_addr(const char *name, struct device *hwdev,
-		dma_addr_t bus, size_t size)
-{
-	if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
-		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
-			printk(KERN_ERR
-				"%s: overflow %Lx+%zu of device mask %Lx\n",
-				name, (long long)bus, size,
-				(long long)*hwdev->dma_mask);
-		return 0;
-	}
-	return 1;
-}
-
-static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		unsigned long attrs)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	struct scatterlist *s;
-	int i;
-
-	WARN_ON(nents == 0 || sg[0].length == 0);
-
-	for_each_sg(sg, s, nents, i) {
-		s->dma_address = sg_phys(s);
-		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
-			return 0;
-
-		s->dma_length = s->length;
+	void *addr = phys_to_virt(paddr);
 
-		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-			continue;
-
-		flush_dcache_range(dma_addr_to_virt(s->dma_address),
-				dma_addr_to_virt(s->dma_address + s->length));
-	}
-
-	return nents;
-}
-
-/*
- * address is virtual
- */
-static inline void dma_sync(void *addr, size_t size,
-		enum dma_data_direction dir)
-{
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		hexagon_clean_dcache_range((unsigned long) addr,
@@ -144,81 +90,3 @@ static inline void dma_sync(void *addr, size_t size,
 		BUG();
 	}
 }
-
-/**
- * hexagon_map_page() - maps an address for device DMA
- * @dev:	pointer to DMA device
- * @page:	pointer to page struct of DMA memory
- * @offset:	offset within page
- * @size:	size of memory to map
- * @dir:	transfer direction
- * @attrs:	pointer to DMA attrs (not used)
- *
- * Called to map a memory address to a DMA address prior
- * to accesses to/from device.
- *
- * We don't particularly have many hoops to jump through
- * so far. Straight translation between phys and virtual.
- *
- * DMA is not cache coherent so sync is necessary; this
- * seems to be a convenient place to do it.
- *
- */
-static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	dma_addr_t bus = page_to_phys(page) + offset;
-	WARN_ON(size == 0);
-
-	if (!check_addr("map_single", dev, bus, size))
-		return HEXAGON_MAPPING_ERROR;
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_sync(dma_addr_to_virt(bus), size, dir);
-
-	return bus;
-}
-
-static void hexagon_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction dir)
-{
-	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
-}
-
-static void hexagon_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		hexagon_sync_single_for_device(dev, sg_dma_address(sg),
-				sg->length, dir);
-}
-
-
-static int hexagon_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == HEXAGON_MAPPING_ERROR;
-}
-
-const struct dma_map_ops hexagon_dma_ops = {
-	.alloc		= hexagon_dma_alloc_coherent,
-	.free		= hexagon_free_coherent,
-	.map_sg		= hexagon_map_sg,
-	.map_page	= hexagon_map_page,
-	.sync_single_for_device = hexagon_sync_single_for_device,
-	.sync_sg_for_device = hexagon_sync_sg_for_device,
-	.mapping_error	= hexagon_mapping_error,
-};
-
-void __init hexagon_dma_init(void)
-{
-	if (dma_ops)
-		return;
-
-	dma_ops = &hexagon_dma_ops;
-}