about summary refs log tree commit diff stats
path: root/arch/blackfin
diff options
context:
space:
mode:
author	Christoph Hellwig <hch@lst.de>	2016-01-20 18:01:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-20 20:09:18 -0500
commit	6f62097583e799040d6d18909b670b1e4dbb614d (patch)
tree	8af751b0083030217216c7f667fef84bdc7e8335 /arch/blackfin
parent	a34a517ac96c6910a3a0aab9513035bfbed0020c (diff)
blackfin: convert to dma_map_ops
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/blackfin')
-rw-r--r--	arch/blackfin/Kconfig	1
-rw-r--r--	arch/blackfin/include/asm/dma-mapping.h	127
-rw-r--r--	arch/blackfin/kernel/dma-mapping.c	52
3 files changed, 43 insertions(+), 137 deletions(-)
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index af76634f8d98..4be2f905198d 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -14,6 +14,7 @@ config BLACKFIN
14 def_bool y 14 def_bool y
15 select HAVE_ARCH_KGDB 15 select HAVE_ARCH_KGDB
16 select HAVE_ARCH_TRACEHOOK 16 select HAVE_ARCH_TRACEHOOK
17 select HAVE_DMA_ATTRS
17 select HAVE_DYNAMIC_FTRACE 18 select HAVE_DYNAMIC_FTRACE
18 select HAVE_FTRACE_MCOUNT_RECORD 19 select HAVE_FTRACE_MCOUNT_RECORD
19 select HAVE_FUNCTION_GRAPH_TRACER 20 select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index 054d9ec57d9d..ea5a2e82db7c 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -8,36 +8,6 @@
8#define _BLACKFIN_DMA_MAPPING_H 8#define _BLACKFIN_DMA_MAPPING_H
9 9
10#include <asm/cacheflush.h> 10#include <asm/cacheflush.h>
11struct scatterlist;
12
13void *dma_alloc_coherent(struct device *dev, size_t size,
14 dma_addr_t *dma_handle, gfp_t gfp);
15void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
16 dma_addr_t dma_handle);
17
18/*
19 * Now for the API extensions over the pci_ one
20 */
21#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
22#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
23#define dma_supported(d, m) (1)
24
25static inline int
26dma_set_mask(struct device *dev, u64 dma_mask)
27{
28 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
29 return -EIO;
30
31 *dev->dma_mask = dma_mask;
32
33 return 0;
34}
35
36static inline int
37dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
38{
39 return 0;
40}
41 11
42extern void 12extern void
43__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir); 13__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
@@ -66,102 +36,13 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
66 __dma_sync(addr, size, dir); 36 __dma_sync(addr, size, dir);
67} 37}
68 38
69static inline dma_addr_t 39extern struct dma_map_ops bfin_dma_ops;
70dma_map_single(struct device *dev, void *ptr, size_t size,
71 enum dma_data_direction dir)
72{
73 _dma_sync((dma_addr_t)ptr, size, dir);
74 return (dma_addr_t) ptr;
75}
76
77static inline dma_addr_t
78dma_map_page(struct device *dev, struct page *page,
79 unsigned long offset, size_t size,
80 enum dma_data_direction dir)
81{
82 return dma_map_single(dev, page_address(page) + offset, size, dir);
83}
84
85static inline void
86dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
87 enum dma_data_direction dir)
88{
89 BUG_ON(!valid_dma_direction(dir));
90}
91
92static inline void
93dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
94 enum dma_data_direction dir)
95{
96 dma_unmap_single(dev, dma_addr, size, dir);
97}
98
99extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
100 enum dma_data_direction dir);
101
102static inline void
103dma_unmap_sg(struct device *dev, struct scatterlist *sg,
104 int nhwentries, enum dma_data_direction dir)
105{
106 BUG_ON(!valid_dma_direction(dir));
107}
108
109static inline void
110dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
111 unsigned long offset, size_t size,
112 enum dma_data_direction dir)
113{
114 BUG_ON(!valid_dma_direction(dir));
115}
116
117static inline void
118dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
119 unsigned long offset, size_t size,
120 enum dma_data_direction dir)
121{
122 _dma_sync(handle + offset, size, dir);
123}
124 40
125static inline void 41static inline struct dma_map_ops *get_dma_ops(struct device *dev)
126dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
127 enum dma_data_direction dir)
128{ 42{
129 dma_sync_single_range_for_cpu(dev, handle, 0, size, dir); 43 return &bfin_dma_ops;
130} 44}
131 45
132static inline void 46#include <asm-generic/dma-mapping-common.h>
133dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
134 enum dma_data_direction dir)
135{
136 dma_sync_single_range_for_device(dev, handle, 0, size, dir);
137}
138
139static inline void
140dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
141 enum dma_data_direction dir)
142{
143 BUG_ON(!valid_dma_direction(dir));
144}
145
146extern void
147dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
148 int nents, enum dma_data_direction dir);
149
150static inline void
151dma_cache_sync(struct device *dev, void *vaddr, size_t size,
152 enum dma_data_direction dir)
153{
154 _dma_sync((dma_addr_t)vaddr, size, dir);
155}
156
157/* drivers/base/dma-mapping.c */
158extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
159 void *cpu_addr, dma_addr_t dma_addr, size_t size);
160extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
161 void *cpu_addr, dma_addr_t dma_addr,
162 size_t size);
163
164#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
165#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
166 47
167#endif /* _BLACKFIN_DMA_MAPPING_H */ 48#endif /* _BLACKFIN_DMA_MAPPING_H */
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index df437e52d9df..771afe6e4264 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -78,8 +78,8 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
78 spin_unlock_irqrestore(&dma_page_lock, flags); 78 spin_unlock_irqrestore(&dma_page_lock, flags);
79} 79}
80 80
81void *dma_alloc_coherent(struct device *dev, size_t size, 81static void *bfin_dma_alloc(struct device *dev, size_t size,
82 dma_addr_t *dma_handle, gfp_t gfp) 82 dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
83{ 83{
84 void *ret; 84 void *ret;
85 85
@@ -92,15 +92,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
92 92
93 return ret; 93 return ret;
94} 94}
95EXPORT_SYMBOL(dma_alloc_coherent);
96 95
97void 96static void bfin_dma_free(struct device *dev, size_t size, void *vaddr,
98dma_free_coherent(struct device *dev, size_t size, void *vaddr, 97 dma_addr_t dma_handle, struct dma_attrs *attrs)
99 dma_addr_t dma_handle)
100{ 98{
101 __free_dma_pages((unsigned long)vaddr, get_pages(size)); 99 __free_dma_pages((unsigned long)vaddr, get_pages(size));
102} 100}
103EXPORT_SYMBOL(dma_free_coherent);
104 101
105/* 102/*
106 * Streaming DMA mappings 103 * Streaming DMA mappings
@@ -112,9 +109,9 @@ void __dma_sync(dma_addr_t addr, size_t size,
112} 109}
113EXPORT_SYMBOL(__dma_sync); 110EXPORT_SYMBOL(__dma_sync);
114 111
115int 112static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
116dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents, 113 int nents, enum dma_data_direction direction,
117 enum dma_data_direction direction) 114 struct dma_attrs *attrs)
118{ 115{
119 struct scatterlist *sg; 116 struct scatterlist *sg;
120 int i; 117 int i;
@@ -126,10 +123,10 @@ dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
126 123
127 return nents; 124 return nents;
128} 125}
129EXPORT_SYMBOL(dma_map_sg);
130 126
131void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list, 127static void bfin_dma_sync_sg_for_device(struct device *dev,
132 int nelems, enum dma_data_direction direction) 128 struct scatterlist *sg_list, int nelems,
129 enum dma_data_direction direction)
133{ 130{
134 struct scatterlist *sg; 131 struct scatterlist *sg;
135 int i; 132 int i;
@@ -139,4 +136,31 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
139 __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); 136 __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
140 } 137 }
141} 138}
142EXPORT_SYMBOL(dma_sync_sg_for_device); 139
140static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
141 unsigned long offset, size_t size, enum dma_data_direction dir,
142 struct dma_attrs *attrs)
143{
144 dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
145
146 _dma_sync(handle, size, dir);
147 return handle;
148}
149
150static inline void bfin_dma_sync_single_for_device(struct device *dev,
151 dma_addr_t handle, size_t size, enum dma_data_direction dir)
152{
153 _dma_sync(handle, size, dir);
154}
155
156struct dma_map_ops bfin_dma_ops = {
157 .alloc = bfin_dma_alloc,
158 .free = bfin_dma_free,
159
160 .map_page = bfin_dma_map_page,
161 .map_sg = bfin_dma_map_sg,
162
163 .sync_single_for_device = bfin_dma_sync_single_for_device,
164 .sync_sg_for_device = bfin_dma_sync_sg_for_device,
165};
166EXPORT_SYMBOL(bfin_dma_ops);