author		Barry Song <barry.song@analog.com>	2009-11-22 22:47:24 -0500
committer	Mike Frysinger <vapier@gentoo.org>	2009-12-15 00:16:01 -0500
commit		dd3b0e3e6a322184313e47e2fd5955ab113ad463 (patch)
tree		224a3f1eae739b66ecfb65bc44dc7d9a482a0df3 /arch/blackfin
parent		a00b4fe5ce4b98f7c4457fffdb392d7bfece2e78 (diff)
Blackfin: dma-mapping.h: flesh out missing DMA mapping functions
Signed-off-by: Barry Song <barry.song@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Diffstat (limited to 'arch/blackfin')
-rw-r--r--	arch/blackfin/include/asm/dma-mapping.h	121
-rw-r--r--	arch/blackfin/include/asm/dma.h			5
-rw-r--r--	arch/blackfin/kernel/dma-mapping.c		54
3 files changed, 116 insertions(+), 64 deletions(-)
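
The diff below turns several Blackfin streaming-DMA stubs into real cache
management built around a common _dma_sync() helper. For orientation, this is
roughly how a driver consumes the API being fleshed out here; a minimal
sketch, assuming a hypothetical driver (the function, buffer, and length
names are illustrative, not part of this patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical fragment: stream a CPU-filled buffer out to a device. */
static void example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Writes back dirty cache lines so the device sees the data. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* ... program the DMA engine with 'handle' and start it ... */

	/* On Blackfin this only sanity-checks the direction. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}
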
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index 7a23d824ac9..f9172ff30e5 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -7,9 +7,9 @@
 #ifndef _BLACKFIN_DMA_MAPPING_H
 #define _BLACKFIN_DMA_MAPPING_H
 
-#include <asm/scatterlist.h>
+#include <asm/cacheflush.h>
+struct scatterlist;
 
-void dma_alloc_init(unsigned long start, unsigned long end);
 void *dma_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp);
 void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
@@ -20,13 +20,51 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
  */
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_supported(d, m) (1)
+#define dma_get_cache_alignment() (32)
+#define dma_is_consistent(d, h) (1)
 
-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
 {
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
 	return 0;
 }
 
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+extern void
+__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
+static inline void
+_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	if (!__builtin_constant_p(dir)) {
+		__dma_sync(addr, size, dir);
+		return;
+	}
+
+	switch (dir) {
+	case DMA_NONE:
+		BUG();
+	case DMA_TO_DEVICE:		/* writeback only */
+		flush_dcache_range(addr, addr + size);
+		break;
+	case DMA_FROM_DEVICE:	/* invalidate only */
+	case DMA_BIDIRECTIONAL:	/* flush and invalidate */
+		/* Blackfin has no dedicated invalidate (it includes a flush) */
+		invalidate_dcache_range(addr, addr + size);
+		break;
+	}
+}
+
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.
  * The 32-bit bus address to use is returned.
@@ -34,8 +72,13 @@ int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  * Once the device is given the dma address, the device owns this memory
  * until either pci_unmap_single or pci_dma_sync_single is performed.
  */
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction direction);
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
+{
+	_dma_sync((dma_addr_t)ptr, size, dir);
+	return (dma_addr_t) ptr;
+}
 
 static inline dma_addr_t
 dma_map_page(struct device *dev, struct page *page,
@@ -53,8 +96,12 @@ dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the cpu to the buffer are guarenteed to see
  * whatever the device wrote there.
  */
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction direction);
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+}
 
 static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
@@ -80,38 +127,66 @@ dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
  * the same here.
  */
 extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction);
+		enum dma_data_direction dir);
 
 /*
  * Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction);
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nhwentries, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+}
 
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
+		unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
 }
 
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size,
-		enum dma_data_direction dir)
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
+		unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
+	_dma_sync(handle + offset, size, dir);
 }
 
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg,
-		int nents, enum dma_data_direction dir)
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+		enum dma_data_direction dir)
 {
+	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
 }
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg,
-		int nents, enum dma_data_direction dir)
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
+		enum dma_data_direction dir)
+{
+	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+}
+
+extern void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir);
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
 {
+	_dma_sync((dma_addr_t)vaddr, size, dir);
 }
 
 #endif /* _BLACKFIN_DMA_MAPPING_H */
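
A note on the _dma_sync() helper added above: when the direction argument is
a compile-time constant, GCC's __builtin_constant_p() resolves the switch at
build time, so the call collapses to a bare flush_dcache_range() or
invalidate_dcache_range(); otherwise it falls through to the out-of-line
__dma_sync(). A minimal user-space model of that dispatch pattern (the
function names here are illustrative only, not kernel API):

#include <stdio.h>

static void slow_path(int dir)
{
	printf("out-of-line sync, dir=%d\n", dir);
}

static inline void sync_demo(int dir)
{
	/* With optimization enabled, a literal argument makes this true. */
	if (!__builtin_constant_p(dir)) {
		slow_path(dir);	/* runtime direction: take the slow path */
		return;
	}
	if (dir == 1)
		printf("inlined writeback\n");	/* dead branch folds away */
	else
		printf("inlined invalidate\n");
}

int main(void)
{
	sync_demo(1);	/* constant: at -O2 this compiles to one printf */
	return 0;
}
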
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index 5eb29502bbe..bd2e62243ab 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -13,8 +13,7 @@
 #include <asm/atomic.h>
 #include <asm/blackfin.h>
 #include <asm/page.h>
-
-#define MAX_DMA_ADDRESS PAGE_OFFSET
+#include <asm-generic/dma.h>
 
 /* DMA_CONFIG Masks */
 #define DMAEN	0x0001	/* DMA Channel Enable */
@@ -257,8 +256,6 @@ static inline void enable_dma(unsigned int channel)
 	dma_ch[channel].regs->curr_y_count = 0;
 	dma_ch[channel].regs->cfg |= DMAEN;
 }
-void free_dma(unsigned int channel);
-int request_dma(unsigned int channel, const char *device_id);
 int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data);
 
 static inline void dma_disable_irq(unsigned int channel)
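
The request_dma()/free_dma() prototypes (and MAX_DMA_ADDRESS) disappear from
this header because the newly included asm-generic/dma.h supplies them. The
channel API itself is unchanged; a minimal sketch of typical use, assuming a
hypothetical driver (the channel number, device name, and callback are made
up for illustration):

#include <linux/interrupt.h>
#include <asm/dma.h>

static irqreturn_t my_dma_done(int irq, void *data)
{
	/* ... acknowledge completion of the transfer ... */
	return IRQ_HANDLED;
}

static int my_dma_setup(void)
{
	unsigned int ch = 0;	/* illustrative channel number */

	if (request_dma(ch, "my-device") < 0)
		return -EBUSY;
	set_dma_callback(ch, my_dma_done, NULL);
	/* ... configure descriptors, then ... */
	enable_dma(ch);
	/* ... and when the driver is done with the channel: */
	free_dma(ch);
	return 0;
}
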
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index 755a0f4b61a..e937f323d82 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -7,17 +7,11 @@
  */
 
 #include <linux/types.h>
-#include <linux/mm.h>
+#include <linux/gfp.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
 #include <linux/spinlock.h>
-#include <linux/device.h>
 #include <linux/dma-mapping.h>
-#include <linux/io.h>
 #include <linux/scatterlist.h>
-#include <asm/cacheflush.h>
-#include <asm/bfin-global.h>
-#include <asm/sections.h>
 
 static spinlock_t dma_page_lock;
 static unsigned long *dma_page;
@@ -26,7 +20,7 @@ static unsigned long dma_base;
 static unsigned long dma_size;
 static unsigned int dma_initialized;
 
-void dma_alloc_init(unsigned long start, unsigned long end)
+static void dma_alloc_init(unsigned long start, unsigned long end)
 {
 	spin_lock_init(&dma_page_lock);
 	dma_initialized = 0;
@@ -93,7 +87,7 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
 }
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-			dma_addr_t * dma_handle, gfp_t gfp)
+			dma_addr_t *dma_handle, gfp_t gfp)
 {
 	void *ret;
 
@@ -117,21 +111,14 @@ dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*
- * Dummy functions defined for some existing drivers
+ * Streaming DMA mappings
  */
-
-dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction direction)
+void __dma_sync(dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
 {
-	BUG_ON(direction == DMA_NONE);
-
-	invalidate_dcache_range((unsigned long)ptr,
-			(unsigned long)ptr + size);
-
-	return (dma_addr_t) ptr;
+	_dma_sync(addr, size, dir);
 }
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_sync);
 
 int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
@@ -139,30 +126,23 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	for (i = 0; i < nents; i++, sg++) {
 		sg->dma_address = (dma_addr_t) sg_virt(sg);
-
-		invalidate_dcache_range(sg_dma_address(sg),
-				sg_dma_address(sg) +
-				sg_dma_len(sg));
+		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
 	}
 
 	return nents;
 }
 EXPORT_SYMBOL(dma_map_sg);
 
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction direction)
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-}
-EXPORT_SYMBOL(dma_unmap_single);
+	int i;
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
+	for (i = 0; i < nelems; i++, sg++) {
+		sg->dma_address = (dma_addr_t) sg_virt(sg);
+		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
+	}
 }
-EXPORT_SYMBOL(dma_unmap_sg);
+EXPORT_SYMBOL(dma_sync_sg_for_device);
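
With dma_map_sg() rewritten on top of __dma_sync() and the new
dma_sync_sg_for_device() exported above, scatter-gather users follow the
usual streaming pattern; a minimal sketch, assuming an already-populated sg
table (the function and variable names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical scatter-gather submission using the reworked helpers. */
static int example_sg_tx(struct device *dev, struct scatterlist *sg, int nents)
{
	int mapped;

	/* Writes back each segment before handing it to the device. */
	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;

	/* ... program the engine from sg_dma_address()/sg_dma_len() ... */

	/* If the CPU wrote the buffers again, resync before reuse: */
	dma_sync_sg_for_device(dev, sg, mapped, DMA_TO_DEVICE);

	dma_unmap_sg(dev, sg, mapped, DMA_TO_DEVICE);
	return 0;
}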