Diffstat (limited to 'include/asm-cris/dma-mapping.h')
-rw-r--r--  include/asm-cris/dma-mapping.h  170
1 files changed, 112 insertions, 58 deletions
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index 0d770f60127a..0b5c3fdaefe1 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -1,125 +1,179 @@
+/* DMA mapping. Nothing tricky here, just virt_to_phys */
+
 #ifndef _ASM_CRIS_DMA_MAPPING_H
 #define _ASM_CRIS_DMA_MAPPING_H
 
-#include "scatterlist.h"
+#include <linux/mm.h>
+#include <linux/kernel.h>
 
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG();
-	return 0;
-}
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
 
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return 1;
-}
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+#ifdef CONFIG_PCI
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, int flag);
 
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle);
+#else
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   int flag)
 {
 	BUG();
 	return NULL;
 }
 
 static inline void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 		    dma_addr_t dma_handle)
 {
 	BUG();
 }
-
+#endif
 static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+dma_map_single(struct device *dev, void *ptr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	BUG_ON(direction == DMA_NONE);
+	return virt_to_phys(ptr);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction direction)
+{
+	printk("Map sg\n");
+	return nents;
 }
 
 static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	BUG_ON(direction == DMA_NONE);
+	return page_to_phys(page) + offset;
 }
 
 static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG();
-	return 1;
-}
 
 static inline void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	     enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
 static inline void
-dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction)
 {
-	BUG();
 }
 
 static inline void
-dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
-	    enum dma_data_direction direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			   enum dma_data_direction direction)
 {
-	BUG();
 }
 
-/* Now for the API extensions over the pci_ one */
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+}
 
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+}
 
-static inline int
-dma_get_cache_alignment(void)
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
 {
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << L1_CACHE_SHIFT_MAX);
 }
 
 static inline void
-dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
-		      unsigned long offset, size_t size,
-		      enum dma_data_direction direction)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction direction)
 {
-	BUG();
 }
 
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if(mask < 0x00ffffff)
+		return 0;
+
+	return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+	if(!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	*dev->dma_mask = mask;
+
+	return 0;
+}
+
+static inline int
+dma_get_cache_alignment(void)
+{
+	return (1 << L1_CACHE_SHIFT_MAX);
+}
+
+#define dma_is_consistent(d)	(1)
+
 static inline void
 dma_cache_sync(void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
 }
 
-#endif
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
 
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+
+#endif
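
For orientation, a minimal driver-side sketch of the streaming DMA calls as this diff leaves them defined. It is not part of the commit: the example_rx() helper, its device pointer and buffer length are hypothetical. Note the era-specific conventions visible in the header above: dma_mapping_error() takes only the dma_addr_t, dma_alloc_coherent()'s flag parameter is a plain int, and dma_map_single() on CRIS is simply virt_to_phys(), with no IOMMU or bounce buffering behind it.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hypothetical receive setup for a device on the CRIS port. */
static int example_rx(struct device *dev, size_t len)
{
	void *buf;
	dma_addr_t handle;

	/* GFP_DMA because dma_supported() falls back to it for narrow masks */
	buf = kmalloc(len, GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	/* Streaming map: on CRIS this is just virt_to_phys(buf) */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(handle)) {	/* always 0 on this port */
		kfree(buf);
		return -EIO;
	}

	/* ... program the device with 'handle' and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	kfree(buf);
	return 0;
}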