Diffstat (limited to 'include/asm-cris/dma-mapping.h')
-rw-r--r--	include/asm-cris/dma-mapping.h	170
1 file changed, 0 insertions(+), 170 deletions(-)
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
deleted file mode 100644
index da8ef8e8f842..000000000000
--- a/include/asm-cris/dma-mapping.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/* DMA mapping. Nothing tricky here, just virt_to_phys */
-
-#ifndef _ASM_CRIS_DMA_MAPPING_H
-#define _ASM_CRIS_DMA_MAPPING_H
-
-#include <linux/mm.h>
-#include <linux/kernel.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/scatterlist.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-#ifdef CONFIG_PCI
-#include <asm-generic/dma-coherent.h>
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-#else
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
-{
-	BUG();
-	return NULL;
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle)
-{
-	BUG();
-}
-#endif
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	printk("Map sg\n");
-	return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if(mask < 0x00ffffff)
-		return 0;
-
-	return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	if(!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
-static inline int
-dma_get_cache_alignment(void)
-{
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_is_consistent(d, h) (1)
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-}
-
-
-#endif
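For context, the header deleted above implemented the streaming DMA API on CRIS as a straight virt_to_phys() translation with no-op sync hooks. The sketch below is only illustrative of how a driver of that era would have called this API; the function example_map_buffer(), the device pointer dev, and the buffer length len are hypothetical and not part of this patch, and the snippet assumes a kernel build context that pulls in this header via <linux/dma-mapping.h>.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/device.h>

/* Hypothetical driver helper: map a kmalloc'd buffer for a device read.
 * With the CRIS header above, dma_map_single() reduced to virt_to_phys(buf)
 * and dma_mapping_error() always returned 0.
 */
static int example_map_buffer(struct device *dev, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	/* CPU is done writing; hand the buffer to the device. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -EIO;
	}

	/* ... program the device with 'handle' and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}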