diff options
author | Vineet Gupta <vgupta@synopsys.com> | 2013-01-18 04:42:20 -0500 |
---|---|---|
committer | Vineet Gupta <vgupta@synopsys.com> | 2013-02-15 12:45:54 -0500 |
commit | 1162b0701b14ba112d4e3fe5c27c694caf983539 (patch) | |
tree | 4255ede27a8c75378ec6c2e5cccc64fac7e41c0a /arch/arc/include/asm/dma-mapping.h | |
parent | fbd7053a7854b12b0fdc415089c59baabf25c625 (diff) |
ARC: I/O and DMA Mappings
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/include/asm/dma-mapping.h')
-rw-r--r-- | arch/arc/include/asm/dma-mapping.h | 205 |
1 files changed, 205 insertions, 0 deletions
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h new file mode 100644 index 000000000000..7fd150e97eb2 --- /dev/null +++ b/arch/arc/include/asm/dma-mapping.h | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | * DMA Mapping glue for ARC | ||
3 | * | ||
4 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef ASM_ARC_DMA_MAPPING_H | ||
12 | #define ASM_ARC_DMA_MAPPING_H | ||
13 | |||
14 | #include <asm-generic/dma-coherent.h> | ||
15 | #include <asm/cacheflush.h> | ||
16 | #include <plat/dma_addr.h> | ||
17 | |||
18 | void *dma_alloc_noncoherent(struct device *dev, size_t size, | ||
19 | dma_addr_t *dma_handle, gfp_t gfp); | ||
20 | |||
21 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | ||
22 | dma_addr_t dma_handle); | ||
23 | |||
24 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
25 | dma_addr_t *dma_handle, gfp_t gfp); | ||
26 | |||
27 | void dma_free_coherent(struct device *dev, size_t size, void *kvaddr, | ||
28 | dma_addr_t dma_handle); | ||
29 | |||
30 | /* drivers/base/dma-mapping.c */ | ||
31 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | ||
32 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
33 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
34 | void *cpu_addr, dma_addr_t dma_addr, | ||
35 | size_t size); | ||
36 | |||
37 | #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) | ||
38 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
39 | |||
/*
 * Streaming DMA Mapping API...
 * The CPU accesses the page via its normal paddr, so the page needs to be
 * explicitly made consistent before each use.
 */
45 | |||
46 | static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size, | ||
47 | enum dma_data_direction dir) | ||
48 | { | ||
49 | switch (dir) { | ||
50 | case DMA_FROM_DEVICE: | ||
51 | dma_cache_inv(paddr, size); | ||
52 | break; | ||
53 | case DMA_TO_DEVICE: | ||
54 | dma_cache_wback(paddr, size); | ||
55 | break; | ||
56 | case DMA_BIDIRECTIONAL: | ||
57 | dma_cache_wback_inv(paddr, size); | ||
58 | break; | ||
59 | default: | ||
60 | pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr); | ||
61 | } | ||
62 | } | ||
63 | |||
64 | void __arc_dma_cache_sync(unsigned long paddr, size_t size, | ||
65 | enum dma_data_direction dir); | ||
66 | |||
67 | #define _dma_cache_sync(addr, sz, dir) \ | ||
68 | do { \ | ||
69 | if (__builtin_constant_p(dir)) \ | ||
70 | __inline_dma_cache_sync(addr, sz, dir); \ | ||
71 | else \ | ||
72 | __arc_dma_cache_sync(addr, sz, dir); \ | ||
73 | } \ | ||
74 | while (0); | ||
75 | |||
76 | static inline dma_addr_t | ||
77 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, | ||
78 | enum dma_data_direction dir) | ||
79 | { | ||
80 | _dma_cache_sync((unsigned long)cpu_addr, size, dir); | ||
81 | return plat_kernel_addr_to_dma(dev, cpu_addr); | ||
82 | } | ||
83 | |||
/*
 * Unmap is a no-op here: no cache maintenance is done on unmap, and
 * there is no IOMMU state to tear down on this platform.
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		 size_t size, enum dma_data_direction dir)
{
}
89 | |||
90 | static inline dma_addr_t | ||
91 | dma_map_page(struct device *dev, struct page *page, | ||
92 | unsigned long offset, size_t size, | ||
93 | enum dma_data_direction dir) | ||
94 | { | ||
95 | unsigned long paddr = page_to_phys(page) + offset; | ||
96 | return dma_map_single(dev, (void *)paddr, size, dir); | ||
97 | } | ||
98 | |||
/* No-op, for the same reason as dma_unmap_single() above */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
	       size_t size, enum dma_data_direction dir)
{
}
104 | |||
105 | static inline int | ||
106 | dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
107 | int nents, enum dma_data_direction dir) | ||
108 | { | ||
109 | struct scatterlist *s; | ||
110 | int i; | ||
111 | |||
112 | for_each_sg(sg, s, nents, i) | ||
113 | sg->dma_address = dma_map_page(dev, sg_page(s), s->offset, | ||
114 | s->length, dir); | ||
115 | |||
116 | return nents; | ||
117 | } | ||
118 | |||
119 | static inline void | ||
120 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
121 | int nents, enum dma_data_direction dir) | ||
122 | { | ||
123 | struct scatterlist *s; | ||
124 | int i; | ||
125 | |||
126 | for_each_sg(sg, s, nents, i) | ||
127 | dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); | ||
128 | } | ||
129 | |||
/*
 * Hand the buffer back to the CPU after device DMA: invalidate the CPU
 * cache over the region so stale lines are not read.
 * NOTE: @dir is intentionally ignored — sync-for-cpu always behaves as
 * DMA_FROM_DEVICE (invalidate), regardless of the mapping's direction.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
			DMA_FROM_DEVICE);
}
137 | |||
/*
 * Hand the buffer to the device before DMA: write back dirty CPU cache
 * lines so the device sees the CPU's latest data.
 * NOTE: @dir is intentionally ignored — sync-for-device always behaves
 * as DMA_TO_DEVICE (writeback).
 */
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
			DMA_TO_DEVICE);
}
145 | |||
/*
 * Sub-range variant of dma_sync_single_for_cpu(): invalidate only
 * @size bytes starting @offset into the mapping.
 * As above, @direction is ignored; the op is always an invalidate.
 */
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
			size, DMA_FROM_DEVICE);
}
154 | |||
/*
 * Sub-range variant of dma_sync_single_for_device(): write back only
 * @size bytes starting @offset into the mapping.
 * As above, @direction is ignored; the op is always a writeback.
 */
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
			size, DMA_TO_DEVICE);
}
163 | |||
164 | static inline void | ||
165 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
166 | enum dma_data_direction dir) | ||
167 | { | ||
168 | int i; | ||
169 | |||
170 | for (i = 0; i < nelems; i++, sg++) | ||
171 | _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); | ||
172 | } | ||
173 | |||
174 | static inline void | ||
175 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
176 | enum dma_data_direction dir) | ||
177 | { | ||
178 | int i; | ||
179 | |||
180 | for (i = 0; i < nelems; i++, sg++) | ||
181 | _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); | ||
182 | } | ||
183 | |||
184 | static inline int dma_supported(struct device *dev, u64 dma_mask) | ||
185 | { | ||
186 | /* Support 32 bit DMA mask exclusively */ | ||
187 | return dma_mask == DMA_BIT_MASK(32); | ||
188 | } | ||
189 | |||
/*
 * Mappings on this platform are simple address translations and can
 * never fail, so no error is ever reported.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
194 | |||
195 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) | ||
196 | { | ||
197 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
198 | return -EIO; | ||
199 | |||
200 | *dev->dma_mask = dma_mask; | ||
201 | |||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | #endif | ||