Diffstat (limited to 'include/asm-frv/dma-mapping.h')
-rw-r--r--	include/asm-frv/dma-mapping.h	184
1 file changed, 184 insertions(+), 0 deletions(-)
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
new file mode 100644
index 000000000000..0206ab35eae0
--- /dev/null
+++ b/include/asm-frv/dma-mapping.h
@@ -0,0 +1,184 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/device.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;

void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);

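/*
 * Usage sketch (hypothetical driver code, not a fixed recipe): allocate
 * a coherent buffer, e.g. for a descriptor ring, and free it again on
 * teardown.  The "ring" and "ring_dma" names are illustrative only.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(hand ring_dma to the device; access ring from the CPU)
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */
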
/*
 * These macros should be used after a dma_map_sg call has been done
 * to get the bus addresses and lengths of each of the SG entries.
 * You should only work with the number of sg entries dma_map_sg
 * returns, or alternatively stop on the first sg_dma_len(sg) which
 * is 0.
 */
#define sg_dma_address(sg)	((unsigned long) (page_to_phys((sg)->page) + (sg)->offset))
#define sg_dma_len(sg)		((sg)->length)

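/*
 * Usage sketch (hypothetical driver code, assuming dev, sg and nents
 * come from the surrounding driver context): walk only the entries
 * dma_map_sg() actually produced, using the accessors above.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++, sg++)
 *		hw_queue_buffer(sg_dma_address(sg), sg_dma_len(sg));
 *
 * hw_queue_buffer() is an illustrative stand-in for whatever tells the
 * device about one address/length pair.
 */
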
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either dma_unmap_single or dma_sync_single_for_cpu is performed.
 */
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
				 enum dma_data_direction direction);

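/*
 * Usage sketch (hypothetical driver code): stream a buffer to the
 * device and release the mapping once the transfer has completed.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	start_device_transfer(handle, len);	(device now owns buf)
 *	wait_for_transfer_complete();
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * start_device_transfer() and wait_for_transfer_complete() are
 * illustrative stand-ins for device-specific operations.
 */
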
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided in a previous dma_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

/*
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);

/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction direction);

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

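/*
 * Usage sketch (hypothetical driver code): map one page, e.g. of a
 * pagecache buffer, to receive data from the device.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	receive_into(handle, PAGE_SIZE);	(device writes the page)
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
 *
 * receive_into() is an illustrative stand-in for a device-specific
 * receive operation.
 */
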
/*
 * Ownership transfer for streaming mappings: the for_cpu variants are
 * no-ops on this port, and the for_device variants just drain the CPU
 * write buffer so that all CPU stores have reached memory before the
 * device looks at it.
 */
static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			     enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
				enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
			    enum dma_data_direction direction)
{
	flush_write_buffers();
}

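/*
 * Usage sketch (hypothetical driver code, assuming handle, buf and len
 * from an earlier dma_map_single() call): reuse one streaming mapping
 * across several transfers by handing ownership back and forth with
 * the sync calls instead of remapping each time.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	process_received_data(buf, len);	(CPU owns the buffer)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *						(device owns it again)
 *
 * process_received_data() is an illustrative stand-in.
 */
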
static inline
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

static inline
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

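/*
 * Usage sketch (hypothetical driver code): a probe routine would
 * typically negotiate the DMA mask before doing any mapping.
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))	(want full 32-bit DMA)
 *		return -EIO;
 *
 * On failure the driver must either retry with a smaller mask or
 * give up on DMA entirely.
 */
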
static inline
int dma_get_cache_alignment(void)
{
	return 1 << L1_CACHE_SHIFT;
}

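/*
 * Usage sketch (hypothetical driver code): round buffer sizes up to
 * the value above so that two buffers used for streaming DMA never
 * share a cache line.
 *
 *	int align = dma_get_cache_alignment();
 *	size_t padded = (len + align - 1) & ~(align - 1);
 *
 *	buf = kmalloc(padded, GFP_KERNEL);
 */
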
#define dma_is_consistent(d)	(1)

/*
 * Synchronise memory obtained from dma_alloc_noncoherent(): on this
 * port it suffices to drain the CPU write buffer.
 */
static inline
void dma_cache_sync(void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	flush_write_buffers();
}

#endif /* _ASM_DMA_MAPPING_H */