Diffstat (limited to 'include/asm-powerpc/dma-mapping.h')
-rw-r--r--  include/asm-powerpc/dma-mapping.h | 182
1 files changed, 143 insertions, 39 deletions
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 2ab9baf78bb4..7e38b5fddada 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -44,26 +44,150 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
 #ifdef CONFIG_PPC64
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+	void *		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
+				size_t size, enum dma_data_direction direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+				size_t size, enum dma_data_direction direction);
+	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	int		(*dma_supported)(struct device *dev, u64 mask);
+	int		(*dac_dma_supported)(struct device *dev, u64 mask);
+	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
+};
+
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+{
+	/* We don't handle the NULL dev case for ISA for now. We could
+	 * do it via an out of line call but it is not needed for now. The
+	 * only ISA DMA device we support is the floppy and we have a hack
+	 * in the floppy driver directly to get a device for us.
+	 */
+	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+		return NULL;
+	return dev->archdata.dma_ops;
+}
 
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-		size_t size, enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-		size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction);
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (unlikely(dma_ops == NULL))
+		return 0;
+	if (dma_ops->dma_supported == NULL)
+		return 1;
+	return dma_ops->dma_supported(dev, mask);
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (unlikely(dma_ops == NULL))
+		return -EIO;
+	if (dma_ops->set_dma_mask != NULL)
+		return dma_ops->set_dma_mask(dev, dma_mask);
+	if (!dev->dma_mask || !dma_supported(dev, *dev->dma_mask))
+		return -EIO;
+	*dev->dma_mask = dma_mask;
+	return 0;
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size,
+					enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->map_single(dev, cpu_addr, size, direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->unmap_single(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->map_single(dev, page_address(page) + offset, size,
+			direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				  size_t size,
+				  enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->unmap_single(dev, dma_address, size, direction);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nhwentries,
+				enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+}
+
+
+/*
+ * Available generic sets of operations
+ */
+extern struct dma_mapping_ops dma_iommu_ops;
+extern struct dma_mapping_ops dma_direct_ops;
+
+extern unsigned long dma_direct_offset;
 
 #else /* CONFIG_PPC64 */
 
@@ -261,25 +385,5 @@ static inline void dma_cache_sync(void *vaddr, size_t size,
 	__dma_sync(vaddr, size, (int)direction);
 }
 
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-	void *		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction);
-	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	int		(*dma_supported)(struct device *dev, u64 mask);
-	int		(*dac_dma_supported)(struct device *dev, u64 mask);
-};
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_DMA_MAPPING_H */
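
For context, the following is a minimal userspace sketch (not kernel code, and not part of the patch) of the dispatch pattern the new PPC64 inlines introduce: each dma_* wrapper fetches a per-device ops table via get_dma_ops(), that is dev->archdata.dma_ops, falls back to a safe default or an error when none is installed, and otherwise indirects through the table. This is how a bus or platform can select something like dma_direct_ops or dma_iommu_ops per device. All fake_-prefixed names and the identity backend below are invented for illustration.

/*
 * Illustration only: not part of the patch.  A userspace mock of the
 * per-device ops-table dispatch added for PPC64.  Everything prefixed
 * "fake_" is invented for this sketch; the real types and tables live
 * in the kernel headers touched by this commit.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t fake_dma_addr_t;

struct fake_device;

/* Mirrors the shape of struct dma_mapping_ops (trimmed to two hooks). */
struct fake_dma_ops {
	fake_dma_addr_t (*map_single)(struct fake_device *dev, void *ptr,
				      size_t size);
	int (*dma_supported)(struct fake_device *dev, uint64_t mask);
};

/* Stands in for struct device; dma_ops plays the role of archdata.dma_ops. */
struct fake_device {
	const char *name;
	struct fake_dma_ops *dma_ops;
};

/* Mirrors get_dma_ops(): no device or no installed table means NULL. */
static struct fake_dma_ops *fake_get_dma_ops(struct fake_device *dev)
{
	if (dev == NULL || dev->dma_ops == NULL)
		return NULL;
	return dev->dma_ops;
}

/* Mirrors dma_supported(): a missing hook is treated as "supported". */
static int fake_dma_supported(struct fake_device *dev, uint64_t mask)
{
	struct fake_dma_ops *ops = fake_get_dma_ops(dev);

	if (ops == NULL)
		return 0;
	if (ops->dma_supported == NULL)
		return 1;
	return ops->dma_supported(dev, mask);
}

/* Mirrors dma_map_single(): unconditional dispatch through the table
 * (the kernel version uses BUG_ON(!dma_ops) instead of assert). */
static fake_dma_addr_t fake_dma_map_single(struct fake_device *dev, void *ptr,
					   size_t size)
{
	struct fake_dma_ops *ops = fake_get_dma_ops(dev);

	assert(ops != NULL);
	return ops->map_single(dev, ptr, size);
}

/* A trivial identity-mapping backend, standing in for a generic table. */
static fake_dma_addr_t identity_map_single(struct fake_device *dev, void *ptr,
					   size_t size)
{
	(void)dev;
	(void)size;
	return (fake_dma_addr_t)(uintptr_t)ptr;
}

static struct fake_dma_ops fake_direct_ops = {
	.map_single    = identity_map_single,
	.dma_supported = NULL,	/* NULL on purpose: "always supported" */
};

int main(void)
{
	char buf[64];
	/* Bus/platform code installs the table; drivers never pick it. */
	struct fake_device dev = { .name = "pci0", .dma_ops = &fake_direct_ops };

	printf("dma_supported: %d\n", fake_dma_supported(&dev, ~0ULL));
	printf("mapped at: %#llx\n",
	       (unsigned long long)fake_dma_map_single(&dev, buf, sizeof(buf)));
	return 0;
}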