Diffstat (limited to 'arch/tile/kernel/pci-dma.c')
-rw-r--r--  arch/tile/kernel/pci-dma.c  |  369
1 file changed, 298 insertions(+), 71 deletions(-)
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index edd856a000c..b9fe80ec108 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -14,6 +14,7 @@
 
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/tlbflush.h>
@@ -31,10 +32,9 @@
 #define PAGE_HOME_DMA PAGE_HOME_HASH
 #endif
 
-void *dma_alloc_coherent(struct device *dev,
-			 size_t size,
-			 dma_addr_t *dma_handle,
-			 gfp_t gfp)
+static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
+				     dma_addr_t *dma_handle, gfp_t gfp,
+				     struct dma_attrs *attrs)
 {
 	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
 	int node = dev_to_node(dev);
@@ -68,19 +68,19 @@ void *dma_alloc_coherent(struct device *dev,
 	}
 
 	*dma_handle = addr;
+
 	return page_address(pg);
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
- * Free memory that was allocated with dma_alloc_coherent.
+ * Free memory that was allocated with tile_dma_alloc_coherent.
  */
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle)
+static void tile_dma_free_coherent(struct device *dev, size_t size,
+				   void *vaddr, dma_addr_t dma_handle,
+				   struct dma_attrs *attrs)
 {
 	homecache_free_pages((unsigned long)vaddr, get_order(size));
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
 /*
  * The map routines "map" the specified address range for DMA
@@ -199,38 +199,182 @@ static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
 	}
 }
 
+static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+			   int nents, enum dma_data_direction direction,
+			   struct dma_attrs *attrs)
+{
+	struct scatterlist *sg;
+	int i;
 
-/*
- * dma_map_single can be passed any memory address, and there appear
- * to be no alignment constraints.
- *
- * There is a chance that the start of the buffer will share a cache
- * line with some other data that has been touched in the meantime.
- */
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-			  enum dma_data_direction direction)
+	BUG_ON(!valid_dma_direction(direction));
+
+	WARN_ON(nents == 0 || sglist->length == 0);
+
+	for_each_sg(sglist, sg, nents, i) {
+		sg->dma_address = sg_phys(sg);
+		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		sg->dma_length = sg->length;
+#endif
+	}
+
+	return nents;
+}
+
+static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+			      int nents, enum dma_data_direction direction,
+			      struct dma_attrs *attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	BUG_ON(!valid_dma_direction(direction));
+	for_each_sg(sglist, sg, nents, i) {
+		sg->dma_address = sg_phys(sg);
+		__dma_complete_pa_range(sg->dma_address, sg->length,
+					direction);
+	}
+}
+
+static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
+				    unsigned long offset, size_t size,
+				    enum dma_data_direction direction,
+				    struct dma_attrs *attrs)
 {
-	dma_addr_t dma_addr = __pa(ptr);
+	BUG_ON(!valid_dma_direction(direction));
+
+	BUG_ON(offset + size > PAGE_SIZE);
+	__dma_prep_page(page, offset, size, direction);
+
+	return page_to_pa(page) + offset;
+}
+
+static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				size_t size, enum dma_data_direction direction,
+				struct dma_attrs *attrs)
+{
+	BUG_ON(!valid_dma_direction(direction));
+
+	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
+			    dma_address & PAGE_OFFSET, size, direction);
+}
 
+static void tile_dma_sync_single_for_cpu(struct device *dev,
+					 dma_addr_t dma_handle,
+					 size_t size,
+					 enum dma_data_direction direction)
+{
 	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(size == 0);
 
-	__dma_prep_pa_range(dma_addr, size, direction);
+	__dma_complete_pa_range(dma_handle, size, direction);
+}
 
-	return dma_addr;
+static void tile_dma_sync_single_for_device(struct device *dev,
+					    dma_addr_t dma_handle, size_t size,
+					    enum dma_data_direction direction)
+{
+	__dma_prep_pa_range(dma_handle, size, direction);
 }
-EXPORT_SYMBOL(dma_map_single);
 
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		      enum dma_data_direction direction)
+static void tile_dma_sync_sg_for_cpu(struct device *dev,
+				     struct scatterlist *sglist, int nelems,
+				     enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
+	int i;
+
 	BUG_ON(!valid_dma_direction(direction));
-	__dma_complete_pa_range(dma_addr, size, direction);
+	WARN_ON(nelems == 0 || sglist->length == 0);
+
+	for_each_sg(sglist, sg, nelems, i) {
+		dma_sync_single_for_cpu(dev, sg->dma_address,
+					sg_dma_len(sg), direction);
+	}
 }
-EXPORT_SYMBOL(dma_unmap_single);
 
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	       enum dma_data_direction direction)
+static void tile_dma_sync_sg_for_device(struct device *dev,
+					struct scatterlist *sglist, int nelems,
+					enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	BUG_ON(!valid_dma_direction(direction));
+	WARN_ON(nelems == 0 || sglist->length == 0);
+
+	for_each_sg(sglist, sg, nelems, i) {
+		dma_sync_single_for_device(dev, sg->dma_address,
+					   sg_dma_len(sg), direction);
+	}
+}
+
+static inline int
+tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int
+tile_dma_supported(struct device *dev, u64 mask)
+{
+	return 1;
+}
+
+static struct dma_map_ops tile_default_dma_map_ops = {
+	.alloc = tile_dma_alloc_coherent,
+	.free = tile_dma_free_coherent,
+	.map_page = tile_dma_map_page,
+	.unmap_page = tile_dma_unmap_page,
+	.map_sg = tile_dma_map_sg,
+	.unmap_sg = tile_dma_unmap_sg,
+	.sync_single_for_cpu = tile_dma_sync_single_for_cpu,
+	.sync_single_for_device = tile_dma_sync_single_for_device,
+	.sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
+	.sync_sg_for_device = tile_dma_sync_sg_for_device,
+	.mapping_error = tile_dma_mapping_error,
+	.dma_supported = tile_dma_supported
+};
+
+struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
+EXPORT_SYMBOL(tile_dma_map_ops);
+
+/* Generic PCI DMA mapping functions */
+
+static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
+					 dma_addr_t *dma_handle, gfp_t gfp,
+					 struct dma_attrs *attrs)
+{
+	int node = dev_to_node(dev);
+	int order = get_order(size);
+	struct page *pg;
+	dma_addr_t addr;
+
+	gfp |= __GFP_ZERO;
+
+	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
+	if (pg == NULL)
+		return NULL;
+
+	addr = page_to_phys(pg);
+
+	*dma_handle = phys_to_dma(dev, addr);
+
+	return page_address(pg);
+}
+
+/*
+ * Free memory that was allocated with tile_pci_dma_alloc_coherent.
+ */
+static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
+				       void *vaddr, dma_addr_t dma_handle,
+				       struct dma_attrs *attrs)
+{
+	homecache_free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction direction,
+			       struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -242,14 +386,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
 		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
+
+		sg->dma_address = phys_to_dma(dev, sg->dma_address);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		sg->dma_length = sg->length;
+#endif
 	}
 
 	return nents;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
-		  enum dma_data_direction direction)
+static void tile_pci_dma_unmap_sg(struct device *dev,
+				  struct scatterlist *sglist, int nents,
+				  enum dma_data_direction direction,
+				  struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -261,46 +411,60 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 					direction);
 	}
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-			unsigned long offset, size_t size,
-			enum dma_data_direction direction)
+static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
+					unsigned long offset, size_t size,
+					enum dma_data_direction direction,
+					struct dma_attrs *attrs)
 {
 	BUG_ON(!valid_dma_direction(direction));
 
 	BUG_ON(offset + size > PAGE_SIZE);
 	__dma_prep_page(page, offset, size, direction);
-	return page_to_pa(page) + offset;
+
+	return phys_to_dma(dev, page_to_pa(page) + offset);
 }
-EXPORT_SYMBOL(dma_map_page);
 
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-		    enum dma_data_direction direction)
+static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				    size_t size,
+				    enum dma_data_direction direction,
+				    struct dma_attrs *attrs)
 {
 	BUG_ON(!valid_dma_direction(direction));
+
+	dma_address = dma_to_phys(dev, dma_address);
+
 	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
 			    dma_address & PAGE_OFFSET, size, direction);
 }
-EXPORT_SYMBOL(dma_unmap_page);
 
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			     size_t size, enum dma_data_direction direction)
+static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
+					     dma_addr_t dma_handle,
+					     size_t size,
+					     enum dma_data_direction direction)
 {
 	BUG_ON(!valid_dma_direction(direction));
+
+	dma_handle = dma_to_phys(dev, dma_handle);
+
 	__dma_complete_pa_range(dma_handle, size, direction);
 }
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
 
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-				size_t size, enum dma_data_direction direction)
+static void tile_pci_dma_sync_single_for_device(struct device *dev,
+						dma_addr_t dma_handle,
+						size_t size,
+						enum dma_data_direction
+						direction)
 {
+	dma_handle = dma_to_phys(dev, dma_handle);
+
 	__dma_prep_pa_range(dma_handle, size, direction);
 }
-EXPORT_SYMBOL(dma_sync_single_for_device);
 
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
-			 int nelems, enum dma_data_direction direction)
+static void tile_pci_dma_sync_sg_for_cpu(struct device *dev,
+					 struct scatterlist *sglist,
+					 int nelems,
+					 enum dma_data_direction direction)
 {
 	struct scatterlist *sg;
 	int i;
@@ -313,10 +477,11 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
 					sg_dma_len(sg), direction);
 	}
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-			    int nelems, enum dma_data_direction direction)
+static void tile_pci_dma_sync_sg_for_device(struct device *dev,
+					    struct scatterlist *sglist,
+					    int nelems,
+					    enum dma_data_direction direction)
 {
 	struct scatterlist *sg;
 	int i;
@@ -329,31 +494,93 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
 					sg_dma_len(sg), direction);
 	}
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				   unsigned long offset, size_t size,
-				   enum dma_data_direction direction)
+static inline int
+tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
+	return 0;
 }
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
 
-void dma_sync_single_range_for_device(struct device *dev,
-				      dma_addr_t dma_handle,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
+static inline int
+tile_pci_dma_supported(struct device *dev, u64 mask)
 {
-	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
+	return 1;
 }
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
 
-/*
- * dma_alloc_noncoherent() is #defined to return coherent memory,
- * so there's no need to do any flushing here.
- */
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction direction)
+static struct dma_map_ops tile_pci_default_dma_map_ops = {
+	.alloc = tile_pci_dma_alloc_coherent,
+	.free = tile_pci_dma_free_coherent,
+	.map_page = tile_pci_dma_map_page,
+	.unmap_page = tile_pci_dma_unmap_page,
+	.map_sg = tile_pci_dma_map_sg,
+	.unmap_sg = tile_pci_dma_unmap_sg,
+	.sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
+	.sync_single_for_device = tile_pci_dma_sync_single_for_device,
+	.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
+	.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
+	.mapping_error = tile_pci_dma_mapping_error,
+	.dma_supported = tile_pci_dma_supported
+};
+
+struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
+EXPORT_SYMBOL(gx_pci_dma_map_ops);
+
+/* PCI DMA mapping functions for legacy PCI devices */
+
+#ifdef CONFIG_SWIOTLB
+static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
+					 dma_addr_t *dma_handle, gfp_t gfp,
+					 struct dma_attrs *attrs)
+{
+	gfp |= GFP_DMA;
+	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+}
+
+static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
+				       void *vaddr, dma_addr_t dma_addr,
+				       struct dma_attrs *attrs)
 {
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
-EXPORT_SYMBOL(dma_cache_sync);
+
+static struct dma_map_ops pci_swiotlb_dma_ops = {
+	.alloc = tile_swiotlb_alloc_coherent,
+	.free = tile_swiotlb_free_coherent,
+	.map_page = swiotlb_map_page,
+	.unmap_page = swiotlb_unmap_page,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = swiotlb_sync_sg_for_device,
+	.dma_supported = swiotlb_dma_supported,
+	.mapping_error = swiotlb_dma_mapping_error,
+};
+
+struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+#else
+struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+#endif
+EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
+
+#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+	/* Handle legacy PCI devices with limited memory addressability. */
+	if (((dma_ops == gx_pci_dma_map_ops) ||
+	     (dma_ops == gx_legacy_pci_dma_map_ops)) &&
+	    (mask <= DMA_BIT_MASK(32))) {
+		if (mask > dev->archdata.max_direct_dma_addr)
+			mask = dev->archdata.max_direct_dma_addr;
+	}
+
+	if (!dma_supported(dev, mask))
+		return -EIO;
+	dev->coherent_dma_mask = mask;
+	return 0;
+}
+EXPORT_SYMBOL(dma_set_coherent_mask);
+#endif
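
For context, here is an illustrative sketch that is not part of the patch: it assumes a hypothetical tilegx PCI driver (the names hypothetical_setup_dma, pdev, buf and len are invented for this example) and shows how calls made through the generic DMA API would dispatch into the dma_map_ops this file installs (gx_pci_dma_map_ops, or gx_legacy_pci_dma_map_ops for legacy 32-bit devices routed through swiotlb).

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helper for a tilegx PCI driver; illustration only. */
static int hypothetical_setup_dma(struct pci_dev *pdev, void *buf, size_t len,
				  dma_addr_t *handle)
{
	struct device *dev = &pdev->dev;
	int ret;

	/*
	 * Goes through the dma_set_coherent_mask() added above, which may
	 * clamp the mask to archdata.max_direct_dma_addr for legacy devices.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/*
	 * dma_map_single() resolves get_dma_ops(dev) and invokes its
	 * ->map_page() hook, i.e. tile_pci_dma_map_page() here, or
	 * swiotlb_map_page() for a legacy device.
	 */
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	return 0;
}

A streaming mapping set up this way is later torn down with dma_unmap_single(dev, *handle, len, DMA_TO_DEVICE) once the device has finished with the buffer.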