author     Linus Torvalds <torvalds@linux-foundation.org>    2017-07-06 22:20:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2017-07-06 22:20:54 -0400
commit     f72e24a1240b78f421649c4d88f5c24ab1c896a1
tree       90bed3bf33ae0abf5636dafcc3eda3cc354612b0 /arch/tile
parent     2c669275dc3245e2866a0eea15bda8ec8d1ab8db
parent     1655cf8829d82d367d8fdb5cb58e5885d7d2a391
Merge tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping infrastructure from Christoph Hellwig:
"This is the first pull request for the new dma-mapping subsystem
In this new subsystem we'll try to properly maintain all the generic
code related to dma-mapping, and will further consolidate arch code
into common helpers.
This pull request contains:
- removal of the DMA_ERROR_CODE macro, replacing it with calls to
->mapping_error so that the dma_map_ops instances are more self
contained and can be shared across architectures (me)
- removal of the ->set_dma_mask method, which duplicates the
->dma_capable one in terms of functionality, but requires more
duplicate code.
- various updates for the coherent dma pool and related arm code
(Vladimir)
- various smaller cleanups (me)"
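
The DMA_ERROR_CODE removal works because error detection moves from comparing a
returned dma_addr_t against a per-architecture constant to asking the dma_map_ops
instance itself. The following is a simplified sketch of the resulting generic
wrapper (paraphrased from include/linux/dma-mapping.h as of this series, not a
verbatim copy of the upstream body):

    /*
     * Simplified sketch: once DMA_ERROR_CODE is gone, the error check is
     * delegated to the instance's ->mapping_error callback. An instance
     * whose mappings cannot fail simply leaves the hook unset.
     */
    static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            debug_dma_mapping_error(dev, dma_addr);
            if (ops->mapping_error)
                    return ops->mapping_error(dev, dma_addr);

            /* No callback: this dma_map_ops instance never returns errors. */
            return 0;
    }
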
* tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping: (56 commits)
ARM: dma-mapping: Remove traces of NOMMU code
ARM: NOMMU: Set ARM_DMA_MEM_BUFFERABLE for M-class cpus
ARM: NOMMU: Introduce dma operations for noMMU
drivers: dma-mapping: allow dma_common_mmap() for NOMMU
drivers: dma-coherent: Introduce default DMA pool
drivers: dma-coherent: Account dma_pfn_offset when used with device tree
dma: Take into account dma_pfn_offset
dma-mapping: replace dmam_alloc_noncoherent with dmam_alloc_attrs
dma-mapping: remove dmam_free_noncoherent
crypto: qat - avoid an uninitialized variable warning
au1100fb: remove a bogus dma_free_nonconsistent call
MAINTAINERS: add entry for dma mapping helpers
powerpc: merge __dma_set_mask into dma_set_mask
dma-mapping: remove the set_dma_mask method
powerpc/cell: use the dma_supported method for ops switching
powerpc/cell: clean up fixed mapping dma_ops initialization
tile: remove dma_supported and mapping_error methods
xen-swiotlb: remove xen_swiotlb_set_dma_mask
arm: implement ->dma_supported instead of ->set_dma_mask
mips/loongson64: implement ->dma_supported instead of ->set_dma_mask
...
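
Several of the commits above ("arm: implement ->dma_supported instead of
->set_dma_mask" and friends) rely on dma_set_mask() validating the mask through
the ops' ->dma_supported hook rather than calling a per-ops ->set_dma_mask. A
paraphrased sketch of that generic path (not the exact upstream code, which also
carries some arch-specific hooks):

    /*
     * Sketch of the generic mask-setting path once ->set_dma_mask is gone:
     * validation goes through dma_supported(), which asks the ops'
     * ->dma_supported hook; an absent hook means "any mask is accepted".
     */
    static inline int dma_supported(struct device *dev, u64 mask)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            if (!ops)
                    return 0;
            if (!ops->dma_supported)
                    return 1;       /* no hook: accept any mask */
            return ops->dma_supported(dev, mask);
    }

    static inline int dma_set_mask(struct device *dev, u64 mask)
    {
            if (!dev->dma_mask || !dma_supported(dev, mask))
                    return -EIO;

            *dev->dma_mask = mask;
            return 0;
    }

This is also what makes the tile diff below a pure deletion: a dma_supported stub
that unconditionally returns 1 and a mapping_error stub that unconditionally
returns 0 behave exactly like the absent hooks, so the trivial per-arch
implementations can simply go away.
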
Diffstat (limited to 'arch/tile')
-rw-r--r--   arch/tile/kernel/pci-dma.c   30
1 file changed, 0 insertions, 30 deletions
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 569bb6dd154a..f2abedc8a080 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -317,18 +317,6 @@ static void tile_dma_sync_sg_for_device(struct device *dev,
 	}
 }
 
-static inline int
-tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static inline int
-tile_dma_supported(struct device *dev, u64 mask)
-{
-	return 1;
-}
-
 static const struct dma_map_ops tile_default_dma_map_ops = {
 	.alloc = tile_dma_alloc_coherent,
 	.free = tile_dma_free_coherent,
@@ -340,8 +328,6 @@ static const struct dma_map_ops tile_default_dma_map_ops = {
 	.sync_single_for_device = tile_dma_sync_single_for_device,
 	.sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = tile_dma_sync_sg_for_device,
-	.mapping_error = tile_dma_mapping_error,
-	.dma_supported = tile_dma_supported
 };
 
 const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
@@ -504,18 +490,6 @@ static void tile_pci_dma_sync_sg_for_device(struct device *dev,
 	}
 }
 
-static inline int
-tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static inline int
-tile_pci_dma_supported(struct device *dev, u64 mask)
-{
-	return 1;
-}
-
 static const struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.alloc = tile_pci_dma_alloc_coherent,
 	.free = tile_pci_dma_free_coherent,
@@ -527,8 +501,6 @@ static const struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.sync_single_for_device = tile_pci_dma_sync_single_for_device,
 	.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
-	.mapping_error = tile_pci_dma_mapping_error,
-	.dma_supported = tile_pci_dma_supported
 };
 
 const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
@@ -578,8 +550,6 @@ static const struct dma_map_ops pci_hybrid_dma_ops = {
 	.sync_single_for_device = tile_pci_dma_sync_single_for_device,
 	.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
-	.mapping_error = tile_pci_dma_mapping_error,
-	.dma_supported = tile_pci_dma_supported
 };
 
 const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;