author     Christoph Hellwig <hch@lst.de>                         2016-01-20 18:02:02 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>        2016-01-20 20:09:18 -0500
commit     bd38118f9c57b22f57f9c2fccca4a82aef15cc5f (patch)
tree       11b33d85fda55e3a1719a36474a7ae5ce9a71874
parent     30081d8ea47d521e8804398b25f59b8e49a2ed0b (diff)
tile: uninline dma_set_mask
We'll soon merge <asm-generic/dma-mapping-common.h> into
<linux/dma-mapping.h> and the reference to dma_capable in the tile
dma_set_mask would create a circular dependency.
Fix this by moving the implementation out of line.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
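
The fix is an instance of a general pattern: when an inline function in a header pulls in a symbol that will soon live in the header including it, break the cycle by leaving only a declaration in the header and moving the body to a .c file. A minimal sketch of that pattern follows; the file and function names (foo.h, bar.h, foo_set_mode, bar_capable) are hypothetical and not from this commit.

/* foo.h -- before: the inline body calls bar_capable(), so foo.h
 * must see bar.h; if bar.h in turn needs foo.h, the two headers
 * include each other and the build breaks.
 *
 * static inline int foo_set_mode(struct foo_dev *dev, int mode)
 * {
 *         if (!bar_capable(dev, mode))
 *                 return -EINVAL;
 *         dev->mode = mode;
 *         return 0;
 * }
 */

/* foo.h -- after: only a declaration remains, so foo.h no longer
 * needs anything from bar.h. */
int foo_set_mode(struct foo_dev *dev, int mode);

/* foo.c -- the body moves out of line; a .c file can include both
 * headers without creating a header cycle. */
#include "foo.h"
#include "bar.h"

int foo_set_mode(struct foo_dev *dev, int mode)
{
        if (!bar_capable(dev, mode))
                return -EINVAL;
        dev->mode = mode;
        return 0;
}

The cost is an ordinary function call where the compiler previously inlined the body; for a slow path like setting a device's DMA mask at probe time, that trade-off is harmless.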
-rw-r--r--   arch/tile/include/asm/dma-mapping.h   29
-rw-r--r--   arch/tile/kernel/pci-dma.c            29
2 files changed, 30 insertions(+), 28 deletions(-)
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 96ac6cce4a32..c342736e3f1f 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -76,34 +76,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-        struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-        /*
-         * For PCI devices with 64-bit DMA addressing capability, promote
-         * the dma_ops to hybrid, with the consistent memory DMA space limited
-         * to 32-bit. For 32-bit capable devices, limit the streaming DMA
-         * address range to max_direct_dma_addr.
-         */
-        if (dma_ops == gx_pci_dma_map_ops ||
-            dma_ops == gx_hybrid_pci_dma_map_ops ||
-            dma_ops == gx_legacy_pci_dma_map_ops) {
-                if (mask == DMA_BIT_MASK(64) &&
-                    dma_ops == gx_legacy_pci_dma_map_ops)
-                        set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
-                else if (mask > dev->archdata.max_direct_dma_addr)
-                        mask = dev->archdata.max_direct_dma_addr;
-        }
-
-        if (!dev->dma_mask || !dma_supported(dev, mask))
-                return -EIO;
-
-        *dev->dma_mask = mask;
-
-        return 0;
-}
+int dma_set_mask(struct device *dev, u64 mask);
 
 /*
  * dma_alloc_noncoherent() is #defined to return coherent memory,
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 09b58703ac26..b6bc0547a4f6 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -583,6 +583,35 @@ struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
 EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
 
+int dma_set_mask(struct device *dev, u64 mask)
+{
+        struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+        /*
+         * For PCI devices with 64-bit DMA addressing capability, promote
+         * the dma_ops to hybrid, with the consistent memory DMA space limited
+         * to 32-bit. For 32-bit capable devices, limit the streaming DMA
+         * address range to max_direct_dma_addr.
+         */
+        if (dma_ops == gx_pci_dma_map_ops ||
+            dma_ops == gx_hybrid_pci_dma_map_ops ||
+            dma_ops == gx_legacy_pci_dma_map_ops) {
+                if (mask == DMA_BIT_MASK(64) &&
+                    dma_ops == gx_legacy_pci_dma_map_ops)
+                        set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+                else if (mask > dev->archdata.max_direct_dma_addr)
+                        mask = dev->archdata.max_direct_dma_addr;
+        }
+
+        if (!dev->dma_mask || !dma_supported(dev, mask))
+                return -EIO;
+
+        *dev->dma_mask = mask;
+
+        return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
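
For context on how the moved function is exercised: a driver typically asks for the widest mask it can use and falls back to 32-bit, relying on dma_set_mask() returning 0 on success and -EIO on failure. The probe function below is a hypothetical illustration, not part of this commit; on tile-gx, the 64-bit request on a device using the legacy 32-bit ops is what triggers the promotion to gx_hybrid_pci_dma_map_ops seen above.

#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)    /* hypothetical driver */
{
        /* Prefer 64-bit DMA addressing; fall back to 32-bit. */
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;    /* neither mask was accepted */
        return 0;
}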