diff options
author | Christoph Hellwig <hch@lst.de> | 2018-09-06 19:27:24 -0400 |
---|---|---|
committer | Christoph Hellwig <hch@lst.de> | 2018-10-01 10:27:00 -0400 |
commit | c6d4381220a0087ce19dbf6984d92c451bd6b364 (patch) | |
tree | 137796a9983bcc8282410110a131c0833b659b1b /drivers/pci/controller/vmd.c | |
parent | b733116feab5471c0489ab33e90fceb553215e5b (diff) |
dma-mapping: make the get_required_mask method available unconditionally
This saves some duplication for ia64, and makes the interface more
general. In the long run we want each dma_map_ops instance to fill this
out, but this will take a little more prep work.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'drivers/pci/controller/vmd.c')
-rw-r--r-- | drivers/pci/controller/vmd.c | 4 |
1 file changed, 0 insertions, 4 deletions
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index fd2dbd7eed7b..f31ed62d518c 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c | |||
@@ -404,12 +404,10 @@ static int vmd_dma_supported(struct device *dev, u64 mask) | |||
404 | return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); | 404 | return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); |
405 | } | 405 | } |
406 | 406 | ||
407 | #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK | ||
408 | static u64 vmd_get_required_mask(struct device *dev) | 407 | static u64 vmd_get_required_mask(struct device *dev) |
409 | { | 408 | { |
410 | return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); | 409 | return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); |
411 | } | 410 | } |
412 | #endif | ||
413 | 411 | ||
414 | static void vmd_teardown_dma_ops(struct vmd_dev *vmd) | 412 | static void vmd_teardown_dma_ops(struct vmd_dev *vmd) |
415 | { | 413 | { |
@@ -450,9 +448,7 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd) | |||
450 | ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); | 448 | ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); |
451 | ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); | 449 | ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); |
452 | ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); | 450 | ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); |
453 | #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK | ||
454 | ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); | 451 | ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); |
455 | #endif | ||
456 | add_dma_domain(domain); | 452 | add_dma_domain(domain); |
457 | } | 453 | } |
458 | #undef ASSIGN_VMD_DMA_OPS | 454 | #undef ASSIGN_VMD_DMA_OPS |