aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-09-06 19:27:24 -0400
committerChristoph Hellwig <hch@lst.de>2018-10-01 10:27:00 -0400
commitc6d4381220a0087ce19dbf6984d92c451bd6b364 (patch)
tree137796a9983bcc8282410110a131c0833b659b1b
parentb733116feab5471c0489ab33e90fceb553215e5b (diff)
dma-mapping: make the get_required_mask method available unconditionally
This saves some duplication for ia64, and makes the interface more general. In the long run we want each dma_map_ops instance to fill this out, but this will take a little more prep work. Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--arch/ia64/include/asm/dma-mapping.h2
-rw-r--r--arch/ia64/include/asm/machvec.h7
-rw-r--r--arch/ia64/include/asm/machvec_init.h1
-rw-r--r--arch/ia64/include/asm/machvec_sn2.h2
-rw-r--r--arch/ia64/pci/pci.c26
-rw-r--r--arch/ia64/sn/pci/pci_dma.c4
-rw-r--r--drivers/base/platform.c13
-rw-r--r--drivers/pci/controller/vmd.c4
-rw-r--r--include/linux/dma-mapping.h2
9 files changed, 13 insertions, 48 deletions
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 76e4d6632d68..522745ae67bb 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -10,8 +10,6 @@
10#include <linux/scatterlist.h> 10#include <linux/scatterlist.h>
11#include <linux/dma-debug.h> 11#include <linux/dma-debug.h>
12 12
13#define ARCH_HAS_DMA_GET_REQUIRED_MASK
14
15extern const struct dma_map_ops *dma_ops; 13extern const struct dma_map_ops *dma_ops;
16extern struct ia64_machine_vector ia64_mv; 14extern struct ia64_machine_vector ia64_mv;
17extern void set_iommu_machvec(void); 15extern void set_iommu_machvec(void);
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 267f4f170191..5133739966bc 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -44,7 +44,6 @@ typedef void ia64_mv_kernel_launch_event_t(void);
44 44
45/* DMA-mapping interface: */ 45/* DMA-mapping interface: */
46typedef void ia64_mv_dma_init (void); 46typedef void ia64_mv_dma_init (void);
47typedef u64 ia64_mv_dma_get_required_mask (struct device *);
48typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); 47typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
49 48
50/* 49/*
@@ -127,7 +126,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
127# define platform_global_tlb_purge ia64_mv.global_tlb_purge 126# define platform_global_tlb_purge ia64_mv.global_tlb_purge
128# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish 127# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
129# define platform_dma_init ia64_mv.dma_init 128# define platform_dma_init ia64_mv.dma_init
130# define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
131# define platform_dma_get_ops ia64_mv.dma_get_ops 129# define platform_dma_get_ops ia64_mv.dma_get_ops
132# define platform_irq_to_vector ia64_mv.irq_to_vector 130# define platform_irq_to_vector ia64_mv.irq_to_vector
133# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq 131# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
@@ -171,7 +169,6 @@ struct ia64_machine_vector {
171 ia64_mv_global_tlb_purge_t *global_tlb_purge; 169 ia64_mv_global_tlb_purge_t *global_tlb_purge;
172 ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish; 170 ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
173 ia64_mv_dma_init *dma_init; 171 ia64_mv_dma_init *dma_init;
174 ia64_mv_dma_get_required_mask *dma_get_required_mask;
175 ia64_mv_dma_get_ops *dma_get_ops; 172 ia64_mv_dma_get_ops *dma_get_ops;
176 ia64_mv_irq_to_vector *irq_to_vector; 173 ia64_mv_irq_to_vector *irq_to_vector;
177 ia64_mv_local_vector_to_irq *local_vector_to_irq; 174 ia64_mv_local_vector_to_irq *local_vector_to_irq;
@@ -211,7 +208,6 @@ struct ia64_machine_vector {
211 platform_global_tlb_purge, \ 208 platform_global_tlb_purge, \
212 platform_tlb_migrate_finish, \ 209 platform_tlb_migrate_finish, \
213 platform_dma_init, \ 210 platform_dma_init, \
214 platform_dma_get_required_mask, \
215 platform_dma_get_ops, \ 211 platform_dma_get_ops, \
216 platform_irq_to_vector, \ 212 platform_irq_to_vector, \
217 platform_local_vector_to_irq, \ 213 platform_local_vector_to_irq, \
@@ -286,9 +282,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
286#ifndef platform_dma_get_ops 282#ifndef platform_dma_get_ops
287# define platform_dma_get_ops dma_get_ops 283# define platform_dma_get_ops dma_get_ops
288#endif 284#endif
289#ifndef platform_dma_get_required_mask
290# define platform_dma_get_required_mask ia64_dma_get_required_mask
291#endif
292#ifndef platform_irq_to_vector 285#ifndef platform_irq_to_vector
293# define platform_irq_to_vector __ia64_irq_to_vector 286# define platform_irq_to_vector __ia64_irq_to_vector
294#endif 287#endif
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index 2b32fd06b7c6..2aafb69a3787 100644
--- a/arch/ia64/include/asm/machvec_init.h
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -4,7 +4,6 @@
4 4
5extern ia64_mv_send_ipi_t ia64_send_ipi; 5extern ia64_mv_send_ipi_t ia64_send_ipi;
6extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge; 6extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
7extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
8extern ia64_mv_irq_to_vector __ia64_irq_to_vector; 7extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
9extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq; 8extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
10extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem; 9extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index ece9fa85be88..b5153d300289 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,7 +55,6 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
55extern ia64_mv_readw_t __sn_readw_relaxed; 55extern ia64_mv_readw_t __sn_readw_relaxed;
56extern ia64_mv_readl_t __sn_readl_relaxed; 56extern ia64_mv_readl_t __sn_readl_relaxed;
57extern ia64_mv_readq_t __sn_readq_relaxed; 57extern ia64_mv_readq_t __sn_readq_relaxed;
58extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask;
59extern ia64_mv_dma_init sn_dma_init; 58extern ia64_mv_dma_init sn_dma_init;
60extern ia64_mv_migrate_t sn_migrate; 59extern ia64_mv_migrate_t sn_migrate;
61extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; 60extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
@@ -100,7 +99,6 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
100#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem 99#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
101#define platform_pci_legacy_read sn_pci_legacy_read 100#define platform_pci_legacy_read sn_pci_legacy_read
102#define platform_pci_legacy_write sn_pci_legacy_write 101#define platform_pci_legacy_write sn_pci_legacy_write
103#define platform_dma_get_required_mask sn_dma_get_required_mask
104#define platform_dma_init sn_dma_init 102#define platform_dma_init sn_dma_init
105#define platform_migrate sn_migrate 103#define platform_migrate sn_migrate
106#define platform_kernel_launch_event sn_kernel_launch_event 104#define platform_kernel_launch_event sn_kernel_launch_event
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7ccc64d5fe3e..5d71800df431 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -568,32 +568,6 @@ static void __init set_pci_dfl_cacheline_size(void)
568 pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4; 568 pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
569} 569}
570 570
571u64 ia64_dma_get_required_mask(struct device *dev)
572{
573 u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
574 u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
575 u64 mask;
576
577 if (!high_totalram) {
578 /* convert to mask just covering totalram */
579 low_totalram = (1 << (fls(low_totalram) - 1));
580 low_totalram += low_totalram - 1;
581 mask = low_totalram;
582 } else {
583 high_totalram = (1 << (fls(high_totalram) - 1));
584 high_totalram += high_totalram - 1;
585 mask = (((u64)high_totalram) << 32) + 0xffffffff;
586 }
587 return mask;
588}
589EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
590
591u64 dma_get_required_mask(struct device *dev)
592{
593 return platform_dma_get_required_mask(dev);
594}
595EXPORT_SYMBOL_GPL(dma_get_required_mask);
596
597static int __init pcibios_init(void) 571static int __init pcibios_init(void)
598{ 572{
599 set_pci_dfl_cacheline_size(); 573 set_pci_dfl_cacheline_size();
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 74c934a997bb..96eb2567718a 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -344,11 +344,10 @@ static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
344 return 0; 344 return 0;
345} 345}
346 346
347u64 sn_dma_get_required_mask(struct device *dev) 347static u64 sn_dma_get_required_mask(struct device *dev)
348{ 348{
349 return DMA_BIT_MASK(64); 349 return DMA_BIT_MASK(64);
350} 350}
351EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
352 351
353char *sn_pci_get_legacy_mem(struct pci_bus *bus) 352char *sn_pci_get_legacy_mem(struct pci_bus *bus)
354{ 353{
@@ -473,6 +472,7 @@ static struct dma_map_ops sn_dma_ops = {
473 .sync_sg_for_device = sn_dma_sync_sg_for_device, 472 .sync_sg_for_device = sn_dma_sync_sg_for_device,
474 .mapping_error = sn_dma_mapping_error, 473 .mapping_error = sn_dma_mapping_error,
475 .dma_supported = sn_dma_supported, 474 .dma_supported = sn_dma_supported,
475 .get_required_mask = sn_dma_get_required_mask,
476}; 476};
477 477
478void sn_dma_init(void) 478void sn_dma_init(void)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index dff82a3c2caa..cfe22fded980 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1179,8 +1179,7 @@ int __init platform_bus_init(void)
1179 return error; 1179 return error;
1180} 1180}
1181 1181
1182#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK 1182static u64 dma_default_get_required_mask(struct device *dev)
1183u64 dma_get_required_mask(struct device *dev)
1184{ 1183{
1185 u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT); 1184 u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
1186 u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT)); 1185 u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
@@ -1198,6 +1197,16 @@ u64 dma_get_required_mask(struct device *dev)
1198 } 1197 }
1199 return mask; 1198 return mask;
1200} 1199}
1200
1201#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
1202u64 dma_get_required_mask(struct device *dev)
1203{
1204 const struct dma_map_ops *ops = get_dma_ops(dev);
1205
1206 if (ops->get_required_mask)
1207 return ops->get_required_mask(dev);
1208 return dma_default_get_required_mask(dev);
1209}
1201EXPORT_SYMBOL_GPL(dma_get_required_mask); 1210EXPORT_SYMBOL_GPL(dma_get_required_mask);
1202#endif 1211#endif
1203 1212
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index fd2dbd7eed7b..f31ed62d518c 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -404,12 +404,10 @@ static int vmd_dma_supported(struct device *dev, u64 mask)
404 return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); 404 return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
405} 405}
406 406
407#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
408static u64 vmd_get_required_mask(struct device *dev) 407static u64 vmd_get_required_mask(struct device *dev)
409{ 408{
410 return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); 409 return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
411} 410}
412#endif
413 411
414static void vmd_teardown_dma_ops(struct vmd_dev *vmd) 412static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
415{ 413{
@@ -450,9 +448,7 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
450 ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); 448 ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
451 ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); 449 ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
452 ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); 450 ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
453#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
454 ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); 451 ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
455#endif
456 add_dma_domain(domain); 452 add_dma_domain(domain);
457} 453}
458#undef ASSIGN_VMD_DMA_OPS 454#undef ASSIGN_VMD_DMA_OPS
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index d23fc45c8208..562af6b45f23 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -130,9 +130,7 @@ struct dma_map_ops {
130 enum dma_data_direction direction); 130 enum dma_data_direction direction);
131 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); 131 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
132 int (*dma_supported)(struct device *dev, u64 mask); 132 int (*dma_supported)(struct device *dev, u64 mask);
133#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
134 u64 (*get_required_mask)(struct device *dev); 133 u64 (*get_required_mask)(struct device *dev);
135#endif
136}; 134};
137 135
138extern const struct dma_map_ops dma_direct_ops; 136extern const struct dma_map_ops dma_direct_ops;