Diffstat (limited to 'include')
-rw-r--r--  include/linux/blkdev.h           |  2
-rw-r--r--  include/linux/dma-mapping.h      | 34
-rw-r--r--  include/linux/dma-noncoherent.h  | 13
-rw-r--r--  include/linux/mmc/host.h         |  2
-rw-r--r--  include/linux/vmalloc.h          |  2
-rw-r--r--  include/xen/arm/hypervisor.h     |  2
-rw-r--r--  include/xen/arm/page-coherent.h  | 24
-rw-r--r--  include/xen/swiotlb-xen.h        |  5
8 files changed, 50 insertions, 34 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3094f2d513b2..d9db32fb75ee 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1110,6 +1110,8 @@ extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 extern void blk_queue_required_elevator_features(struct request_queue *q,
 					      unsigned int features);
+extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
+					      struct device *dev);
 
 /*
  * Number of physical segments as sent to the device.
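The new helper ties a queue's virt_boundary mask to the device's DMA merge boundary. Its body lives in block/blk-settings.c, not in this header; a minimal sketch of the expected semantics, assuming dma_get_merge_boundary() (added below in dma-mapping.h) returns 0 when the device cannot merge:

/* Sketch only; the real body is in block/blk-settings.c. */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;	/* device/IOMMU cannot merge segments */

	/* Align the queue's virt boundary with the DMA merge boundary. */
	blk_queue_virt_boundary(q, boundary);
	return true;
}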
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 14702e2d6fa8..4a1c4fca475a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -131,6 +131,7 @@ struct dma_map_ops {
 	int (*dma_supported)(struct device *dev, u64 mask);
 	u64 (*get_required_mask)(struct device *dev);
 	size_t (*max_mapping_size)(struct device *dev);
+	unsigned long (*get_merge_boundary)(struct device *dev);
 };
 
 #define DMA_MAPPING_ERROR	(~(dma_addr_t)0)
@@ -457,11 +458,13 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
+bool dma_can_mmap(struct device *dev);
 int dma_supported(struct device *dev, u64 mask);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
+unsigned long dma_get_merge_boundary(struct device *dev);
 #else /* CONFIG_HAS_DMA */
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 		struct page *page, size_t offset, size_t size,
@@ -547,6 +550,10 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 {
 	return -ENXIO;
 }
+static inline bool dma_can_mmap(struct device *dev)
+{
+	return false;
+}
 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	return 0;
@@ -567,6 +574,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
 {
 	return 0;
 }
+static inline unsigned long dma_get_merge_boundary(struct device *dev)
+{
+	return 0;
+}
 #endif /* CONFIG_HAS_DMA */
 
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
@@ -610,16 +621,14 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
 
+struct page **dma_common_find_pages(void *cpu_addr);
 void *dma_common_contiguous_remap(struct page *page, size_t size,
-			unsigned long vm_flags,
 			pgprot_t prot, const void *caller);
 
 void *dma_common_pages_remap(struct page **pages, size_t size,
-			unsigned long vm_flags, pgprot_t prot,
-			const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+			pgprot_t prot, const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
 
-int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
 bool dma_in_atomic_pool(void *start, size_t size);
 void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
 bool dma_free_from_pool(void *start, size_t size);
@@ -749,7 +758,6 @@ static inline int dma_get_cache_alignment(void)
 #ifdef CONFIG_DMA_DECLARE_COHERENT
 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 		dma_addr_t device_addr, size_t size);
-void dma_release_declared_memory(struct device *dev);
 #else
 static inline int
 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
@@ -757,11 +765,6 @@ dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 {
 	return -ENOSYS;
 }
-
-static inline void
-dma_release_declared_memory(struct device *dev)
-{
-}
 #endif /* CONFIG_DMA_DECLARE_COHERENT */
 
 static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
@@ -781,9 +784,6 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size,
 
 	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
 }
-#ifndef dma_alloc_writecombine
-#define dma_alloc_writecombine dma_alloc_wc
-#endif
 
 static inline void dma_free_wc(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr)
@@ -791,9 +791,6 @@ static inline void dma_free_wc(struct device *dev, size_t size,
 	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
 			DMA_ATTR_WRITE_COMBINE);
 }
-#ifndef dma_free_writecombine
-#define dma_free_writecombine dma_free_wc
-#endif
 
 static inline int dma_mmap_wc(struct device *dev,
 		struct vm_area_struct *vma,
@@ -803,9 +800,6 @@ static inline int dma_mmap_wc(struct device *dev,
 	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
 			DMA_ATTR_WRITE_COMBINE);
 }
-#ifndef dma_mmap_writecombine
-#define dma_mmap_writecombine dma_mmap_wc
-#endif
 
 #ifdef CONFIG_NEED_DMA_MAP_STATE
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
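The two new exports, dma_can_mmap() and dma_get_merge_boundary(), follow the usual dma-mapping dispatch pattern: consult the device's dma_map_ops and fall back to a safe default. A sketch of the dispatcher for the merge boundary, assuming the standard get_dma_ops() accessor; the real body lives in kernel/dma/mapping.c:

/* Sketch of the dispatch pattern behind the new export. */
unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* 0 means the device cannot merge segments */

	return ops->get_merge_boundary(dev);
}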
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 0bff3d7fac92..dd3de6d88fc0 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -3,6 +3,7 @@
 #define _LINUX_DMA_NONCOHERENT_H 1
 
 #include <linux/dma-mapping.h>
+#include <asm/pgtable.h>
 
 #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
 #include <asm/dma-coherence.h>
@@ -42,10 +43,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 		dma_addr_t dma_addr);
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs);
 
 #ifdef CONFIG_MMU
+/*
+ * Page protection so that devices that can't snoop CPU caches can use the
+ * memory coherently.  We default to pgprot_noncached which is usually used
+ * for ioremap as a safe bet, but architectures can override this with less
+ * strict semantics if possible.
+ */
+#ifndef pgprot_dmacoherent
+#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
+#endif
+
 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
 #else
 static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
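pgprot_dmacoherent() replaces the per-architecture arch_dma_mmap_pgprot() hook: instead of each architecture implementing the whole function, it only overrides one macro. A deliberately simplified sketch of how dma_pgprot() in kernel/dma/mapping.c can consume it (the real function also handles attrs and encrypted memory; dev_is_dma_coherent() is the existing coherence test from this header):

/* Simplified sketch: coherent devices keep cacheable mappings,
 * everything else gets the (possibly arch-overridden) DMA protection. */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
	return pgprot_dmacoherent(prot);
}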
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4704b77259ee..ba703384bea0 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -368,6 +368,7 @@ struct mmc_host {
 #define MMC_CAP2_CQE		(1 << 23)	/* Has eMMC command queue engine */
 #define MMC_CAP2_CQE_DCMD	(1 << 24)	/* CQE can issue a direct command */
 #define MMC_CAP2_AVOID_3_3V	(1 << 25)	/* Host must negotiate down from 3.3V */
+#define MMC_CAP2_MERGE_CAPABLE	(1 << 26)	/* Host can merge a segment over the segment size */
 
 	int			fixed_drv_type;	/* fixed driver type for non-removable media */
 
@@ -397,6 +398,7 @@ struct mmc_host {
 	unsigned int		retune_paused:1; /* re-tuning is temporarily disabled */
 	unsigned int		use_blk_mq:1;	/* use blk-mq */
 	unsigned int		retune_crc_disable:1; /* don't trigger retune upon crc */
+	unsigned int		can_dma_map_merge:1; /* merging can be used */
 
 	int			rescan_disable;	/* disable card detection */
 	int			rescan_entered;	/* used with nonremovable devices */
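A host driver opts in with the capability bit; the core is then expected to set can_dma_map_merge only when the block-layer helper above succeeds. A hypothetical wiring sketch (mmc_dev() and blk_queue_can_use_dma_map_merging() are real; the surrounding function name is illustrative, not verbatim core code):

/* Illustrative wiring in the core's queue setup. */
static void mmc_queue_setup_dma_merging(struct mmc_queue *mq,
					struct mmc_host *host)
{
	if (host->caps2 & MMC_CAP2_MERGE_CAPABLE)
		host->can_dma_map_merge =
			blk_queue_can_use_dma_map_merging(mq->queue,
							  mmc_dev(host));
}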
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9b21d0047710..dfa718ffdd4f 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -18,6 +18,7 @@ struct notifier_block; /* in notifier.h */
 #define VM_ALLOC		0x00000002	/* vmalloc() */
 #define VM_MAP			0x00000004	/* vmap()ed pages */
 #define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
+#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040	/* don't add guard page */
 #define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
@@ -26,6 +27,7 @@ struct notifier_block; /* in notifier.h */
  * vfree_atomic().
  */
 #define VM_FLUSH_RESET_PERMS	0x00000100	/* Reset direct map and flush TLB on unmap */
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
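VM_DMA_COHERENT tags vmalloc areas created by the DMA remapping code, which is what lets dma_common_find_pages() (declared in dma-mapping.h above) recover the backing pages from a CPU address. A sketch of that lookup, assuming find_vm_area() from this header; the real body is in the DMA remap code:

/* Sketch: map a coherent CPU address back to its page array. */
struct page **dma_common_find_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !(area->flags & VM_DMA_COHERENT))
		return NULL;	/* not a DMA-coherent remap */
	return area->pages;
}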
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
index 2982571f7cc1..43ef24dd030e 100644
--- a/include/xen/arm/hypervisor.h
+++ b/include/xen/arm/hypervisor.h
@@ -19,8 +19,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return PARAVIRT_LAZY_NONE;
 }
 
-extern const struct dma_map_ops *xen_dma_ops;
-
 #ifdef CONFIG_XEN
 void __init xen_early_init(void);
 #else
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index 2ca9164a79bf..b9cc11e887ed 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -2,15 +2,19 @@
 #ifndef _XEN_ARM_PAGE_COHERENT_H
 #define _XEN_ARM_PAGE_COHERENT_H
 
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-		dma_addr_t dev_addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
 
 #endif /* _XEN_ARM_PAGE_COHERENT_H */
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 5e4b83f83dbc..d71380f6ed0b 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -4,6 +4,11 @@
 
 #include <linux/swiotlb.h>
 
+void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+		phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+		phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+
 extern int xen_swiotlb_init(int verbose, bool early);
 extern const struct dma_map_ops xen_swiotlb_dma_ops;
 
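These two helpers replace the old __xen_dma_sync_single_* pair and take both the DMA handle and the physical address, so the arch code can pick the right cache-maintenance path for foreign versus local pages. A hedged sketch of how a swiotlb-xen mapping op might call them (the surrounding function name is illustrative, not the verbatim driver code; dev_is_dma_coherent() and DMA_ATTR_SKIP_CPU_SYNC are existing kernel interfaces):

/* Illustrative caller: flush caches for a non-coherent device after mapping. */
static void xen_swiotlb_sync_after_map(struct device *dev, dma_addr_t dev_addr,
				       phys_addr_t phys, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
}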