author		Linus Torvalds <torvalds@linux-foundation.org>	2018-06-04 13:58:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-06-04 13:58:12 -0400
commit		e5a594643a3444d39c1467040e638bf08a4e0db8
tree		e65c94ef60a51559db467055232ce1021ec263e1
parent		f956d08a56732c61a4d44e8034eeeedfc06fe721
parent		2550bbfd495227945e17ed1fa1c05bce4753b86b
Merge tag 'dma-mapping-4.18' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- replace the force_dma flag with a dma_configure bus method. (Nipun
Gupta, although one patch is incorrectly attributed to me due to a
git rebase bug)
- use GFP_DMA32 more aggressively in dma-direct. (Takashi Iwai)
- remove PCI_DMA_BUS_IS_PHYS and rely on the dma-mapping API to do the
right thing for bounce buffering.
- move dma-debug initialization to common code, and apply a few
cleanups to the dma-debug code.
- cleanup the Kconfig mess around swiotlb selection
- swiotlb comment fixup (Yisheng Xie)
- a trivial swiotlb fix. (Dan Carpenter)
- support swiotlb on RISC-V. (based on a patch from Palmer Dabbelt)
- add a new generic dma-noncoherent dma_map_ops implementation and use
it for arc, c6x and nds32.
- improve scatterlist validity checking in dma-debug. (Robin Murphy)
- add a struct device quirk to limit the dma-mask to 32-bit due to
bridge/system issues, and switch x86 to use it instead of a local
hack for VIA bridges.
- handle devices without a dma_mask more gracefully in the dma-direct
code.
* tag 'dma-mapping-4.18' of git://git.infradead.org/users/hch/dma-mapping: (48 commits)
dma-direct: don't crash on device without dma_mask
nds32: use generic dma_noncoherent_ops
nds32: implement the unmap_sg DMA operation
nds32: consolidate DMA cache maintainance routines
x86/pci-dma: switch the VIA 32-bit DMA quirk to use the struct device flag
x86/pci-dma: remove the explicit nodac and allowdac option
x86/pci-dma: remove the experimental forcesac boot option
Documentation/x86: remove a stray reference to pci-nommu.c
core, dma-direct: add a flag 32-bit dma limits
dma-mapping: remove unused gfp_t parameter to arch_dma_alloc_attrs
dma-debug: check scatterlist segments
c6x: use generic dma_noncoherent_ops
arc: use generic dma_noncoherent_ops
arc: fix arc_dma_{map,unmap}_page
arc: fix arc_dma_sync_sg_for_{cpu,device}
arc: simplify arc_dma_sync_single_for_{cpu,device}
dma-mapping: provide a generic dma-noncoherent implementation
dma-mapping: simplify Kconfig dependencies
riscv: add swiotlb support
riscv: only enable ZONE_DMA32 for 64-bit
...
141 files changed, 624 insertions, 1392 deletions
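The generic dma-noncoherent support added by this series reduces a non-coherent architecture's DMA code to a handful of hooks, as the arc and c6x conversions below show. The following is only a rough sketch of that pattern: the hook signatures and Kconfig symbols are taken from those conversions, while the allocation path and the arch_cache_wback()/arch_cache_inv() primitives are illustrative placeholders, not real kernel interfaces.

/*
 * Kconfig side (sketch):
 *   select ARCH_HAS_SYNC_DMA_FOR_CPU
 *   select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 *   select DMA_NONCOHERENT_OPS
 */
#include <linux/dma-noncoherent.h>
#include <linux/mm.h>

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *vaddr = (void *)__get_free_pages(gfp, get_order(size));

	if (!vaddr)
		return NULL;
	*dma_handle = virt_to_phys(vaddr);
	/* a real arch would also remap/uncache the region here as needed */
	return vaddr;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	arch_cache_wback(paddr, size);	/* placeholder cache primitive */
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	arch_cache_inv(paddr, size);	/* placeholder cache primitive */
}

The common code in lib/dma-noncoherent.c builds the dma_map_ops on top of these hooks, which is why the per-arch map_page/unmap_page, scatterlist walking and sync_sg boilerplate removed in the diffs below no longer needs to exist.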
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f2040d46f095..cc0ac035b8fe 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1705,7 +1705,6 @@ | |||
1705 | nopanic | 1705 | nopanic |
1706 | merge | 1706 | merge |
1707 | nomerge | 1707 | nomerge |
1708 | forcesac | ||
1709 | soft | 1708 | soft |
1710 | pt [x86, IA-64] | 1709 | pt [x86, IA-64] |
1711 | nobypass [PPC/POWERNV] | 1710 | nobypass [PPC/POWERNV] |
diff --git a/Documentation/features/io/dma-api-debug/arch-support.txt b/Documentation/features/io/dma-api-debug/arch-support.txt
deleted file mode 100644
index e438ed675623..000000000000
--- a/Documentation/features/io/dma-api-debug/arch-support.txt
+++ /dev/null
@@ -1,31 +0,0 @@ | |||
1 | # | ||
2 | # Feature name: dma-api-debug | ||
3 | # Kconfig: HAVE_DMA_API_DEBUG | ||
4 | # description: arch supports DMA debug facilities | ||
5 | # | ||
6 | ----------------------- | ||
7 | | arch |status| | ||
8 | ----------------------- | ||
9 | | alpha: | TODO | | ||
10 | | arc: | TODO | | ||
11 | | arm: | ok | | ||
12 | | arm64: | ok | | ||
13 | | c6x: | ok | | ||
14 | | h8300: | TODO | | ||
15 | | hexagon: | TODO | | ||
16 | | ia64: | ok | | ||
17 | | m68k: | TODO | | ||
18 | | microblaze: | ok | | ||
19 | | mips: | ok | | ||
20 | | nios2: | TODO | | ||
21 | | openrisc: | TODO | | ||
22 | | parisc: | TODO | | ||
23 | | powerpc: | ok | | ||
24 | | s390: | ok | | ||
25 | | sh: | ok | | ||
26 | | sparc: | ok | | ||
27 | | um: | TODO | | ||
28 | | unicore32: | TODO | | ||
29 | | x86: | ok | | ||
30 | | xtensa: | ok | | ||
31 | ----------------------- | ||
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index b297c48389b9..8d109ef67ab6 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -187,9 +187,9 @@ PCI | |||
187 | 187 | ||
188 | IOMMU (input/output memory management unit) | 188 | IOMMU (input/output memory management unit) |
189 | 189 | ||
190 | Currently four x86-64 PCI-DMA mapping implementations exist: | 190 | Multiple x86-64 PCI-DMA mapping implementations exist, for example: |
191 | 191 | ||
192 | 1. <arch/x86_64/kernel/pci-nommu.c>: use no hardware/software IOMMU at all | 192 | 1. <lib/dma-direct.c>: use no hardware/software IOMMU at all |
193 | (e.g. because you have < 3 GB memory). | 193 | (e.g. because you have < 3 GB memory). |
194 | Kernel boot message: "PCI-DMA: Disabling IOMMU" | 194 | Kernel boot message: "PCI-DMA: Disabling IOMMU" |
195 | 195 | ||
@@ -208,7 +208,7 @@ IOMMU (input/output memory management unit) | |||
208 | Kernel boot message: "PCI-DMA: Using Calgary IOMMU" | 208 | Kernel boot message: "PCI-DMA: Using Calgary IOMMU" |
209 | 209 | ||
210 | iommu=[<size>][,noagp][,off][,force][,noforce][,leak[=<nr_of_leak_pages>] | 210 | iommu=[<size>][,noagp][,off][,force][,noforce][,leak[=<nr_of_leak_pages>] |
211 | [,memaper[=<order>]][,merge][,forcesac][,fullflush][,nomerge] | 211 | [,memaper[=<order>]][,merge][,fullflush][,nomerge] |
212 | [,noaperture][,calgary] | 212 | [,noaperture][,calgary] |
213 | 213 | ||
214 | General iommu options: | 214 | General iommu options: |
@@ -235,14 +235,7 @@ IOMMU (input/output memory management unit) | |||
235 | (experimental). | 235 | (experimental). |
236 | nomerge Don't do scatter-gather (SG) merging. | 236 | nomerge Don't do scatter-gather (SG) merging. |
237 | noaperture Ask the IOMMU not to touch the aperture for AGP. | 237 | noaperture Ask the IOMMU not to touch the aperture for AGP. |
238 | forcesac Force single-address cycle (SAC) mode for masks <40bits | ||
239 | (experimental). | ||
240 | noagp Don't initialize the AGP driver and use full aperture. | 238 | noagp Don't initialize the AGP driver and use full aperture. |
241 | allowdac Allow double-address cycle (DAC) mode, i.e. DMA >4GB. | ||
242 | DAC is used with 32-bit PCI to push a 64-bit address in | ||
243 | two cycles. When off all DMA over >4GB is forced through | ||
244 | an IOMMU or software bounce buffering. | ||
245 | nodac Forbid DAC mode, i.e. DMA >4GB. | ||
246 | panic Always panic when IOMMU overflows. | 239 | panic Always panic when IOMMU overflows. |
247 | calgary Use the Calgary IOMMU if it is available | 240 | calgary Use the Calgary IOMMU if it is available |
248 | 241 | ||
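The allowdac/nodac/forcesac boot options removed above are superseded by the per-device 32-bit DMA limit mentioned in the pull message: quirk code marks the affected device and dma-direct then refuses larger DMA masks for it. A rough illustrative sketch is below; this is not code from this series, and the dma_32bit_limit field name is an assumption recalled from these patches that should be checked against the tree.

#include <linux/pci.h>

/*
 * Hypothetical quirk: mark devices behind a known-broken bridge as
 * limited to 32-bit DMA, so dma-direct rejects larger DMA masks.
 * (The dma_32bit_limit field name is assumed, not verified here.)
 */
static void quirk_dma_32bit_limit(struct pci_dev *pdev)
{
	pdev->dev.dma_32bit_limit = true;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_dma_32bit_limit);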
diff --git a/MAINTAINERS b/MAINTAINERS
index cbc6bbf4ef9c..0c411a53b424 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4330,12 +4330,14 @@ W: http://git.infradead.org/users/hch/dma-mapping.git | |||
4330 | S: Supported | 4330 | S: Supported |
4331 | F: lib/dma-debug.c | 4331 | F: lib/dma-debug.c |
4332 | F: lib/dma-direct.c | 4332 | F: lib/dma-direct.c |
4333 | F: lib/dma-noncoherent.c | ||
4333 | F: lib/dma-virt.c | 4334 | F: lib/dma-virt.c |
4334 | F: drivers/base/dma-mapping.c | 4335 | F: drivers/base/dma-mapping.c |
4335 | F: drivers/base/dma-coherent.c | 4336 | F: drivers/base/dma-coherent.c |
4336 | F: include/asm-generic/dma-mapping.h | 4337 | F: include/asm-generic/dma-mapping.h |
4337 | F: include/linux/dma-direct.h | 4338 | F: include/linux/dma-direct.h |
4338 | F: include/linux/dma-mapping.h | 4339 | F: include/linux/dma-mapping.h |
4340 | F: include/linux/dma-noncoherent.h | ||
4339 | 4341 | ||
4340 | DME1737 HARDWARE MONITOR DRIVER | 4342 | DME1737 HARDWARE MONITOR DRIVER |
4341 | M: Juerg Haefliger <juergh@gmail.com> | 4343 | M: Juerg Haefliger <juergh@gmail.com> |
diff --git a/arch/Kconfig b/arch/Kconfig
index 75dd23acf133..b624634daea6 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -278,9 +278,6 @@ config HAVE_CLK | |||
278 | The <linux/clk.h> calls support software clock gating and | 278 | The <linux/clk.h> calls support software clock gating and |
279 | thus are a key power management tool on many systems. | 279 | thus are a key power management tool on many systems. |
280 | 280 | ||
281 | config HAVE_DMA_API_DEBUG | ||
282 | bool | ||
283 | |||
284 | config HAVE_HW_BREAKPOINT | 281 | config HAVE_HW_BREAKPOINT |
285 | bool | 282 | bool |
286 | depends on PERF_EVENTS | 283 | depends on PERF_EVENTS |
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index f19dc31288c8..94af0c7f494a 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -10,6 +10,8 @@ config ALPHA | |||
10 | select HAVE_OPROFILE | 10 | select HAVE_OPROFILE |
11 | select HAVE_PCSPKR_PLATFORM | 11 | select HAVE_PCSPKR_PLATFORM |
12 | select HAVE_PERF_EVENTS | 12 | select HAVE_PERF_EVENTS |
13 | select NEED_DMA_MAP_STATE | ||
14 | select NEED_SG_DMA_LENGTH | ||
13 | select VIRT_TO_BUS | 15 | select VIRT_TO_BUS |
14 | select GENERIC_IRQ_PROBE | 16 | select GENERIC_IRQ_PROBE |
15 | select AUTO_IRQ_AFFINITY if SMP | 17 | select AUTO_IRQ_AFFINITY if SMP |
@@ -64,15 +66,6 @@ config ZONE_DMA | |||
64 | bool | 66 | bool |
65 | default y | 67 | default y |
66 | 68 | ||
67 | config ARCH_DMA_ADDR_T_64BIT | ||
68 | def_bool y | ||
69 | |||
70 | config NEED_DMA_MAP_STATE | ||
71 | def_bool y | ||
72 | |||
73 | config NEED_SG_DMA_LENGTH | ||
74 | def_bool y | ||
75 | |||
76 | config GENERIC_ISA_DMA | 69 | config GENERIC_ISA_DMA |
77 | bool | 70 | bool |
78 | default y | 71 | default y |
@@ -346,9 +339,6 @@ config PCI_DOMAINS | |||
346 | config PCI_SYSCALL | 339 | config PCI_SYSCALL |
347 | def_bool PCI | 340 | def_bool PCI |
348 | 341 | ||
349 | config IOMMU_HELPER | ||
350 | def_bool PCI | ||
351 | |||
352 | config ALPHA_NONAME | 342 | config ALPHA_NONAME |
353 | bool | 343 | bool |
354 | depends on ALPHA_BOOK1 || ALPHA_NONAME_CH | 344 | depends on ALPHA_BOOK1 || ALPHA_NONAME_CH |
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index b9ec55351924..cf6bc1e64d66 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -56,11 +56,6 @@ struct pci_controller { | |||
56 | 56 | ||
57 | /* IOMMU controls. */ | 57 | /* IOMMU controls. */ |
58 | 58 | ||
59 | /* The PCI address space does not equal the physical memory address space. | ||
60 | The networking and block device layers use this boolean for bounce buffer | ||
61 | decisions. */ | ||
62 | #define PCI_DMA_BUS_IS_PHYS 0 | ||
63 | |||
64 | /* TODO: integrate with include/asm-generic/pci.h ? */ | 59 | /* TODO: integrate with include/asm-generic/pci.h ? */ |
65 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | 60 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) |
66 | { | 61 | { |
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index d76bf4a83740..89d47eac18b2 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -9,11 +9,15 @@ | |||
9 | config ARC | 9 | config ARC |
10 | def_bool y | 10 | def_bool y |
11 | select ARC_TIMERS | 11 | select ARC_TIMERS |
12 | select ARCH_HAS_SYNC_DMA_FOR_CPU | ||
13 | select ARCH_HAS_SYNC_DMA_FOR_DEVICE | ||
12 | select ARCH_HAS_SG_CHAIN | 14 | select ARCH_HAS_SG_CHAIN |
13 | select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC | 15 | select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC |
14 | select BUILDTIME_EXTABLE_SORT | 16 | select BUILDTIME_EXTABLE_SORT |
15 | select CLONE_BACKWARDS | 17 | select CLONE_BACKWARDS |
16 | select COMMON_CLK | 18 | select COMMON_CLK |
19 | select DMA_NONCOHERENT_OPS | ||
20 | select DMA_NONCOHERENT_MMAP | ||
17 | select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) | 21 | select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) |
18 | select GENERIC_CLOCKEVENTS | 22 | select GENERIC_CLOCKEVENTS |
19 | select GENERIC_FIND_FIRST_BIT | 23 | select GENERIC_FIND_FIRST_BIT |
@@ -453,16 +457,11 @@ config ARC_HAS_PAE40 | |||
453 | default n | 457 | default n |
454 | depends on ISA_ARCV2 | 458 | depends on ISA_ARCV2 |
455 | select HIGHMEM | 459 | select HIGHMEM |
460 | select PHYS_ADDR_T_64BIT | ||
456 | help | 461 | help |
457 | Enable access to physical memory beyond 4G, only supported on | 462 | Enable access to physical memory beyond 4G, only supported on |
458 | ARC cores with 40 bit Physical Addressing support | 463 | ARC cores with 40 bit Physical Addressing support |
459 | 464 | ||
460 | config ARCH_PHYS_ADDR_T_64BIT | ||
461 | def_bool ARC_HAS_PAE40 | ||
462 | |||
463 | config ARCH_DMA_ADDR_T_64BIT | ||
464 | bool | ||
465 | |||
466 | config ARC_KVADDR_SIZE | 465 | config ARC_KVADDR_SIZE |
467 | int "Kernel Virtual Address Space size (MB)" | 466 | int "Kernel Virtual Address Space size (MB)" |
468 | range 0 512 | 467 | range 0 512 |
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 4bd5d4369e05..bbdcb955e18f 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -2,6 +2,7 @@ | |||
2 | generic-y += bugs.h | 2 | generic-y += bugs.h |
3 | generic-y += device.h | 3 | generic-y += device.h |
4 | generic-y += div64.h | 4 | generic-y += div64.h |
5 | generic-y += dma-mapping.h | ||
5 | generic-y += emergency-restart.h | 6 | generic-y += emergency-restart.h |
6 | generic-y += extable.h | 7 | generic-y += extable.h |
7 | generic-y += fb.h | 8 | generic-y += fb.h |
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
deleted file mode 100644
index 7a16824bfe98..000000000000
--- a/arch/arc/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,21 +0,0 @@ | |||
1 | /* | ||
2 | * DMA Mapping glue for ARC | ||
3 | * | ||
4 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef ASM_ARC_DMA_MAPPING_H | ||
12 | #define ASM_ARC_DMA_MAPPING_H | ||
13 | |||
14 | extern const struct dma_map_ops arc_dma_ops; | ||
15 | |||
16 | static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | ||
17 | { | ||
18 | return &arc_dma_ops; | ||
19 | } | ||
20 | |||
21 | #endif | ||
diff --git a/arch/arc/include/asm/pci.h b/arch/arc/include/asm/pci.h
index ba56c23c1b20..4ff53c041c64 100644
--- a/arch/arc/include/asm/pci.h
+++ b/arch/arc/include/asm/pci.h
@@ -16,12 +16,6 @@ | |||
16 | #define PCIBIOS_MIN_MEM 0x100000 | 16 | #define PCIBIOS_MIN_MEM 0x100000 |
17 | 17 | ||
18 | #define pcibios_assign_all_busses() 1 | 18 | #define pcibios_assign_all_busses() 1 |
19 | /* | ||
20 | * The PCI address space does equal the physical memory address space. | ||
21 | * The networking and block device layers use this boolean for bounce | ||
22 | * buffer decisions. | ||
23 | */ | ||
24 | #define PCI_DMA_BUS_IS_PHYS 1 | ||
25 | 19 | ||
26 | #endif /* __KERNEL__ */ | 20 | #endif /* __KERNEL__ */ |
27 | 21 | ||
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 1dcc404b5aec..8c1071840979 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -16,13 +16,12 @@ | |||
16 | * The default DMA address == Phy address which is 0x8000_0000 based. | 16 | * The default DMA address == Phy address which is 0x8000_0000 based. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-noncoherent.h> |
20 | #include <asm/cache.h> | 20 | #include <asm/cache.h> |
21 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
22 | 22 | ||
23 | 23 | void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, | |
24 | static void *arc_dma_alloc(struct device *dev, size_t size, | 24 | gfp_t gfp, unsigned long attrs) |
25 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) | ||
26 | { | 25 | { |
27 | unsigned long order = get_order(size); | 26 | unsigned long order = get_order(size); |
28 | struct page *page; | 27 | struct page *page; |
@@ -89,7 +88,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size, | |||
89 | return kvaddr; | 88 | return kvaddr; |
90 | } | 89 | } |
91 | 90 | ||
92 | static void arc_dma_free(struct device *dev, size_t size, void *vaddr, | 91 | void arch_dma_free(struct device *dev, size_t size, void *vaddr, |
93 | dma_addr_t dma_handle, unsigned long attrs) | 92 | dma_addr_t dma_handle, unsigned long attrs) |
94 | { | 93 | { |
95 | phys_addr_t paddr = dma_handle; | 94 | phys_addr_t paddr = dma_handle; |
@@ -105,9 +104,9 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr, | |||
105 | __free_pages(page, get_order(size)); | 104 | __free_pages(page, get_order(size)); |
106 | } | 105 | } |
107 | 106 | ||
108 | static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 107 | int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
109 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 108 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
110 | unsigned long attrs) | 109 | unsigned long attrs) |
111 | { | 110 | { |
112 | unsigned long user_count = vma_pages(vma); | 111 | unsigned long user_count = vma_pages(vma); |
113 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 112 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
@@ -130,149 +129,14 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
130 | return ret; | 129 | return ret; |
131 | } | 130 | } |
132 | 131 | ||
133 | /* | 132 | void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, |
134 | * streaming DMA Mapping API... | 133 | size_t size, enum dma_data_direction dir) |
135 | * CPU accesses page via normal paddr, thus needs to explicitly made | ||
136 | * consistent before each use | ||
137 | */ | ||
138 | static void _dma_cache_sync(phys_addr_t paddr, size_t size, | ||
139 | enum dma_data_direction dir) | ||
140 | { | 134 | { |
141 | switch (dir) { | 135 | dma_cache_wback(paddr, size); |
142 | case DMA_FROM_DEVICE: | ||
143 | dma_cache_inv(paddr, size); | ||
144 | break; | ||
145 | case DMA_TO_DEVICE: | ||
146 | dma_cache_wback(paddr, size); | ||
147 | break; | ||
148 | case DMA_BIDIRECTIONAL: | ||
149 | dma_cache_wback_inv(paddr, size); | ||
150 | break; | ||
151 | default: | ||
152 | pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr); | ||
153 | } | ||
154 | } | 136 | } |
155 | 137 | ||
156 | /* | 138 | void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, |
157 | * arc_dma_map_page - map a portion of a page for streaming DMA | 139 | size_t size, enum dma_data_direction dir) |
158 | * | ||
159 | * Ensure that any data held in the cache is appropriately discarded | ||
160 | * or written back. | ||
161 | * | ||
162 | * The device owns this memory once this call has completed. The CPU | ||
163 | * can regain ownership by calling dma_unmap_page(). | ||
164 | * | ||
165 | * Note: while it takes struct page as arg, caller can "abuse" it to pass | ||
166 | * a region larger than PAGE_SIZE, provided it is physically contiguous | ||
167 | * and this still works correctly | ||
168 | */ | ||
169 | static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, | ||
170 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
171 | unsigned long attrs) | ||
172 | { | 140 | { |
173 | phys_addr_t paddr = page_to_phys(page) + offset; | 141 | dma_cache_inv(paddr, size); |
174 | |||
175 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
176 | _dma_cache_sync(paddr, size, dir); | ||
177 | |||
178 | return paddr; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page() | ||
183 | * | ||
184 | * After this call, reads by the CPU to the buffer are guaranteed to see | ||
185 | * whatever the device wrote there. | ||
186 | * | ||
187 | * Note: historically this routine was not implemented for ARC | ||
188 | */ | ||
189 | static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle, | ||
190 | size_t size, enum dma_data_direction dir, | ||
191 | unsigned long attrs) | ||
192 | { | ||
193 | phys_addr_t paddr = handle; | ||
194 | |||
195 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
196 | _dma_cache_sync(paddr, size, dir); | ||
197 | } | 142 | } |
198 | |||
199 | static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
200 | int nents, enum dma_data_direction dir, unsigned long attrs) | ||
201 | { | ||
202 | struct scatterlist *s; | ||
203 | int i; | ||
204 | |||
205 | for_each_sg(sg, s, nents, i) | ||
206 | s->dma_address = dma_map_page(dev, sg_page(s), s->offset, | ||
207 | s->length, dir); | ||
208 | |||
209 | return nents; | ||
210 | } | ||
211 | |||
212 | static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
213 | int nents, enum dma_data_direction dir, | ||
214 | unsigned long attrs) | ||
215 | { | ||
216 | struct scatterlist *s; | ||
217 | int i; | ||
218 | |||
219 | for_each_sg(sg, s, nents, i) | ||
220 | arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, | ||
221 | attrs); | ||
222 | } | ||
223 | |||
224 | static void arc_dma_sync_single_for_cpu(struct device *dev, | ||
225 | dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) | ||
226 | { | ||
227 | _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE); | ||
228 | } | ||
229 | |||
230 | static void arc_dma_sync_single_for_device(struct device *dev, | ||
231 | dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) | ||
232 | { | ||
233 | _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE); | ||
234 | } | ||
235 | |||
236 | static void arc_dma_sync_sg_for_cpu(struct device *dev, | ||
237 | struct scatterlist *sglist, int nelems, | ||
238 | enum dma_data_direction dir) | ||
239 | { | ||
240 | int i; | ||
241 | struct scatterlist *sg; | ||
242 | |||
243 | for_each_sg(sglist, sg, nelems, i) | ||
244 | _dma_cache_sync(sg_phys(sg), sg->length, dir); | ||
245 | } | ||
246 | |||
247 | static void arc_dma_sync_sg_for_device(struct device *dev, | ||
248 | struct scatterlist *sglist, int nelems, | ||
249 | enum dma_data_direction dir) | ||
250 | { | ||
251 | int i; | ||
252 | struct scatterlist *sg; | ||
253 | |||
254 | for_each_sg(sglist, sg, nelems, i) | ||
255 | _dma_cache_sync(sg_phys(sg), sg->length, dir); | ||
256 | } | ||
257 | |||
258 | static int arc_dma_supported(struct device *dev, u64 dma_mask) | ||
259 | { | ||
260 | /* Support 32 bit DMA mask exclusively */ | ||
261 | return dma_mask == DMA_BIT_MASK(32); | ||
262 | } | ||
263 | |||
264 | const struct dma_map_ops arc_dma_ops = { | ||
265 | .alloc = arc_dma_alloc, | ||
266 | .free = arc_dma_free, | ||
267 | .mmap = arc_dma_mmap, | ||
268 | .map_page = arc_dma_map_page, | ||
269 | .unmap_page = arc_dma_unmap_page, | ||
270 | .map_sg = arc_dma_map_sg, | ||
271 | .unmap_sg = arc_dma_unmap_sg, | ||
272 | .sync_single_for_device = arc_dma_sync_single_for_device, | ||
273 | .sync_single_for_cpu = arc_dma_sync_single_for_cpu, | ||
274 | .sync_sg_for_cpu = arc_dma_sync_sg_for_cpu, | ||
275 | .sync_sg_for_device = arc_dma_sync_sg_for_device, | ||
276 | .dma_supported = arc_dma_supported, | ||
277 | }; | ||
278 | EXPORT_SYMBOL(arc_dma_ops); | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a7f8e7f4b88f..c43f5bb55ac8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -60,7 +60,6 @@ config ARM | |||
60 | select HAVE_CONTEXT_TRACKING | 60 | select HAVE_CONTEXT_TRACKING |
61 | select HAVE_C_RECORDMCOUNT | 61 | select HAVE_C_RECORDMCOUNT |
62 | select HAVE_DEBUG_KMEMLEAK | 62 | select HAVE_DEBUG_KMEMLEAK |
63 | select HAVE_DMA_API_DEBUG | ||
64 | select HAVE_DMA_CONTIGUOUS if MMU | 63 | select HAVE_DMA_CONTIGUOUS if MMU |
65 | select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU | 64 | select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU |
66 | select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE | 65 | select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE |
@@ -96,6 +95,7 @@ config ARM | |||
96 | select HAVE_VIRT_CPU_ACCOUNTING_GEN | 95 | select HAVE_VIRT_CPU_ACCOUNTING_GEN |
97 | select IRQ_FORCED_THREADING | 96 | select IRQ_FORCED_THREADING |
98 | select MODULES_USE_ELF_REL | 97 | select MODULES_USE_ELF_REL |
98 | select NEED_DMA_MAP_STATE | ||
99 | select NO_BOOTMEM | 99 | select NO_BOOTMEM |
100 | select OF_EARLY_FLATTREE if OF | 100 | select OF_EARLY_FLATTREE if OF |
101 | select OF_RESERVED_MEM if OF | 101 | select OF_RESERVED_MEM if OF |
@@ -119,9 +119,6 @@ config ARM_HAS_SG_CHAIN | |||
119 | select ARCH_HAS_SG_CHAIN | 119 | select ARCH_HAS_SG_CHAIN |
120 | bool | 120 | bool |
121 | 121 | ||
122 | config NEED_SG_DMA_LENGTH | ||
123 | bool | ||
124 | |||
125 | config ARM_DMA_USE_IOMMU | 122 | config ARM_DMA_USE_IOMMU |
126 | bool | 123 | bool |
127 | select ARM_HAS_SG_CHAIN | 124 | select ARM_HAS_SG_CHAIN |
@@ -224,9 +221,6 @@ config ARCH_MAY_HAVE_PC_FDC | |||
224 | config ZONE_DMA | 221 | config ZONE_DMA |
225 | bool | 222 | bool |
226 | 223 | ||
227 | config NEED_DMA_MAP_STATE | ||
228 | def_bool y | ||
229 | |||
230 | config ARCH_SUPPORTS_UPROBES | 224 | config ARCH_SUPPORTS_UPROBES |
231 | def_bool y | 225 | def_bool y |
232 | 226 | ||
@@ -1778,12 +1772,6 @@ config SECCOMP | |||
1778 | and the task is only allowed to execute a few safe syscalls | 1772 | and the task is only allowed to execute a few safe syscalls |
1779 | defined by each seccomp mode. | 1773 | defined by each seccomp mode. |
1780 | 1774 | ||
1781 | config SWIOTLB | ||
1782 | def_bool y | ||
1783 | |||
1784 | config IOMMU_HELPER | ||
1785 | def_bool SWIOTLB | ||
1786 | |||
1787 | config PARAVIRT | 1775 | config PARAVIRT |
1788 | bool "Enable paravirtualization code" | 1776 | bool "Enable paravirtualization code" |
1789 | help | 1777 | help |
@@ -1815,6 +1803,7 @@ config XEN | |||
1815 | depends on MMU | 1803 | depends on MMU |
1816 | select ARCH_DMA_ADDR_T_64BIT | 1804 | select ARCH_DMA_ADDR_T_64BIT |
1817 | select ARM_PSCI | 1805 | select ARM_PSCI |
1806 | select SWIOTLB | ||
1818 | select SWIOTLB_XEN | 1807 | select SWIOTLB_XEN |
1819 | select PARAVIRT | 1808 | select PARAVIRT |
1820 | help | 1809 | help |
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 1f0de808d111..0abd389cf0ec 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -19,13 +19,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
19 | } | 19 | } |
20 | #endif /* CONFIG_PCI_DOMAINS */ | 20 | #endif /* CONFIG_PCI_DOMAINS */ |
21 | 21 | ||
22 | /* | ||
23 | * The PCI address space does equal the physical memory address space. | ||
24 | * The networking and block device layers use this boolean for bounce | ||
25 | * buffer decisions. | ||
26 | */ | ||
27 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
28 | |||
29 | #define HAVE_PCI_MMAP | 22 | #define HAVE_PCI_MMAP |
30 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE | 23 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE |
31 | 24 | ||
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index fc40a2b40595..35ca494c028c 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -754,7 +754,7 @@ int __init arm_add_memory(u64 start, u64 size) | |||
754 | else | 754 | else |
755 | size -= aligned_start - start; | 755 | size -= aligned_start - start; |
756 | 756 | ||
757 | #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT | 757 | #ifndef CONFIG_PHYS_ADDR_T_64BIT |
758 | if (aligned_start > ULONG_MAX) { | 758 | if (aligned_start > ULONG_MAX) { |
759 | pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n", | 759 | pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n", |
760 | (long long)start); | 760 | (long long)start); |
diff --git a/arch/arm/mach-axxia/Kconfig b/arch/arm/mach-axxia/Kconfig
index bb2ce1c63fd9..d3eae6037913 100644
--- a/arch/arm/mach-axxia/Kconfig
+++ b/arch/arm/mach-axxia/Kconfig
@@ -2,7 +2,6 @@ | |||
2 | config ARCH_AXXIA | 2 | config ARCH_AXXIA |
3 | bool "LSI Axxia platforms" | 3 | bool "LSI Axxia platforms" |
4 | depends on ARCH_MULTI_V7 && ARM_LPAE | 4 | depends on ARCH_MULTI_V7 && ARM_LPAE |
5 | select ARCH_DMA_ADDR_T_64BIT | ||
6 | select ARM_AMBA | 5 | select ARM_AMBA |
7 | select ARM_GIC | 6 | select ARM_GIC |
8 | select ARM_TIMER_SP804 | 7 | select ARM_TIMER_SP804 |
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index c2f3b0d216a4..c46a728df44e 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -211,7 +211,6 @@ config ARCH_BRCMSTB | |||
211 | select BRCMSTB_L2_IRQ | 211 | select BRCMSTB_L2_IRQ |
212 | select BCM7120_L2_IRQ | 212 | select BCM7120_L2_IRQ |
213 | select ARCH_HAS_HOLES_MEMORYMODEL | 213 | select ARCH_HAS_HOLES_MEMORYMODEL |
214 | select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE | ||
215 | select ZONE_DMA if ARM_LPAE | 214 | select ZONE_DMA if ARM_LPAE |
216 | select SOC_BRCMSTB | 215 | select SOC_BRCMSTB |
217 | select SOC_BUS | 216 | select SOC_BUS |
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 647c319f9f5f..2ca405816846 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -112,7 +112,6 @@ config SOC_EXYNOS5440 | |||
112 | bool "SAMSUNG EXYNOS5440" | 112 | bool "SAMSUNG EXYNOS5440" |
113 | default y | 113 | default y |
114 | depends on ARCH_EXYNOS5 | 114 | depends on ARCH_EXYNOS5 |
115 | select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE | ||
116 | select HAVE_ARM_ARCH_TIMER | 115 | select HAVE_ARM_ARCH_TIMER |
117 | select AUTO_ZRELADDR | 116 | select AUTO_ZRELADDR |
118 | select PINCTRL_EXYNOS5440 | 117 | select PINCTRL_EXYNOS5440 |
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 81110ec34226..5552968f07f8 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -1,7 +1,6 @@ | |||
1 | config ARCH_HIGHBANK | 1 | config ARCH_HIGHBANK |
2 | bool "Calxeda ECX-1000/2000 (Highbank/Midway)" | 2 | bool "Calxeda ECX-1000/2000 (Highbank/Midway)" |
3 | depends on ARCH_MULTI_V7 | 3 | depends on ARCH_MULTI_V7 |
4 | select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE | ||
5 | select ARCH_HAS_HOLES_MEMORYMODEL | 4 | select ARCH_HAS_HOLES_MEMORYMODEL |
6 | select ARCH_SUPPORTS_BIG_ENDIAN | 5 | select ARCH_SUPPORTS_BIG_ENDIAN |
7 | select ARM_AMBA | 6 | select ARM_AMBA |
diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
index a4065966881a..fafd3d7f9f8c 100644
--- a/arch/arm/mach-rockchip/Kconfig
+++ b/arch/arm/mach-rockchip/Kconfig
@@ -3,7 +3,6 @@ config ARCH_ROCKCHIP | |||
3 | depends on ARCH_MULTI_V7 | 3 | depends on ARCH_MULTI_V7 |
4 | select PINCTRL | 4 | select PINCTRL |
5 | select PINCTRL_ROCKCHIP | 5 | select PINCTRL_ROCKCHIP |
6 | select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE | ||
7 | select ARCH_HAS_RESET_CONTROLLER | 6 | select ARCH_HAS_RESET_CONTROLLER |
8 | select ARM_AMBA | 7 | select ARM_AMBA |
9 | select ARM_GIC | 8 | select ARM_GIC |
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 280e7312a9e1..fe60cd09a5ca 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -29,7 +29,6 @@ config ARCH_RMOBILE | |||
29 | menuconfig ARCH_RENESAS | 29 | menuconfig ARCH_RENESAS |
30 | bool "Renesas ARM SoCs" | 30 | bool "Renesas ARM SoCs" |
31 | depends on ARCH_MULTI_V7 && MMU | 31 | depends on ARCH_MULTI_V7 && MMU |
32 | select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE | ||
33 | select ARCH_SHMOBILE | 32 | select ARCH_SHMOBILE |
34 | select ARM_GIC | 33 | select ARM_GIC |
35 | select GPIOLIB | 34 | select GPIOLIB |
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 1e0aeb47bac6..7f3b83e0d324 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -15,6 +15,5 @@ menuconfig ARCH_TEGRA | |||
15 | select RESET_CONTROLLER | 15 | select RESET_CONTROLLER |
16 | select SOC_BUS | 16 | select SOC_BUS |
17 | select ZONE_DMA if ARM_LPAE | 17 | select ZONE_DMA if ARM_LPAE |
18 | select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE | ||
19 | help | 18 | help |
20 | This enables support for NVIDIA Tegra based systems. | 19 | This enables support for NVIDIA Tegra based systems. |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7f14acf67caf..5a016bc80e26 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -661,6 +661,7 @@ config ARM_LPAE | |||
661 | bool "Support for the Large Physical Address Extension" | 661 | bool "Support for the Large Physical Address Extension" |
662 | depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \ | 662 | depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \ |
663 | !CPU_32v4 && !CPU_32v3 | 663 | !CPU_32v4 && !CPU_32v3 |
664 | select PHYS_ADDR_T_64BIT | ||
664 | help | 665 | help |
665 | Say Y if you have an ARMv7 processor supporting the LPAE page | 666 | Say Y if you have an ARMv7 processor supporting the LPAE page |
666 | table format and you would like to access memory beyond the | 667 | table format and you would like to access memory beyond the |
@@ -673,12 +674,6 @@ config ARM_PV_FIXUP | |||
673 | def_bool y | 674 | def_bool y |
674 | depends on ARM_LPAE && ARM_PATCH_PHYS_VIRT && ARCH_KEYSTONE | 675 | depends on ARM_LPAE && ARM_PATCH_PHYS_VIRT && ARCH_KEYSTONE |
675 | 676 | ||
676 | config ARCH_PHYS_ADDR_T_64BIT | ||
677 | def_bool ARM_LPAE | ||
678 | |||
679 | config ARCH_DMA_ADDR_T_64BIT | ||
680 | bool | ||
681 | |||
682 | config ARM_THUMB | 677 | config ARM_THUMB |
683 | bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT | 678 | bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT |
684 | depends on CPU_THUMB_CAPABLE | 679 | depends on CPU_THUMB_CAPABLE |
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 619f24a42d09..f448a0663b10 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -241,12 +241,3 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | |||
241 | void arch_teardown_dma_ops(struct device *dev) | 241 | void arch_teardown_dma_ops(struct device *dev) |
242 | { | 242 | { |
243 | } | 243 | } |
244 | |||
245 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | ||
246 | |||
247 | static int __init dma_debug_do_init(void) | ||
248 | { | ||
249 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
250 | return 0; | ||
251 | } | ||
252 | core_initcall(dma_debug_do_init); | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ada8eb206a90..4b6613b5e042 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1151,15 +1151,6 @@ int arm_dma_supported(struct device *dev, u64 mask) | |||
1151 | return __dma_supported(dev, mask, false); | 1151 | return __dma_supported(dev, mask, false); |
1152 | } | 1152 | } |
1153 | 1153 | ||
1154 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | ||
1155 | |||
1156 | static int __init dma_debug_do_init(void) | ||
1157 | { | ||
1158 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
1159 | return 0; | ||
1160 | } | ||
1161 | core_initcall(dma_debug_do_init); | ||
1162 | |||
1163 | #ifdef CONFIG_ARM_DMA_USE_IOMMU | 1154 | #ifdef CONFIG_ARM_DMA_USE_IOMMU |
1164 | 1155 | ||
1165 | static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs) | 1156 | static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs) |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index eb2cf4938f6d..b25ed7834f6c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -105,7 +105,6 @@ config ARM64 | |||
105 | select HAVE_CONTEXT_TRACKING | 105 | select HAVE_CONTEXT_TRACKING |
106 | select HAVE_DEBUG_BUGVERBOSE | 106 | select HAVE_DEBUG_BUGVERBOSE |
107 | select HAVE_DEBUG_KMEMLEAK | 107 | select HAVE_DEBUG_KMEMLEAK |
108 | select HAVE_DMA_API_DEBUG | ||
109 | select HAVE_DMA_CONTIGUOUS | 108 | select HAVE_DMA_CONTIGUOUS |
110 | select HAVE_DYNAMIC_FTRACE | 109 | select HAVE_DYNAMIC_FTRACE |
111 | select HAVE_EFFICIENT_UNALIGNED_ACCESS | 110 | select HAVE_EFFICIENT_UNALIGNED_ACCESS |
@@ -133,6 +132,8 @@ config ARM64 | |||
133 | select IRQ_FORCED_THREADING | 132 | select IRQ_FORCED_THREADING |
134 | select MODULES_USE_ELF_RELA | 133 | select MODULES_USE_ELF_RELA |
135 | select MULTI_IRQ_HANDLER | 134 | select MULTI_IRQ_HANDLER |
135 | select NEED_DMA_MAP_STATE | ||
136 | select NEED_SG_DMA_LENGTH | ||
136 | select NO_BOOTMEM | 137 | select NO_BOOTMEM |
137 | select OF | 138 | select OF |
138 | select OF_EARLY_FLATTREE | 139 | select OF_EARLY_FLATTREE |
@@ -142,6 +143,7 @@ config ARM64 | |||
142 | select POWER_SUPPLY | 143 | select POWER_SUPPLY |
143 | select REFCOUNT_FULL | 144 | select REFCOUNT_FULL |
144 | select SPARSE_IRQ | 145 | select SPARSE_IRQ |
146 | select SWIOTLB | ||
145 | select SYSCTL_EXCEPTION_TRACE | 147 | select SYSCTL_EXCEPTION_TRACE |
146 | select THREAD_INFO_IN_TASK | 148 | select THREAD_INFO_IN_TASK |
147 | help | 149 | help |
@@ -150,9 +152,6 @@ config ARM64 | |||
150 | config 64BIT | 152 | config 64BIT |
151 | def_bool y | 153 | def_bool y |
152 | 154 | ||
153 | config ARCH_PHYS_ADDR_T_64BIT | ||
154 | def_bool y | ||
155 | |||
156 | config MMU | 155 | config MMU |
157 | def_bool y | 156 | def_bool y |
158 | 157 | ||
@@ -237,24 +236,9 @@ config ZONE_DMA32 | |||
237 | config HAVE_GENERIC_GUP | 236 | config HAVE_GENERIC_GUP |
238 | def_bool y | 237 | def_bool y |
239 | 238 | ||
240 | config ARCH_DMA_ADDR_T_64BIT | ||
241 | def_bool y | ||
242 | |||
243 | config NEED_DMA_MAP_STATE | ||
244 | def_bool y | ||
245 | |||
246 | config NEED_SG_DMA_LENGTH | ||
247 | def_bool y | ||
248 | |||
249 | config SMP | 239 | config SMP |
250 | def_bool y | 240 | def_bool y |
251 | 241 | ||
252 | config SWIOTLB | ||
253 | def_bool y | ||
254 | |||
255 | config IOMMU_HELPER | ||
256 | def_bool SWIOTLB | ||
257 | |||
258 | config KERNEL_MODE_NEON | 242 | config KERNEL_MODE_NEON |
259 | def_bool y | 243 | def_bool y |
260 | 244 | ||
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index 8747f7c5e0e7..9e690686e8aa 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -18,11 +18,6 @@ | |||
18 | #define pcibios_assign_all_busses() \ | 18 | #define pcibios_assign_all_busses() \ |
19 | (pci_has_flag(PCI_REASSIGN_ALL_BUS)) | 19 | (pci_has_flag(PCI_REASSIGN_ALL_BUS)) |
20 | 20 | ||
21 | /* | ||
22 | * PCI address space differs from physical memory address space | ||
23 | */ | ||
24 | #define PCI_DMA_BUS_IS_PHYS (0) | ||
25 | |||
26 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 | 21 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 |
27 | 22 | ||
28 | extern int isa_dma_bridge_buggy; | 23 | extern int isa_dma_bridge_buggy; |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index a96ec0181818..db01f2709842 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -508,16 +508,6 @@ static int __init arm64_dma_init(void) | |||
508 | } | 508 | } |
509 | arch_initcall(arm64_dma_init); | 509 | arch_initcall(arm64_dma_init); |
510 | 510 | ||
511 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | ||
512 | |||
513 | static int __init dma_debug_do_init(void) | ||
514 | { | ||
515 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
516 | return 0; | ||
517 | } | ||
518 | fs_initcall(dma_debug_do_init); | ||
519 | |||
520 | |||
521 | #ifdef CONFIG_IOMMU_DMA | 511 | #ifdef CONFIG_IOMMU_DMA |
522 | #include <linux/dma-iommu.h> | 512 | #include <linux/dma-iommu.h> |
523 | #include <linux/platform_device.h> | 513 | #include <linux/platform_device.h> |
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index c6b4dd1418b4..bf59855628ac 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -6,11 +6,13 @@ | |||
6 | 6 | ||
7 | config C6X | 7 | config C6X |
8 | def_bool y | 8 | def_bool y |
9 | select ARCH_HAS_SYNC_DMA_FOR_CPU | ||
10 | select ARCH_HAS_SYNC_DMA_FOR_DEVICE | ||
9 | select CLKDEV_LOOKUP | 11 | select CLKDEV_LOOKUP |
12 | select DMA_NONCOHERENT_OPS | ||
10 | select GENERIC_ATOMIC64 | 13 | select GENERIC_ATOMIC64 |
11 | select GENERIC_IRQ_SHOW | 14 | select GENERIC_IRQ_SHOW |
12 | select HAVE_ARCH_TRACEHOOK | 15 | select HAVE_ARCH_TRACEHOOK |
13 | select HAVE_DMA_API_DEBUG | ||
14 | select HAVE_MEMBLOCK | 16 | select HAVE_MEMBLOCK |
15 | select SPARSE_IRQ | 17 | select SPARSE_IRQ |
16 | select IRQ_DOMAIN | 18 | select IRQ_DOMAIN |
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index fd4c840de837..434600e47662 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += current.h | |||
5 | generic-y += device.h | 5 | generic-y += device.h |
6 | generic-y += div64.h | 6 | generic-y += div64.h |
7 | generic-y += dma.h | 7 | generic-y += dma.h |
8 | generic-y += dma-mapping.h | ||
8 | generic-y += emergency-restart.h | 9 | generic-y += emergency-restart.h |
9 | generic-y += exec.h | 10 | generic-y += exec.h |
10 | generic-y += extable.h | 11 | generic-y += extable.h |
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
deleted file mode 100644
index 05daf1038111..000000000000
--- a/arch/c6x/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,28 +0,0 @@ | |||
1 | /* | ||
2 | * Port on Texas Instruments TMS320C6x architecture | ||
3 | * | ||
4 | * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated | ||
5 | * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | #ifndef _ASM_C6X_DMA_MAPPING_H | ||
13 | #define _ASM_C6X_DMA_MAPPING_H | ||
14 | |||
15 | extern const struct dma_map_ops c6x_dma_ops; | ||
16 | |||
17 | static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | ||
18 | { | ||
19 | return &c6x_dma_ops; | ||
20 | } | ||
21 | |||
22 | extern void coherent_mem_init(u32 start, u32 size); | ||
23 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | ||
24 | gfp_t gfp, unsigned long attrs); | ||
25 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, | ||
26 | dma_addr_t dma_handle, unsigned long attrs); | ||
27 | |||
28 | #endif /* _ASM_C6X_DMA_MAPPING_H */ | ||
diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h
index 852afb209afb..350f34debb19 100644
--- a/arch/c6x/include/asm/setup.h
+++ b/arch/c6x/include/asm/setup.h
@@ -28,5 +28,7 @@ extern unsigned char c6x_fuse_mac[6]; | |||
28 | extern void machine_init(unsigned long dt_ptr); | 28 | extern void machine_init(unsigned long dt_ptr); |
29 | extern void time_init(void); | 29 | extern void time_init(void); |
30 | 30 | ||
31 | extern void coherent_mem_init(u32 start, u32 size); | ||
32 | |||
31 | #endif /* !__ASSEMBLY__ */ | 33 | #endif /* !__ASSEMBLY__ */ |
32 | #endif /* _ASM_C6X_SETUP_H */ | 34 | #endif /* _ASM_C6X_SETUP_H */ |
diff --git a/arch/c6x/kernel/Makefile b/arch/c6x/kernel/Makefile
index 02f340d7b8fe..fbe74174de87 100644
--- a/arch/c6x/kernel/Makefile
+++ b/arch/c6x/kernel/Makefile
@@ -8,6 +8,6 @@ extra-y := head.o vmlinux.lds | |||
8 | obj-y := process.o traps.o irq.o signal.o ptrace.o | 8 | obj-y := process.o traps.o irq.o signal.o ptrace.o |
9 | obj-y += setup.o sys_c6x.o time.o devicetree.o | 9 | obj-y += setup.o sys_c6x.o time.o devicetree.o |
10 | obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o | 10 | obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o |
11 | obj-y += soc.o dma.o | 11 | obj-y += soc.o |
12 | 12 | ||
13 | obj-$(CONFIG_MODULES) += module.o | 13 | obj-$(CONFIG_MODULES) += module.o |
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
deleted file mode 100644
index 9fff8be75f58..000000000000
--- a/arch/c6x/kernel/dma.c
+++ /dev/null
@@ -1,149 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Texas Instruments Incorporated | ||
3 | * Author: Mark Salter <msalter@redhat.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/dma-mapping.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/mm_types.h> | ||
13 | #include <linux/scatterlist.h> | ||
14 | |||
15 | #include <asm/cacheflush.h> | ||
16 | |||
17 | static void c6x_dma_sync(dma_addr_t handle, size_t size, | ||
18 | enum dma_data_direction dir) | ||
19 | { | ||
20 | unsigned long paddr = handle; | ||
21 | |||
22 | BUG_ON(!valid_dma_direction(dir)); | ||
23 | |||
24 | switch (dir) { | ||
25 | case DMA_FROM_DEVICE: | ||
26 | L2_cache_block_invalidate(paddr, paddr + size); | ||
27 | break; | ||
28 | case DMA_TO_DEVICE: | ||
29 | L2_cache_block_writeback(paddr, paddr + size); | ||
30 | break; | ||
31 | case DMA_BIDIRECTIONAL: | ||
32 | L2_cache_block_writeback_invalidate(paddr, paddr + size); | ||
33 | break; | ||
34 | default: | ||
35 | break; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, | ||
40 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
41 | unsigned long attrs) | ||
42 | { | ||
43 | dma_addr_t handle = virt_to_phys(page_address(page) + offset); | ||
44 | |||
45 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
46 | c6x_dma_sync(handle, size, dir); | ||
47 | |||
48 | return handle; | ||
49 | } | ||
50 | |||
51 | static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, | ||
52 | size_t size, enum dma_data_direction dir, unsigned long attrs) | ||
53 | { | ||
54 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
55 | c6x_dma_sync(handle, size, dir); | ||
56 | } | ||
57 | |||
58 | static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, | ||
59 | int nents, enum dma_data_direction dir, unsigned long attrs) | ||
60 | { | ||
61 | struct scatterlist *sg; | ||
62 | int i; | ||
63 | |||
64 | for_each_sg(sglist, sg, nents, i) { | ||
65 | sg->dma_address = sg_phys(sg); | ||
66 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
67 | c6x_dma_sync(sg->dma_address, sg->length, dir); | ||
68 | } | ||
69 | |||
70 | return nents; | ||
71 | } | ||
72 | |||
73 | static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
74 | int nents, enum dma_data_direction dir, unsigned long attrs) | ||
75 | { | ||
76 | struct scatterlist *sg; | ||
77 | int i; | ||
78 | |||
79 | if (attrs & DMA_ATTR_SKIP_CPU_SYNC) | ||
80 | return; | ||
81 | |||
82 | for_each_sg(sglist, sg, nents, i) | ||
83 | c6x_dma_sync(sg_dma_address(sg), sg->length, dir); | ||
84 | } | ||
85 | |||
86 | static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, | ||
87 | size_t size, enum dma_data_direction dir) | ||
88 | { | ||
89 | c6x_dma_sync(handle, size, dir); | ||
90 | |||
91 | } | ||
92 | |||
93 | static void c6x_dma_sync_single_for_device(struct device *dev, | ||
94 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
95 | { | ||
96 | c6x_dma_sync(handle, size, dir); | ||
97 | |||
98 | } | ||
99 | |||
100 | static void c6x_dma_sync_sg_for_cpu(struct device *dev, | ||
101 | struct scatterlist *sglist, int nents, | ||
102 | enum dma_data_direction dir) | ||
103 | { | ||
104 | struct scatterlist *sg; | ||
105 | int i; | ||
106 | |||
107 | for_each_sg(sglist, sg, nents, i) | ||
108 | c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg), | ||
109 | sg->length, dir); | ||
110 | |||
111 | } | ||
112 | |||
113 | static void c6x_dma_sync_sg_for_device(struct device *dev, | ||
114 | struct scatterlist *sglist, int nents, | ||
115 | enum dma_data_direction dir) | ||
116 | { | ||
117 | struct scatterlist *sg; | ||
118 | int i; | ||
119 | |||
120 | for_each_sg(sglist, sg, nents, i) | ||
121 | c6x_dma_sync_single_for_device(dev, sg_dma_address(sg), | ||
122 | sg->length, dir); | ||
123 | |||
124 | } | ||
125 | |||
126 | const struct dma_map_ops c6x_dma_ops = { | ||
127 | .alloc = c6x_dma_alloc, | ||
128 | .free = c6x_dma_free, | ||
129 | .map_page = c6x_dma_map_page, | ||
130 | .unmap_page = c6x_dma_unmap_page, | ||
131 | .map_sg = c6x_dma_map_sg, | ||
132 | .unmap_sg = c6x_dma_unmap_sg, | ||
133 | .sync_single_for_device = c6x_dma_sync_single_for_device, | ||
134 | .sync_single_for_cpu = c6x_dma_sync_single_for_cpu, | ||
135 | .sync_sg_for_device = c6x_dma_sync_sg_for_device, | ||
136 | .sync_sg_for_cpu = c6x_dma_sync_sg_for_cpu, | ||
137 | }; | ||
138 | EXPORT_SYMBOL(c6x_dma_ops); | ||
139 | |||
140 | /* Number of entries preallocated for DMA-API debugging */ | ||
141 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
142 | |||
143 | static int __init dma_init(void) | ||
144 | { | ||
145 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | fs_initcall(dma_init); | ||
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 95e38ad27c69..d0a8e0c4b27e 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -19,10 +19,12 @@ | |||
19 | #include <linux/bitops.h> | 19 | #include <linux/bitops.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-noncoherent.h> |
23 | #include <linux/memblock.h> | 23 | #include <linux/memblock.h> |
24 | 24 | ||
25 | #include <asm/cacheflush.h> | ||
25 | #include <asm/page.h> | 26 | #include <asm/page.h> |
27 | #include <asm/setup.h> | ||
26 | 28 | ||
27 | /* | 29 | /* |
28 | * DMA coherent memory management, can be redefined using the memdma= | 30 | * DMA coherent memory management, can be redefined using the memdma= |
@@ -73,7 +75,7 @@ static void __free_dma_pages(u32 addr, int order) | |||
73 | * Allocate DMA coherent memory space and return both the kernel | 75 | * Allocate DMA coherent memory space and return both the kernel |
74 | * virtual and DMA address for that space. | 76 | * virtual and DMA address for that space. |
75 | */ | 77 | */ |
76 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 78 | void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
77 | gfp_t gfp, unsigned long attrs) | 79 | gfp_t gfp, unsigned long attrs) |
78 | { | 80 | { |
79 | u32 paddr; | 81 | u32 paddr; |
@@ -98,7 +100,7 @@ void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
98 | /* | 100 | /* |
99 | * Free DMA coherent memory as defined by the above mapping. | 101 | * Free DMA coherent memory as defined by the above mapping. |
100 | */ | 102 | */ |
101 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, | 103 | void arch_dma_free(struct device *dev, size_t size, void *vaddr, |
102 | dma_addr_t dma_handle, unsigned long attrs) | 104 | dma_addr_t dma_handle, unsigned long attrs) |
103 | { | 105 | { |
104 | int order; | 106 | int order; |
@@ -139,3 +141,35 @@ void __init coherent_mem_init(phys_addr_t start, u32 size) | |||
139 | dma_bitmap = phys_to_virt(bitmap_phys); | 141 | dma_bitmap = phys_to_virt(bitmap_phys); |
140 | memset(dma_bitmap, 0, dma_pages * PAGE_SIZE); | 142 | memset(dma_bitmap, 0, dma_pages * PAGE_SIZE); |
141 | } | 143 | } |
144 | |||
145 | static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size, | ||
146 | enum dma_data_direction dir) | ||
147 | { | ||
148 | BUG_ON(!valid_dma_direction(dir)); | ||
149 | |||
150 | switch (dir) { | ||
151 | case DMA_FROM_DEVICE: | ||
152 | L2_cache_block_invalidate(paddr, paddr + size); | ||
153 | break; | ||
154 | case DMA_TO_DEVICE: | ||
155 | L2_cache_block_writeback(paddr, paddr + size); | ||
156 | break; | ||
157 | case DMA_BIDIRECTIONAL: | ||
158 | L2_cache_block_writeback_invalidate(paddr, paddr + size); | ||
159 | break; | ||
160 | default: | ||
161 | break; | ||
162 | } | ||
163 | } | ||
164 | |||
165 | void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, | ||
166 | size_t size, enum dma_data_direction dir) | ||
167 | { | ||
168 | return c6x_dma_sync(dev, paddr, size, dir); | ||
169 | } | ||
170 | |||
171 | void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, | ||
172 | size_t size, enum dma_data_direction dir) | ||
173 | { | ||
174 | return c6x_dma_sync(dev, paddr, size, dir); | ||
175 | } | ||
diff --git a/arch/h8300/include/asm/pci.h b/arch/h8300/include/asm/pci.h
index 7c9e55d62215..d4d345a52092 100644
--- a/arch/h8300/include/asm/pci.h
+++ b/arch/h8300/include/asm/pci.h
@@ -15,6 +15,4 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
15 | /* We don't do dynamic PCI IRQ allocation */ | 15 | /* We don't do dynamic PCI IRQ allocation */ |
16 | } | 16 | } |
17 | 17 | ||
18 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
19 | |||
20 | #endif /* _ASM_H8300_PCI_H */ | 18 | #endif /* _ASM_H8300_PCI_H */ |
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 76d2f20d525e..37adb2003033 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -19,6 +19,7 @@ config HEXAGON | |||
19 | select GENERIC_IRQ_SHOW | 19 | select GENERIC_IRQ_SHOW |
20 | select HAVE_ARCH_KGDB | 20 | select HAVE_ARCH_KGDB |
21 | select HAVE_ARCH_TRACEHOOK | 21 | select HAVE_ARCH_TRACEHOOK |
22 | select NEED_SG_DMA_LENGTH | ||
22 | select NO_IOPORT_MAP | 23 | select NO_IOPORT_MAP |
23 | select GENERIC_IOMAP | 24 | select GENERIC_IOMAP |
24 | select GENERIC_SMP_IDLE_THREAD | 25 | select GENERIC_SMP_IDLE_THREAD |
@@ -63,9 +64,6 @@ config GENERIC_CSUM | |||
63 | config GENERIC_IRQ_PROBE | 64 | config GENERIC_IRQ_PROBE |
64 | def_bool y | 65 | def_bool y |
65 | 66 | ||
66 | config NEED_SG_DMA_LENGTH | ||
67 | def_bool y | ||
68 | |||
69 | config RWSEM_GENERIC_SPINLOCK | 67 | config RWSEM_GENERIC_SPINLOCK |
70 | def_bool n | 68 | def_bool n |
71 | 69 | ||
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index ad8347c29dcf..77459df34e2e 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -208,7 +208,6 @@ const struct dma_map_ops hexagon_dma_ops = { | |||
208 | .sync_single_for_cpu = hexagon_sync_single_for_cpu, | 208 | .sync_single_for_cpu = hexagon_sync_single_for_cpu, |
209 | .sync_single_for_device = hexagon_sync_single_for_device, | 209 | .sync_single_for_device = hexagon_sync_single_for_device, |
210 | .mapping_error = hexagon_mapping_error, | 210 | .mapping_error = hexagon_mapping_error, |
211 | .is_phys = 1, | ||
212 | }; | 211 | }; |
213 | 212 | ||
214 | void __init hexagon_dma_init(void) | 213 | void __init hexagon_dma_init(void) |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index bbe12a038d21..2067289fad4a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -29,7 +29,6 @@ config IA64 | |||
29 | select HAVE_FUNCTION_TRACER | 29 | select HAVE_FUNCTION_TRACER |
30 | select TTY | 30 | select TTY |
31 | select HAVE_ARCH_TRACEHOOK | 31 | select HAVE_ARCH_TRACEHOOK |
32 | select HAVE_DMA_API_DEBUG | ||
33 | select HAVE_MEMBLOCK | 32 | select HAVE_MEMBLOCK |
34 | select HAVE_MEMBLOCK_NODE_MAP | 33 | select HAVE_MEMBLOCK_NODE_MAP |
35 | select HAVE_VIRT_CPU_ACCOUNTING | 34 | select HAVE_VIRT_CPU_ACCOUNTING |
@@ -54,6 +53,8 @@ config IA64 | |||
54 | select MODULES_USE_ELF_RELA | 53 | select MODULES_USE_ELF_RELA |
55 | select ARCH_USE_CMPXCHG_LOCKREF | 54 | select ARCH_USE_CMPXCHG_LOCKREF |
56 | select HAVE_ARCH_AUDITSYSCALL | 55 | select HAVE_ARCH_AUDITSYSCALL |
56 | select NEED_DMA_MAP_STATE | ||
57 | select NEED_SG_DMA_LENGTH | ||
57 | default y | 58 | default y |
58 | help | 59 | help |
59 | The Itanium Processor Family is Intel's 64-bit successor to | 60 | The Itanium Processor Family is Intel's 64-bit successor to |
@@ -78,18 +79,6 @@ config MMU | |||
78 | bool | 79 | bool |
79 | default y | 80 | default y |
80 | 81 | ||
81 | config ARCH_DMA_ADDR_T_64BIT | ||
82 | def_bool y | ||
83 | |||
84 | config NEED_DMA_MAP_STATE | ||
85 | def_bool y | ||
86 | |||
87 | config NEED_SG_DMA_LENGTH | ||
88 | def_bool y | ||
89 | |||
90 | config SWIOTLB | ||
91 | bool | ||
92 | |||
93 | config STACKTRACE_SUPPORT | 82 | config STACKTRACE_SUPPORT |
94 | def_bool y | 83 | def_bool y |
95 | 84 | ||
@@ -146,7 +135,6 @@ config IA64_GENERIC | |||
146 | bool "generic" | 135 | bool "generic" |
147 | select NUMA | 136 | select NUMA |
148 | select ACPI_NUMA | 137 | select ACPI_NUMA |
149 | select DMA_DIRECT_OPS | ||
150 | select SWIOTLB | 138 | select SWIOTLB |
151 | select PCI_MSI | 139 | select PCI_MSI |
152 | help | 140 | help |
@@ -167,7 +155,6 @@ config IA64_GENERIC | |||
167 | 155 | ||
168 | config IA64_DIG | 156 | config IA64_DIG |
169 | bool "DIG-compliant" | 157 | bool "DIG-compliant" |
170 | select DMA_DIRECT_OPS | ||
171 | select SWIOTLB | 158 | select SWIOTLB |
172 | 159 | ||
173 | config IA64_DIG_VTD | 160 | config IA64_DIG_VTD |
@@ -183,7 +170,6 @@ config IA64_HP_ZX1 | |||
183 | 170 | ||
184 | config IA64_HP_ZX1_SWIOTLB | 171 | config IA64_HP_ZX1_SWIOTLB |
185 | bool "HP-zx1/sx1000 with software I/O TLB" | 172 | bool "HP-zx1/sx1000 with software I/O TLB" |
186 | select DMA_DIRECT_OPS | ||
187 | select SWIOTLB | 173 | select SWIOTLB |
188 | help | 174 | help |
189 | Build a kernel that runs on HP zx1 and sx1000 systems even when they | 175 | Build a kernel that runs on HP zx1 and sx1000 systems even when they |
@@ -207,7 +193,6 @@ config IA64_SGI_UV | |||
207 | bool "SGI-UV" | 193 | bool "SGI-UV" |
208 | select NUMA | 194 | select NUMA |
209 | select ACPI_NUMA | 195 | select ACPI_NUMA |
210 | select DMA_DIRECT_OPS | ||
211 | select SWIOTLB | 196 | select SWIOTLB |
212 | help | 197 | help |
213 | Selecting this option will optimize the kernel for use on UV based | 198 | Selecting this option will optimize the kernel for use on UV based |
@@ -218,7 +203,6 @@ config IA64_SGI_UV | |||
218 | 203 | ||
219 | config IA64_HP_SIM | 204 | config IA64_HP_SIM |
220 | bool "Ski-simulator" | 205 | bool "Ski-simulator" |
221 | select DMA_DIRECT_OPS | ||
222 | select SWIOTLB | 206 | select SWIOTLB |
223 | depends on !PM | 207 | depends on !PM |
224 | 208 | ||
@@ -613,6 +597,3 @@ source "security/Kconfig" | |||
613 | source "crypto/Kconfig" | 597 | source "crypto/Kconfig" |
614 | 598 | ||
615 | source "lib/Kconfig" | 599 | source "lib/Kconfig" |
616 | |||
617 | config IOMMU_HELPER | ||
618 | def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB) | ||
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index cb5cd86a5530..ee5b652d320a 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c | |||
@@ -1845,9 +1845,6 @@ static void ioc_init(unsigned long hpa, struct ioc *ioc) | |||
1845 | ioc_resource_init(ioc); | 1845 | ioc_resource_init(ioc); |
1846 | ioc_sac_init(ioc); | 1846 | ioc_sac_init(ioc); |
1847 | 1847 | ||
1848 | if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask) | ||
1849 | ia64_max_iommu_merge_mask = ~iovp_mask; | ||
1850 | |||
1851 | printk(KERN_INFO PFX | 1848 | printk(KERN_INFO PFX |
1852 | "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n", | 1849 | "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n", |
1853 | ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, | 1850 | ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, |
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index b1d04e8bafc8..780e8744ba85 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h | |||
@@ -30,23 +30,6 @@ struct pci_vector_struct { | |||
30 | #define PCIBIOS_MIN_IO 0x1000 | 30 | #define PCIBIOS_MIN_IO 0x1000 |
31 | #define PCIBIOS_MIN_MEM 0x10000000 | 31 | #define PCIBIOS_MIN_MEM 0x10000000 |
32 | 32 | ||
33 | /* | ||
34 | * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct | ||
35 | * correspondence between device bus addresses and CPU physical addresses. | ||
36 | * Platforms with a hardware I/O MMU _must_ turn this off to suppress the | ||
37 | * bounce buffer handling code in the block and network device layers. | ||
38 | * Platforms with separate bus address spaces _must_ turn this off and provide | ||
39 | * a device DMA mapping implementation that takes care of the necessary | ||
40 | * address translation. | ||
41 | * | ||
42 | * For now, the ia64 platforms which may have separate/multiple bus address | ||
43 | * spaces all have I/O MMUs which support the merging of physically | ||
44 | * discontiguous buffers, so we can use that as the sole factor to determine | ||
45 | * the setting of PCI_DMA_BUS_IS_PHYS. | ||
46 | */ | ||
47 | extern unsigned long ia64_max_iommu_merge_mask; | ||
48 | #define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL) | ||
49 | |||
50 | #define HAVE_PCI_MMAP | 33 | #define HAVE_PCI_MMAP |
51 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE | 34 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE |
52 | #define arch_can_pci_mmap_wc() 1 | 35 | #define arch_can_pci_mmap_wc() 1 |
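[Editor's note] The ia64 hunk above is typical of the PCI_DMA_BUS_IS_PHYS removals repeated across the architectures below, and of the matching .is_phys field dropped from per-arch dma_map_ops (hexagon above, sh further down): instead of a compile-time "bus addresses equal physical addresses" flag consulted by the block and network layers, those layers now leave bounce buffering to the dma-mapping API. A rough driver's-eye sketch, not code from this series — dma_get_mask() and DMA_BIT_MASK() are real helpers from <linux/dma-mapping.h>, the decision logic is only illustrative:

#include <linux/dma-mapping.h>

/*
 * Sketch only: callers ask about the device's DMA addressing limit rather
 * than a global PCI_DMA_BUS_IS_PHYS flag; dma_map_*() (direct, IOMMU or
 * swiotlb backed) then bounces transparently when the limit is exceeded.
 */
static bool dev_dma_limited(struct device *dev)
{
	return dma_get_mask(dev) < DMA_BIT_MASK(64);
}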
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c index f2d57e66fd86..7a471d8d67d4 100644 --- a/arch/ia64/kernel/dma-mapping.c +++ b/arch/ia64/kernel/dma-mapping.c | |||
@@ -9,16 +9,6 @@ int iommu_detected __read_mostly; | |||
9 | const struct dma_map_ops *dma_ops; | 9 | const struct dma_map_ops *dma_ops; |
10 | EXPORT_SYMBOL(dma_ops); | 10 | EXPORT_SYMBOL(dma_ops); |
11 | 11 | ||
12 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
13 | |||
14 | static int __init dma_init(void) | ||
15 | { | ||
16 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
17 | |||
18 | return 0; | ||
19 | } | ||
20 | fs_initcall(dma_init); | ||
21 | |||
22 | const struct dma_map_ops *dma_get_ops(struct device *dev) | 12 | const struct dma_map_ops *dma_get_ops(struct device *dev) |
23 | { | 13 | { |
24 | return dma_ops; | 14 | return dma_ops; |
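[Editor's note] The deleted dma_init() initcall above is the first of several identical removals in this pull (microblaze, mips, openrisc, s390 and sh below, plus the dma_debug_init() call dropped from powerpc's dma_init()): per-arch preallocation of DMA-debug entries is replaced by a single initialization in the shared dma-debug code. Roughly what the common-code side looks like — the function name and initcall level here mirror the removed arch code and may differ slightly from the final lib/dma-debug.c:

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	/* one shared preallocation instead of one fs_initcall per arch */
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
core_initcall(dma_debug_do_init);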
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index dee56bcb993d..ad43cbf70628 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -124,18 +124,6 @@ unsigned long ia64_i_cache_stride_shift = ~0; | |||
124 | unsigned long ia64_cache_stride_shift = ~0; | 124 | unsigned long ia64_cache_stride_shift = ~0; |
125 | 125 | ||
126 | /* | 126 | /* |
127 | * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This | ||
128 | * mask specifies a mask of address bits that must be 0 in order for two buffers to be | ||
129 | * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start | ||
130 | * address of the second buffer must be aligned to (merge_mask+1) in order to be | ||
131 | * mergeable). By default, we assume there is no I/O MMU which can merge physically | ||
132 | * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to a iommu | ||
133 | * page-size of 2^64. | ||
134 | */ | ||
135 | unsigned long ia64_max_iommu_merge_mask = ~0UL; | ||
136 | EXPORT_SYMBOL(ia64_max_iommu_merge_mask); | ||
137 | |||
138 | /* | ||
139 | * We use a special marker for the end of memory and it uses the extra (+1) slot | 127 | * We use a special marker for the end of memory and it uses the extra (+1) slot |
140 | */ | 128 | */ |
141 | struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata; | 129 | struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata; |
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 11f2275570fb..8479e9a7ce16 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c | |||
@@ -480,11 +480,6 @@ sn_io_early_init(void) | |||
480 | tioca_init_provider(); | 480 | tioca_init_provider(); |
481 | tioce_init_provider(); | 481 | tioce_init_provider(); |
482 | 482 | ||
483 | /* | ||
484 | * This is needed to avoid bounce limit checks in the blk layer | ||
485 | */ | ||
486 | ia64_max_iommu_merge_mask = ~PAGE_MASK; | ||
487 | |||
488 | sn_irq_lh_init(); | 483 | sn_irq_lh_init(); |
489 | INIT_LIST_HEAD(&sn_sysdata_list); | 484 | INIT_LIST_HEAD(&sn_sysdata_list); |
490 | sn_init_cpei_timer(); | 485 | sn_init_cpei_timer(); |
diff --git a/arch/m68k/include/asm/pci.h b/arch/m68k/include/asm/pci.h index ef26fae8cf0b..5a4bc223743b 100644 --- a/arch/m68k/include/asm/pci.h +++ b/arch/m68k/include/asm/pci.h | |||
@@ -4,12 +4,6 @@ | |||
4 | 4 | ||
5 | #include <asm-generic/pci.h> | 5 | #include <asm-generic/pci.h> |
6 | 6 | ||
7 | /* The PCI address space does equal the physical memory | ||
8 | * address space. The networking and block device layers use | ||
9 | * this boolean for bounce buffer decisions. | ||
10 | */ | ||
11 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
12 | |||
13 | #define pcibios_assign_all_busses() 1 | 7 | #define pcibios_assign_all_busses() 1 |
14 | 8 | ||
15 | #define PCIBIOS_MIN_IO 0x00000100 | 9 | #define PCIBIOS_MIN_IO 0x00000100 |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 3817a3e2146c..d14782100088 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -19,7 +19,6 @@ config MICROBLAZE | |||
19 | select HAVE_ARCH_HASH | 19 | select HAVE_ARCH_HASH |
20 | select HAVE_ARCH_KGDB | 20 | select HAVE_ARCH_KGDB |
21 | select HAVE_DEBUG_KMEMLEAK | 21 | select HAVE_DEBUG_KMEMLEAK |
22 | select HAVE_DMA_API_DEBUG | ||
23 | select HAVE_DYNAMIC_FTRACE | 22 | select HAVE_DYNAMIC_FTRACE |
24 | select HAVE_FTRACE_MCOUNT_RECORD | 23 | select HAVE_FTRACE_MCOUNT_RECORD |
25 | select HAVE_FUNCTION_GRAPH_TRACER | 24 | select HAVE_FUNCTION_GRAPH_TRACER |
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 5de871eb4a59..00337861472e 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h | |||
@@ -62,12 +62,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus, | |||
62 | 62 | ||
63 | #define HAVE_PCI_LEGACY 1 | 63 | #define HAVE_PCI_LEGACY 1 |
64 | 64 | ||
65 | /* The PCI address space does equal the physical memory | ||
66 | * address space (no IOMMU). The IDE and SCSI device layers use | ||
67 | * this boolean for bounce buffer decisions. | ||
68 | */ | ||
69 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
70 | |||
71 | extern void pcibios_claim_one_bus(struct pci_bus *b); | 65 | extern void pcibios_claim_one_bus(struct pci_bus *b); |
72 | 66 | ||
73 | extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); | 67 | extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); |
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index c91e8cef98dd..3145e7dc8ab1 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
@@ -184,14 +184,3 @@ const struct dma_map_ops dma_nommu_ops = { | |||
184 | .sync_sg_for_device = dma_nommu_sync_sg_for_device, | 184 | .sync_sg_for_device = dma_nommu_sync_sg_for_device, |
185 | }; | 185 | }; |
186 | EXPORT_SYMBOL(dma_nommu_ops); | 186 | EXPORT_SYMBOL(dma_nommu_ops); |
187 | |||
188 | /* Number of entries preallocated for DMA-API debugging */ | ||
189 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
190 | |||
191 | static int __init dma_init(void) | ||
192 | { | ||
193 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | fs_initcall(dma_init); | ||
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 225c95da23ce..2dcdc13cd65d 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -42,7 +42,6 @@ config MIPS | |||
42 | select HAVE_C_RECORDMCOUNT | 42 | select HAVE_C_RECORDMCOUNT |
43 | select HAVE_DEBUG_KMEMLEAK | 43 | select HAVE_DEBUG_KMEMLEAK |
44 | select HAVE_DEBUG_STACKOVERFLOW | 44 | select HAVE_DEBUG_STACKOVERFLOW |
45 | select HAVE_DMA_API_DEBUG | ||
46 | select HAVE_DMA_CONTIGUOUS | 45 | select HAVE_DMA_CONTIGUOUS |
47 | select HAVE_DYNAMIC_FTRACE | 46 | select HAVE_DYNAMIC_FTRACE |
48 | select HAVE_EXIT_THREAD | 47 | select HAVE_EXIT_THREAD |
@@ -132,7 +131,7 @@ config MIPS_GENERIC | |||
132 | 131 | ||
133 | config MIPS_ALCHEMY | 132 | config MIPS_ALCHEMY |
134 | bool "Alchemy processor based machines" | 133 | bool "Alchemy processor based machines" |
135 | select ARCH_PHYS_ADDR_T_64BIT | 134 | select PHYS_ADDR_T_64BIT |
136 | select CEVT_R4K | 135 | select CEVT_R4K |
137 | select CSRC_R4K | 136 | select CSRC_R4K |
138 | select IRQ_MIPS_CPU | 137 | select IRQ_MIPS_CPU |
@@ -890,7 +889,7 @@ config CAVIUM_OCTEON_SOC | |||
890 | bool "Cavium Networks Octeon SoC based boards" | 889 | bool "Cavium Networks Octeon SoC based boards" |
891 | select CEVT_R4K | 890 | select CEVT_R4K |
892 | select ARCH_HAS_PHYS_TO_DMA | 891 | select ARCH_HAS_PHYS_TO_DMA |
893 | select ARCH_PHYS_ADDR_T_64BIT | 892 | select PHYS_ADDR_T_64BIT |
894 | select DMA_COHERENT | 893 | select DMA_COHERENT |
895 | select SYS_SUPPORTS_64BIT_KERNEL | 894 | select SYS_SUPPORTS_64BIT_KERNEL |
896 | select SYS_SUPPORTS_BIG_ENDIAN | 895 | select SYS_SUPPORTS_BIG_ENDIAN |
@@ -912,6 +911,7 @@ config CAVIUM_OCTEON_SOC | |||
912 | select MIPS_NR_CPU_NR_MAP_1024 | 911 | select MIPS_NR_CPU_NR_MAP_1024 |
913 | select BUILTIN_DTB | 912 | select BUILTIN_DTB |
914 | select MTD_COMPLEX_MAPPINGS | 913 | select MTD_COMPLEX_MAPPINGS |
914 | select SWIOTLB | ||
915 | select SYS_SUPPORTS_RELOCATABLE | 915 | select SYS_SUPPORTS_RELOCATABLE |
916 | help | 916 | help |
917 | This option supports all of the Octeon reference boards from Cavium | 917 | This option supports all of the Octeon reference boards from Cavium |
@@ -936,7 +936,7 @@ config NLM_XLR_BOARD | |||
936 | select SWAP_IO_SPACE | 936 | select SWAP_IO_SPACE |
937 | select SYS_SUPPORTS_32BIT_KERNEL | 937 | select SYS_SUPPORTS_32BIT_KERNEL |
938 | select SYS_SUPPORTS_64BIT_KERNEL | 938 | select SYS_SUPPORTS_64BIT_KERNEL |
939 | select ARCH_PHYS_ADDR_T_64BIT | 939 | select PHYS_ADDR_T_64BIT |
940 | select SYS_SUPPORTS_BIG_ENDIAN | 940 | select SYS_SUPPORTS_BIG_ENDIAN |
941 | select SYS_SUPPORTS_HIGHMEM | 941 | select SYS_SUPPORTS_HIGHMEM |
942 | select DMA_COHERENT | 942 | select DMA_COHERENT |
@@ -962,7 +962,7 @@ config NLM_XLP_BOARD | |||
962 | select HW_HAS_PCI | 962 | select HW_HAS_PCI |
963 | select SYS_SUPPORTS_32BIT_KERNEL | 963 | select SYS_SUPPORTS_32BIT_KERNEL |
964 | select SYS_SUPPORTS_64BIT_KERNEL | 964 | select SYS_SUPPORTS_64BIT_KERNEL |
965 | select ARCH_PHYS_ADDR_T_64BIT | 965 | select PHYS_ADDR_T_64BIT |
966 | select GPIOLIB | 966 | select GPIOLIB |
967 | select SYS_SUPPORTS_BIG_ENDIAN | 967 | select SYS_SUPPORTS_BIG_ENDIAN |
968 | select SYS_SUPPORTS_LITTLE_ENDIAN | 968 | select SYS_SUPPORTS_LITTLE_ENDIAN |
@@ -1101,9 +1101,6 @@ config GPIO_TXX9 | |||
1101 | config FW_CFE | 1101 | config FW_CFE |
1102 | bool | 1102 | bool |
1103 | 1103 | ||
1104 | config ARCH_DMA_ADDR_T_64BIT | ||
1105 | def_bool (HIGHMEM && ARCH_PHYS_ADDR_T_64BIT) || 64BIT | ||
1106 | |||
1107 | config ARCH_SUPPORTS_UPROBES | 1104 | config ARCH_SUPPORTS_UPROBES |
1108 | bool | 1105 | bool |
1109 | 1106 | ||
@@ -1122,9 +1119,6 @@ config DMA_NONCOHERENT | |||
1122 | bool | 1119 | bool |
1123 | select NEED_DMA_MAP_STATE | 1120 | select NEED_DMA_MAP_STATE |
1124 | 1121 | ||
1125 | config NEED_DMA_MAP_STATE | ||
1126 | bool | ||
1127 | |||
1128 | config SYS_HAS_EARLY_PRINTK | 1122 | config SYS_HAS_EARLY_PRINTK |
1129 | bool | 1123 | bool |
1130 | 1124 | ||
@@ -1373,6 +1367,7 @@ config CPU_LOONGSON3 | |||
1373 | select MIPS_PGD_C0_CONTEXT | 1367 | select MIPS_PGD_C0_CONTEXT |
1374 | select MIPS_L1_CACHE_SHIFT_6 | 1368 | select MIPS_L1_CACHE_SHIFT_6 |
1375 | select GPIOLIB | 1369 | select GPIOLIB |
1370 | select SWIOTLB | ||
1376 | help | 1371 | help |
1377 | The Loongson 3 processor implements the MIPS64R2 instruction | 1372 | The Loongson 3 processor implements the MIPS64R2 instruction |
1378 | set with many extensions. | 1373 | set with many extensions. |
@@ -1770,7 +1765,7 @@ config CPU_MIPS32_R5_XPA | |||
1770 | depends on SYS_SUPPORTS_HIGHMEM | 1765 | depends on SYS_SUPPORTS_HIGHMEM |
1771 | select XPA | 1766 | select XPA |
1772 | select HIGHMEM | 1767 | select HIGHMEM |
1773 | select ARCH_PHYS_ADDR_T_64BIT | 1768 | select PHYS_ADDR_T_64BIT |
1774 | default n | 1769 | default n |
1775 | help | 1770 | help |
1776 | Choose this option if you want to enable the Extended Physical | 1771 | Choose this option if you want to enable the Extended Physical |
@@ -2402,9 +2397,6 @@ config SB1_PASS_2_1_WORKAROUNDS | |||
2402 | default y | 2397 | default y |
2403 | 2398 | ||
2404 | 2399 | ||
2405 | config ARCH_PHYS_ADDR_T_64BIT | ||
2406 | bool | ||
2407 | |||
2408 | choice | 2400 | choice |
2409 | prompt "SmartMIPS or microMIPS ASE support" | 2401 | prompt "SmartMIPS or microMIPS ASE support" |
2410 | 2402 | ||
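[Editor's note] The MIPS hunks above show another recurring pattern in this pull: platform options now select the generic PHYS_ADDR_T_64BIT symbol, and the per-arch ARCH_DMA_ADDR_T_64BIT definitions (here and in ia64, powerpc, s390 and sparc elsewhere in this merge) disappear because the generic Kconfig derives the DMA address width from the physical address width. The effect lands in the long-standing include/linux/types.h pattern, shown here for context only (not new code from this series):

/* paraphrased from include/linux/types.h: dma_addr_t follows the derived
 * ARCH_DMA_ADDR_T_64BIT symbol rather than per-arch Kconfig boilerplate */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif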
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index b5eee1a57d6c..4984e462be30 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig | |||
@@ -67,18 +67,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY | |||
67 | help | 67 | help |
68 | Lock the kernel's implementation of memcpy() into L2. | 68 | Lock the kernel's implementation of memcpy() into L2. |
69 | 69 | ||
70 | config IOMMU_HELPER | ||
71 | bool | ||
72 | |||
73 | config NEED_SG_DMA_LENGTH | ||
74 | bool | ||
75 | |||
76 | config SWIOTLB | ||
77 | def_bool y | ||
78 | select DMA_DIRECT_OPS | ||
79 | select IOMMU_HELPER | ||
80 | select NEED_SG_DMA_LENGTH | ||
81 | |||
82 | config OCTEON_ILM | 70 | config OCTEON_ILM |
83 | tristate "Module to measure interrupt latency using Octeon CIU Timer" | 71 | tristate "Module to measure interrupt latency using Octeon CIU Timer" |
84 | help | 72 | help |
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 2339f42f047a..436099883022 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h | |||
@@ -121,13 +121,6 @@ extern unsigned long PCIBIOS_MIN_MEM; | |||
121 | #include <linux/string.h> | 121 | #include <linux/string.h> |
122 | #include <asm/io.h> | 122 | #include <asm/io.h> |
123 | 123 | ||
124 | /* | ||
125 | * The PCI address space does equal the physical memory address space. | ||
126 | * The networking and block device layers use this boolean for bounce | ||
127 | * buffer decisions. | ||
128 | */ | ||
129 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
130 | |||
131 | #ifdef CONFIG_PCI_DOMAINS_GENERIC | 124 | #ifdef CONFIG_PCI_DOMAINS_GENERIC |
132 | static inline int pci_proc_domain(struct pci_bus *bus) | 125 | static inline int pci_proc_domain(struct pci_bus *bus) |
133 | { | 126 | { |
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig index 72af0c183969..c79e6a565572 100644 --- a/arch/mips/loongson64/Kconfig +++ b/arch/mips/loongson64/Kconfig | |||
@@ -130,21 +130,6 @@ config LOONGSON_UART_BASE | |||
130 | default y | 130 | default y |
131 | depends on EARLY_PRINTK || SERIAL_8250 | 131 | depends on EARLY_PRINTK || SERIAL_8250 |
132 | 132 | ||
133 | config IOMMU_HELPER | ||
134 | bool | ||
135 | |||
136 | config NEED_SG_DMA_LENGTH | ||
137 | bool | ||
138 | |||
139 | config SWIOTLB | ||
140 | bool "Soft IOMMU Support for All-Memory DMA" | ||
141 | default y | ||
142 | depends on CPU_LOONGSON3 | ||
143 | select DMA_DIRECT_OPS | ||
144 | select IOMMU_HELPER | ||
145 | select NEED_SG_DMA_LENGTH | ||
146 | select NEED_DMA_MAP_STATE | ||
147 | |||
148 | config PHYS48_TO_HT40 | 133 | config PHYS48_TO_HT40 |
149 | bool | 134 | bool |
150 | default y if CPU_LOONGSON3 | 135 | default y if CPU_LOONGSON3 |
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index dcafa43613b6..f9fef0028ca2 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c | |||
@@ -402,13 +402,3 @@ static const struct dma_map_ops mips_default_dma_map_ops = { | |||
402 | 402 | ||
403 | const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; | 403 | const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; |
404 | EXPORT_SYMBOL(mips_dma_map_ops); | 404 | EXPORT_SYMBOL(mips_dma_map_ops); |
405 | |||
406 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
407 | |||
408 | static int __init mips_dma_init(void) | ||
409 | { | ||
410 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | fs_initcall(mips_dma_init); | ||
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig index 7fcfc7fe9f14..412351c5acc6 100644 --- a/arch/mips/netlogic/Kconfig +++ b/arch/mips/netlogic/Kconfig | |||
@@ -83,10 +83,4 @@ endif | |||
83 | config NLM_COMMON | 83 | config NLM_COMMON |
84 | bool | 84 | bool |
85 | 85 | ||
86 | config IOMMU_HELPER | ||
87 | bool | ||
88 | |||
89 | config NEED_SG_DMA_LENGTH | ||
90 | bool | ||
91 | |||
92 | endif | 86 | endif |
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index b7404f2dcf5b..6aed974276d8 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig | |||
@@ -5,10 +5,13 @@ | |||
5 | 5 | ||
6 | config NDS32 | 6 | config NDS32 |
7 | def_bool y | 7 | def_bool y |
8 | select ARCH_HAS_SYNC_DMA_FOR_CPU | ||
9 | select ARCH_HAS_SYNC_DMA_FOR_DEVICE | ||
8 | select ARCH_WANT_FRAME_POINTERS if FTRACE | 10 | select ARCH_WANT_FRAME_POINTERS if FTRACE |
9 | select CLKSRC_MMIO | 11 | select CLKSRC_MMIO |
10 | select CLONE_BACKWARDS | 12 | select CLONE_BACKWARDS |
11 | select COMMON_CLK | 13 | select COMMON_CLK |
14 | select DMA_NONCOHERENT_OPS | ||
12 | select GENERIC_ASHLDI3 | 15 | select GENERIC_ASHLDI3 |
13 | select GENERIC_ASHRDI3 | 16 | select GENERIC_ASHRDI3 |
14 | select GENERIC_LSHRDI3 | 17 | select GENERIC_LSHRDI3 |
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild index 142e612aa639..6f5cc29ed07f 100644 --- a/arch/nds32/include/asm/Kbuild +++ b/arch/nds32/include/asm/Kbuild | |||
@@ -13,6 +13,7 @@ generic-y += cputime.h | |||
13 | generic-y += device.h | 13 | generic-y += device.h |
14 | generic-y += div64.h | 14 | generic-y += div64.h |
15 | generic-y += dma.h | 15 | generic-y += dma.h |
16 | generic-y += dma-mapping.h | ||
16 | generic-y += emergency-restart.h | 17 | generic-y += emergency-restart.h |
17 | generic-y += errno.h | 18 | generic-y += errno.h |
18 | generic-y += exec.h | 19 | generic-y += exec.h |
diff --git a/arch/nds32/include/asm/dma-mapping.h b/arch/nds32/include/asm/dma-mapping.h deleted file mode 100644 index 2dd47d245c25..000000000000 --- a/arch/nds32/include/asm/dma-mapping.h +++ /dev/null | |||
@@ -1,14 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
3 | |||
4 | #ifndef ASMNDS32_DMA_MAPPING_H | ||
5 | #define ASMNDS32_DMA_MAPPING_H | ||
6 | |||
7 | extern struct dma_map_ops nds32_dma_ops; | ||
8 | |||
9 | static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | ||
10 | { | ||
11 | return &nds32_dma_ops; | ||
12 | } | ||
13 | |||
14 | #endif | ||
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c index d291800fc621..d0dbd4fe9645 100644 --- a/arch/nds32/kernel/dma.c +++ b/arch/nds32/kernel/dma.c | |||
@@ -3,17 +3,14 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <linux/export.h> | ||
7 | #include <linux/string.h> | 6 | #include <linux/string.h> |
8 | #include <linux/scatterlist.h> | 7 | #include <linux/dma-noncoherent.h> |
9 | #include <linux/dma-mapping.h> | ||
10 | #include <linux/io.h> | 8 | #include <linux/io.h> |
11 | #include <linux/cache.h> | 9 | #include <linux/cache.h> |
12 | #include <linux/highmem.h> | 10 | #include <linux/highmem.h> |
13 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
14 | #include <asm/cacheflush.h> | 12 | #include <asm/cacheflush.h> |
15 | #include <asm/tlbflush.h> | 13 | #include <asm/tlbflush.h> |
16 | #include <asm/dma-mapping.h> | ||
17 | #include <asm/proc-fns.h> | 14 | #include <asm/proc-fns.h> |
18 | 15 | ||
19 | /* | 16 | /* |
@@ -22,11 +19,6 @@ | |||
22 | static pte_t *consistent_pte; | 19 | static pte_t *consistent_pte; |
23 | static DEFINE_RAW_SPINLOCK(consistent_lock); | 20 | static DEFINE_RAW_SPINLOCK(consistent_lock); |
24 | 21 | ||
25 | enum master_type { | ||
26 | FOR_CPU = 0, | ||
27 | FOR_DEVICE = 1, | ||
28 | }; | ||
29 | |||
30 | /* | 22 | /* |
31 | * VM region handling support. | 23 | * VM region handling support. |
32 | * | 24 | * |
@@ -124,10 +116,8 @@ out: | |||
124 | return c; | 116 | return c; |
125 | } | 117 | } |
126 | 118 | ||
127 | /* FIXME: attrs is not used. */ | 119 | void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
128 | static void *nds32_dma_alloc_coherent(struct device *dev, size_t size, | 120 | gfp_t gfp, unsigned long attrs) |
129 | dma_addr_t * handle, gfp_t gfp, | ||
130 | unsigned long attrs) | ||
131 | { | 121 | { |
132 | struct page *page; | 122 | struct page *page; |
133 | struct arch_vm_region *c; | 123 | struct arch_vm_region *c; |
@@ -232,8 +222,8 @@ no_page: | |||
232 | return NULL; | 222 | return NULL; |
233 | } | 223 | } |
234 | 224 | ||
235 | static void nds32_dma_free(struct device *dev, size_t size, void *cpu_addr, | 225 | void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, |
236 | dma_addr_t handle, unsigned long attrs) | 226 | dma_addr_t handle, unsigned long attrs) |
237 | { | 227 | { |
238 | struct arch_vm_region *c; | 228 | struct arch_vm_region *c; |
239 | unsigned long flags, addr; | 229 | unsigned long flags, addr; |
@@ -333,145 +323,69 @@ static int __init consistent_init(void) | |||
333 | } | 323 | } |
334 | 324 | ||
335 | core_initcall(consistent_init); | 325 | core_initcall(consistent_init); |
336 | static void consistent_sync(void *vaddr, size_t size, int direction, int master_type); | ||
337 | static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page, | ||
338 | unsigned long offset, size_t size, | ||
339 | enum dma_data_direction dir, | ||
340 | unsigned long attrs) | ||
341 | { | ||
342 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
343 | consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE); | ||
344 | return page_to_phys(page) + offset; | ||
345 | } | ||
346 | |||
347 | static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle, | ||
348 | size_t size, enum dma_data_direction dir, | ||
349 | unsigned long attrs) | ||
350 | { | ||
351 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
352 | consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU); | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Make an area consistent for devices. | ||
357 | */ | ||
358 | static void consistent_sync(void *vaddr, size_t size, int direction, int master_type) | ||
359 | { | ||
360 | unsigned long start = (unsigned long)vaddr; | ||
361 | unsigned long end = start + size; | ||
362 | |||
363 | if (master_type == FOR_CPU) { | ||
364 | switch (direction) { | ||
365 | case DMA_TO_DEVICE: | ||
366 | break; | ||
367 | case DMA_FROM_DEVICE: | ||
368 | case DMA_BIDIRECTIONAL: | ||
369 | cpu_dma_inval_range(start, end); | ||
370 | break; | ||
371 | default: | ||
372 | BUG(); | ||
373 | } | ||
374 | } else { | ||
375 | /* FOR_DEVICE */ | ||
376 | switch (direction) { | ||
377 | case DMA_FROM_DEVICE: | ||
378 | break; | ||
379 | case DMA_TO_DEVICE: | ||
380 | case DMA_BIDIRECTIONAL: | ||
381 | cpu_dma_wb_range(start, end); | ||
382 | break; | ||
383 | default: | ||
384 | BUG(); | ||
385 | } | ||
386 | } | ||
387 | } | ||
388 | 326 | ||
389 | static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg, | 327 | static inline void cache_op(phys_addr_t paddr, size_t size, |
390 | int nents, enum dma_data_direction dir, | 328 | void (*fn)(unsigned long start, unsigned long end)) |
391 | unsigned long attrs) | ||
392 | { | 329 | { |
393 | int i; | 330 | struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); |
331 | unsigned offset = paddr & ~PAGE_MASK; | ||
332 | size_t left = size; | ||
333 | unsigned long start; | ||
394 | 334 | ||
395 | for (i = 0; i < nents; i++, sg++) { | 335 | do { |
396 | void *virt; | 336 | size_t len = left; |
397 | unsigned long pfn; | ||
398 | struct page *page = sg_page(sg); | ||
399 | 337 | ||
400 | sg->dma_address = sg_phys(sg); | ||
401 | pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE; | ||
402 | page = pfn_to_page(pfn); | ||
403 | if (PageHighMem(page)) { | 338 | if (PageHighMem(page)) { |
404 | virt = kmap_atomic(page); | 339 | void *addr; |
405 | consistent_sync(virt, sg->length, dir, FOR_CPU); | 340 | |
406 | kunmap_atomic(virt); | 341 | if (offset + len > PAGE_SIZE) { |
342 | if (offset >= PAGE_SIZE) { | ||
343 | page += offset >> PAGE_SHIFT; | ||
344 | offset &= ~PAGE_MASK; | ||
345 | } | ||
346 | len = PAGE_SIZE - offset; | ||
347 | } | ||
348 | |||
349 | addr = kmap_atomic(page); | ||
350 | start = (unsigned long)(addr + offset); | ||
351 | fn(start, start + len); | ||
352 | kunmap_atomic(addr); | ||
407 | } else { | 353 | } else { |
408 | if (sg->offset > PAGE_SIZE) | 354 | start = (unsigned long)phys_to_virt(paddr); |
409 | panic("sg->offset:%08x > PAGE_SIZE\n", | 355 | fn(start, start + size); |
410 | sg->offset); | ||
411 | virt = page_address(page) + sg->offset; | ||
412 | consistent_sync(virt, sg->length, dir, FOR_CPU); | ||
413 | } | 356 | } |
414 | } | 357 | offset = 0; |
415 | return nents; | 358 | page++; |
359 | left -= len; | ||
360 | } while (left); | ||
416 | } | 361 | } |
417 | 362 | ||
418 | static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | 363 | void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, |
419 | int nhwentries, enum dma_data_direction dir, | 364 | size_t size, enum dma_data_direction dir) |
420 | unsigned long attrs) | ||
421 | { | 365 | { |
422 | } | 366 | switch (dir) { |
423 | 367 | case DMA_FROM_DEVICE: | |
424 | static void | 368 | break; |
425 | nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, | 369 | case DMA_TO_DEVICE: |
426 | size_t size, enum dma_data_direction dir) | 370 | case DMA_BIDIRECTIONAL: |
427 | { | 371 | cache_op(paddr, size, cpu_dma_wb_range); |
428 | consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU); | 372 | break; |
429 | } | 373 | default: |
430 | 374 | BUG(); | |
431 | static void | ||
432 | nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle, | ||
433 | size_t size, enum dma_data_direction dir) | ||
434 | { | ||
435 | consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE); | ||
436 | } | ||
437 | |||
438 | static void | ||
439 | nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, | ||
440 | enum dma_data_direction dir) | ||
441 | { | ||
442 | int i; | ||
443 | |||
444 | for (i = 0; i < nents; i++, sg++) { | ||
445 | char *virt = | ||
446 | page_address((struct page *)sg->page_link) + sg->offset; | ||
447 | consistent_sync(virt, sg->length, dir, FOR_CPU); | ||
448 | } | 375 | } |
449 | } | 376 | } |
450 | 377 | ||
451 | static void | 378 | void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, |
452 | nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 379 | size_t size, enum dma_data_direction dir) |
453 | int nents, enum dma_data_direction dir) | ||
454 | { | 380 | { |
455 | int i; | 381 | switch (dir) { |
456 | 382 | case DMA_TO_DEVICE: | |
457 | for (i = 0; i < nents; i++, sg++) { | 383 | break; |
458 | char *virt = | 384 | case DMA_FROM_DEVICE: |
459 | page_address((struct page *)sg->page_link) + sg->offset; | 385 | case DMA_BIDIRECTIONAL: |
460 | consistent_sync(virt, sg->length, dir, FOR_DEVICE); | 386 | cache_op(paddr, size, cpu_dma_inval_range); |
387 | break; | ||
388 | default: | ||
389 | BUG(); | ||
461 | } | 390 | } |
462 | } | 391 | } |
463 | |||
464 | struct dma_map_ops nds32_dma_ops = { | ||
465 | .alloc = nds32_dma_alloc_coherent, | ||
466 | .free = nds32_dma_free, | ||
467 | .map_page = nds32_dma_map_page, | ||
468 | .unmap_page = nds32_dma_unmap_page, | ||
469 | .map_sg = nds32_dma_map_sg, | ||
470 | .unmap_sg = nds32_dma_unmap_sg, | ||
471 | .sync_single_for_device = nds32_dma_sync_single_for_device, | ||
472 | .sync_single_for_cpu = nds32_dma_sync_single_for_cpu, | ||
473 | .sync_sg_for_cpu = nds32_dma_sync_sg_for_cpu, | ||
474 | .sync_sg_for_device = nds32_dma_sync_sg_for_device, | ||
475 | }; | ||
476 | |||
477 | EXPORT_SYMBOL(nds32_dma_ops); | ||
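[Editor's note] With the conversion above, nds32 keeps only its cache-maintenance hooks (arch_sync_dma_for_device/arch_sync_dma_for_cpu, plus arch_dma_alloc/arch_dma_free) while the dma_map_ops boilerplate moves into the shared dma-noncoherent code selected via DMA_NONCOHERENT_OPS; c6x and arc are converted the same way elsewhere in this merge. A simplified sketch of the shared map_page path, reconstructed from memory of the lib/dma-noncoherent.c added in this series and possibly differing in detail:

#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>

static dma_addr_t nc_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t addr;

	/* address translation is shared with dma-direct ... */
	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	/* ... and only the cache maintenance is left to the architecture */
	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
					 size, dir);
	return addr;
}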
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index a945f00011b4..ec7fd45704d2 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c | |||
@@ -247,14 +247,3 @@ const struct dma_map_ops or1k_dma_map_ops = { | |||
247 | .sync_single_for_device = or1k_sync_single_for_device, | 247 | .sync_single_for_device = or1k_sync_single_for_device, |
248 | }; | 248 | }; |
249 | EXPORT_SYMBOL(or1k_dma_map_ops); | 249 | EXPORT_SYMBOL(or1k_dma_map_ops); |
250 | |||
251 | /* Number of entries preallocated for DMA-API debugging */ | ||
252 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
253 | |||
254 | static int __init dma_init(void) | ||
255 | { | ||
256 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
257 | |||
258 | return 0; | ||
259 | } | ||
260 | fs_initcall(dma_init); | ||
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index fc5a574c3482..4d8f64d48597 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -51,6 +51,8 @@ config PARISC | |||
51 | select GENERIC_CLOCKEVENTS | 51 | select GENERIC_CLOCKEVENTS |
52 | select ARCH_NO_COHERENT_DMA_MMAP | 52 | select ARCH_NO_COHERENT_DMA_MMAP |
53 | select CPU_NO_EFFICIENT_FFS | 53 | select CPU_NO_EFFICIENT_FFS |
54 | select NEED_DMA_MAP_STATE | ||
55 | select NEED_SG_DMA_LENGTH | ||
54 | 56 | ||
55 | help | 57 | help |
56 | The PA-RISC microprocessor is designed by Hewlett-Packard and used | 58 | The PA-RISC microprocessor is designed by Hewlett-Packard and used |
@@ -111,12 +113,6 @@ config PM | |||
111 | config STACKTRACE_SUPPORT | 113 | config STACKTRACE_SUPPORT |
112 | def_bool y | 114 | def_bool y |
113 | 115 | ||
114 | config NEED_DMA_MAP_STATE | ||
115 | def_bool y | ||
116 | |||
117 | config NEED_SG_DMA_LENGTH | ||
118 | def_bool y | ||
119 | |||
120 | config ISA_DMA_API | 116 | config ISA_DMA_API |
121 | bool | 117 | bool |
122 | 118 | ||
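[Editor's note] parisc (like ia64 and sh in this section, sparc below, and s390's PCI option) now selects NEED_DMA_MAP_STATE and NEED_SG_DMA_LENGTH instead of defining them locally; the symbols themselves live once in the generic Kconfig. For context, NEED_DMA_MAP_STATE gates the driver-side unmap-state helpers in <linux/dma-mapping.h>, along these long-standing lines (shown for reference, not part of this diff):

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#else
/* without the symbol, stored unmap state compiles away to nothing */
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#endif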
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 96b7deec512d..3328fd17c19d 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h | |||
@@ -88,29 +88,6 @@ struct pci_hba_data { | |||
88 | #endif /* !CONFIG_64BIT */ | 88 | #endif /* !CONFIG_64BIT */ |
89 | 89 | ||
90 | /* | 90 | /* |
91 | * If the PCI device's view of memory is the same as the CPU's view of memory, | ||
92 | * PCI_DMA_BUS_IS_PHYS is true. The networking and block device layers use | ||
93 | * this boolean for bounce buffer decisions. | ||
94 | */ | ||
95 | #ifdef CONFIG_PA20 | ||
96 | /* All PA-2.0 machines have an IOMMU. */ | ||
97 | #define PCI_DMA_BUS_IS_PHYS 0 | ||
98 | #define parisc_has_iommu() do { } while (0) | ||
99 | #else | ||
100 | |||
101 | #if defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA) | ||
102 | extern int parisc_bus_is_phys; /* in arch/parisc/kernel/setup.c */ | ||
103 | #define PCI_DMA_BUS_IS_PHYS parisc_bus_is_phys | ||
104 | #define parisc_has_iommu() do { parisc_bus_is_phys = 0; } while (0) | ||
105 | #else | ||
106 | #define PCI_DMA_BUS_IS_PHYS 1 | ||
107 | #define parisc_has_iommu() do { } while (0) | ||
108 | #endif | ||
109 | |||
110 | #endif /* !CONFIG_PA20 */ | ||
111 | |||
112 | |||
113 | /* | ||
114 | ** Most PCI devices (eg Tulip, NCR720) also export the same registers | 91 | ** Most PCI devices (eg Tulip, NCR720) also export the same registers |
115 | ** to both MMIO and I/O port space. Due to poor performance of I/O Port | 92 | ** to both MMIO and I/O port space. Due to poor performance of I/O Port |
116 | ** access under HP PCI bus adapters, strongly recommend the use of MMIO | 93 | ** access under HP PCI bus adapters, strongly recommend the use of MMIO |
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 0e9675f857a5..8d3a7b80ac42 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
@@ -58,11 +58,6 @@ struct proc_dir_entry * proc_runway_root __read_mostly = NULL; | |||
58 | struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; | 58 | struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; |
59 | struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; | 59 | struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; |
60 | 60 | ||
61 | #if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)) | ||
62 | int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */ | ||
63 | EXPORT_SYMBOL(parisc_bus_is_phys); | ||
64 | #endif | ||
65 | |||
66 | void __init setup_cmdline(char **cmdline_p) | 61 | void __init setup_cmdline(char **cmdline_p) |
67 | { | 62 | { |
68 | extern unsigned int boot_args[]; | 63 | extern unsigned int boot_args[]; |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index c32a181a7cbb..268fd46fc3c7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -13,12 +13,6 @@ config 64BIT | |||
13 | bool | 13 | bool |
14 | default y if PPC64 | 14 | default y if PPC64 |
15 | 15 | ||
16 | config ARCH_PHYS_ADDR_T_64BIT | ||
17 | def_bool PPC64 || PHYS_64BIT | ||
18 | |||
19 | config ARCH_DMA_ADDR_T_64BIT | ||
20 | def_bool ARCH_PHYS_ADDR_T_64BIT | ||
21 | |||
22 | config MMU | 16 | config MMU |
23 | bool | 17 | bool |
24 | default y | 18 | default y |
@@ -187,7 +181,6 @@ config PPC | |||
187 | select HAVE_CONTEXT_TRACKING if PPC64 | 181 | select HAVE_CONTEXT_TRACKING if PPC64 |
188 | select HAVE_DEBUG_KMEMLEAK | 182 | select HAVE_DEBUG_KMEMLEAK |
189 | select HAVE_DEBUG_STACKOVERFLOW | 183 | select HAVE_DEBUG_STACKOVERFLOW |
190 | select HAVE_DMA_API_DEBUG | ||
191 | select HAVE_DYNAMIC_FTRACE | 184 | select HAVE_DYNAMIC_FTRACE |
192 | select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL | 185 | select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL |
193 | select HAVE_EBPF_JIT if PPC64 | 186 | select HAVE_EBPF_JIT if PPC64 |
@@ -223,9 +216,11 @@ config PPC | |||
223 | select HAVE_SYSCALL_TRACEPOINTS | 216 | select HAVE_SYSCALL_TRACEPOINTS |
224 | select HAVE_VIRT_CPU_ACCOUNTING | 217 | select HAVE_VIRT_CPU_ACCOUNTING |
225 | select HAVE_IRQ_TIME_ACCOUNTING | 218 | select HAVE_IRQ_TIME_ACCOUNTING |
219 | select IOMMU_HELPER if PPC64 | ||
226 | select IRQ_DOMAIN | 220 | select IRQ_DOMAIN |
227 | select IRQ_FORCED_THREADING | 221 | select IRQ_FORCED_THREADING |
228 | select MODULES_USE_ELF_RELA | 222 | select MODULES_USE_ELF_RELA |
223 | select NEED_SG_DMA_LENGTH | ||
229 | select NO_BOOTMEM | 224 | select NO_BOOTMEM |
230 | select OF | 225 | select OF |
231 | select OF_EARLY_FLATTREE | 226 | select OF_EARLY_FLATTREE |
@@ -478,19 +473,6 @@ config MPROFILE_KERNEL | |||
478 | depends on PPC64 && CPU_LITTLE_ENDIAN | 473 | depends on PPC64 && CPU_LITTLE_ENDIAN |
479 | def_bool !DISABLE_MPROFILE_KERNEL | 474 | def_bool !DISABLE_MPROFILE_KERNEL |
480 | 475 | ||
481 | config IOMMU_HELPER | ||
482 | def_bool PPC64 | ||
483 | |||
484 | config SWIOTLB | ||
485 | bool "SWIOTLB support" | ||
486 | default n | ||
487 | select IOMMU_HELPER | ||
488 | ---help--- | ||
489 | Support for IO bounce buffering for systems without an IOMMU. | ||
490 | This allows us to DMA to the full physical address space on | ||
491 | platforms where the size of a physical address is larger | ||
492 | than the bus address. Not all platforms support this. | ||
493 | |||
494 | config HOTPLUG_CPU | 476 | config HOTPLUG_CPU |
495 | bool "Support for enabling/disabling CPUs" | 477 | bool "Support for enabling/disabling CPUs" |
496 | depends on SMP && (PPC_PSERIES || \ | 478 | depends on SMP && (PPC_PSERIES || \ |
@@ -913,9 +895,6 @@ config ZONE_DMA | |||
913 | config NEED_DMA_MAP_STATE | 895 | config NEED_DMA_MAP_STATE |
914 | def_bool (PPC64 || NOT_COHERENT_CACHE) | 896 | def_bool (PPC64 || NOT_COHERENT_CACHE) |
915 | 897 | ||
916 | config NEED_SG_DMA_LENGTH | ||
917 | def_bool y | ||
918 | |||
919 | config GENERIC_ISA_DMA | 898 | config GENERIC_ISA_DMA |
920 | bool | 899 | bool |
921 | depends on ISA_DMA_API | 900 | depends on ISA_DMA_API |
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 401c62aad5e4..2af9ded80540 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h | |||
@@ -92,24 +92,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus, | |||
92 | 92 | ||
93 | #define HAVE_PCI_LEGACY 1 | 93 | #define HAVE_PCI_LEGACY 1 |
94 | 94 | ||
95 | #ifdef CONFIG_PPC64 | ||
96 | |||
97 | /* The PCI address space does not equal the physical memory address | ||
98 | * space (we have an IOMMU). The IDE and SCSI device layers use | ||
99 | * this boolean for bounce buffer decisions. | ||
100 | */ | ||
101 | #define PCI_DMA_BUS_IS_PHYS (0) | ||
102 | |||
103 | #else /* 32-bit */ | ||
104 | |||
105 | /* The PCI address space does equal the physical memory | ||
106 | * address space (no IOMMU). The IDE and SCSI device layers use | ||
107 | * this boolean for bounce buffer decisions. | ||
108 | */ | ||
109 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
110 | |||
111 | #endif /* CONFIG_PPC64 */ | ||
112 | |||
113 | extern void pcibios_claim_one_bus(struct pci_bus *b); | 95 | extern void pcibios_claim_one_bus(struct pci_bus *b); |
114 | 96 | ||
115 | extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); | 97 | extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); |
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index da20569de9d4..138157deeadf 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
@@ -309,8 +309,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask) | |||
309 | } | 309 | } |
310 | EXPORT_SYMBOL(dma_set_coherent_mask); | 310 | EXPORT_SYMBOL(dma_set_coherent_mask); |
311 | 311 | ||
312 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
313 | |||
314 | int dma_set_mask(struct device *dev, u64 dma_mask) | 312 | int dma_set_mask(struct device *dev, u64 dma_mask) |
315 | { | 313 | { |
316 | if (ppc_md.dma_set_mask) | 314 | if (ppc_md.dma_set_mask) |
@@ -361,7 +359,6 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask); | |||
361 | 359 | ||
362 | static int __init dma_init(void) | 360 | static int __init dma_init(void) |
363 | { | 361 | { |
364 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
365 | #ifdef CONFIG_PCI | 362 | #ifdef CONFIG_PCI |
366 | dma_debug_add_bus(&pci_bus_type); | 363 | dma_debug_add_bus(&pci_bus_type); |
367 | #endif | 364 | #endif |
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 67d3125d0610..84b58abc08ee 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
@@ -222,6 +222,7 @@ config PTE_64BIT | |||
222 | config PHYS_64BIT | 222 | config PHYS_64BIT |
223 | bool 'Large physical address support' if E500 || PPC_86xx | 223 | bool 'Large physical address support' if E500 || PPC_86xx |
224 | depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx | 224 | depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx |
225 | select PHYS_ADDR_T_64BIT | ||
225 | ---help--- | 226 | ---help--- |
226 | This option enables kernel support for larger than 32-bit physical | 227 | This option enables kernel support for larger than 32-bit physical |
227 | addresses. This feature may not be available on all cores. | 228 | addresses. This feature may not be available on all cores. |
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index cd4fd85fde84..274bc064c41f 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig | |||
@@ -3,8 +3,16 @@ | |||
3 | # see Documentation/kbuild/kconfig-language.txt. | 3 | # see Documentation/kbuild/kconfig-language.txt. |
4 | # | 4 | # |
5 | 5 | ||
6 | config 64BIT | ||
7 | bool | ||
8 | |||
9 | config 32BIT | ||
10 | bool | ||
11 | |||
6 | config RISCV | 12 | config RISCV |
7 | def_bool y | 13 | def_bool y |
14 | # even on 32-bit, physical (and DMA) addresses are > 32-bits | ||
15 | select PHYS_ADDR_T_64BIT | ||
8 | select OF | 16 | select OF |
9 | select OF_EARLY_FLATTREE | 17 | select OF_EARLY_FLATTREE |
10 | select OF_IRQ | 18 | select OF_IRQ |
@@ -22,7 +30,6 @@ config RISCV | |||
22 | select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A | 30 | select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A |
23 | select HAVE_MEMBLOCK | 31 | select HAVE_MEMBLOCK |
24 | select HAVE_MEMBLOCK_NODE_MAP | 32 | select HAVE_MEMBLOCK_NODE_MAP |
25 | select HAVE_DMA_API_DEBUG | ||
26 | select HAVE_DMA_CONTIGUOUS | 33 | select HAVE_DMA_CONTIGUOUS |
27 | select HAVE_GENERIC_DMA_COHERENT | 34 | select HAVE_GENERIC_DMA_COHERENT |
28 | select IRQ_DOMAIN | 35 | select IRQ_DOMAIN |
@@ -39,16 +46,9 @@ config RISCV | |||
39 | config MMU | 46 | config MMU |
40 | def_bool y | 47 | def_bool y |
41 | 48 | ||
42 | # even on 32-bit, physical (and DMA) addresses are > 32-bits | ||
43 | config ARCH_PHYS_ADDR_T_64BIT | ||
44 | def_bool y | ||
45 | |||
46 | config ZONE_DMA32 | 49 | config ZONE_DMA32 |
47 | bool | 50 | bool |
48 | default y | 51 | default y if 64BIT |
49 | |||
50 | config ARCH_DMA_ADDR_T_64BIT | ||
51 | def_bool y | ||
52 | 52 | ||
53 | config PAGE_OFFSET | 53 | config PAGE_OFFSET |
54 | hex | 54 | hex |
@@ -101,7 +101,6 @@ choice | |||
101 | 101 | ||
102 | config ARCH_RV32I | 102 | config ARCH_RV32I |
103 | bool "RV32I" | 103 | bool "RV32I" |
104 | select CPU_SUPPORTS_32BIT_KERNEL | ||
105 | select 32BIT | 104 | select 32BIT |
106 | select GENERIC_ASHLDI3 | 105 | select GENERIC_ASHLDI3 |
107 | select GENERIC_ASHRDI3 | 106 | select GENERIC_ASHRDI3 |
@@ -109,13 +108,13 @@ config ARCH_RV32I | |||
109 | 108 | ||
110 | config ARCH_RV64I | 109 | config ARCH_RV64I |
111 | bool "RV64I" | 110 | bool "RV64I" |
112 | select CPU_SUPPORTS_64BIT_KERNEL | ||
113 | select 64BIT | 111 | select 64BIT |
114 | select HAVE_FUNCTION_TRACER | 112 | select HAVE_FUNCTION_TRACER |
115 | select HAVE_FUNCTION_GRAPH_TRACER | 113 | select HAVE_FUNCTION_GRAPH_TRACER |
116 | select HAVE_FTRACE_MCOUNT_RECORD | 114 | select HAVE_FTRACE_MCOUNT_RECORD |
117 | select HAVE_DYNAMIC_FTRACE | 115 | select HAVE_DYNAMIC_FTRACE |
118 | select HAVE_DYNAMIC_FTRACE_WITH_REGS | 116 | select HAVE_DYNAMIC_FTRACE_WITH_REGS |
117 | select SWIOTLB | ||
119 | 118 | ||
120 | endchoice | 119 | endchoice |
121 | 120 | ||
@@ -171,11 +170,6 @@ config NR_CPUS | |||
171 | depends on SMP | 170 | depends on SMP |
172 | default "8" | 171 | default "8" |
173 | 172 | ||
174 | config CPU_SUPPORTS_32BIT_KERNEL | ||
175 | bool | ||
176 | config CPU_SUPPORTS_64BIT_KERNEL | ||
177 | bool | ||
178 | |||
179 | choice | 173 | choice |
180 | prompt "CPU Tuning" | 174 | prompt "CPU Tuning" |
181 | default TUNE_GENERIC | 175 | default TUNE_GENERIC |
@@ -202,24 +196,6 @@ endmenu | |||
202 | 196 | ||
203 | menu "Kernel type" | 197 | menu "Kernel type" |
204 | 198 | ||
205 | choice | ||
206 | prompt "Kernel code model" | ||
207 | default 64BIT | ||
208 | |||
209 | config 32BIT | ||
210 | bool "32-bit kernel" | ||
211 | depends on CPU_SUPPORTS_32BIT_KERNEL | ||
212 | help | ||
213 | Select this option to build a 32-bit kernel. | ||
214 | |||
215 | config 64BIT | ||
216 | bool "64-bit kernel" | ||
217 | depends on CPU_SUPPORTS_64BIT_KERNEL | ||
218 | help | ||
219 | Select this option to build a 64-bit kernel. | ||
220 | |||
221 | endchoice | ||
222 | |||
223 | source "mm/Kconfig" | 199 | source "mm/Kconfig" |
224 | 200 | ||
225 | source "kernel/Kconfig.preempt" | 201 | source "kernel/Kconfig.preempt" |
diff --git a/arch/riscv/include/asm/dma-mapping.h b/arch/riscv/include/asm/dma-mapping.h new file mode 100644 index 000000000000..8facc1c8fa05 --- /dev/null +++ b/arch/riscv/include/asm/dma-mapping.h | |||
@@ -0,0 +1,15 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #ifndef _RISCV_ASM_DMA_MAPPING_H | ||
3 | #define _RISCV_ASM_DMA_MAPPING_H 1 | ||
4 | |||
5 | #ifdef CONFIG_SWIOTLB | ||
6 | #include <linux/swiotlb.h> | ||
7 | static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | ||
8 | { | ||
9 | return &swiotlb_dma_ops; | ||
10 | } | ||
11 | #else | ||
12 | #include <asm-generic/dma-mapping.h> | ||
13 | #endif /* CONFIG_SWIOTLB */ | ||
14 | |||
15 | #endif /* _RISCV_ASM_DMA_MAPPING_H */ | ||
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h index 0f2fc9ef20fc..b3638c505728 100644 --- a/arch/riscv/include/asm/pci.h +++ b/arch/riscv/include/asm/pci.h | |||
@@ -26,9 +26,6 @@ | |||
26 | /* RISC-V shim does not initialize PCI bus */ | 26 | /* RISC-V shim does not initialize PCI bus */ |
27 | #define pcibios_assign_all_busses() 1 | 27 | #define pcibios_assign_all_busses() 1 |
28 | 28 | ||
29 | /* We do not have an IOMMU */ | ||
30 | #define PCI_DMA_BUS_IS_PHYS 1 | ||
31 | |||
32 | extern int isa_dma_bridge_buggy; | 29 | extern int isa_dma_bridge_buggy; |
33 | 30 | ||
34 | #ifdef CONFIG_PCI | 31 | #ifdef CONFIG_PCI |
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index c11f40c1b2a8..ee44a48faf79 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/of_fdt.h> | 29 | #include <linux/of_fdt.h> |
30 | #include <linux/of_platform.h> | 30 | #include <linux/of_platform.h> |
31 | #include <linux/sched/task.h> | 31 | #include <linux/sched/task.h> |
32 | #include <linux/swiotlb.h> | ||
32 | 33 | ||
33 | #include <asm/setup.h> | 34 | #include <asm/setup.h> |
34 | #include <asm/sections.h> | 35 | #include <asm/sections.h> |
@@ -206,6 +207,7 @@ void __init setup_arch(char **cmdline_p) | |||
206 | setup_bootmem(); | 207 | setup_bootmem(); |
207 | paging_init(); | 208 | paging_init(); |
208 | unflatten_device_tree(); | 209 | unflatten_device_tree(); |
210 | swiotlb_init(1); | ||
209 | 211 | ||
210 | #ifdef CONFIG_SMP | 212 | #ifdef CONFIG_SMP |
211 | setup_smp(); | 213 | setup_smp(); |
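[Editor's note] Taken together, the RISC-V changes above wire up swiotlb end to end: RV64I selects SWIOTLB, setup_arch() calls swiotlb_init(1), and the new asm/dma-mapping.h routes get_arch_dma_ops() to swiotlb_dma_ops. Drivers see no change; a device with a narrow DMA mask is simply bounce-buffered. Illustrative usage only — pdev, buf and len are placeholders and the helper is hypothetical, but the calls are the standard DMA API:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* hypothetical helper; not from this series */
static int example_map_for_device(struct pci_dev *pdev, void *buf, size_t len,
				  dma_addr_t *handle)
{
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;	/* even 32-bit DMA is not supported */

	/* on RV64 this may transparently bounce through the swiotlb pool */
	*handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *handle))
		return -ENOMEM;
	return 0;	/* pair with dma_unmap_single() when the I/O completes */
}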
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 199ac3e4da1d..6a64287ec1da 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -35,9 +35,6 @@ config GENERIC_BUG | |||
35 | config GENERIC_BUG_RELATIVE_POINTERS | 35 | config GENERIC_BUG_RELATIVE_POINTERS |
36 | def_bool y | 36 | def_bool y |
37 | 37 | ||
38 | config ARCH_DMA_ADDR_T_64BIT | ||
39 | def_bool y | ||
40 | |||
41 | config GENERIC_LOCKBREAK | 38 | config GENERIC_LOCKBREAK |
42 | def_bool y if SMP && PREEMPT | 39 | def_bool y if SMP && PREEMPT |
43 | 40 | ||
@@ -133,7 +130,6 @@ config S390 | |||
133 | select HAVE_CMPXCHG_LOCAL | 130 | select HAVE_CMPXCHG_LOCAL |
134 | select HAVE_COPY_THREAD_TLS | 131 | select HAVE_COPY_THREAD_TLS |
135 | select HAVE_DEBUG_KMEMLEAK | 132 | select HAVE_DEBUG_KMEMLEAK |
136 | select HAVE_DMA_API_DEBUG | ||
137 | select HAVE_DMA_CONTIGUOUS | 133 | select HAVE_DMA_CONTIGUOUS |
138 | select DMA_DIRECT_OPS | 134 | select DMA_DIRECT_OPS |
139 | select HAVE_DYNAMIC_FTRACE | 135 | select HAVE_DYNAMIC_FTRACE |
@@ -709,7 +705,11 @@ config QDIO | |||
709 | menuconfig PCI | 705 | menuconfig PCI |
710 | bool "PCI support" | 706 | bool "PCI support" |
711 | select PCI_MSI | 707 | select PCI_MSI |
708 | select IOMMU_HELPER | ||
712 | select IOMMU_SUPPORT | 709 | select IOMMU_SUPPORT |
710 | select NEED_DMA_MAP_STATE | ||
711 | select NEED_SG_DMA_LENGTH | ||
712 | |||
713 | help | 713 | help |
714 | Enable PCI support. | 714 | Enable PCI support. |
715 | 715 | ||
@@ -733,15 +733,6 @@ config PCI_DOMAINS | |||
733 | config HAS_IOMEM | 733 | config HAS_IOMEM |
734 | def_bool PCI | 734 | def_bool PCI |
735 | 735 | ||
736 | config IOMMU_HELPER | ||
737 | def_bool PCI | ||
738 | |||
739 | config NEED_SG_DMA_LENGTH | ||
740 | def_bool PCI | ||
741 | |||
742 | config NEED_DMA_MAP_STATE | ||
743 | def_bool PCI | ||
744 | |||
745 | config CHSC_SCH | 736 | config CHSC_SCH |
746 | def_tristate m | 737 | def_tristate m |
747 | prompt "Support for CHSC subchannels" | 738 | prompt "Support for CHSC subchannels" |
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 12fe3591034f..94f8db468c9b 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h | |||
@@ -2,8 +2,6 @@ | |||
2 | #ifndef __ASM_S390_PCI_H | 2 | #ifndef __ASM_S390_PCI_H |
3 | #define __ASM_S390_PCI_H | 3 | #define __ASM_S390_PCI_H |
4 | 4 | ||
5 | /* must be set before including asm-generic/pci.h */ | ||
6 | #define PCI_DMA_BUS_IS_PHYS (0) | ||
7 | /* must be set before including pci_clp.h */ | 5 | /* must be set before including pci_clp.h */ |
8 | #define PCI_BAR_COUNT 6 | 6 | #define PCI_BAR_COUNT 6 |
9 | 7 | ||
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 2d15d84c20ed..d387a0fbdd7e 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c | |||
@@ -668,15 +668,6 @@ void zpci_dma_exit(void) | |||
668 | kmem_cache_destroy(dma_region_table_cache); | 668 | kmem_cache_destroy(dma_region_table_cache); |
669 | } | 669 | } |
670 | 670 | ||
671 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
672 | |||
673 | static int __init dma_debug_do_init(void) | ||
674 | { | ||
675 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
676 | return 0; | ||
677 | } | ||
678 | fs_initcall(dma_debug_do_init); | ||
679 | |||
680 | const struct dma_map_ops s390_pci_dma_ops = { | 671 | const struct dma_map_ops s390_pci_dma_ops = { |
681 | .alloc = s390_dma_alloc, | 672 | .alloc = s390_dma_alloc, |
682 | .free = s390_dma_free, | 673 | .free = s390_dma_free, |
@@ -685,8 +676,6 @@ const struct dma_map_ops s390_pci_dma_ops = { | |||
685 | .map_page = s390_dma_map_pages, | 676 | .map_page = s390_dma_map_pages, |
686 | .unmap_page = s390_dma_unmap_pages, | 677 | .unmap_page = s390_dma_unmap_pages, |
687 | .mapping_error = s390_mapping_error, | 678 | .mapping_error = s390_mapping_error, |
688 | /* if we support direct DMA this must be conditional */ | ||
689 | .is_phys = 0, | ||
690 | /* dma_supported is unconditionally true without a callback */ | 679 | /* dma_supported is unconditionally true without a callback */ |
691 | }; | 680 | }; |
692 | EXPORT_SYMBOL_GPL(s390_pci_dma_ops); | 681 | EXPORT_SYMBOL_GPL(s390_pci_dma_ops); |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 1851eaeee131..a97538b607a4 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -14,7 +14,6 @@ config SUPERH | |||
14 | select HAVE_OPROFILE | 14 | select HAVE_OPROFILE |
15 | select HAVE_GENERIC_DMA_COHERENT | 15 | select HAVE_GENERIC_DMA_COHERENT |
16 | select HAVE_ARCH_TRACEHOOK | 16 | select HAVE_ARCH_TRACEHOOK |
17 | select HAVE_DMA_API_DEBUG | ||
18 | select HAVE_PERF_EVENTS | 17 | select HAVE_PERF_EVENTS |
19 | select HAVE_DEBUG_BUGVERBOSE | 18 | select HAVE_DEBUG_BUGVERBOSE |
20 | select ARCH_HAVE_CUSTOM_GPIO_H | 19 | select ARCH_HAVE_CUSTOM_GPIO_H |
@@ -51,6 +50,9 @@ config SUPERH | |||
51 | select HAVE_ARCH_AUDITSYSCALL | 50 | select HAVE_ARCH_AUDITSYSCALL |
52 | select HAVE_FUTEX_CMPXCHG if FUTEX | 51 | select HAVE_FUTEX_CMPXCHG if FUTEX |
53 | select HAVE_NMI | 52 | select HAVE_NMI |
53 | select NEED_DMA_MAP_STATE | ||
54 | select NEED_SG_DMA_LENGTH | ||
55 | |||
54 | help | 56 | help |
55 | The SuperH is a RISC processor targeted for use in embedded systems | 57 | The SuperH is a RISC processor targeted for use in embedded systems |
56 | and consumer electronics; it was also used in the Sega Dreamcast | 58 | and consumer electronics; it was also used in the Sega Dreamcast |
@@ -161,12 +163,6 @@ config DMA_COHERENT | |||
161 | config DMA_NONCOHERENT | 163 | config DMA_NONCOHERENT |
162 | def_bool !DMA_COHERENT | 164 | def_bool !DMA_COHERENT |
163 | 165 | ||
164 | config NEED_DMA_MAP_STATE | ||
165 | def_bool DMA_NONCOHERENT | ||
166 | |||
167 | config NEED_SG_DMA_LENGTH | ||
168 | def_bool y | ||
169 | |||
170 | config PGTABLE_LEVELS | 166 | config PGTABLE_LEVELS |
171 | default 3 if X2TLB | 167 | default 3 if X2TLB |
172 | default 2 | 168 | default 2 |
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 0033f0df2b3b..10a36b1cf2ea 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h | |||
@@ -71,12 +71,6 @@ extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM; | |||
71 | * SuperH has everything mapped statically like x86. | 71 | * SuperH has everything mapped statically like x86. |
72 | */ | 72 | */ |
73 | 73 | ||
74 | /* The PCI address space does equal the physical memory | ||
75 | * address space. The networking and block device layers use | ||
76 | * this boolean for bounce buffer decisions. | ||
77 | */ | ||
78 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) | ||
79 | |||
80 | #ifdef CONFIG_PCI | 74 | #ifdef CONFIG_PCI |
81 | /* | 75 | /* |
82 | * None of the SH PCI controllers support MWI, it is always treated as a | 76 | * None of the SH PCI controllers support MWI, it is always treated as a |
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 178457d7620c..3e3a32fc676e 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c | |||
@@ -78,7 +78,6 @@ const struct dma_map_ops nommu_dma_ops = { | |||
78 | .sync_single_for_device = nommu_sync_single_for_device, | 78 | .sync_single_for_device = nommu_sync_single_for_device, |
79 | .sync_sg_for_device = nommu_sync_sg_for_device, | 79 | .sync_sg_for_device = nommu_sync_sg_for_device, |
80 | #endif | 80 | #endif |
81 | .is_phys = 1, | ||
82 | }; | 81 | }; |
83 | 82 | ||
84 | void __init no_iommu_init(void) | 83 | void __init no_iommu_init(void) |
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index f1b44697ad68..fceb2adfcac7 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c | |||
@@ -20,18 +20,9 @@ | |||
20 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
21 | #include <asm/addrspace.h> | 21 | #include <asm/addrspace.h> |
22 | 22 | ||
23 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | ||
24 | |||
25 | const struct dma_map_ops *dma_ops; | 23 | const struct dma_map_ops *dma_ops; |
26 | EXPORT_SYMBOL(dma_ops); | 24 | EXPORT_SYMBOL(dma_ops); |
27 | 25 | ||
28 | static int __init dma_init(void) | ||
29 | { | ||
30 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
31 | return 0; | ||
32 | } | ||
33 | fs_initcall(dma_init); | ||
34 | |||
35 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 26 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
36 | dma_addr_t *dma_handle, gfp_t gfp, | 27 | dma_addr_t *dma_handle, gfp_t gfp, |
37 | unsigned long attrs) | 28 | unsigned long attrs) |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 8767e45f1b2b..435dbc033afe 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -25,7 +25,6 @@ config SPARC | |||
25 | select RTC_CLASS | 25 | select RTC_CLASS |
26 | select RTC_DRV_M48T59 | 26 | select RTC_DRV_M48T59 |
27 | select RTC_SYSTOHC | 27 | select RTC_SYSTOHC |
28 | select HAVE_DMA_API_DEBUG | ||
29 | select HAVE_ARCH_JUMP_LABEL if SPARC64 | 28 | select HAVE_ARCH_JUMP_LABEL if SPARC64 |
30 | select GENERIC_IRQ_SHOW | 29 | select GENERIC_IRQ_SHOW |
31 | select ARCH_WANT_IPC_PARSE_VERSION | 30 | select ARCH_WANT_IPC_PARSE_VERSION |
@@ -44,6 +43,8 @@ config SPARC | |||
44 | select ARCH_HAS_SG_CHAIN | 43 | select ARCH_HAS_SG_CHAIN |
45 | select CPU_NO_EFFICIENT_FFS | 44 | select CPU_NO_EFFICIENT_FFS |
46 | select LOCKDEP_SMALL if LOCKDEP | 45 | select LOCKDEP_SMALL if LOCKDEP |
46 | select NEED_DMA_MAP_STATE | ||
47 | select NEED_SG_DMA_LENGTH | ||
47 | 48 | ||
48 | config SPARC32 | 49 | config SPARC32 |
49 | def_bool !64BIT | 50 | def_bool !64BIT |
@@ -67,6 +68,7 @@ config SPARC64 | |||
67 | select HAVE_SYSCALL_TRACEPOINTS | 68 | select HAVE_SYSCALL_TRACEPOINTS |
68 | select HAVE_CONTEXT_TRACKING | 69 | select HAVE_CONTEXT_TRACKING |
69 | select HAVE_DEBUG_KMEMLEAK | 70 | select HAVE_DEBUG_KMEMLEAK |
71 | select IOMMU_HELPER | ||
70 | select SPARSE_IRQ | 72 | select SPARSE_IRQ |
71 | select RTC_DRV_CMOS | 73 | select RTC_DRV_CMOS |
72 | select RTC_DRV_BQ4802 | 74 | select RTC_DRV_BQ4802 |
@@ -102,14 +104,6 @@ config ARCH_ATU | |||
102 | bool | 104 | bool |
103 | default y if SPARC64 | 105 | default y if SPARC64 |
104 | 106 | ||
105 | config ARCH_DMA_ADDR_T_64BIT | ||
106 | bool | ||
107 | default y if ARCH_ATU | ||
108 | |||
109 | config IOMMU_HELPER | ||
110 | bool | ||
111 | default y if SPARC64 | ||
112 | |||
113 | config STACKTRACE_SUPPORT | 107 | config STACKTRACE_SUPPORT |
114 | bool | 108 | bool |
115 | default y if SPARC64 | 109 | default y if SPARC64 |
@@ -146,12 +140,6 @@ config ZONE_DMA | |||
146 | bool | 140 | bool |
147 | default y if SPARC32 | 141 | default y if SPARC32 |
148 | 142 | ||
149 | config NEED_DMA_MAP_STATE | ||
150 | def_bool y | ||
151 | |||
152 | config NEED_SG_DMA_LENGTH | ||
153 | def_bool y | ||
154 | |||
155 | config GENERIC_ISA_DMA | 143 | config GENERIC_ISA_DMA |
156 | bool | 144 | bool |
157 | default y if SPARC32 | 145 | default y if SPARC32 |
diff --git a/include/linux/iommu-common.h b/arch/sparc/include/asm/iommu-common.h index 802c90c79d1f..802c90c79d1f 100644 --- a/include/linux/iommu-common.h +++ b/arch/sparc/include/asm/iommu-common.h | |||
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h index 9ed6b54caa4b..0ef6dedf747e 100644 --- a/arch/sparc/include/asm/iommu_64.h +++ b/arch/sparc/include/asm/iommu_64.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #define IOPTE_WRITE 0x0000000000000002UL | 17 | #define IOPTE_WRITE 0x0000000000000002UL |
18 | 18 | ||
19 | #define IOMMU_NUM_CTXS 4096 | 19 | #define IOMMU_NUM_CTXS 4096 |
20 | #include <linux/iommu-common.h> | 20 | #include <asm/iommu-common.h> |
21 | 21 | ||
22 | struct iommu_arena { | 22 | struct iommu_arena { |
23 | unsigned long *map; | 23 | unsigned long *map; |
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index 98917e48727d..cfc0ee9476c6 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h | |||
@@ -17,10 +17,6 @@ | |||
17 | 17 | ||
18 | #define PCI_IRQ_NONE 0xffffffff | 18 | #define PCI_IRQ_NONE 0xffffffff |
19 | 19 | ||
20 | /* Dynamic DMA mapping stuff. | ||
21 | */ | ||
22 | #define PCI_DMA_BUS_IS_PHYS (0) | ||
23 | |||
24 | #endif /* __KERNEL__ */ | 20 | #endif /* __KERNEL__ */ |
25 | 21 | ||
26 | #ifndef CONFIG_LEON_PCI | 22 | #ifndef CONFIG_LEON_PCI |
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 671274e36cfa..fac77813402c 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h | |||
@@ -17,12 +17,6 @@ | |||
17 | 17 | ||
18 | #define PCI_IRQ_NONE 0xffffffff | 18 | #define PCI_IRQ_NONE 0xffffffff |
19 | 19 | ||
20 | /* The PCI address space does not equal the physical memory | ||
21 | * address space. The networking and block device layers use | ||
22 | * this boolean for bounce buffer decisions. | ||
23 | */ | ||
24 | #define PCI_DMA_BUS_IS_PHYS (0) | ||
25 | |||
26 | /* PCI IOMMU mapping bypass support. */ | 20 | /* PCI IOMMU mapping bypass support. */ |
27 | 21 | ||
28 | /* PCI 64-bit addressing works for all slots on all controller | 22 | /* PCI 64-bit addressing works for all slots on all controller |
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 76cb57750dda..cf8640841b7a 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -59,7 +59,7 @@ obj-$(CONFIG_SPARC32) += leon_pmc.o | |||
59 | 59 | ||
60 | obj-$(CONFIG_SPARC64) += reboot.o | 60 | obj-$(CONFIG_SPARC64) += reboot.o |
61 | obj-$(CONFIG_SPARC64) += sysfs.o | 61 | obj-$(CONFIG_SPARC64) += sysfs.o |
62 | obj-$(CONFIG_SPARC64) += iommu.o | 62 | obj-$(CONFIG_SPARC64) += iommu.o iommu-common.o |
63 | obj-$(CONFIG_SPARC64) += central.o | 63 | obj-$(CONFIG_SPARC64) += central.o |
64 | obj-$(CONFIG_SPARC64) += starfire.o | 64 | obj-$(CONFIG_SPARC64) += starfire.o |
65 | obj-$(CONFIG_SPARC64) += power.o | 65 | obj-$(CONFIG_SPARC64) += power.o |
@@ -74,8 +74,6 @@ obj-$(CONFIG_SPARC64) += pcr.o | |||
74 | obj-$(CONFIG_SPARC64) += nmi.o | 74 | obj-$(CONFIG_SPARC64) += nmi.o |
75 | obj-$(CONFIG_SPARC64_SMP) += cpumap.o | 75 | obj-$(CONFIG_SPARC64_SMP) += cpumap.o |
76 | 76 | ||
77 | obj-y += dma.o | ||
78 | |||
79 | obj-$(CONFIG_PCIC_PCI) += pcic.o | 77 | obj-$(CONFIG_PCIC_PCI) += pcic.o |
80 | obj-$(CONFIG_LEON_PCI) += leon_pci.o | 78 | obj-$(CONFIG_LEON_PCI) += leon_pci.o |
81 | obj-$(CONFIG_SPARC_GRPCI2)+= leon_pci_grpci2.o | 79 | obj-$(CONFIG_SPARC_GRPCI2)+= leon_pci_grpci2.o |
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c deleted file mode 100644 index f73e7597c971..000000000000 --- a/arch/sparc/kernel/dma.c +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/dma-mapping.h> | ||
4 | #include <linux/dma-debug.h> | ||
5 | |||
6 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15) | ||
7 | |||
8 | static int __init dma_init(void) | ||
9 | { | ||
10 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
11 | return 0; | ||
12 | } | ||
13 | fs_initcall(dma_init); | ||
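The dma-debug initcall deleted here (and its twins in the sh, x86 and xtensa hunks of this series) is replaced by initialization inside the common dma-debug code itself, so architectures no longer preallocate their own entry pools. A minimal sketch of that common-code pattern, with the initcall name and default entry count assumed rather than taken from this diff:

/* lib/dma-debug.c (sketch): one shared preallocation for all architectures,
 * with dma_debug_init() now a local helper rather than a per-arch export. */
#include <linux/init.h>

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
core_initcall(dma_debug_do_init);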
diff --git a/lib/iommu-common.c b/arch/sparc/kernel/iommu-common.c index 55b00de106b5..59cb16691322 100644 --- a/lib/iommu-common.c +++ b/arch/sparc/kernel/iommu-common.c | |||
@@ -8,9 +8,9 @@ | |||
8 | #include <linux/bitmap.h> | 8 | #include <linux/bitmap.h> |
9 | #include <linux/bug.h> | 9 | #include <linux/bug.h> |
10 | #include <linux/iommu-helper.h> | 10 | #include <linux/iommu-helper.h> |
11 | #include <linux/iommu-common.h> | ||
12 | #include <linux/dma-mapping.h> | 11 | #include <linux/dma-mapping.h> |
13 | #include <linux/hash.h> | 12 | #include <linux/hash.h> |
13 | #include <asm/iommu-common.h> | ||
14 | 14 | ||
15 | static unsigned long iommu_large_alloc = 15; | 15 | static unsigned long iommu_large_alloc = 15; |
16 | 16 | ||
@@ -93,7 +93,6 @@ void iommu_tbl_pool_init(struct iommu_map_table *iommu, | |||
93 | p->hint = p->start; | 93 | p->hint = p->start; |
94 | p->end = num_entries; | 94 | p->end = num_entries; |
95 | } | 95 | } |
96 | EXPORT_SYMBOL(iommu_tbl_pool_init); | ||
97 | 96 | ||
98 | unsigned long iommu_tbl_range_alloc(struct device *dev, | 97 | unsigned long iommu_tbl_range_alloc(struct device *dev, |
99 | struct iommu_map_table *iommu, | 98 | struct iommu_map_table *iommu, |
@@ -224,7 +223,6 @@ bail: | |||
224 | 223 | ||
225 | return n; | 224 | return n; |
226 | } | 225 | } |
227 | EXPORT_SYMBOL(iommu_tbl_range_alloc); | ||
228 | 226 | ||
229 | static struct iommu_pool *get_pool(struct iommu_map_table *tbl, | 227 | static struct iommu_pool *get_pool(struct iommu_map_table *tbl, |
230 | unsigned long entry) | 228 | unsigned long entry) |
@@ -264,4 +262,3 @@ void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr, | |||
264 | bitmap_clear(iommu->map, entry, npages); | 262 | bitmap_clear(iommu->map, entry, npages); |
265 | spin_unlock_irqrestore(&(pool->lock), flags); | 263 | spin_unlock_irqrestore(&(pool->lock), flags); |
266 | } | 264 | } |
267 | EXPORT_SYMBOL(iommu_tbl_range_free); | ||
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index b08dc3416f06..40d008b0bd3e 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/iommu-helper.h> | 15 | #include <linux/iommu-helper.h> |
16 | #include <linux/bitmap.h> | 16 | #include <linux/bitmap.h> |
17 | #include <linux/iommu-common.h> | 17 | #include <asm/iommu-common.h> |
18 | 18 | ||
19 | #ifdef CONFIG_PCI | 19 | #ifdef CONFIG_PCI |
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 86b625f9d8dc..c0fa3ef6cf01 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/bitmap.h> | 18 | #include <linux/bitmap.h> |
19 | #include <linux/iommu-common.h> | 19 | #include <asm/iommu-common.h> |
20 | 20 | ||
21 | #include <asm/hypervisor.h> | 21 | #include <asm/hypervisor.h> |
22 | #include <asm/iommu.h> | 22 | #include <asm/iommu.h> |
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 249367228c33..565d9ac883d0 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/export.h> | 16 | #include <linux/export.h> |
17 | #include <linux/log2.h> | 17 | #include <linux/log2.h> |
18 | #include <linux/of_device.h> | 18 | #include <linux/of_device.h> |
19 | #include <linux/iommu-common.h> | 19 | #include <asm/iommu-common.h> |
20 | 20 | ||
21 | #include <asm/iommu.h> | 21 | #include <asm/iommu.h> |
22 | #include <asm/irq.h> | 22 | #include <asm/irq.h> |
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 462e59a7ae78..03f991e44288 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig | |||
@@ -19,6 +19,8 @@ config UNICORE32 | |||
19 | select ARCH_WANT_FRAME_POINTERS | 19 | select ARCH_WANT_FRAME_POINTERS |
20 | select GENERIC_IOMAP | 20 | select GENERIC_IOMAP |
21 | select MODULES_USE_ELF_REL | 21 | select MODULES_USE_ELF_REL |
22 | select NEED_DMA_MAP_STATE | ||
23 | select SWIOTLB | ||
22 | help | 24 | help |
23 | UniCore-32 is 32-bit Instruction Set Architecture, | 25 | UniCore-32 is 32-bit Instruction Set Architecture, |
24 | including a series of low-power-consumption RISC chip | 26 | including a series of low-power-consumption RISC chip |
@@ -61,9 +63,6 @@ config ARCH_MAY_HAVE_PC_FDC | |||
61 | config ZONE_DMA | 63 | config ZONE_DMA |
62 | def_bool y | 64 | def_bool y |
63 | 65 | ||
64 | config NEED_DMA_MAP_STATE | ||
65 | def_bool y | ||
66 | |||
67 | source "init/Kconfig" | 66 | source "init/Kconfig" |
68 | 67 | ||
69 | source "kernel/Kconfig.freezer" | 68 | source "kernel/Kconfig.freezer" |
diff --git a/arch/unicore32/mm/Kconfig b/arch/unicore32/mm/Kconfig index e9154a59d561..82759b6aba67 100644 --- a/arch/unicore32/mm/Kconfig +++ b/arch/unicore32/mm/Kconfig | |||
@@ -39,14 +39,3 @@ config CPU_TLB_SINGLE_ENTRY_DISABLE | |||
39 | default y | 39 | default y |
40 | help | 40 | help |
41 | Say Y here to disable the TLB single entry operations. | 41 | Say Y here to disable the TLB single entry operations. |
42 | |||
43 | config SWIOTLB | ||
44 | def_bool y | ||
45 | select DMA_DIRECT_OPS | ||
46 | |||
47 | config IOMMU_HELPER | ||
48 | def_bool SWIOTLB | ||
49 | |||
50 | config NEED_SG_DMA_LENGTH | ||
51 | def_bool SWIOTLB | ||
52 | |||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c07f492b871a..f2ee6a8ffe65 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -28,6 +28,8 @@ config X86_64 | |||
28 | select ARCH_USE_CMPXCHG_LOCKREF | 28 | select ARCH_USE_CMPXCHG_LOCKREF |
29 | select HAVE_ARCH_SOFT_DIRTY | 29 | select HAVE_ARCH_SOFT_DIRTY |
30 | select MODULES_USE_ELF_RELA | 30 | select MODULES_USE_ELF_RELA |
31 | select NEED_DMA_MAP_STATE | ||
32 | select SWIOTLB | ||
31 | select X86_DEV_DMA_OPS | 33 | select X86_DEV_DMA_OPS |
32 | select ARCH_HAS_SYSCALL_WRAPPER | 34 | select ARCH_HAS_SYSCALL_WRAPPER |
33 | 35 | ||
@@ -134,7 +136,6 @@ config X86 | |||
134 | select HAVE_C_RECORDMCOUNT | 136 | select HAVE_C_RECORDMCOUNT |
135 | select HAVE_DEBUG_KMEMLEAK | 137 | select HAVE_DEBUG_KMEMLEAK |
136 | select HAVE_DEBUG_STACKOVERFLOW | 138 | select HAVE_DEBUG_STACKOVERFLOW |
137 | select HAVE_DMA_API_DEBUG | ||
138 | select HAVE_DMA_CONTIGUOUS | 139 | select HAVE_DMA_CONTIGUOUS |
139 | select HAVE_DYNAMIC_FTRACE | 140 | select HAVE_DYNAMIC_FTRACE |
140 | select HAVE_DYNAMIC_FTRACE_WITH_REGS | 141 | select HAVE_DYNAMIC_FTRACE_WITH_REGS |
@@ -184,6 +185,7 @@ config X86 | |||
184 | select HAVE_UNSTABLE_SCHED_CLOCK | 185 | select HAVE_UNSTABLE_SCHED_CLOCK |
185 | select HAVE_USER_RETURN_NOTIFIER | 186 | select HAVE_USER_RETURN_NOTIFIER |
186 | select IRQ_FORCED_THREADING | 187 | select IRQ_FORCED_THREADING |
188 | select NEED_SG_DMA_LENGTH | ||
187 | select PCI_LOCKLESS_CONFIG | 189 | select PCI_LOCKLESS_CONFIG |
188 | select PERF_EVENTS | 190 | select PERF_EVENTS |
189 | select RTC_LIB | 191 | select RTC_LIB |
@@ -236,13 +238,6 @@ config ARCH_MMAP_RND_COMPAT_BITS_MAX | |||
236 | config SBUS | 238 | config SBUS |
237 | bool | 239 | bool |
238 | 240 | ||
239 | config NEED_DMA_MAP_STATE | ||
240 | def_bool y | ||
241 | depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB | ||
242 | |||
243 | config NEED_SG_DMA_LENGTH | ||
244 | def_bool y | ||
245 | |||
246 | config GENERIC_ISA_DMA | 241 | config GENERIC_ISA_DMA |
247 | def_bool y | 242 | def_bool y |
248 | depends on ISA_DMA_API | 243 | depends on ISA_DMA_API |
@@ -875,6 +870,7 @@ config DMI | |||
875 | 870 | ||
876 | config GART_IOMMU | 871 | config GART_IOMMU |
877 | bool "Old AMD GART IOMMU support" | 872 | bool "Old AMD GART IOMMU support" |
873 | select IOMMU_HELPER | ||
878 | select SWIOTLB | 874 | select SWIOTLB |
879 | depends on X86_64 && PCI && AMD_NB | 875 | depends on X86_64 && PCI && AMD_NB |
880 | ---help--- | 876 | ---help--- |
@@ -896,6 +892,7 @@ config GART_IOMMU | |||
896 | 892 | ||
897 | config CALGARY_IOMMU | 893 | config CALGARY_IOMMU |
898 | bool "IBM Calgary IOMMU support" | 894 | bool "IBM Calgary IOMMU support" |
895 | select IOMMU_HELPER | ||
899 | select SWIOTLB | 896 | select SWIOTLB |
900 | depends on X86_64 && PCI | 897 | depends on X86_64 && PCI |
901 | ---help--- | 898 | ---help--- |
@@ -923,20 +920,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT | |||
923 | Calgary anyway, pass 'iommu=calgary' on the kernel command line. | 920 | Calgary anyway, pass 'iommu=calgary' on the kernel command line. |
924 | If unsure, say Y. | 921 | If unsure, say Y. |
925 | 922 | ||
926 | # need this always selected by IOMMU for the VIA workaround | ||
927 | config SWIOTLB | ||
928 | def_bool y if X86_64 | ||
929 | ---help--- | ||
930 | Support for software bounce buffers used on x86-64 systems | ||
931 | which don't have a hardware IOMMU. Using this PCI devices | ||
932 | which can only access 32-bits of memory can be used on systems | ||
933 | with more than 3 GB of memory. | ||
934 | If unsure, say Y. | ||
935 | |||
936 | config IOMMU_HELPER | ||
937 | def_bool y | ||
938 | depends on CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU | ||
939 | |||
940 | config MAXSMP | 923 | config MAXSMP |
941 | bool "Enable Maximum number of SMP Processors and NUMA Nodes" | 924 | bool "Enable Maximum number of SMP Processors and NUMA Nodes" |
942 | depends on X86_64 && SMP && DEBUG_KERNEL | 925 | depends on X86_64 && SMP && DEBUG_KERNEL |
@@ -1458,6 +1441,7 @@ config HIGHMEM | |||
1458 | config X86_PAE | 1441 | config X86_PAE |
1459 | bool "PAE (Physical Address Extension) Support" | 1442 | bool "PAE (Physical Address Extension) Support" |
1460 | depends on X86_32 && !HIGHMEM4G | 1443 | depends on X86_32 && !HIGHMEM4G |
1444 | select PHYS_ADDR_T_64BIT | ||
1461 | select SWIOTLB | 1445 | select SWIOTLB |
1462 | ---help--- | 1446 | ---help--- |
1463 | PAE is required for NX support, and furthermore enables | 1447 | PAE is required for NX support, and furthermore enables |
@@ -1485,14 +1469,6 @@ config X86_5LEVEL | |||
1485 | 1469 | ||
1486 | Say N if unsure. | 1470 | Say N if unsure. |
1487 | 1471 | ||
1488 | config ARCH_PHYS_ADDR_T_64BIT | ||
1489 | def_bool y | ||
1490 | depends on X86_64 || X86_PAE | ||
1491 | |||
1492 | config ARCH_DMA_ADDR_T_64BIT | ||
1493 | def_bool y | ||
1494 | depends on X86_64 || HIGHMEM64G | ||
1495 | |||
1496 | config X86_DIRECT_GBPAGES | 1472 | config X86_DIRECT_GBPAGES |
1497 | def_bool y | 1473 | def_bool y |
1498 | depends on X86_64 && !DEBUG_PAGEALLOC | 1474 | depends on X86_64 && !DEBUG_PAGEALLOC |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 89ce4bfd241f..ce4d176b3d13 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -30,10 +30,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | |||
30 | return dma_ops; | 30 | return dma_ops; |
31 | } | 31 | } |
32 | 32 | ||
33 | int arch_dma_supported(struct device *dev, u64 mask); | 33 | bool arch_dma_alloc_attrs(struct device **dev); |
34 | #define arch_dma_supported arch_dma_supported | ||
35 | |||
36 | bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); | ||
37 | #define arch_dma_alloc_attrs arch_dma_alloc_attrs | 34 | #define arch_dma_alloc_attrs arch_dma_alloc_attrs |
38 | 35 | ||
39 | #endif | 36 | #endif |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index d32175e30259..662963681ea6 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -117,9 +117,6 @@ void native_restore_msi_irqs(struct pci_dev *dev); | |||
117 | #define native_setup_msi_irqs NULL | 117 | #define native_setup_msi_irqs NULL |
118 | #define native_teardown_msi_irq NULL | 118 | #define native_teardown_msi_irq NULL |
119 | #endif | 119 | #endif |
120 | |||
121 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) | ||
122 | |||
123 | #endif /* __KERNEL__ */ | 120 | #endif /* __KERNEL__ */ |
124 | 121 | ||
125 | #ifdef CONFIG_X86_64 | 122 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 77625b60a510..ab5d9dd668d2 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -15,13 +15,11 @@ | |||
15 | #include <asm/x86_init.h> | 15 | #include <asm/x86_init.h> |
16 | #include <asm/iommu_table.h> | 16 | #include <asm/iommu_table.h> |
17 | 17 | ||
18 | static int forbid_dac __read_mostly; | 18 | static bool disable_dac_quirk __read_mostly; |
19 | 19 | ||
20 | const struct dma_map_ops *dma_ops = &dma_direct_ops; | 20 | const struct dma_map_ops *dma_ops = &dma_direct_ops; |
21 | EXPORT_SYMBOL(dma_ops); | 21 | EXPORT_SYMBOL(dma_ops); |
22 | 22 | ||
23 | static int iommu_sac_force __read_mostly; | ||
24 | |||
25 | #ifdef CONFIG_IOMMU_DEBUG | 23 | #ifdef CONFIG_IOMMU_DEBUG |
26 | int panic_on_overflow __read_mostly = 1; | 24 | int panic_on_overflow __read_mostly = 1; |
27 | int force_iommu __read_mostly = 1; | 25 | int force_iommu __read_mostly = 1; |
@@ -55,9 +53,6 @@ struct device x86_dma_fallback_dev = { | |||
55 | }; | 53 | }; |
56 | EXPORT_SYMBOL(x86_dma_fallback_dev); | 54 | EXPORT_SYMBOL(x86_dma_fallback_dev); |
57 | 55 | ||
58 | /* Number of entries preallocated for DMA-API debugging */ | ||
59 | #define PREALLOC_DMA_DEBUG_ENTRIES 65536 | ||
60 | |||
61 | void __init pci_iommu_alloc(void) | 56 | void __init pci_iommu_alloc(void) |
62 | { | 57 | { |
63 | struct iommu_table_entry *p; | 58 | struct iommu_table_entry *p; |
@@ -76,7 +71,7 @@ void __init pci_iommu_alloc(void) | |||
76 | } | 71 | } |
77 | } | 72 | } |
78 | 73 | ||
79 | bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) | 74 | bool arch_dma_alloc_attrs(struct device **dev) |
80 | { | 75 | { |
81 | if (!*dev) | 76 | if (!*dev) |
82 | *dev = &x86_dma_fallback_dev; | 77 | *dev = &x86_dma_fallback_dev; |
@@ -125,13 +120,13 @@ static __init int iommu_setup(char *p) | |||
125 | if (!strncmp(p, "nomerge", 7)) | 120 | if (!strncmp(p, "nomerge", 7)) |
126 | iommu_merge = 0; | 121 | iommu_merge = 0; |
127 | if (!strncmp(p, "forcesac", 8)) | 122 | if (!strncmp(p, "forcesac", 8)) |
128 | iommu_sac_force = 1; | 123 | pr_warn("forcesac option ignored.\n"); |
129 | if (!strncmp(p, "allowdac", 8)) | 124 | if (!strncmp(p, "allowdac", 8)) |
130 | forbid_dac = 0; | 125 | pr_warn("allowdac option ignored.\n"); |
131 | if (!strncmp(p, "nodac", 5)) | 126 | if (!strncmp(p, "nodac", 5)) |
132 | forbid_dac = 1; | 127 | pr_warn("nodac option ignored.\n"); |
133 | if (!strncmp(p, "usedac", 6)) { | 128 | if (!strncmp(p, "usedac", 6)) { |
134 | forbid_dac = -1; | 129 | disable_dac_quirk = true; |
135 | return 1; | 130 | return 1; |
136 | } | 131 | } |
137 | #ifdef CONFIG_SWIOTLB | 132 | #ifdef CONFIG_SWIOTLB |
@@ -156,40 +151,9 @@ static __init int iommu_setup(char *p) | |||
156 | } | 151 | } |
157 | early_param("iommu", iommu_setup); | 152 | early_param("iommu", iommu_setup); |
158 | 153 | ||
159 | int arch_dma_supported(struct device *dev, u64 mask) | ||
160 | { | ||
161 | #ifdef CONFIG_PCI | ||
162 | if (mask > 0xffffffff && forbid_dac > 0) { | ||
163 | dev_info(dev, "PCI: Disallowing DAC for device\n"); | ||
164 | return 0; | ||
165 | } | ||
166 | #endif | ||
167 | |||
168 | /* Tell the device to use SAC when IOMMU force is on. This | ||
169 | allows the driver to use cheaper accesses in some cases. | ||
170 | |||
171 | Problem with this is that if we overflow the IOMMU area and | ||
172 | return DAC as fallback address the device may not handle it | ||
173 | correctly. | ||
174 | |||
175 | As a special case some controllers have a 39bit address | ||
176 | mode that is as efficient as 32bit (aic79xx). Don't force | ||
177 | SAC for these. Assume all masks <= 40 bits are of this | ||
178 | type. Normally this doesn't make any difference, but gives | ||
179 | more gentle handling of IOMMU overflow. */ | ||
180 | if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) { | ||
181 | dev_info(dev, "Force SAC with mask %Lx\n", mask); | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | return 1; | ||
186 | } | ||
187 | EXPORT_SYMBOL(arch_dma_supported); | ||
188 | |||
189 | static int __init pci_iommu_init(void) | 154 | static int __init pci_iommu_init(void) |
190 | { | 155 | { |
191 | struct iommu_table_entry *p; | 156 | struct iommu_table_entry *p; |
192 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
193 | 157 | ||
194 | #ifdef CONFIG_PCI | 158 | #ifdef CONFIG_PCI |
195 | dma_debug_add_bus(&pci_bus_type); | 159 | dma_debug_add_bus(&pci_bus_type); |
@@ -209,11 +173,17 @@ rootfs_initcall(pci_iommu_init); | |||
209 | #ifdef CONFIG_PCI | 173 | #ifdef CONFIG_PCI |
210 | /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ | 174 | /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ |
211 | 175 | ||
176 | static int via_no_dac_cb(struct pci_dev *pdev, void *data) | ||
177 | { | ||
178 | pdev->dev.dma_32bit_limit = true; | ||
179 | return 0; | ||
180 | } | ||
181 | |||
212 | static void via_no_dac(struct pci_dev *dev) | 182 | static void via_no_dac(struct pci_dev *dev) |
213 | { | 183 | { |
214 | if (forbid_dac == 0) { | 184 | if (!disable_dac_quirk) { |
215 | dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n"); | 185 | dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n"); |
216 | forbid_dac = 1; | 186 | pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL); |
217 | } | 187 | } |
218 | } | 188 | } |
219 | DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, | 189 | DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, |
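Rather than flipping the global forbid_dac toggle, the VIA quirk now tags every device behind the bridge with dma_32bit_limit, and the policy is applied wherever DMA masks are validated. A minimal sketch of that check, assuming it lives in the dma-direct dma_supported path (the helper name here is illustrative only):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Reject DAC (>32-bit) masks for devices flagged by a bridge quirk. */
static int example_dma_mask_ok(struct device *dev, u64 mask)
{
	if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
		return 0;
	return 1;
}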
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index c921e8bccdc8..17df332269b2 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig | |||
@@ -19,7 +19,6 @@ config XTENSA | |||
19 | select HAVE_ARCH_KASAN if MMU | 19 | select HAVE_ARCH_KASAN if MMU |
20 | select HAVE_CC_STACKPROTECTOR | 20 | select HAVE_CC_STACKPROTECTOR |
21 | select HAVE_DEBUG_KMEMLEAK | 21 | select HAVE_DEBUG_KMEMLEAK |
22 | select HAVE_DMA_API_DEBUG | ||
23 | select HAVE_DMA_CONTIGUOUS | 22 | select HAVE_DMA_CONTIGUOUS |
24 | select HAVE_EXIT_THREAD | 23 | select HAVE_EXIT_THREAD |
25 | select HAVE_FUNCTION_TRACER | 24 | select HAVE_FUNCTION_TRACER |
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h index d5a82153a7c5..6ddf0a30c60d 100644 --- a/arch/xtensa/include/asm/pci.h +++ b/arch/xtensa/include/asm/pci.h | |||
@@ -42,8 +42,6 @@ extern struct pci_controller* pcibios_alloc_controller(void); | |||
42 | * decisions. | 42 | * decisions. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
46 | |||
47 | /* Tell PCI code what kind of PCI resource mappings we support */ | 45 | /* Tell PCI code what kind of PCI resource mappings we support */ |
48 | #define HAVE_PCI_MMAP 1 | 46 | #define HAVE_PCI_MMAP 1 |
49 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 | 47 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 |
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 732631ce250f..392b4a80ebc2 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c | |||
@@ -261,12 +261,3 @@ const struct dma_map_ops xtensa_dma_map_ops = { | |||
261 | .mapping_error = xtensa_dma_mapping_error, | 261 | .mapping_error = xtensa_dma_mapping_error, |
262 | }; | 262 | }; |
263 | EXPORT_SYMBOL(xtensa_dma_map_ops); | 263 | EXPORT_SYMBOL(xtensa_dma_map_ops); |
264 | |||
265 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
266 | |||
267 | static int __init xtensa_dma_init(void) | ||
268 | { | ||
269 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
270 | return 0; | ||
271 | } | ||
272 | fs_initcall(xtensa_dma_init); | ||
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 4a3ac31c07d0..3b0118786b43 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/sizes.h> | 20 | #include <linux/sizes.h> |
21 | #include <linux/limits.h> | 21 | #include <linux/limits.h> |
22 | #include <linux/clk/clk-conf.h> | 22 | #include <linux/clk/clk-conf.h> |
23 | #include <linux/platform_device.h> | ||
23 | 24 | ||
24 | #include <asm/irq.h> | 25 | #include <asm/irq.h> |
25 | 26 | ||
@@ -193,14 +194,16 @@ static const struct dev_pm_ops amba_pm = { | |||
193 | /* | 194 | /* |
194 | * Primecells are part of the Advanced Microcontroller Bus Architecture, | 195 | * Primecells are part of the Advanced Microcontroller Bus Architecture, |
195 | * so we call the bus "amba". | 196 | * so we call the bus "amba". |
197 | * The DMA configuration for the platform and AMBA buses is the same, so | ||
198 | * we reuse the platform bus's DMA configuration routine here. | ||
196 | */ | 199 | */ |
197 | struct bus_type amba_bustype = { | 200 | struct bus_type amba_bustype = { |
198 | .name = "amba", | 201 | .name = "amba", |
199 | .dev_groups = amba_dev_groups, | 202 | .dev_groups = amba_dev_groups, |
200 | .match = amba_match, | 203 | .match = amba_match, |
201 | .uevent = amba_uevent, | 204 | .uevent = amba_uevent, |
205 | .dma_configure = platform_dma_configure, | ||
202 | .pm = &amba_pm, | 206 | .pm = &amba_pm, |
203 | .force_dma = true, | ||
204 | }; | 207 | }; |
205 | 208 | ||
206 | static int __init amba_init(void) | 209 | static int __init amba_init(void) |
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index d82566d6e237..f831a582209c 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c | |||
@@ -329,36 +329,13 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) | |||
329 | #endif | 329 | #endif |
330 | 330 | ||
331 | /* | 331 | /* |
332 | * Common configuration to enable DMA API use for a device | 332 | * enables DMA API use for a device |
333 | */ | 333 | */ |
334 | #include <linux/pci.h> | ||
335 | |||
336 | int dma_configure(struct device *dev) | 334 | int dma_configure(struct device *dev) |
337 | { | 335 | { |
338 | struct device *bridge = NULL, *dma_dev = dev; | 336 | if (dev->bus->dma_configure) |
339 | enum dev_dma_attr attr; | 337 | return dev->bus->dma_configure(dev); |
340 | int ret = 0; | 338 | return 0; |
341 | |||
342 | if (dev_is_pci(dev)) { | ||
343 | bridge = pci_get_host_bridge_device(to_pci_dev(dev)); | ||
344 | dma_dev = bridge; | ||
345 | if (IS_ENABLED(CONFIG_OF) && dma_dev->parent && | ||
346 | dma_dev->parent->of_node) | ||
347 | dma_dev = dma_dev->parent; | ||
348 | } | ||
349 | |||
350 | if (dma_dev->of_node) { | ||
351 | ret = of_dma_configure(dev, dma_dev->of_node); | ||
352 | } else if (has_acpi_companion(dma_dev)) { | ||
353 | attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode)); | ||
354 | if (attr != DEV_DMA_NOT_SUPPORTED) | ||
355 | ret = acpi_dma_configure(dev, attr); | ||
356 | } | ||
357 | |||
358 | if (bridge) | ||
359 | pci_put_host_bridge_device(bridge); | ||
360 | |||
361 | return ret; | ||
362 | } | 339 | } |
363 | 340 | ||
364 | void dma_deconfigure(struct device *dev) | 341 | void dma_deconfigure(struct device *dev) |
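With dma_configure() reduced to a bus callback, any bus that wants firmware-driven DMA setup supplies its own dma_configure method; buses that do not simply skip DMA setup. A minimal sketch of what a DMA-capable custom bus provides under the new scheme, mirroring the host1x and platform-bus hunks later in this diff (the bus name is hypothetical):

#include <linux/device.h>
#include <linux/of_device.h>

/* Children of this hypothetical bus can always do DMA, so force the setup
 * even when firmware does not describe it explicitly. */
static int mybus_dma_configure(struct device *dev)
{
	return of_dma_configure(dev, dev->of_node, true);
}

struct bus_type mybus_type = {
	.name		= "mybus",
	.dma_configure	= mybus_dma_configure,
};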
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 8075ddc70a17..c0ff1e73a634 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -1130,6 +1130,22 @@ int platform_pm_restore(struct device *dev) | |||
1130 | 1130 | ||
1131 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ | 1131 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
1132 | 1132 | ||
1133 | int platform_dma_configure(struct device *dev) | ||
1134 | { | ||
1135 | enum dev_dma_attr attr; | ||
1136 | int ret = 0; | ||
1137 | |||
1138 | if (dev->of_node) { | ||
1139 | ret = of_dma_configure(dev, dev->of_node, true); | ||
1140 | } else if (has_acpi_companion(dev)) { | ||
1141 | attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode)); | ||
1142 | if (attr != DEV_DMA_NOT_SUPPORTED) | ||
1143 | ret = acpi_dma_configure(dev, attr); | ||
1144 | } | ||
1145 | |||
1146 | return ret; | ||
1147 | } | ||
1148 | |||
1133 | static const struct dev_pm_ops platform_dev_pm_ops = { | 1149 | static const struct dev_pm_ops platform_dev_pm_ops = { |
1134 | .runtime_suspend = pm_generic_runtime_suspend, | 1150 | .runtime_suspend = pm_generic_runtime_suspend, |
1135 | .runtime_resume = pm_generic_runtime_resume, | 1151 | .runtime_resume = pm_generic_runtime_resume, |
@@ -1141,8 +1157,8 @@ struct bus_type platform_bus_type = { | |||
1141 | .dev_groups = platform_dev_groups, | 1157 | .dev_groups = platform_dev_groups, |
1142 | .match = platform_match, | 1158 | .match = platform_match, |
1143 | .uevent = platform_uevent, | 1159 | .uevent = platform_uevent, |
1160 | .dma_configure = platform_dma_configure, | ||
1144 | .pm = &platform_dev_pm_ops, | 1161 | .pm = &platform_dev_pm_ops, |
1145 | .force_dma = true, | ||
1146 | }; | 1162 | }; |
1147 | EXPORT_SYMBOL_GPL(platform_bus_type); | 1163 | EXPORT_SYMBOL_GPL(platform_bus_type); |
1148 | 1164 | ||
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index e6986c7608f1..fc1f4acdd189 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c | |||
@@ -207,7 +207,7 @@ static void bcma_of_fill_device(struct device *parent, | |||
207 | 207 | ||
208 | core->irq = bcma_of_get_irq(parent, core, 0); | 208 | core->irq = bcma_of_get_irq(parent, core, 0); |
209 | 209 | ||
210 | of_dma_configure(&core->dev, node); | 210 | of_dma_configure(&core->dev, node, false); |
211 | } | 211 | } |
212 | 212 | ||
213 | unsigned int bcma_core_irq(struct bcma_device *core, int num) | 213 | unsigned int bcma_core_irq(struct bcma_device *core, int num) |
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index 000c7019ca7d..d64edeb6771a 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c | |||
@@ -398,7 +398,7 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) | |||
398 | } | 398 | } |
399 | of_node_get(child); | 399 | of_node_get(child); |
400 | new_pdev->dev.of_node = child; | 400 | new_pdev->dev.of_node = child; |
401 | of_dma_configure(&new_pdev->dev, child); | 401 | of_dma_configure(&new_pdev->dev, child, true); |
402 | /* | 402 | /* |
403 | * It is assumed that calling of_msi_configure is safe on | 403 | * It is assumed that calling of_msi_configure is safe on |
404 | * platforms with or without MSI support. | 404 | * platforms with or without MSI support. |
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 88a3558b7916..815bdb42e3f0 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c | |||
@@ -314,6 +314,11 @@ static int host1x_device_match(struct device *dev, struct device_driver *drv) | |||
314 | return strcmp(dev_name(dev), drv->name) == 0; | 314 | return strcmp(dev_name(dev), drv->name) == 0; |
315 | } | 315 | } |
316 | 316 | ||
317 | static int host1x_dma_configure(struct device *dev) | ||
318 | { | ||
319 | return of_dma_configure(dev, dev->of_node, true); | ||
320 | } | ||
321 | |||
317 | static const struct dev_pm_ops host1x_device_pm_ops = { | 322 | static const struct dev_pm_ops host1x_device_pm_ops = { |
318 | .suspend = pm_generic_suspend, | 323 | .suspend = pm_generic_suspend, |
319 | .resume = pm_generic_resume, | 324 | .resume = pm_generic_resume, |
@@ -326,8 +331,8 @@ static const struct dev_pm_ops host1x_device_pm_ops = { | |||
326 | struct bus_type host1x_bus_type = { | 331 | struct bus_type host1x_bus_type = { |
327 | .name = "host1x", | 332 | .name = "host1x", |
328 | .match = host1x_device_match, | 333 | .match = host1x_device_match, |
334 | .dma_configure = host1x_dma_configure, | ||
329 | .pm = &host1x_device_pm_ops, | 335 | .pm = &host1x_device_pm_ops, |
330 | .force_dma = true, | ||
331 | }; | 336 | }; |
332 | 337 | ||
333 | static void __host1x_device_del(struct host1x_device *device) | 338 | static void __host1x_device_del(struct host1x_device *device) |
@@ -416,7 +421,7 @@ static int host1x_device_add(struct host1x *host1x, | |||
416 | device->dev.bus = &host1x_bus_type; | 421 | device->dev.bus = &host1x_bus_type; |
417 | device->dev.parent = host1x->dev; | 422 | device->dev.parent = host1x->dev; |
418 | 423 | ||
419 | of_dma_configure(&device->dev, host1x->dev->of_node); | 424 | of_dma_configure(&device->dev, host1x->dev->of_node, true); |
420 | 425 | ||
421 | err = host1x_device_parse_dt(device, driver); | 426 | err = host1x_device_parse_dt(device, driver); |
422 | if (err < 0) { | 427 | if (err < 0) { |
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 54d4d78ca46a..6f344654ef22 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -180,7 +180,6 @@ EXPORT_SYMBOL_GPL(ide_dma_unmap_sg); | |||
180 | void ide_dma_off_quietly(ide_drive_t *drive) | 180 | void ide_dma_off_quietly(ide_drive_t *drive) |
181 | { | 181 | { |
182 | drive->dev_flags &= ~IDE_DFLAG_USING_DMA; | 182 | drive->dev_flags &= ~IDE_DFLAG_USING_DMA; |
183 | ide_toggle_bounce(drive, 0); | ||
184 | 183 | ||
185 | drive->hwif->dma_ops->dma_host_set(drive, 0); | 184 | drive->hwif->dma_ops->dma_host_set(drive, 0); |
186 | } | 185 | } |
@@ -211,7 +210,6 @@ EXPORT_SYMBOL(ide_dma_off); | |||
211 | void ide_dma_on(ide_drive_t *drive) | 210 | void ide_dma_on(ide_drive_t *drive) |
212 | { | 211 | { |
213 | drive->dev_flags |= IDE_DFLAG_USING_DMA; | 212 | drive->dev_flags |= IDE_DFLAG_USING_DMA; |
214 | ide_toggle_bounce(drive, 1); | ||
215 | 213 | ||
216 | drive->hwif->dma_ops->dma_host_set(drive, 1); | 214 | drive->hwif->dma_ops->dma_host_set(drive, 1); |
217 | } | 215 | } |
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index e1180fa46196..78cb79eddc8b 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c | |||
@@ -6,32 +6,6 @@ | |||
6 | #include <linux/ide.h> | 6 | #include <linux/ide.h> |
7 | #include <linux/bitops.h> | 7 | #include <linux/bitops.h> |
8 | 8 | ||
9 | /** | ||
10 | * ide_toggle_bounce - handle bounce buffering | ||
11 | * @drive: drive to update | ||
12 | * @on: on/off boolean | ||
13 | * | ||
14 | * Enable or disable bounce buffering for the device. Drives move | ||
15 | * between PIO and DMA and that changes the rules we need. | ||
16 | */ | ||
17 | |||
18 | void ide_toggle_bounce(ide_drive_t *drive, int on) | ||
19 | { | ||
20 | u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */ | ||
21 | |||
22 | if (!PCI_DMA_BUS_IS_PHYS) { | ||
23 | addr = BLK_BOUNCE_ANY; | ||
24 | } else if (on && drive->media == ide_disk) { | ||
25 | struct device *dev = drive->hwif->dev; | ||
26 | |||
27 | if (dev && dev->dma_mask) | ||
28 | addr = *dev->dma_mask; | ||
29 | } | ||
30 | |||
31 | if (drive->queue) | ||
32 | blk_queue_bounce_limit(drive->queue, addr); | ||
33 | } | ||
34 | |||
35 | u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48) | 9 | u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48) |
36 | { | 10 | { |
37 | struct ide_taskfile *tf = &cmd->tf; | 11 | struct ide_taskfile *tf = &cmd->tf; |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 2019e66eada7..56d7bc228cb3 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -796,8 +796,7 @@ static int ide_init_queue(ide_drive_t *drive) | |||
796 | * This will be fixed once we teach pci_map_sg() about our boundary | 796 | * This will be fixed once we teach pci_map_sg() about our boundary |
797 | * requirements, hopefully soon. *FIXME* | 797 | * requirements, hopefully soon. *FIXME* |
798 | */ | 798 | */ |
799 | if (!PCI_DMA_BUS_IS_PHYS) | 799 | max_sg_entries >>= 1; |
800 | max_sg_entries >>= 1; | ||
801 | #endif /* CONFIG_PCI */ | 800 | #endif /* CONFIG_PCI */ |
802 | 801 | ||
803 | blk_queue_max_segments(q, max_sg_entries); | 802 | blk_queue_max_segments(q, max_sg_entries); |
@@ -805,9 +804,6 @@ static int ide_init_queue(ide_drive_t *drive) | |||
805 | /* assign drive queue */ | 804 | /* assign drive queue */ |
806 | drive->queue = q; | 805 | drive->queue = q; |
807 | 806 | ||
808 | /* needs drive->queue to be set */ | ||
809 | ide_toggle_bounce(drive, 1); | ||
810 | |||
811 | return 0; | 807 | return 0; |
812 | } | 808 | } |
813 | 809 | ||
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index df171cb85822..5b714a062fa7 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -146,6 +146,7 @@ config INTEL_IOMMU | |||
146 | select DMA_DIRECT_OPS | 146 | select DMA_DIRECT_OPS |
147 | select IOMMU_API | 147 | select IOMMU_API |
148 | select IOMMU_IOVA | 148 | select IOMMU_IOVA |
149 | select NEED_DMA_MAP_STATE | ||
149 | select DMAR_TABLE | 150 | select DMAR_TABLE |
150 | help | 151 | help |
151 | DMA remapping (DMAR) devices support enables independent address | 152 | DMA remapping (DMAR) devices support enables independent address |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index a4ebd8715494..661828e8fdcf 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -1289,9 +1289,8 @@ static int efx_init_io(struct efx_nic *efx) | |||
1289 | 1289 | ||
1290 | pci_set_master(pci_dev); | 1290 | pci_set_master(pci_dev); |
1291 | 1291 | ||
1292 | /* Set the PCI DMA mask. Try all possibilities from our | 1292 | /* Set the PCI DMA mask. Try all possibilities from our genuine mask |
1293 | * genuine mask down to 32 bits, because some architectures | 1293 | * down to 32 bits, because some architectures will allow 40 bit |
1294 | * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit | ||
1295 | * masks even though they reject 46 bit masks. | 1294 | * masks even though they reject 46 bit masks. |
1296 | */ | 1295 | */ |
1297 | while (dma_mask > 0x7fffffffUL) { | 1296 | while (dma_mask > 0x7fffffffUL) { |
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 3d6c91e96589..dd5530a4f8c8 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c | |||
@@ -1242,9 +1242,8 @@ static int ef4_init_io(struct ef4_nic *efx) | |||
1242 | 1242 | ||
1243 | pci_set_master(pci_dev); | 1243 | pci_set_master(pci_dev); |
1244 | 1244 | ||
1245 | /* Set the PCI DMA mask. Try all possibilities from our | 1245 | /* Set the PCI DMA mask. Try all possibilities from our genuine mask |
1246 | * genuine mask down to 32 bits, because some architectures | 1246 | * down to 32 bits, because some architectures will allow 40 bit |
1247 | * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit | ||
1248 | * masks even though they reject 46 bit masks. | 1247 | * masks even though they reject 46 bit masks. |
1249 | */ | 1248 | */ |
1250 | while (dma_mask > 0x7fffffffUL) { | 1249 | while (dma_mask > 0x7fffffffUL) { |
diff --git a/drivers/of/device.c b/drivers/of/device.c index 064c818105bd..33d85511d790 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c | |||
@@ -76,6 +76,8 @@ int of_device_add(struct platform_device *ofdev) | |||
76 | * of_dma_configure - Setup DMA configuration | 76 | * of_dma_configure - Setup DMA configuration |
77 | * @dev: Device to apply DMA configuration | 77 | * @dev: Device to apply DMA configuration |
78 | * @np: Pointer to OF node having DMA configuration | 78 | * @np: Pointer to OF node having DMA configuration |
79 | * @force_dma: Whether device is to be set up by of_dma_configure() even if | ||
80 | * DMA capability is not explicitly described by firmware. | ||
79 | * | 81 | * |
80 | * Try to get devices's DMA configuration from DT and update it | 82 | * Try to get devices's DMA configuration from DT and update it |
81 | * accordingly. | 83 | * accordingly. |
@@ -84,7 +86,7 @@ int of_device_add(struct platform_device *ofdev) | |||
84 | * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events | 86 | * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events |
85 | * to fix up DMA configuration. | 87 | * to fix up DMA configuration. |
86 | */ | 88 | */ |
87 | int of_dma_configure(struct device *dev, struct device_node *np) | 89 | int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) |
88 | { | 90 | { |
89 | u64 dma_addr, paddr, size = 0; | 91 | u64 dma_addr, paddr, size = 0; |
90 | int ret; | 92 | int ret; |
@@ -100,7 +102,7 @@ int of_dma_configure(struct device *dev, struct device_node *np) | |||
100 | * DMA configuration regardless of whether "dma-ranges" is | 102 | * DMA configuration regardless of whether "dma-ranges" is |
101 | * correctly specified or not. | 103 | * correctly specified or not. |
102 | */ | 104 | */ |
103 | if (!dev->bus->force_dma) | 105 | if (!force_dma) |
104 | return ret == -ENODEV ? 0 : ret; | 106 | return ret == -ENODEV ? 0 : ret; |
105 | 107 | ||
106 | dma_addr = offset = 0; | 108 | dma_addr = offset = 0; |
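The new force_dma argument turns the old bus-wide flag into a per-call decision: the bcma caller above passes false so only devices whose DT node actually describes DMA get configured, while the hidma, host1x and reserved-memory callers pass true because their devices are DMA-capable by construction. A small sketch of the non-forced case, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/of_device.h>

/* Discovery path that only trusts an explicit firmware DMA description. */
static void example_setup_discovered_device(struct device *dev,
					    struct device_node *np)
{
	if (of_dma_configure(dev, np, false))
		dev_warn(dev, "DMA configuration from DT failed\n");
}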
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 9a4f4246231d..895c83e0c7b6 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
@@ -353,7 +353,7 @@ int of_reserved_mem_device_init_by_idx(struct device *dev, | |||
353 | /* ensure that dma_ops is set for virtual devices | 353 | /* ensure that dma_ops is set for virtual devices |
354 | * using reserved memory | 354 | * using reserved memory |
355 | */ | 355 | */ |
356 | of_dma_configure(dev, np); | 356 | of_dma_configure(dev, np, true); |
357 | 357 | ||
358 | dev_info(dev, "assigned reserved memory node %s\n", rmem->name); | 358 | dev_info(dev, "assigned reserved memory node %s\n", rmem->name); |
359 | } else { | 359 | } else { |
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig index 3a102a84d637..5a48b5606110 100644 --- a/drivers/parisc/Kconfig +++ b/drivers/parisc/Kconfig | |||
@@ -103,11 +103,6 @@ config IOMMU_SBA | |||
103 | depends on PCI_LBA | 103 | depends on PCI_LBA |
104 | default PCI_LBA | 104 | default PCI_LBA |
105 | 105 | ||
106 | config IOMMU_HELPER | ||
107 | bool | ||
108 | depends on IOMMU_SBA || IOMMU_CCIO | ||
109 | default y | ||
110 | |||
111 | source "drivers/pcmcia/Kconfig" | 106 | source "drivers/pcmcia/Kconfig" |
112 | 107 | ||
113 | endmenu | 108 | endmenu |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index d29cedb3f23b..614823617b8b 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -1570,8 +1570,6 @@ static int __init ccio_probe(struct parisc_device *dev) | |||
1570 | } | 1570 | } |
1571 | #endif | 1571 | #endif |
1572 | ioc_count++; | 1572 | ioc_count++; |
1573 | |||
1574 | parisc_has_iommu(); | ||
1575 | return 0; | 1573 | return 0; |
1576 | } | 1574 | } |
1577 | 1575 | ||
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 0d33d1f86d10..11de0eccf968 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -1989,8 +1989,6 @@ static int __init sba_driver_callback(struct parisc_device *dev) | |||
1989 | proc_create_single("sba_iommu", 0, root, sba_proc_info); | 1989 | proc_create_single("sba_iommu", 0, root, sba_proc_info); |
1990 | proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info); | 1990 | proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info); |
1991 | #endif | 1991 | #endif |
1992 | |||
1993 | parisc_has_iommu(); | ||
1994 | return 0; | 1992 | return 0; |
1995 | } | 1993 | } |
1996 | 1994 | ||
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 34b56a8f8480..29a487f31dae 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -5,10 +5,6 @@ | |||
5 | 5 | ||
6 | source "drivers/pci/pcie/Kconfig" | 6 | source "drivers/pci/pcie/Kconfig" |
7 | 7 | ||
8 | config PCI_BUS_ADDR_T_64BIT | ||
9 | def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT) | ||
10 | depends on PCI | ||
11 | |||
12 | config PCI_MSI | 8 | config PCI_MSI |
13 | bool "Message Signaled Interrupts (MSI and MSI-X)" | 9 | bool "Message Signaled Interrupts (MSI and MSI-X)" |
14 | depends on PCI | 10 | depends on PCI |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index bc2ded4c451f..35b7fc87eac5 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -120,7 +120,7 @@ int devm_request_pci_bus_resources(struct device *dev, | |||
120 | EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources); | 120 | EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources); |
121 | 121 | ||
122 | static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL}; | 122 | static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL}; |
123 | #ifdef CONFIG_PCI_BUS_ADDR_T_64BIT | 123 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
124 | static struct pci_bus_region pci_64_bit = {0, | 124 | static struct pci_bus_region pci_64_bit = {0, |
125 | (pci_bus_addr_t) 0xffffffffffffffffULL}; | 125 | (pci_bus_addr_t) 0xffffffffffffffffULL}; |
126 | static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL, | 126 | static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL, |
@@ -230,7 +230,7 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | |||
230 | resource_size_t), | 230 | resource_size_t), |
231 | void *alignf_data) | 231 | void *alignf_data) |
232 | { | 232 | { |
233 | #ifdef CONFIG_PCI_BUS_ADDR_T_64BIT | 233 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
234 | int rc; | 234 | int rc; |
235 | 235 | ||
236 | if (res->flags & IORESOURCE_MEM_64) { | 236 | if (res->flags & IORESOURCE_MEM_64) { |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index b9a131137e64..f8269a725667 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/pm_runtime.h> | 16 | #include <linux/pm_runtime.h> |
17 | #include <linux/suspend.h> | 17 | #include <linux/suspend.h> |
18 | #include <linux/kexec.h> | 18 | #include <linux/kexec.h> |
19 | #include <linux/of_device.h> | ||
20 | #include <linux/acpi.h> | ||
19 | #include "pci.h" | 21 | #include "pci.h" |
20 | #include "pcie/portdrv.h" | 22 | #include "pcie/portdrv.h" |
21 | 23 | ||
@@ -1577,6 +1579,35 @@ static int pci_bus_num_vf(struct device *dev) | |||
1577 | return pci_num_vf(to_pci_dev(dev)); | 1579 | return pci_num_vf(to_pci_dev(dev)); |
1578 | } | 1580 | } |
1579 | 1581 | ||
1582 | /** | ||
1583 | * pci_dma_configure - Setup DMA configuration | ||
1584 | * @dev: ptr to dev structure | ||
1585 | * | ||
1586 | * Function to update a PCI device's DMA configuration using the same | ||
1587 | * info from the OF node or ACPI node of the host bridge's parent (if any). | ||
1588 | */ | ||
1589 | static int pci_dma_configure(struct device *dev) | ||
1590 | { | ||
1591 | struct device *bridge; | ||
1592 | int ret = 0; | ||
1593 | |||
1594 | bridge = pci_get_host_bridge_device(to_pci_dev(dev)); | ||
1595 | |||
1596 | if (IS_ENABLED(CONFIG_OF) && bridge->parent && | ||
1597 | bridge->parent->of_node) { | ||
1598 | ret = of_dma_configure(dev, bridge->parent->of_node, true); | ||
1599 | } else if (has_acpi_companion(bridge)) { | ||
1600 | struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); | ||
1601 | enum dev_dma_attr attr = acpi_get_dma_attr(adev); | ||
1602 | |||
1603 | if (attr != DEV_DMA_NOT_SUPPORTED) | ||
1604 | ret = acpi_dma_configure(dev, attr); | ||
1605 | } | ||
1606 | |||
1607 | pci_put_host_bridge_device(bridge); | ||
1608 | return ret; | ||
1609 | } | ||
1610 | |||
1580 | struct bus_type pci_bus_type = { | 1611 | struct bus_type pci_bus_type = { |
1581 | .name = "pci", | 1612 | .name = "pci", |
1582 | .match = pci_bus_match, | 1613 | .match = pci_bus_match, |
@@ -1589,7 +1620,7 @@ struct bus_type pci_bus_type = { | |||
1589 | .drv_groups = pci_drv_groups, | 1620 | .drv_groups = pci_drv_groups, |
1590 | .pm = PCI_PM_OPS_PTR, | 1621 | .pm = PCI_PM_OPS_PTR, |
1591 | .num_vf = pci_bus_num_vf, | 1622 | .num_vf = pci_bus_num_vf, |
1592 | .force_dma = true, | 1623 | .dma_configure = pci_dma_configure, |
1593 | }; | 1624 | }; |
1594 | EXPORT_SYMBOL(pci_bus_type); | 1625 | EXPORT_SYMBOL(pci_bus_type); |
1595 | 1626 | ||
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index f125fd71c0f2..fb38aeff9dbd 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -2149,27 +2149,6 @@ static int scsi_map_queues(struct blk_mq_tag_set *set) | |||
2149 | return blk_mq_map_queues(set); | 2149 | return blk_mq_map_queues(set); |
2150 | } | 2150 | } |
2151 | 2151 | ||
2152 | static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) | ||
2153 | { | ||
2154 | struct device *host_dev; | ||
2155 | u64 bounce_limit = 0xffffffff; | ||
2156 | |||
2157 | if (shost->unchecked_isa_dma) | ||
2158 | return BLK_BOUNCE_ISA; | ||
2159 | /* | ||
2160 | * Platforms with virtual-DMA translation | ||
2161 | * hardware have no practical limit. | ||
2162 | */ | ||
2163 | if (!PCI_DMA_BUS_IS_PHYS) | ||
2164 | return BLK_BOUNCE_ANY; | ||
2165 | |||
2166 | host_dev = scsi_get_device(shost); | ||
2167 | if (host_dev && host_dev->dma_mask) | ||
2168 | bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; | ||
2169 | |||
2170 | return bounce_limit; | ||
2171 | } | ||
2172 | |||
2173 | void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) | 2152 | void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) |
2174 | { | 2153 | { |
2175 | struct device *dev = shost->dma_dev; | 2154 | struct device *dev = shost->dma_dev; |
@@ -2189,7 +2168,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) | |||
2189 | } | 2168 | } |
2190 | 2169 | ||
2191 | blk_queue_max_hw_sectors(q, shost->max_sectors); | 2170 | blk_queue_max_hw_sectors(q, shost->max_sectors); |
2192 | blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); | 2171 | if (shost->unchecked_isa_dma) |
2172 | blk_queue_bounce_limit(q, BLK_BOUNCE_ISA); | ||
2193 | blk_queue_segment_boundary(q, shost->dma_boundary); | 2173 | blk_queue_segment_boundary(q, shost->dma_boundary); |
2194 | dma_set_seg_boundary(dev, shost->dma_boundary); | 2174 | dma_set_seg_boundary(dev, shost->dma_boundary); |
2195 | 2175 | ||
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index 880a292d792f..ad2868263867 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h | |||
@@ -4,7 +4,16 @@ | |||
4 | 4 | ||
5 | static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | 5 | static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) |
6 | { | 6 | { |
7 | /* | ||
8 | * Use the non-coherent ops if available. If an architecture wants a | ||
9 | * more fine-grained selection of operations it will have to implement | ||
10 | * get_arch_dma_ops itself or use the per-device dma_ops. | ||
11 | */ | ||
12 | #ifdef CONFIG_DMA_NONCOHERENT_OPS | ||
13 | return &dma_noncoherent_ops; | ||
14 | #else | ||
7 | return &dma_direct_ops; | 15 | return &dma_direct_ops; |
16 | #endif | ||
8 | } | 17 | } |
9 | 18 | ||
10 | #endif /* _ASM_GENERIC_DMA_MAPPING_H */ | 19 | #endif /* _ASM_GENERIC_DMA_MAPPING_H */ |
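Selecting the generic dma_noncoherent_ops reduces the per-architecture burden from a full dma_map_ops implementation to a few cache-maintenance hooks plus coherent alloc/free. A sketch of those hooks, with names assumed from the dma-noncoherent support added in this series:

#include <linux/dma-noncoherent.h>

/* Make a buffer visible to the device before DMA starts. */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	/* arch-specific cache writeback for [paddr, paddr + size) */
}

/* Make device-written data visible to the CPU after DMA completes. */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	/* arch-specific cache invalidate for [paddr, paddr + size) */
}

/* arch_dma_alloc()/arch_dma_free() remain per-architecture as well and
 * typically hand back an uncached mapping for coherent allocations. */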
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h index 830d7659289b..6bb3cd3d695a 100644 --- a/include/asm-generic/pci.h +++ b/include/asm-generic/pci.h | |||
@@ -14,12 +14,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | |||
14 | } | 14 | } |
15 | #endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ | 15 | #endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ |
16 | 16 | ||
17 | /* | ||
18 | * By default, assume that no iommu is in use and that the PCI | ||
19 | * space is mapped to address physical 0. | ||
20 | */ | ||
21 | #ifndef PCI_DMA_BUS_IS_PHYS | ||
22 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
23 | #endif | ||
24 | |||
25 | #endif /* _ASM_GENERIC_PCI_H */ | 17 | #endif /* _ASM_GENERIC_PCI_H */ |
diff --git a/include/linux/device.h b/include/linux/device.h index 477956990f5e..00b6c3b42437 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -88,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); | |||
88 | * @resume: Called to bring a device on this bus out of sleep mode. | 88 | * @resume: Called to bring a device on this bus out of sleep mode. |
89 | * @num_vf: Called to find out how many virtual functions a device on this | 89 | * @num_vf: Called to find out how many virtual functions a device on this |
90 | * bus supports. | 90 | * bus supports. |
91 | * @dma_configure: Called to set up the DMA configuration of a device on | ||
92 | this bus. | ||
91 | * @pm: Power management operations of this bus, callback the specific | 93 | * @pm: Power management operations of this bus, callback the specific |
92 | * device driver's pm-ops. | 94 | * device driver's pm-ops. |
93 | * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU | 95 | * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU |
@@ -96,8 +98,6 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); | |||
96 | * @p: The private data of the driver core, only the driver core can | 98 | * @p: The private data of the driver core, only the driver core can |
97 | * touch this. | 99 | * touch this. |
98 | * @lock_key: Lock class key for use by the lock validator | 100 | * @lock_key: Lock class key for use by the lock validator |
99 | * @force_dma: Assume devices on this bus should be set up by dma_configure() | ||
100 | * even if DMA capability is not explicitly described by firmware. | ||
101 | * | 101 | * |
102 | * A bus is a channel between the processor and one or more devices. For the | 102 | * A bus is a channel between the processor and one or more devices. For the |
103 | * purposes of the device model, all devices are connected via a bus, even if | 103 | * purposes of the device model, all devices are connected via a bus, even if |
@@ -130,14 +130,14 @@ struct bus_type { | |||
130 | 130 | ||
131 | int (*num_vf)(struct device *dev); | 131 | int (*num_vf)(struct device *dev); |
132 | 132 | ||
133 | int (*dma_configure)(struct device *dev); | ||
134 | |||
133 | const struct dev_pm_ops *pm; | 135 | const struct dev_pm_ops *pm; |
134 | 136 | ||
135 | const struct iommu_ops *iommu_ops; | 137 | const struct iommu_ops *iommu_ops; |
136 | 138 | ||
137 | struct subsys_private *p; | 139 | struct subsys_private *p; |
138 | struct lock_class_key lock_key; | 140 | struct lock_class_key lock_key; |
139 | |||
140 | bool force_dma; | ||
141 | }; | 141 | }; |
142 | 142 | ||
143 | extern int __must_check bus_register(struct bus_type *bus); | 143 | extern int __must_check bus_register(struct bus_type *bus); |
@@ -904,6 +904,8 @@ struct dev_links_info { | |||
904 | * @offline: Set after successful invocation of bus type's .offline(). | 904 | * @offline: Set after successful invocation of bus type's .offline(). |
905 | * @of_node_reused: Set if the device-tree node is shared with an ancestor | 905 | * @of_node_reused: Set if the device-tree node is shared with an ancestor |
906 | * device. | 906 | * device. |
907 | * @dma_32bit_limit: bridge limited to 32bit DMA even if the device itself | ||
908 | * indicates support for a higher limit in the dma_mask field. | ||
907 | * | 909 | * |
908 | * At the lowest level, every device in a Linux system is represented by an | 910 | * At the lowest level, every device in a Linux system is represented by an |
909 | * instance of struct device. The device structure contains the information | 911 | * instance of struct device. The device structure contains the information |
@@ -992,6 +994,7 @@ struct device { | |||
992 | bool offline_disabled:1; | 994 | bool offline_disabled:1; |
993 | bool offline:1; | 995 | bool offline:1; |
994 | bool of_node_reused:1; | 996 | bool of_node_reused:1; |
997 | bool dma_32bit_limit:1; | ||
995 | }; | 998 | }; |
996 | 999 | ||
997 | static inline struct device *kobj_to_dev(struct kobject *kobj) | 1000 | static inline struct device *kobj_to_dev(struct kobject *kobj) |
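With bus_type.force_dma gone and the per-device dma_32bit_limit bit added, a quirk for a bridge that mishandles 64-bit addressing only has to set the new flag; dma_direct_supported() (further down in this pull) then rejects any mask above 32 bits. A hedged sketch of such a quirk as a PCI header fixup; the vendor ID is a placeholder, and the real user in this series is the x86 VIA quirk:

#include <linux/pci.h>

static void quirk_dma_32bit_bridge(struct pci_dev *pdev)
{
	/* dma_direct_supported() will now refuse dma_set_mask() above 32 bits */
	pdev->dev.dma_32bit_limit = true;
}
DECLARE_PCI_FIXUP_HEADER(0x1106 /* placeholder vendor ID */, PCI_ANY_ID,
			 quirk_dma_32bit_bridge);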
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index c7d844f09c3a..a785f2507159 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h | |||
@@ -30,8 +30,6 @@ struct bus_type; | |||
30 | 30 | ||
31 | extern void dma_debug_add_bus(struct bus_type *bus); | 31 | extern void dma_debug_add_bus(struct bus_type *bus); |
32 | 32 | ||
33 | extern void dma_debug_init(u32 num_entries); | ||
34 | |||
35 | extern int dma_debug_resize_entries(u32 num_entries); | 33 | extern int dma_debug_resize_entries(u32 num_entries); |
36 | 34 | ||
37 | extern void debug_dma_map_page(struct device *dev, struct page *page, | 35 | extern void debug_dma_map_page(struct device *dev, struct page *page, |
@@ -100,10 +98,6 @@ static inline void dma_debug_add_bus(struct bus_type *bus) | |||
100 | { | 98 | { |
101 | } | 99 | } |
102 | 100 | ||
103 | static inline void dma_debug_init(u32 num_entries) | ||
104 | { | ||
105 | } | ||
106 | |||
107 | static inline int dma_debug_resize_entries(u32 num_entries) | 101 | static inline int dma_debug_resize_entries(u32 num_entries) |
108 | { | 102 | { |
109 | return 0; | 103 | return 0; |
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 53ad6a47f513..8d9f33febde5 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h | |||
@@ -59,6 +59,11 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
59 | gfp_t gfp, unsigned long attrs); | 59 | gfp_t gfp, unsigned long attrs); |
60 | void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, | 60 | void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, |
61 | dma_addr_t dma_addr, unsigned long attrs); | 61 | dma_addr_t dma_addr, unsigned long attrs); |
62 | dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, | ||
63 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
64 | unsigned long attrs); | ||
65 | int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, | ||
66 | enum dma_data_direction dir, unsigned long attrs); | ||
62 | int dma_direct_supported(struct device *dev, u64 mask); | 67 | int dma_direct_supported(struct device *dev, u64 mask); |
63 | 68 | int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr); | |
64 | #endif /* _LINUX_DMA_DIRECT_H */ | 69 | #endif /* _LINUX_DMA_DIRECT_H */ |
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index f8ab1c0f589e..f9cc309507d9 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -133,10 +133,10 @@ struct dma_map_ops { | |||
133 | #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK | 133 | #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK |
134 | u64 (*get_required_mask)(struct device *dev); | 134 | u64 (*get_required_mask)(struct device *dev); |
135 | #endif | 135 | #endif |
136 | int is_phys; | ||
137 | }; | 136 | }; |
138 | 137 | ||
139 | extern const struct dma_map_ops dma_direct_ops; | 138 | extern const struct dma_map_ops dma_direct_ops; |
139 | extern const struct dma_map_ops dma_noncoherent_ops; | ||
140 | extern const struct dma_map_ops dma_virt_ops; | 140 | extern const struct dma_map_ops dma_virt_ops; |
141 | 141 | ||
142 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) | 142 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) |
@@ -502,7 +502,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, | |||
502 | #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) | 502 | #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) |
503 | 503 | ||
504 | #ifndef arch_dma_alloc_attrs | 504 | #ifndef arch_dma_alloc_attrs |
505 | #define arch_dma_alloc_attrs(dev, flag) (true) | 505 | #define arch_dma_alloc_attrs(dev) (true) |
506 | #endif | 506 | #endif |
507 | 507 | ||
508 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | 508 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, |
@@ -521,7 +521,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, | |||
521 | /* let the implementation decide on the zone to allocate from: */ | 521 | /* let the implementation decide on the zone to allocate from: */ |
522 | flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); | 522 | flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); |
523 | 523 | ||
524 | if (!arch_dma_alloc_attrs(&dev, &flag)) | 524 | if (!arch_dma_alloc_attrs(&dev)) |
525 | return NULL; | 525 | return NULL; |
526 | if (!ops->alloc) | 526 | if (!ops->alloc) |
527 | return NULL; | 527 | return NULL; |
@@ -572,14 +572,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
572 | return 0; | 572 | return 0; |
573 | } | 573 | } |
574 | 574 | ||
575 | /* | ||
576 | * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please | ||
577 | * don't use this in new code. | ||
578 | */ | ||
579 | #ifndef arch_dma_supported | ||
580 | #define arch_dma_supported(dev, mask) (1) | ||
581 | #endif | ||
582 | |||
583 | static inline void dma_check_mask(struct device *dev, u64 mask) | 575 | static inline void dma_check_mask(struct device *dev, u64 mask) |
584 | { | 576 | { |
585 | if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) | 577 | if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) |
@@ -592,9 +584,6 @@ static inline int dma_supported(struct device *dev, u64 mask) | |||
592 | 584 | ||
593 | if (!ops) | 585 | if (!ops) |
594 | return 0; | 586 | return 0; |
595 | if (!arch_dma_supported(dev, mask)) | ||
596 | return 0; | ||
597 | |||
598 | if (!ops->dma_supported) | 587 | if (!ops->dma_supported) |
599 | return 1; | 588 | return 1; |
600 | return ops->dma_supported(dev, mask); | 589 | return ops->dma_supported(dev, mask); |
@@ -839,7 +828,7 @@ static inline int dma_mmap_wc(struct device *dev, | |||
839 | #define dma_mmap_writecombine dma_mmap_wc | 828 | #define dma_mmap_writecombine dma_mmap_wc |
840 | #endif | 829 | #endif |
841 | 830 | ||
842 | #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG) | 831 | #ifdef CONFIG_NEED_DMA_MAP_STATE |
843 | #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME | 832 | #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME |
844 | #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME | 833 | #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME |
845 | #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) | 834 | #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) |
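Since DMA_API_DEBUG now selects NEED_DMA_MAP_STATE instead of widening the #if above, the dma_unmap_* state helpers behave the same for drivers. A short usage sketch of the pattern these macros support, with a hypothetical driver structure and function names:

#include <linux/dma-mapping.h>

struct my_rx_buf {				/* hypothetical driver state */
	void *data;
	DEFINE_DMA_UNMAP_ADDR(dma);		/* compiled away without map state */
	DEFINE_DMA_UNMAP_LEN(len);
};

static int my_map(struct device *dev, struct my_rx_buf *buf, size_t size)
{
	dma_addr_t addr = dma_map_single(dev, buf->data, size, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	dma_unmap_addr_set(buf, dma, addr);
	dma_unmap_len_set(buf, len, size);
	return 0;
}

static void my_unmap(struct device *dev, struct my_rx_buf *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_FROM_DEVICE);
}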
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h new file mode 100644 index 000000000000..10b2654d549b --- /dev/null +++ b/include/linux/dma-noncoherent.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef _LINUX_DMA_NONCOHERENT_H | ||
3 | #define _LINUX_DMA_NONCOHERENT_H 1 | ||
4 | |||
5 | #include <linux/dma-mapping.h> | ||
6 | |||
7 | void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
8 | gfp_t gfp, unsigned long attrs); | ||
9 | void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, | ||
10 | dma_addr_t dma_addr, unsigned long attrs); | ||
11 | |||
12 | #ifdef CONFIG_DMA_NONCOHERENT_MMAP | ||
13 | int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, | ||
14 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
15 | unsigned long attrs); | ||
16 | #else | ||
17 | #define arch_dma_mmap NULL | ||
18 | #endif /* CONFIG_DMA_NONCOHERENT_MMAP */ | ||
19 | |||
20 | #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC | ||
21 | void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||
22 | enum dma_data_direction direction); | ||
23 | #else | ||
24 | #define arch_dma_cache_sync NULL | ||
25 | #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */ | ||
26 | |||
27 | #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE | ||
28 | void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, | ||
29 | size_t size, enum dma_data_direction dir); | ||
30 | #else | ||
31 | static inline void arch_sync_dma_for_device(struct device *dev, | ||
32 | phys_addr_t paddr, size_t size, enum dma_data_direction dir) | ||
33 | { | ||
34 | } | ||
35 | #endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */ | ||
36 | |||
37 | #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU | ||
38 | void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, | ||
39 | size_t size, enum dma_data_direction dir); | ||
40 | #else | ||
41 | static inline void arch_sync_dma_for_cpu(struct device *dev, | ||
42 | phys_addr_t paddr, size_t size, enum dma_data_direction dir) | ||
43 | { | ||
44 | } | ||
45 | #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */ | ||
46 | |||
47 | #endif /* _LINUX_DMA_NONCOHERENT_H */ | ||
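The new header defines the whole surface an architecture fills in when it selects DMA_NONCOHERENT_OPS. A minimal sketch, assuming an architecture that also selects ARCH_HAS_SYNC_DMA_FOR_DEVICE and ARCH_HAS_SYNC_DMA_FOR_CPU: besides arch_dma_alloc()/arch_dma_free() (omitted here) it only supplies the two cache-maintenance hooks. my_cache_wback() and my_cache_inv() stand in for whatever writeback/invalidate primitives the architecture actually has; arc, c6x and nds32 wire up their own equivalents in this series.

#include <linux/dma-noncoherent.h>

void my_cache_wback(phys_addr_t paddr, size_t size);	/* placeholder */
void my_cache_inv(phys_addr_t paddr, size_t size);	/* placeholder */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* push CPU-written data out before the device touches the buffer */
	if (dir == DMA_FROM_DEVICE)
		my_cache_inv(paddr, size);
	else
		my_cache_wback(paddr, size);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* drop stale lines before the CPU reads data the device wrote */
	if (dir != DMA_TO_DEVICE)
		my_cache_inv(paddr, size);
}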
diff --git a/include/linux/ide.h b/include/linux/ide.h index 752464f5a772..c74b0321922a 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -1508,8 +1508,6 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data) | |||
1508 | hwif->hwif_data = data; | 1508 | hwif->hwif_data = data; |
1509 | } | 1509 | } |
1510 | 1510 | ||
1511 | extern void ide_toggle_bounce(ide_drive_t *drive, int on); | ||
1512 | |||
1513 | u64 ide_get_lba_addr(struct ide_cmd *, int); | 1511 | u64 ide_get_lba_addr(struct ide_cmd *, int); |
1514 | u8 ide_dump_status(ide_drive_t *, const char *, u8); | 1512 | u8 ide_dump_status(ide_drive_t *, const char *, u8); |
1515 | 1513 | ||
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index cb9a9248c8c0..70d01edcbf8b 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #ifndef _LINUX_IOMMU_HELPER_H | 2 | #ifndef _LINUX_IOMMU_HELPER_H |
3 | #define _LINUX_IOMMU_HELPER_H | 3 | #define _LINUX_IOMMU_HELPER_H |
4 | 4 | ||
5 | #include <linux/bug.h> | ||
5 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
6 | 7 | ||
7 | static inline unsigned long iommu_device_max_index(unsigned long size, | 8 | static inline unsigned long iommu_device_max_index(unsigned long size, |
@@ -14,9 +15,15 @@ static inline unsigned long iommu_device_max_index(unsigned long size, | |||
14 | return size; | 15 | return size; |
15 | } | 16 | } |
16 | 17 | ||
17 | extern int iommu_is_span_boundary(unsigned int index, unsigned int nr, | 18 | static inline int iommu_is_span_boundary(unsigned int index, unsigned int nr, |
18 | unsigned long shift, | 19 | unsigned long shift, unsigned long boundary_size) |
19 | unsigned long boundary_size); | 20 | { |
21 | BUG_ON(!is_power_of_2(boundary_size)); | ||
22 | |||
23 | shift = (shift + index) & (boundary_size - 1); | ||
24 | return shift + nr > boundary_size; | ||
25 | } | ||
26 | |||
20 | extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, | 27 | extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, |
21 | unsigned long start, unsigned int nr, | 28 | unsigned long start, unsigned int nr, |
22 | unsigned long shift, | 29 | unsigned long shift, |
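iommu_is_span_boundary() is now small enough to live in the header as a static inline; allocators call it to reject a candidate range that would straddle a power-of-two boundary. A short worked usage example with illustrative values:

#include <linux/iommu-helper.h>

static bool example_span_check(void)
{
	unsigned long boundary_size = 16;	/* slots per boundary segment */
	unsigned long shift = 0;		/* offset of the map in IOMMU space */
	unsigned int index = 14;		/* candidate start slot */
	unsigned int nr = 4;			/* slots requested */

	/* (0 + 14) & 15 = 14, and 14 + 4 = 18 > 16, so the span crosses */
	return iommu_is_span_boundary(index, nr, shift, boundary_size);
}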
diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 8da5a1b31ece..165fd302b442 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h | |||
@@ -55,7 +55,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) | |||
55 | return of_node_get(cpu_dev->of_node); | 55 | return of_node_get(cpu_dev->of_node); |
56 | } | 56 | } |
57 | 57 | ||
58 | int of_dma_configure(struct device *dev, struct device_node *np); | 58 | int of_dma_configure(struct device *dev, |
59 | struct device_node *np, | ||
60 | bool force_dma); | ||
59 | void of_dma_deconfigure(struct device *dev); | 61 | void of_dma_deconfigure(struct device *dev); |
60 | #else /* CONFIG_OF */ | 62 | #else /* CONFIG_OF */ |
61 | 63 | ||
@@ -105,7 +107,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) | |||
105 | return NULL; | 107 | return NULL; |
106 | } | 108 | } |
107 | 109 | ||
108 | static inline int of_dma_configure(struct device *dev, struct device_node *np) | 110 | static inline int of_dma_configure(struct device *dev, |
111 | struct device_node *np, | ||
112 | bool force_dma) | ||
109 | { | 113 | { |
110 | return 0; | 114 | return 0; |
111 | } | 115 | } |
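of_dma_configure() now takes the force_dma decision as an explicit argument, replacing the removed bus_type.force_dma flag, so each bus states its policy at the call site. A hedged sketch of a bus ->dma_configure() method for a DT-only bus that always wants its devices set up for DMA (the platform and AMBA buses in this series pass true in much the same way); the my_bus names are placeholders:

#include <linux/device.h>
#include <linux/of_device.h>

static int my_bus_dma_configure(struct device *dev)
{
	if (dev->of_node)
		return of_dma_configure(dev, dev->of_node, true);
	return 0;
}

struct bus_type my_bus_type = {
	.name		= "my_bus",
	.dma_configure	= my_bus_dma_configure,
};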
diff --git a/include/linux/pci.h b/include/linux/pci.h index 73178a2fcee0..55371cb827ad 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -670,7 +670,7 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, | |||
670 | int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, | 670 | int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, |
671 | int reg, int len, u32 val); | 671 | int reg, int len, u32 val); |
672 | 672 | ||
673 | #ifdef CONFIG_PCI_BUS_ADDR_T_64BIT | 673 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
674 | typedef u64 pci_bus_addr_t; | 674 | typedef u64 pci_bus_addr_t; |
675 | #else | 675 | #else |
676 | typedef u32 pci_bus_addr_t; | 676 | typedef u32 pci_bus_addr_t; |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 49f634d96118..3097c943fab9 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -356,6 +356,8 @@ extern int platform_pm_restore(struct device *dev); | |||
356 | #define platform_pm_restore NULL | 356 | #define platform_pm_restore NULL |
357 | #endif | 357 | #endif |
358 | 358 | ||
359 | extern int platform_dma_configure(struct device *dev); | ||
360 | |||
359 | #ifdef CONFIG_PM_SLEEP | 361 | #ifdef CONFIG_PM_SLEEP |
360 | #define USE_PLATFORM_PM_SLEEP_OPS \ | 362 | #define USE_PLATFORM_PM_SLEEP_OPS \ |
361 | .suspend = platform_pm_suspend, \ | 363 | .suspend = platform_pm_suspend, \ |
diff --git a/lib/Kconfig b/lib/Kconfig index 5fe577673b98..7a913937888b 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -429,15 +429,50 @@ config SGL_ALLOC | |||
429 | bool | 429 | bool |
430 | default n | 430 | default n |
431 | 431 | ||
432 | config NEED_SG_DMA_LENGTH | ||
433 | bool | ||
434 | |||
435 | config NEED_DMA_MAP_STATE | ||
436 | bool | ||
437 | |||
438 | config ARCH_DMA_ADDR_T_64BIT | ||
439 | def_bool 64BIT || PHYS_ADDR_T_64BIT | ||
440 | |||
441 | config IOMMU_HELPER | ||
442 | bool | ||
443 | |||
444 | config ARCH_HAS_SYNC_DMA_FOR_DEVICE | ||
445 | bool | ||
446 | |||
447 | config ARCH_HAS_SYNC_DMA_FOR_CPU | ||
448 | bool | ||
449 | select NEED_DMA_MAP_STATE | ||
450 | |||
432 | config DMA_DIRECT_OPS | 451 | config DMA_DIRECT_OPS |
433 | bool | 452 | bool |
434 | depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) | 453 | depends on HAS_DMA |
435 | default n | 454 | |
455 | config DMA_NONCOHERENT_OPS | ||
456 | bool | ||
457 | depends on HAS_DMA | ||
458 | select DMA_DIRECT_OPS | ||
459 | |||
460 | config DMA_NONCOHERENT_MMAP | ||
461 | bool | ||
462 | depends on DMA_NONCOHERENT_OPS | ||
463 | |||
464 | config DMA_NONCOHERENT_CACHE_SYNC | ||
465 | bool | ||
466 | depends on DMA_NONCOHERENT_OPS | ||
436 | 467 | ||
437 | config DMA_VIRT_OPS | 468 | config DMA_VIRT_OPS |
438 | bool | 469 | bool |
439 | depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) | 470 | depends on HAS_DMA |
440 | default n | 471 | |
472 | config SWIOTLB | ||
473 | bool | ||
474 | select DMA_DIRECT_OPS | ||
475 | select NEED_DMA_MAP_STATE | ||
441 | 476 | ||
442 | config CHECK_SIGNATURE | 477 | config CHECK_SIGNATURE |
443 | bool | 478 | bool |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index c40c7b734cd1..76555479ae36 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1634,7 +1634,7 @@ config PROVIDE_OHCI1394_DMA_INIT | |||
1634 | 1634 | ||
1635 | config DMA_API_DEBUG | 1635 | config DMA_API_DEBUG |
1636 | bool "Enable debugging of DMA-API usage" | 1636 | bool "Enable debugging of DMA-API usage" |
1637 | depends on HAVE_DMA_API_DEBUG | 1637 | select NEED_DMA_MAP_STATE |
1638 | help | 1638 | help |
1639 | Enable this option to debug the use of the DMA API by device drivers. | 1639 | Enable this option to debug the use of the DMA API by device drivers. |
1640 | With this option you will be able to detect common bugs in device | 1640 | With this option you will be able to detect common bugs in device |
@@ -1651,6 +1651,23 @@ config DMA_API_DEBUG | |||
1651 | 1651 | ||
1652 | If unsure, say N. | 1652 | If unsure, say N. |
1653 | 1653 | ||
1654 | config DMA_API_DEBUG_SG | ||
1655 | bool "Debug DMA scatter-gather usage" | ||
1656 | default y | ||
1657 | depends on DMA_API_DEBUG | ||
1658 | help | ||
1659 | Perform extra checking that callers of dma_map_sg() have respected the | ||
1660 | appropriate segment length/boundary limits for the given device when | ||
1661 | preparing DMA scatterlists. | ||
1662 | |||
1663 | This is particularly likely to have been overlooked in cases where the | ||
1664 | dma_map_sg() API is used for general bulk mapping of pages rather than | ||
1665 | preparing literal scatter-gather descriptors, where there is a risk of | ||
1666 | unexpected behaviour from DMA API implementations if the scatterlist | ||
1667 | is technically out-of-spec. | ||
1668 | |||
1669 | If unsure, say N. | ||
1670 | |||
1654 | menuconfig RUNTIME_TESTING_MENU | 1671 | menuconfig RUNTIME_TESTING_MENU |
1655 | bool "Runtime Testing" | 1672 | bool "Runtime Testing" |
1656 | def_bool y | 1673 | def_bool y |
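The new scatterlist checks compare each mapped segment against the limits the driver advertises through dev->dma_parms, so they only catch real bugs when drivers declare their hardware limits. A hedged sketch of that driver-side declaration; the 64 KiB segment size, 4 GiB boundary and function name are illustrative:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int my_declare_dma_limits(struct device *dev,
				 struct device_dma_parameters *parms)
{
	dev->dma_parms = parms;			/* storage usually sits in driver data */

	if (dma_set_max_seg_size(dev, SZ_64K))	/* max bytes per sg segment */
		return -EIO;
	/* segments must not cross a 4 GiB boundary */
	return dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
}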
diff --git a/lib/Makefile b/lib/Makefile index ce20696d5a92..9f18c8152281 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -30,6 +30,7 @@ lib-$(CONFIG_PRINTK) += dump_stack.o | |||
30 | lib-$(CONFIG_MMU) += ioremap.o | 30 | lib-$(CONFIG_MMU) += ioremap.o |
31 | lib-$(CONFIG_SMP) += cpumask.o | 31 | lib-$(CONFIG_SMP) += cpumask.o |
32 | lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o | 32 | lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o |
33 | lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o | ||
33 | lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o | 34 | lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o |
34 | 35 | ||
35 | lib-y += kobject.o klist.o | 36 | lib-y += kobject.o klist.o |
@@ -147,7 +148,7 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o | |||
147 | obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o | 148 | obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o |
148 | 149 | ||
149 | obj-$(CONFIG_SWIOTLB) += swiotlb.o | 150 | obj-$(CONFIG_SWIOTLB) += swiotlb.o |
150 | obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o | 151 | obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o |
151 | obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o | 152 | obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o |
152 | obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o | 153 | obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o |
153 | obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o | 154 | obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 7f5cdc1e6b29..c007d25bee09 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -41,6 +41,11 @@ | |||
41 | #define HASH_FN_SHIFT 13 | 41 | #define HASH_FN_SHIFT 13 |
42 | #define HASH_FN_MASK (HASH_SIZE - 1) | 42 | #define HASH_FN_MASK (HASH_SIZE - 1) |
43 | 43 | ||
44 | /* allow architectures to override this if absolutely required */ | ||
45 | #ifndef PREALLOC_DMA_DEBUG_ENTRIES | ||
46 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
47 | #endif | ||
48 | |||
44 | enum { | 49 | enum { |
45 | dma_debug_single, | 50 | dma_debug_single, |
46 | dma_debug_page, | 51 | dma_debug_page, |
@@ -127,7 +132,7 @@ static u32 min_free_entries; | |||
127 | static u32 nr_total_entries; | 132 | static u32 nr_total_entries; |
128 | 133 | ||
129 | /* number of preallocated entries requested by kernel cmdline */ | 134 | /* number of preallocated entries requested by kernel cmdline */ |
130 | static u32 req_entries; | 135 | static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; |
131 | 136 | ||
132 | /* debugfs dentry's for the stuff above */ | 137 | /* debugfs dentry's for the stuff above */ |
133 | static struct dentry *dma_debug_dent __read_mostly; | 138 | static struct dentry *dma_debug_dent __read_mostly; |
@@ -439,7 +444,6 @@ void debug_dma_dump_mappings(struct device *dev) | |||
439 | spin_unlock_irqrestore(&bucket->lock, flags); | 444 | spin_unlock_irqrestore(&bucket->lock, flags); |
440 | } | 445 | } |
441 | } | 446 | } |
442 | EXPORT_SYMBOL(debug_dma_dump_mappings); | ||
443 | 447 | ||
444 | /* | 448 | /* |
445 | * For each mapping (initial cacheline in the case of | 449 | * For each mapping (initial cacheline in the case of |
@@ -748,7 +752,6 @@ int dma_debug_resize_entries(u32 num_entries) | |||
748 | 752 | ||
749 | return ret; | 753 | return ret; |
750 | } | 754 | } |
751 | EXPORT_SYMBOL(dma_debug_resize_entries); | ||
752 | 755 | ||
753 | /* | 756 | /* |
754 | * DMA-API debugging init code | 757 | * DMA-API debugging init code |
@@ -1004,10 +1007,7 @@ void dma_debug_add_bus(struct bus_type *bus) | |||
1004 | bus_register_notifier(bus, nb); | 1007 | bus_register_notifier(bus, nb); |
1005 | } | 1008 | } |
1006 | 1009 | ||
1007 | /* | 1010 | static int dma_debug_init(void) |
1008 | * Let the architectures decide how many entries should be preallocated. | ||
1009 | */ | ||
1010 | void dma_debug_init(u32 num_entries) | ||
1011 | { | 1011 | { |
1012 | int i; | 1012 | int i; |
1013 | 1013 | ||
@@ -1015,7 +1015,7 @@ void dma_debug_init(u32 num_entries) | |||
1015 | * called to set dma_debug_initialized | 1015 | * called to set dma_debug_initialized |
1016 | */ | 1016 | */ |
1017 | if (global_disable) | 1017 | if (global_disable) |
1018 | return; | 1018 | return 0; |
1019 | 1019 | ||
1020 | for (i = 0; i < HASH_SIZE; ++i) { | 1020 | for (i = 0; i < HASH_SIZE; ++i) { |
1021 | INIT_LIST_HEAD(&dma_entry_hash[i].list); | 1021 | INIT_LIST_HEAD(&dma_entry_hash[i].list); |
@@ -1026,17 +1026,14 @@ void dma_debug_init(u32 num_entries) | |||
1026 | pr_err("DMA-API: error creating debugfs entries - disabling\n"); | 1026 | pr_err("DMA-API: error creating debugfs entries - disabling\n"); |
1027 | global_disable = true; | 1027 | global_disable = true; |
1028 | 1028 | ||
1029 | return; | 1029 | return 0; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | if (req_entries) | 1032 | if (prealloc_memory(nr_prealloc_entries) != 0) { |
1033 | num_entries = req_entries; | ||
1034 | |||
1035 | if (prealloc_memory(num_entries) != 0) { | ||
1036 | pr_err("DMA-API: debugging out of memory error - disabled\n"); | 1033 | pr_err("DMA-API: debugging out of memory error - disabled\n"); |
1037 | global_disable = true; | 1034 | global_disable = true; |
1038 | 1035 | ||
1039 | return; | 1036 | return 0; |
1040 | } | 1037 | } |
1041 | 1038 | ||
1042 | nr_total_entries = num_free_entries; | 1039 | nr_total_entries = num_free_entries; |
@@ -1044,7 +1041,9 @@ void dma_debug_init(u32 num_entries) | |||
1044 | dma_debug_initialized = true; | 1041 | dma_debug_initialized = true; |
1045 | 1042 | ||
1046 | pr_info("DMA-API: debugging enabled by kernel config\n"); | 1043 | pr_info("DMA-API: debugging enabled by kernel config\n"); |
1044 | return 0; | ||
1047 | } | 1045 | } |
1046 | core_initcall(dma_debug_init); | ||
1048 | 1047 | ||
1049 | static __init int dma_debug_cmdline(char *str) | 1048 | static __init int dma_debug_cmdline(char *str) |
1050 | { | 1049 | { |
@@ -1061,16 +1060,10 @@ static __init int dma_debug_cmdline(char *str) | |||
1061 | 1060 | ||
1062 | static __init int dma_debug_entries_cmdline(char *str) | 1061 | static __init int dma_debug_entries_cmdline(char *str) |
1063 | { | 1062 | { |
1064 | int res; | ||
1065 | |||
1066 | if (!str) | 1063 | if (!str) |
1067 | return -EINVAL; | 1064 | return -EINVAL; |
1068 | 1065 | if (!get_option(&str, &nr_prealloc_entries)) | |
1069 | res = get_option(&str, &req_entries); | 1066 | nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; |
1070 | |||
1071 | if (!res) | ||
1072 | req_entries = 0; | ||
1073 | |||
1074 | return 0; | 1067 | return 0; |
1075 | } | 1068 | } |
1076 | 1069 | ||
@@ -1293,6 +1286,32 @@ out: | |||
1293 | put_hash_bucket(bucket, &flags); | 1286 | put_hash_bucket(bucket, &flags); |
1294 | } | 1287 | } |
1295 | 1288 | ||
1289 | static void check_sg_segment(struct device *dev, struct scatterlist *sg) | ||
1290 | { | ||
1291 | #ifdef CONFIG_DMA_API_DEBUG_SG | ||
1292 | unsigned int max_seg = dma_get_max_seg_size(dev); | ||
1293 | u64 start, end, boundary = dma_get_seg_boundary(dev); | ||
1294 | |||
1295 | /* | ||
1296 | * Either the driver forgot to set dma_parms appropriately, or | ||
1297 | * whoever generated the list forgot to check them. | ||
1298 | */ | ||
1299 | if (sg->length > max_seg) | ||
1300 | err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n", | ||
1301 | sg->length, max_seg); | ||
1302 | /* | ||
1303 | * In some cases this could potentially be the DMA API | ||
1304 | * implementation's fault, but it would usually imply that | ||
1305 | * the scatterlist was built inappropriately to begin with. | ||
1306 | */ | ||
1307 | start = sg_dma_address(sg); | ||
1308 | end = start + sg_dma_len(sg) - 1; | ||
1309 | if ((start ^ end) & ~boundary) | ||
1310 | err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n", | ||
1311 | start, end, boundary); | ||
1312 | #endif | ||
1313 | } | ||
1314 | |||
1296 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, | 1315 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, |
1297 | size_t size, int direction, dma_addr_t dma_addr, | 1316 | size_t size, int direction, dma_addr_t dma_addr, |
1298 | bool map_single) | 1317 | bool map_single) |
@@ -1423,6 +1442,8 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
1423 | check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); | 1442 | check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); |
1424 | } | 1443 | } |
1425 | 1444 | ||
1445 | check_sg_segment(dev, s); | ||
1446 | |||
1426 | add_dma_entry(entry); | 1447 | add_dma_entry(entry); |
1427 | } | 1448 | } |
1428 | } | 1449 | } |
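Besides moving initialization to a core_initcall and replacing req_entries with nr_prealloc_entries, the interesting addition here is the boundary test in check_sg_segment(). A standalone worked example of that test, with illustrative numbers, showing why XOR-ing the first and last byte addresses detects a crossing:

#include <linux/types.h>

static bool seg_crosses_boundary(u64 start, u64 len, u64 boundary_mask)
{
	u64 end = start + len - 1;

	/*
	 * Example: start = 0xfff0, len = 0x20, boundary_mask = 0xffff (64 KiB).
	 * end = 0x1000f, and (start ^ end) & ~boundary_mask = 0x10000 != 0,
	 * so the segment straddles a 64 KiB boundary and would be flagged.
	 */
	return ((start ^ end) & ~boundary_mask) != 0;
}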
diff --git a/lib/dma-direct.c b/lib/dma-direct.c index bbfb229aa067..8be8106270c2 100644 --- a/lib/dma-direct.c +++ b/lib/dma-direct.c | |||
@@ -34,6 +34,13 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
34 | const char *caller) | 34 | const char *caller) |
35 | { | 35 | { |
36 | if (unlikely(dev && !dma_capable(dev, dma_addr, size))) { | 36 | if (unlikely(dev && !dma_capable(dev, dma_addr, size))) { |
37 | if (!dev->dma_mask) { | ||
38 | dev_err(dev, | ||
39 | "%s: call on device without dma_mask\n", | ||
40 | caller); | ||
41 | return false; | ||
42 | } | ||
43 | |||
37 | if (*dev->dma_mask >= DMA_BIT_MASK(32)) { | 44 | if (*dev->dma_mask >= DMA_BIT_MASK(32)) { |
38 | dev_err(dev, | 45 | dev_err(dev, |
39 | "%s: overflow %pad+%zu of device mask %llx\n", | 46 | "%s: overflow %pad+%zu of device mask %llx\n", |
@@ -84,6 +91,13 @@ again: | |||
84 | __free_pages(page, page_order); | 91 | __free_pages(page, page_order); |
85 | page = NULL; | 92 | page = NULL; |
86 | 93 | ||
94 | if (IS_ENABLED(CONFIG_ZONE_DMA32) && | ||
95 | dev->coherent_dma_mask < DMA_BIT_MASK(64) && | ||
96 | !(gfp & (GFP_DMA32 | GFP_DMA))) { | ||
97 | gfp |= GFP_DMA32; | ||
98 | goto again; | ||
99 | } | ||
100 | |||
87 | if (IS_ENABLED(CONFIG_ZONE_DMA) && | 101 | if (IS_ENABLED(CONFIG_ZONE_DMA) && |
88 | dev->coherent_dma_mask < DMA_BIT_MASK(32) && | 102 | dev->coherent_dma_mask < DMA_BIT_MASK(32) && |
89 | !(gfp & GFP_DMA)) { | 103 | !(gfp & GFP_DMA)) { |
@@ -121,7 +135,7 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, | |||
121 | free_pages((unsigned long)cpu_addr, page_order); | 135 | free_pages((unsigned long)cpu_addr, page_order); |
122 | } | 136 | } |
123 | 137 | ||
124 | static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, | 138 | dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, |
125 | unsigned long offset, size_t size, enum dma_data_direction dir, | 139 | unsigned long offset, size_t size, enum dma_data_direction dir, |
126 | unsigned long attrs) | 140 | unsigned long attrs) |
127 | { | 141 | { |
@@ -132,8 +146,8 @@ static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, | |||
132 | return dma_addr; | 146 | return dma_addr; |
133 | } | 147 | } |
134 | 148 | ||
135 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | 149 | int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, |
136 | int nents, enum dma_data_direction dir, unsigned long attrs) | 150 | enum dma_data_direction dir, unsigned long attrs) |
137 | { | 151 | { |
138 | int i; | 152 | int i; |
139 | struct scatterlist *sg; | 153 | struct scatterlist *sg; |
@@ -165,10 +179,16 @@ int dma_direct_supported(struct device *dev, u64 mask) | |||
165 | if (mask < DMA_BIT_MASK(32)) | 179 | if (mask < DMA_BIT_MASK(32)) |
166 | return 0; | 180 | return 0; |
167 | #endif | 181 | #endif |
182 | /* | ||
183 | * Various PCI/PCIe bridges have broken support for > 32bit DMA even | ||
184 | * if the device itself might support it. | ||
185 | */ | ||
186 | if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32)) | ||
187 | return 0; | ||
168 | return 1; | 188 | return 1; |
169 | } | 189 | } |
170 | 190 | ||
171 | static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) | 191 | int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) |
172 | { | 192 | { |
173 | return dma_addr == DIRECT_MAPPING_ERROR; | 193 | return dma_addr == DIRECT_MAPPING_ERROR; |
174 | } | 194 | } |
@@ -180,6 +200,5 @@ const struct dma_map_ops dma_direct_ops = { | |||
180 | .map_sg = dma_direct_map_sg, | 200 | .map_sg = dma_direct_map_sg, |
181 | .dma_supported = dma_direct_supported, | 201 | .dma_supported = dma_direct_supported, |
182 | .mapping_error = dma_direct_mapping_error, | 202 | .mapping_error = dma_direct_mapping_error, |
183 | .is_phys = 1, | ||
184 | }; | 203 | }; |
185 | EXPORT_SYMBOL(dma_direct_ops); | 204 | EXPORT_SYMBOL(dma_direct_ops); |
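Two behavioural notes fall out of this hunk: coherent allocations for devices with a mask below 64 bits are now retried from ZONE_DMA32 before falling back to ZONE_DMA, and devices flagged with dma_32bit_limit are refused masks above 32 bits. A hedged usage sketch from the driver's side, with illustrative names and a 36-bit device, to show that no driver changes are needed to benefit from the retry:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *my_alloc_ring(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* a 36-bit device: dma-direct may retry the allocation with GFP_DMA32 */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)))
		return NULL;

	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}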
diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c new file mode 100644 index 000000000000..79e9a757387f --- /dev/null +++ b/lib/dma-noncoherent.c | |||
@@ -0,0 +1,102 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (C) 2018 Christoph Hellwig. | ||
4 | * | ||
5 | * DMA operations that map physical memory directly without providing cache | ||
6 | * coherence. | ||
7 | */ | ||
8 | #include <linux/export.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/dma-direct.h> | ||
11 | #include <linux/dma-noncoherent.h> | ||
12 | #include <linux/scatterlist.h> | ||
13 | |||
14 | static void dma_noncoherent_sync_single_for_device(struct device *dev, | ||
15 | dma_addr_t addr, size_t size, enum dma_data_direction dir) | ||
16 | { | ||
17 | arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); | ||
18 | } | ||
19 | |||
20 | static void dma_noncoherent_sync_sg_for_device(struct device *dev, | ||
21 | struct scatterlist *sgl, int nents, enum dma_data_direction dir) | ||
22 | { | ||
23 | struct scatterlist *sg; | ||
24 | int i; | ||
25 | |||
26 | for_each_sg(sgl, sg, nents, i) | ||
27 | arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); | ||
28 | } | ||
29 | |||
30 | static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page, | ||
31 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
32 | unsigned long attrs) | ||
33 | { | ||
34 | dma_addr_t addr; | ||
35 | |||
36 | addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); | ||
37 | if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
38 | arch_sync_dma_for_device(dev, page_to_phys(page) + offset, | ||
39 | size, dir); | ||
40 | return addr; | ||
41 | } | ||
42 | |||
43 | static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl, | ||
44 | int nents, enum dma_data_direction dir, unsigned long attrs) | ||
45 | { | ||
46 | nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs); | ||
47 | if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
48 | dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir); | ||
49 | return nents; | ||
50 | } | ||
51 | |||
52 | #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU | ||
53 | static void dma_noncoherent_sync_single_for_cpu(struct device *dev, | ||
54 | dma_addr_t addr, size_t size, enum dma_data_direction dir) | ||
55 | { | ||
56 | arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); | ||
57 | } | ||
58 | |||
59 | static void dma_noncoherent_sync_sg_for_cpu(struct device *dev, | ||
60 | struct scatterlist *sgl, int nents, enum dma_data_direction dir) | ||
61 | { | ||
62 | struct scatterlist *sg; | ||
63 | int i; | ||
64 | |||
65 | for_each_sg(sgl, sg, nents, i) | ||
66 | arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); | ||
67 | } | ||
68 | |||
69 | static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr, | ||
70 | size_t size, enum dma_data_direction dir, unsigned long attrs) | ||
71 | { | ||
72 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
73 | dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir); | ||
74 | } | ||
75 | |||
76 | static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl, | ||
77 | int nents, enum dma_data_direction dir, unsigned long attrs) | ||
78 | { | ||
79 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
80 | dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir); | ||
81 | } | ||
82 | #endif | ||
83 | |||
84 | const struct dma_map_ops dma_noncoherent_ops = { | ||
85 | .alloc = arch_dma_alloc, | ||
86 | .free = arch_dma_free, | ||
87 | .mmap = arch_dma_mmap, | ||
88 | .sync_single_for_device = dma_noncoherent_sync_single_for_device, | ||
89 | .sync_sg_for_device = dma_noncoherent_sync_sg_for_device, | ||
90 | .map_page = dma_noncoherent_map_page, | ||
91 | .map_sg = dma_noncoherent_map_sg, | ||
92 | #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU | ||
93 | .sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu, | ||
94 | .sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu, | ||
95 | .unmap_page = dma_noncoherent_unmap_page, | ||
96 | .unmap_sg = dma_noncoherent_unmap_sg, | ||
97 | #endif | ||
98 | .dma_supported = dma_direct_supported, | ||
99 | .mapping_error = dma_direct_mapping_error, | ||
100 | .cache_sync = arch_dma_cache_sync, | ||
101 | }; | ||
102 | EXPORT_SYMBOL(dma_noncoherent_ops); | ||
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index 23633c0fda4a..92a9f243c0e2 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c | |||
@@ -3,19 +3,8 @@ | |||
3 | * IOMMU helper functions for the free area management | 3 | * IOMMU helper functions for the free area management |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/export.h> | ||
7 | #include <linux/bitmap.h> | 6 | #include <linux/bitmap.h> |
8 | #include <linux/bug.h> | 7 | #include <linux/iommu-helper.h> |
9 | |||
10 | int iommu_is_span_boundary(unsigned int index, unsigned int nr, | ||
11 | unsigned long shift, | ||
12 | unsigned long boundary_size) | ||
13 | { | ||
14 | BUG_ON(!is_power_of_2(boundary_size)); | ||
15 | |||
16 | shift = (shift + index) & (boundary_size - 1); | ||
17 | return shift + nr > boundary_size; | ||
18 | } | ||
19 | 8 | ||
20 | unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, | 9 | unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, |
21 | unsigned long start, unsigned int nr, | 10 | unsigned long start, unsigned int nr, |
@@ -38,4 +27,3 @@ again: | |||
38 | } | 27 | } |
39 | return -1; | 28 | return -1; |
40 | } | 29 | } |
41 | EXPORT_SYMBOL(iommu_area_alloc); | ||
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index cc640588f145..04b68d9dffac 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -593,9 +593,8 @@ found: | |||
593 | } | 593 | } |
594 | 594 | ||
595 | /* | 595 | /* |
596 | * Allocates bounce buffer and returns its kernel virtual address. | 596 | * Allocates bounce buffer and returns its physical address. |
597 | */ | 597 | */ |
598 | |||
599 | static phys_addr_t | 598 | static phys_addr_t |
600 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, | 599 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, |
601 | enum dma_data_direction dir, unsigned long attrs) | 600 | enum dma_data_direction dir, unsigned long attrs) |
@@ -614,7 +613,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, | |||
614 | } | 613 | } |
615 | 614 | ||
616 | /* | 615 | /* |
617 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. | 616 | * tlb_addr is the physical address of the bounce buffer to unmap. |
618 | */ | 617 | */ |
619 | void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, | 618 | void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, |
620 | size_t size, enum dma_data_direction dir, | 619 | size_t size, enum dma_data_direction dir, |
@@ -692,7 +691,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, | |||
692 | } | 691 | } |
693 | } | 692 | } |
694 | 693 | ||
695 | #ifdef CONFIG_DMA_DIRECT_OPS | ||
696 | static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, | 694 | static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, |
697 | size_t size) | 695 | size_t size) |
698 | { | 696 | { |
@@ -727,7 +725,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
727 | 725 | ||
728 | out_unmap: | 726 | out_unmap: |
729 | dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", | 727 | dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", |
730 | (unsigned long long)(dev ? dev->coherent_dma_mask : 0), | 728 | (unsigned long long)dev->coherent_dma_mask, |
731 | (unsigned long long)*dma_handle); | 729 | (unsigned long long)*dma_handle); |
732 | 730 | ||
733 | /* | 731 | /* |
@@ -764,7 +762,6 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size, | |||
764 | DMA_ATTR_SKIP_CPU_SYNC); | 762 | DMA_ATTR_SKIP_CPU_SYNC); |
765 | return true; | 763 | return true; |
766 | } | 764 | } |
767 | #endif | ||
768 | 765 | ||
769 | static void | 766 | static void |
770 | swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, | 767 | swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, |
@@ -1045,7 +1042,6 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask) | |||
1045 | return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask; | 1042 | return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask; |
1046 | } | 1043 | } |
1047 | 1044 | ||
1048 | #ifdef CONFIG_DMA_DIRECT_OPS | ||
1049 | void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, | 1045 | void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, |
1050 | gfp_t gfp, unsigned long attrs) | 1046 | gfp_t gfp, unsigned long attrs) |
1051 | { | 1047 | { |
@@ -1089,4 +1085,3 @@ const struct dma_map_ops swiotlb_dma_ops = { | |||
1089 | .unmap_page = swiotlb_unmap_page, | 1085 | .unmap_page = swiotlb_unmap_page, |
1090 | .dma_supported = dma_direct_supported, | 1086 | .dma_supported = dma_direct_supported, |
1091 | }; | 1087 | }; |
1092 | #endif /* CONFIG_DMA_DIRECT_OPS */ | ||
diff --git a/mm/Kconfig b/mm/Kconfig index e14c01513bfd..9673e7fbb4f0 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -266,7 +266,7 @@ config ARCH_ENABLE_THP_MIGRATION | |||
266 | bool | 266 | bool |
267 | 267 | ||
268 | config PHYS_ADDR_T_64BIT | 268 | config PHYS_ADDR_T_64BIT |
269 | def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT | 269 | def_bool 64BIT |
270 | 270 | ||
271 | config BOUNCE | 271 | config BOUNCE |
272 | bool "Enable bounce buffers" | 272 | bool "Enable bounce buffers" |
diff --git a/net/core/dev.c b/net/core/dev.c index 2af787e8b130..983b277a1229 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2884,11 +2884,7 @@ void netdev_rx_csum_fault(struct net_device *dev) | |||
2884 | EXPORT_SYMBOL(netdev_rx_csum_fault); | 2884 | EXPORT_SYMBOL(netdev_rx_csum_fault); |
2885 | #endif | 2885 | #endif |
2886 | 2886 | ||
2887 | /* Actually, we should eliminate this check as soon as we know, that: | 2887 | /* XXX: check that highmem exists at all on the given machine. */ |
2888 | * 1. IOMMU is present and allows to map all the memory. | ||
2889 | * 2. No high memory really exists on this machine. | ||
2890 | */ | ||
2891 | |||
2892 | static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) | 2888 | static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) |
2893 | { | 2889 | { |
2894 | #ifdef CONFIG_HIGHMEM | 2890 | #ifdef CONFIG_HIGHMEM |
@@ -2902,20 +2898,6 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) | |||
2902 | return 1; | 2898 | return 1; |
2903 | } | 2899 | } |
2904 | } | 2900 | } |
2905 | |||
2906 | if (PCI_DMA_BUS_IS_PHYS) { | ||
2907 | struct device *pdev = dev->dev.parent; | ||
2908 | |||
2909 | if (!pdev) | ||
2910 | return 0; | ||
2911 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
2912 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
2913 | dma_addr_t addr = page_to_phys(skb_frag_page(frag)); | ||
2914 | |||
2915 | if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) | ||
2916 | return 1; | ||
2917 | } | ||
2918 | } | ||
2919 | #endif | 2901 | #endif |
2920 | return 0; | 2902 | return 0; |
2921 | } | 2903 | } |
diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h index 1571e24e9494..f91aeb5fe571 100644 --- a/tools/virtio/linux/dma-mapping.h +++ b/tools/virtio/linux/dma-mapping.h | |||
@@ -6,8 +6,6 @@ | |||
6 | # error Virtio userspace code does not support CONFIG_HAS_DMA | 6 | # error Virtio userspace code does not support CONFIG_HAS_DMA |
7 | #endif | 7 | #endif |
8 | 8 | ||
9 | #define PCI_DMA_BUS_IS_PHYS 1 | ||
10 | |||
11 | enum dma_data_direction { | 9 | enum dma_data_direction { |
12 | DMA_BIDIRECTIONAL = 0, | 10 | DMA_BIDIRECTIONAL = 0, |
13 | DMA_TO_DEVICE = 1, | 11 | DMA_TO_DEVICE = 1, |