author     Linus Torvalds <torvalds@linux-foundation.org>   2016-08-04 08:51:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-08-04 08:51:12 -0400
commit     d597690eef4142cf622fd469859ecc56506119b5 (patch)
tree       e0be5d05994de4b4243a4c76cf6ca13380c3eac3
parent     96b585267f552d4b6a28ea8bd75e5ed03deb6e71 (diff)
parent     9049fc745300c5e2236cbfc69f5e8cadb6f1f57c (diff)
Merge branch 'akpm' (patches from Andrew)
Merge even more updates from Andrew Morton:
- dma-mapping API cleanup
- a few cleanups and misc things
- use jump labels in dynamic-debug
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
dynamic_debug: add jump label support
jump_label: remove bug.h, atomic.h dependencies for HAVE_JUMP_LABEL
arm: jump label may reference text in __exit
tile: support static_key usage in non-module __exit sections
sparc: support static_key usage in non-module __exit sections
powerpc: add explicit #include <asm/asm-compat.h> for jump label
drivers/media/dvb-frontends/cxd2841er.c: avoid misleading gcc warning
MAINTAINERS: update email and list of Samsung HW driver maintainers
block: remove BLK_DEV_DAX config option
samples/kretprobe: fix the wrong type
samples/kretprobe: convert the printk to pr_info/pr_err
samples/jprobe: convert the printk to pr_info/pr_err
samples/kprobe: convert the printk to pr_info/pr_err
dma-mapping: use unsigned long for dma_attrs
media: mtk-vcodec: remove unused dma_attrs
include/linux/bitmap.h: cleanup
tree-wide: replace config_enabled() with IS_ENABLED()
drivers/fpga/Kconfig: fix build failure
177 files changed, 1040 insertions, 1029 deletions
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 45ef3f279c3b..1d26eeb6b5f6 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -369,35 +369,32 @@ See also dma_map_single(). | |||
369 | dma_addr_t | 369 | dma_addr_t |
370 | dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size, | 370 | dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size, |
371 | enum dma_data_direction dir, | 371 | enum dma_data_direction dir, |
372 | struct dma_attrs *attrs) | 372 | unsigned long attrs) |
373 | 373 | ||
374 | void | 374 | void |
375 | dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, | 375 | dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, |
376 | size_t size, enum dma_data_direction dir, | 376 | size_t size, enum dma_data_direction dir, |
377 | struct dma_attrs *attrs) | 377 | unsigned long attrs) |
378 | 378 | ||
379 | int | 379 | int |
380 | dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | 380 | dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, |
381 | int nents, enum dma_data_direction dir, | 381 | int nents, enum dma_data_direction dir, |
382 | struct dma_attrs *attrs) | 382 | unsigned long attrs) |
383 | 383 | ||
384 | void | 384 | void |
385 | dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, | 385 | dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, |
386 | int nents, enum dma_data_direction dir, | 386 | int nents, enum dma_data_direction dir, |
387 | struct dma_attrs *attrs) | 387 | unsigned long attrs) |
388 | 388 | ||
389 | The four functions above are just like the counterpart functions | 389 | The four functions above are just like the counterpart functions |
390 | without the _attrs suffixes, except that they pass an optional | 390 | without the _attrs suffixes, except that they pass an optional |
391 | struct dma_attrs*. | 391 | dma_attrs. |
392 | |||
393 | struct dma_attrs encapsulates a set of "DMA attributes". For the | ||
394 | definition of struct dma_attrs see linux/dma-attrs.h. | ||
395 | 392 | ||
396 | The interpretation of DMA attributes is architecture-specific, and | 393 | The interpretation of DMA attributes is architecture-specific, and |
397 | each attribute should be documented in Documentation/DMA-attributes.txt. | 394 | each attribute should be documented in Documentation/DMA-attributes.txt. |
398 | 395 | ||
399 | If struct dma_attrs* is NULL, the semantics of each of these | 396 | If dma_attrs are 0, the semantics of each of these functions |
400 | functions is identical to those of the corresponding function | 397 | is identical to those of the corresponding function |
401 | without the _attrs suffix. As a result dma_map_single_attrs() | 398 | without the _attrs suffix. As a result dma_map_single_attrs() |
402 | can generally replace dma_map_single(), etc. | 399 | can generally replace dma_map_single(), etc. |
403 | 400 | ||
@@ -405,15 +402,15 @@ As an example of the use of the *_attrs functions, here's how | |||
405 | you could pass an attribute DMA_ATTR_FOO when mapping memory | 402 | you could pass an attribute DMA_ATTR_FOO when mapping memory |
406 | for DMA: | 403 | for DMA: |
407 | 404 | ||
408 | #include <linux/dma-attrs.h> | 405 | #include <linux/dma-mapping.h> |
409 | /* DMA_ATTR_FOO should be defined in linux/dma-attrs.h and | 406 | /* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and |
410 | * documented in Documentation/DMA-attributes.txt */ | 407 | * documented in Documentation/DMA-attributes.txt */ |
411 | ... | 408 | ... |
412 | 409 | ||
413 | DEFINE_DMA_ATTRS(attrs); | 410 | unsigned long attr; |
414 | dma_set_attr(DMA_ATTR_FOO, &attrs); | 411 | attr |= DMA_ATTR_FOO; |
415 | .... | 412 | .... |
416 | n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attr); | 413 | n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attr); |
417 | .... | 414 | .... |
418 | 415 | ||
419 | Architectures that care about DMA_ATTR_FOO would check for its | 416 | Architectures that care about DMA_ATTR_FOO would check for its |
@@ -422,12 +419,10 @@ routines, e.g.: | |||
422 | 419 | ||
423 | void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr, | 420 | void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr, |
424 | size_t size, enum dma_data_direction dir, | 421 | size_t size, enum dma_data_direction dir, |
425 | struct dma_attrs *attrs) | 422 | unsigned long attrs) |
426 | { | 423 | { |
427 | .... | 424 | .... |
428 | int foo = dma_get_attr(DMA_ATTR_FOO, attrs); | 425 | if (attrs & DMA_ATTR_FOO) |
429 | .... | ||
430 | if (foo) | ||
431 | /* twizzle the frobnozzle */ | 426 | /* twizzle the frobnozzle */ |
432 | .... | 427 | .... |
433 | 428 | ||
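For quick reference, the caller-side change documented in the DMA-API.txt hunk above looks roughly like this (an illustrative sketch only, reusing the documentation's hypothetical DMA_ATTR_FOO attribute; it is not part of the patch):

	/* before this series: attributes live in an opaque struct dma_attrs */
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_FOO, &attrs);
	n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attrs);

	/* after this series: attributes are bit flags in an unsigned long */
	unsigned long attrs = DMA_ATTR_FOO;
	n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attrs);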
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index e8cf9cf873b3..2d455a5cf671 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -2,7 +2,7 @@ | |||
2 | ============== | 2 | ============== |
3 | 3 | ||
4 | This document describes the semantics of the DMA attributes that are | 4 | This document describes the semantics of the DMA attributes that are |
5 | defined in linux/dma-attrs.h. | 5 | defined in linux/dma-mapping.h. |
6 | 6 | ||
7 | DMA_ATTR_WRITE_BARRIER | 7 | DMA_ATTR_WRITE_BARRIER |
8 | ---------------------- | 8 | ---------------------- |
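The attributes referenced throughout the rest of this diff (DMA_ATTR_WRITE_COMBINE, DMA_ATTR_NON_CONSISTENT, DMA_ATTR_NO_KERNEL_MAPPING, DMA_ATTR_SKIP_CPU_SYNC, DMA_ATTR_FORCE_CONTIGUOUS, DMA_ATTR_ALLOC_SINGLE_PAGES, ...) are now plain bit definitions in linux/dma-mapping.h, roughly of this form (bit positions shown here are illustrative, not quoted from the header):

	#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
	#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
	#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
	#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
	#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
	....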
diff --git a/MAINTAINERS b/MAINTAINERS
index 429fc61bee81..bafc8043d4f0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1648,7 +1648,8 @@ F: arch/arm/mach-s5pv210/ | |||
1648 | 1648 | ||
1649 | ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT | 1649 | ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT |
1650 | M: Kyungmin Park <kyungmin.park@samsung.com> | 1650 | M: Kyungmin Park <kyungmin.park@samsung.com> |
1651 | M: Kamil Debski <k.debski@samsung.com> | 1651 | M: Kamil Debski <kamil@wypas.org> |
1652 | M: Andrzej Hajda <a.hajda@samsung.com> | ||
1652 | L: linux-arm-kernel@lists.infradead.org | 1653 | L: linux-arm-kernel@lists.infradead.org |
1653 | L: linux-media@vger.kernel.org | 1654 | L: linux-media@vger.kernel.org |
1654 | S: Maintained | 1655 | S: Maintained |
@@ -1656,8 +1657,9 @@ F: drivers/media/platform/s5p-g2d/ | |||
1656 | 1657 | ||
1657 | ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT | 1658 | ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT |
1658 | M: Kyungmin Park <kyungmin.park@samsung.com> | 1659 | M: Kyungmin Park <kyungmin.park@samsung.com> |
1659 | M: Kamil Debski <k.debski@samsung.com> | 1660 | M: Kamil Debski <kamil@wypas.org> |
1660 | M: Jeongtae Park <jtp.park@samsung.com> | 1661 | M: Jeongtae Park <jtp.park@samsung.com> |
1662 | M: Andrzej Hajda <a.hajda@samsung.com> | ||
1661 | L: linux-arm-kernel@lists.infradead.org | 1663 | L: linux-arm-kernel@lists.infradead.org |
1662 | L: linux-media@vger.kernel.org | 1664 | L: linux-media@vger.kernel.org |
1663 | S: Maintained | 1665 | S: Maintained |
@@ -9470,7 +9472,8 @@ S: Odd Fixes | |||
9470 | F: drivers/media/usb/pwc/* | 9472 | F: drivers/media/usb/pwc/* |
9471 | 9473 | ||
9472 | PWM FAN DRIVER | 9474 | PWM FAN DRIVER |
9473 | M: Kamil Debski <k.debski@samsung.com> | 9475 | M: Kamil Debski <kamil@wypas.org> |
9476 | M: Lukasz Majewski <l.majewski@samsung.com> | ||
9474 | L: linux-hwmon@vger.kernel.org | 9477 | L: linux-hwmon@vger.kernel.org |
9475 | S: Supported | 9478 | S: Supported |
9476 | F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt | 9479 | F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt |
@@ -10218,7 +10221,8 @@ T: git https://github.com/lmajewski/linux-samsung-thermal.git | |||
10218 | F: drivers/thermal/samsung/ | 10221 | F: drivers/thermal/samsung/ |
10219 | 10222 | ||
10220 | SAMSUNG USB2 PHY DRIVER | 10223 | SAMSUNG USB2 PHY DRIVER |
10221 | M: Kamil Debski <k.debski@samsung.com> | 10224 | M: Kamil Debski <kamil@wypas.org> |
10225 | M: Sylwester Nawrocki <s.nawrocki@samsung.com> | ||
10222 | L: linux-kernel@vger.kernel.org | 10226 | L: linux-kernel@vger.kernel.org |
10223 | S: Supported | 10227 | S: Supported |
10224 | F: Documentation/devicetree/bindings/phy/samsung-phy.txt | 10228 | F: Documentation/devicetree/bindings/phy/samsung-phy.txt |
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 3c3451f58ff4..c63b6ac19ee5 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _ALPHA_DMA_MAPPING_H | 1 | #ifndef _ALPHA_DMA_MAPPING_H |
2 | #define _ALPHA_DMA_MAPPING_H | 2 | #define _ALPHA_DMA_MAPPING_H |
3 | 3 | ||
4 | #include <linux/dma-attrs.h> | ||
5 | |||
6 | extern struct dma_map_ops *dma_ops; | 4 | extern struct dma_map_ops *dma_ops; |
7 | 5 | ||
8 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) | 6 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 8e735b5e56bd..bb152e21e5ae 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -109,7 +109,7 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn, | |||
109 | 109 | ||
110 | static void *alpha_noop_alloc_coherent(struct device *dev, size_t size, | 110 | static void *alpha_noop_alloc_coherent(struct device *dev, size_t size, |
111 | dma_addr_t *dma_handle, gfp_t gfp, | 111 | dma_addr_t *dma_handle, gfp_t gfp, |
112 | struct dma_attrs *attrs) | 112 | unsigned long attrs) |
113 | { | 113 | { |
114 | void *ret; | 114 | void *ret; |
115 | 115 | ||
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 8969bf2dfe3a..451fc9cdd323 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -349,7 +349,7 @@ static struct pci_dev *alpha_gendev_to_pci(struct device *dev) | |||
349 | static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, | 349 | static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, |
350 | unsigned long offset, size_t size, | 350 | unsigned long offset, size_t size, |
351 | enum dma_data_direction dir, | 351 | enum dma_data_direction dir, |
352 | struct dma_attrs *attrs) | 352 | unsigned long attrs) |
353 | { | 353 | { |
354 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 354 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
355 | int dac_allowed; | 355 | int dac_allowed; |
@@ -369,7 +369,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, | |||
369 | 369 | ||
370 | static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, | 370 | static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, |
371 | size_t size, enum dma_data_direction dir, | 371 | size_t size, enum dma_data_direction dir, |
372 | struct dma_attrs *attrs) | 372 | unsigned long attrs) |
373 | { | 373 | { |
374 | unsigned long flags; | 374 | unsigned long flags; |
375 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 375 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
@@ -433,7 +433,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
433 | 433 | ||
434 | static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, | 434 | static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, |
435 | dma_addr_t *dma_addrp, gfp_t gfp, | 435 | dma_addr_t *dma_addrp, gfp_t gfp, |
436 | struct dma_attrs *attrs) | 436 | unsigned long attrs) |
437 | { | 437 | { |
438 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 438 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
439 | void *cpu_addr; | 439 | void *cpu_addr; |
@@ -478,7 +478,7 @@ try_again: | |||
478 | 478 | ||
479 | static void alpha_pci_free_coherent(struct device *dev, size_t size, | 479 | static void alpha_pci_free_coherent(struct device *dev, size_t size, |
480 | void *cpu_addr, dma_addr_t dma_addr, | 480 | void *cpu_addr, dma_addr_t dma_addr, |
481 | struct dma_attrs *attrs) | 481 | unsigned long attrs) |
482 | { | 482 | { |
483 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 483 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
484 | pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); | 484 | pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); |
@@ -651,7 +651,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, | |||
651 | 651 | ||
652 | static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, | 652 | static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, |
653 | int nents, enum dma_data_direction dir, | 653 | int nents, enum dma_data_direction dir, |
654 | struct dma_attrs *attrs) | 654 | unsigned long attrs) |
655 | { | 655 | { |
656 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 656 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
657 | struct scatterlist *start, *end, *out; | 657 | struct scatterlist *start, *end, *out; |
@@ -729,7 +729,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, | |||
729 | 729 | ||
730 | static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, | 730 | static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, |
731 | int nents, enum dma_data_direction dir, | 731 | int nents, enum dma_data_direction dir, |
732 | struct dma_attrs *attrs) | 732 | unsigned long attrs) |
733 | { | 733 | { |
734 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); | 734 | struct pci_dev *pdev = alpha_gendev_to_pci(dev); |
735 | unsigned long flags; | 735 | unsigned long flags; |
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index ab74b5d9186c..20afc65e22dc 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -22,7 +22,7 @@ | |||
22 | 22 | ||
23 | 23 | ||
24 | static void *arc_dma_alloc(struct device *dev, size_t size, | 24 | static void *arc_dma_alloc(struct device *dev, size_t size, |
25 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 25 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
26 | { | 26 | { |
27 | unsigned long order = get_order(size); | 27 | unsigned long order = get_order(size); |
28 | struct page *page; | 28 | struct page *page; |
@@ -46,7 +46,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size, | |||
46 | * (vs. always going to memory - thus are faster) | 46 | * (vs. always going to memory - thus are faster) |
47 | */ | 47 | */ |
48 | if ((is_isa_arcv2() && ioc_exists) || | 48 | if ((is_isa_arcv2() && ioc_exists) || |
49 | dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) | 49 | (attrs & DMA_ATTR_NON_CONSISTENT)) |
50 | need_coh = 0; | 50 | need_coh = 0; |
51 | 51 | ||
52 | /* | 52 | /* |
@@ -90,13 +90,13 @@ static void *arc_dma_alloc(struct device *dev, size_t size, | |||
90 | } | 90 | } |
91 | 91 | ||
92 | static void arc_dma_free(struct device *dev, size_t size, void *vaddr, | 92 | static void arc_dma_free(struct device *dev, size_t size, void *vaddr, |
93 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 93 | dma_addr_t dma_handle, unsigned long attrs) |
94 | { | 94 | { |
95 | phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle); | 95 | phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle); |
96 | struct page *page = virt_to_page(paddr); | 96 | struct page *page = virt_to_page(paddr); |
97 | int is_non_coh = 1; | 97 | int is_non_coh = 1; |
98 | 98 | ||
99 | is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) || | 99 | is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || |
100 | (is_isa_arcv2() && ioc_exists); | 100 | (is_isa_arcv2() && ioc_exists); |
101 | 101 | ||
102 | if (PageHighMem(page) || !is_non_coh) | 102 | if (PageHighMem(page) || !is_non_coh) |
@@ -130,7 +130,7 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size, | |||
130 | 130 | ||
131 | static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, | 131 | static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, |
132 | unsigned long offset, size_t size, enum dma_data_direction dir, | 132 | unsigned long offset, size_t size, enum dma_data_direction dir, |
133 | struct dma_attrs *attrs) | 133 | unsigned long attrs) |
134 | { | 134 | { |
135 | phys_addr_t paddr = page_to_phys(page) + offset; | 135 | phys_addr_t paddr = page_to_phys(page) + offset; |
136 | _dma_cache_sync(paddr, size, dir); | 136 | _dma_cache_sync(paddr, size, dir); |
@@ -138,7 +138,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, | |||
138 | } | 138 | } |
139 | 139 | ||
140 | static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, | 140 | static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, |
141 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 141 | int nents, enum dma_data_direction dir, unsigned long attrs) |
142 | { | 142 | { |
143 | struct scatterlist *s; | 143 | struct scatterlist *s; |
144 | int i; | 144 | int i; |
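The arc/mm/dma.c hunks above show the pattern that repeats in every dma_map_ops implementation touched by this series: dma_get_attr() lookups on a struct dma_attrs * become plain bitwise tests on the unsigned long. A minimal sketch of the conversion (foo_dma_alloc is a hypothetical example, not a function from the patch):

	static void *foo_dma_alloc(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t gfp,
				   unsigned long attrs)
	{
		/* old: if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) */
		if (attrs & DMA_ATTR_NON_CONSISTENT)
			/* the caller accepts a non-coherent buffer */
			....
	}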
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 1143c4d5c567..301281645d08 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -310,7 +310,7 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf, | |||
310 | */ | 310 | */ |
311 | static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, | 311 | static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, |
312 | unsigned long offset, size_t size, enum dma_data_direction dir, | 312 | unsigned long offset, size_t size, enum dma_data_direction dir, |
313 | struct dma_attrs *attrs) | 313 | unsigned long attrs) |
314 | { | 314 | { |
315 | dma_addr_t dma_addr; | 315 | dma_addr_t dma_addr; |
316 | int ret; | 316 | int ret; |
@@ -344,7 +344,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, | |||
344 | * should be) | 344 | * should be) |
345 | */ | 345 | */ |
346 | static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | 346 | static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, |
347 | enum dma_data_direction dir, struct dma_attrs *attrs) | 347 | enum dma_data_direction dir, unsigned long attrs) |
348 | { | 348 | { |
349 | struct safe_buffer *buf; | 349 | struct safe_buffer *buf; |
350 | 350 | ||
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a83570f10124..d009f7911ffc 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,7 +5,6 @@ | |||
5 | 5 | ||
6 | #include <linux/mm_types.h> | 6 | #include <linux/mm_types.h> |
7 | #include <linux/scatterlist.h> | 7 | #include <linux/scatterlist.h> |
8 | #include <linux/dma-attrs.h> | ||
9 | #include <linux/dma-debug.h> | 8 | #include <linux/dma-debug.h> |
10 | 9 | ||
11 | #include <asm/memory.h> | 10 | #include <asm/memory.h> |
@@ -174,7 +173,7 @@ static inline void dma_mark_clean(void *addr, size_t size) { } | |||
174 | * to be the device-viewed address. | 173 | * to be the device-viewed address. |
175 | */ | 174 | */ |
176 | extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 175 | extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
177 | gfp_t gfp, struct dma_attrs *attrs); | 176 | gfp_t gfp, unsigned long attrs); |
178 | 177 | ||
179 | /** | 178 | /** |
180 | * arm_dma_free - free memory allocated by arm_dma_alloc | 179 | * arm_dma_free - free memory allocated by arm_dma_alloc |
@@ -191,7 +190,7 @@ extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
191 | * during and after this call executing are illegal. | 190 | * during and after this call executing are illegal. |
192 | */ | 191 | */ |
193 | extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | 192 | extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, |
194 | dma_addr_t handle, struct dma_attrs *attrs); | 193 | dma_addr_t handle, unsigned long attrs); |
195 | 194 | ||
196 | /** | 195 | /** |
197 | * arm_dma_mmap - map a coherent DMA allocation into user space | 196 | * arm_dma_mmap - map a coherent DMA allocation into user space |
@@ -208,7 +207,7 @@ extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | |||
208 | */ | 207 | */ |
209 | extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 208 | extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
210 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 209 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
211 | struct dma_attrs *attrs); | 210 | unsigned long attrs); |
212 | 211 | ||
213 | /* | 212 | /* |
214 | * This can be called during early boot to increase the size of the atomic | 213 | * This can be called during early boot to increase the size of the atomic |
@@ -262,16 +261,16 @@ extern void dmabounce_unregister_dev(struct device *); | |||
262 | * The scatter list versions of the above methods. | 261 | * The scatter list versions of the above methods. |
263 | */ | 262 | */ |
264 | extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, | 263 | extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, |
265 | enum dma_data_direction, struct dma_attrs *attrs); | 264 | enum dma_data_direction, unsigned long attrs); |
266 | extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, | 265 | extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, |
267 | enum dma_data_direction, struct dma_attrs *attrs); | 266 | enum dma_data_direction, unsigned long attrs); |
268 | extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, | 267 | extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, |
269 | enum dma_data_direction); | 268 | enum dma_data_direction); |
270 | extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, | 269 | extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, |
271 | enum dma_data_direction); | 270 | enum dma_data_direction); |
272 | extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, | 271 | extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
273 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 272 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
274 | struct dma_attrs *attrs); | 273 | unsigned long attrs); |
275 | 274 | ||
276 | #endif /* __KERNEL__ */ | 275 | #endif /* __KERNEL__ */ |
277 | #endif | 276 | #endif |
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 9408a994cc91..95ce6ac3a971 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -2,15 +2,14 @@ | |||
2 | #define _ASM_ARM_XEN_PAGE_COHERENT_H | 2 | #define _ASM_ARM_XEN_PAGE_COHERENT_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | #include <linux/dma-attrs.h> | ||
6 | #include <linux/dma-mapping.h> | 5 | #include <linux/dma-mapping.h> |
7 | 6 | ||
8 | void __xen_dma_map_page(struct device *hwdev, struct page *page, | 7 | void __xen_dma_map_page(struct device *hwdev, struct page *page, |
9 | dma_addr_t dev_addr, unsigned long offset, size_t size, | 8 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
10 | enum dma_data_direction dir, struct dma_attrs *attrs); | 9 | enum dma_data_direction dir, unsigned long attrs); |
11 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 10 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
12 | size_t size, enum dma_data_direction dir, | 11 | size_t size, enum dma_data_direction dir, |
13 | struct dma_attrs *attrs); | 12 | unsigned long attrs); |
14 | void __xen_dma_sync_single_for_cpu(struct device *hwdev, | 13 | void __xen_dma_sync_single_for_cpu(struct device *hwdev, |
15 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 14 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
16 | 15 | ||
@@ -18,22 +17,20 @@ void __xen_dma_sync_single_for_device(struct device *hwdev, | |||
18 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 17 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
19 | 18 | ||
20 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | 19 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, |
21 | dma_addr_t *dma_handle, gfp_t flags, | 20 | dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) |
22 | struct dma_attrs *attrs) | ||
23 | { | 21 | { |
24 | return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); | 22 | return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); |
25 | } | 23 | } |
26 | 24 | ||
27 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | 25 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, |
28 | void *cpu_addr, dma_addr_t dma_handle, | 26 | void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) |
29 | struct dma_attrs *attrs) | ||
30 | { | 27 | { |
31 | __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); | 28 | __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); |
32 | } | 29 | } |
33 | 30 | ||
34 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | 31 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, |
35 | dma_addr_t dev_addr, unsigned long offset, size_t size, | 32 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
36 | enum dma_data_direction dir, struct dma_attrs *attrs) | 33 | enum dma_data_direction dir, unsigned long attrs) |
37 | { | 34 | { |
38 | unsigned long page_pfn = page_to_xen_pfn(page); | 35 | unsigned long page_pfn = page_to_xen_pfn(page); |
39 | unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); | 36 | unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); |
@@ -58,8 +55,7 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | |||
58 | } | 55 | } |
59 | 56 | ||
60 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 57 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
61 | size_t size, enum dma_data_direction dir, | 58 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
62 | struct dma_attrs *attrs) | ||
63 | { | 59 | { |
64 | unsigned long pfn = PFN_DOWN(handle); | 60 | unsigned long pfn = PFN_DOWN(handle); |
65 | /* | 61 | /* |
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 99420fc1f066..d24e5dd2aa7a 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -44,7 +44,7 @@ | |||
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \ | 46 | #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \ |
47 | defined(CONFIG_GENERIC_BUG) | 47 | defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL) |
48 | #define ARM_EXIT_KEEP(x) x | 48 | #define ARM_EXIT_KEEP(x) x |
49 | #define ARM_EXIT_DISCARD(x) | 49 | #define ARM_EXIT_DISCARD(x) |
50 | #else | 50 | #else |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b7eed75960fe..c6834c0cfd1c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -128,16 +128,16 @@ static void __dma_page_dev_to_cpu(struct page *, unsigned long, | |||
128 | */ | 128 | */ |
129 | static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, | 129 | static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, |
130 | unsigned long offset, size_t size, enum dma_data_direction dir, | 130 | unsigned long offset, size_t size, enum dma_data_direction dir, |
131 | struct dma_attrs *attrs) | 131 | unsigned long attrs) |
132 | { | 132 | { |
133 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 133 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
134 | __dma_page_cpu_to_dev(page, offset, size, dir); | 134 | __dma_page_cpu_to_dev(page, offset, size, dir); |
135 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | 135 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; |
136 | } | 136 | } |
137 | 137 | ||
138 | static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, | 138 | static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, |
139 | unsigned long offset, size_t size, enum dma_data_direction dir, | 139 | unsigned long offset, size_t size, enum dma_data_direction dir, |
140 | struct dma_attrs *attrs) | 140 | unsigned long attrs) |
141 | { | 141 | { |
142 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | 142 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; |
143 | } | 143 | } |
@@ -157,10 +157,9 @@ static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *pag | |||
157 | * whatever the device wrote there. | 157 | * whatever the device wrote there. |
158 | */ | 158 | */ |
159 | static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, | 159 | static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, |
160 | size_t size, enum dma_data_direction dir, | 160 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
161 | struct dma_attrs *attrs) | ||
162 | { | 161 | { |
163 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 162 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
164 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), | 163 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), |
165 | handle & ~PAGE_MASK, size, dir); | 164 | handle & ~PAGE_MASK, size, dir); |
166 | } | 165 | } |
@@ -198,12 +197,12 @@ struct dma_map_ops arm_dma_ops = { | |||
198 | EXPORT_SYMBOL(arm_dma_ops); | 197 | EXPORT_SYMBOL(arm_dma_ops); |
199 | 198 | ||
200 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, | 199 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, |
201 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); | 200 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs); |
202 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, | 201 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, |
203 | dma_addr_t handle, struct dma_attrs *attrs); | 202 | dma_addr_t handle, unsigned long attrs); |
204 | static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 203 | static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
205 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 204 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
206 | struct dma_attrs *attrs); | 205 | unsigned long attrs); |
207 | 206 | ||
208 | struct dma_map_ops arm_coherent_dma_ops = { | 207 | struct dma_map_ops arm_coherent_dma_ops = { |
209 | .alloc = arm_coherent_dma_alloc, | 208 | .alloc = arm_coherent_dma_alloc, |
@@ -639,11 +638,11 @@ static void __free_from_contiguous(struct device *dev, struct page *page, | |||
639 | dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); | 638 | dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); |
640 | } | 639 | } |
641 | 640 | ||
642 | static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) | 641 | static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot) |
643 | { | 642 | { |
644 | prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ? | 643 | prot = (attrs & DMA_ATTR_WRITE_COMBINE) ? |
645 | pgprot_writecombine(prot) : | 644 | pgprot_writecombine(prot) : |
646 | pgprot_dmacoherent(prot); | 645 | pgprot_dmacoherent(prot); |
647 | return prot; | 646 | return prot; |
648 | } | 647 | } |
649 | 648 | ||
@@ -751,7 +750,7 @@ static struct arm_dma_allocator remap_allocator = { | |||
751 | 750 | ||
752 | static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 751 | static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
753 | gfp_t gfp, pgprot_t prot, bool is_coherent, | 752 | gfp_t gfp, pgprot_t prot, bool is_coherent, |
754 | struct dma_attrs *attrs, const void *caller) | 753 | unsigned long attrs, const void *caller) |
755 | { | 754 | { |
756 | u64 mask = get_coherent_dma_mask(dev); | 755 | u64 mask = get_coherent_dma_mask(dev); |
757 | struct page *page = NULL; | 756 | struct page *page = NULL; |
@@ -764,7 +763,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
764 | .gfp = gfp, | 763 | .gfp = gfp, |
765 | .prot = prot, | 764 | .prot = prot, |
766 | .caller = caller, | 765 | .caller = caller, |
767 | .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs), | 766 | .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), |
768 | .coherent_flag = is_coherent ? COHERENT : NORMAL, | 767 | .coherent_flag = is_coherent ? COHERENT : NORMAL, |
769 | }; | 768 | }; |
770 | 769 | ||
@@ -834,7 +833,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
834 | * virtual and bus address for that space. | 833 | * virtual and bus address for that space. |
835 | */ | 834 | */ |
836 | void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 835 | void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
837 | gfp_t gfp, struct dma_attrs *attrs) | 836 | gfp_t gfp, unsigned long attrs) |
838 | { | 837 | { |
839 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); | 838 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); |
840 | 839 | ||
@@ -843,7 +842,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
843 | } | 842 | } |
844 | 843 | ||
845 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, | 844 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, |
846 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 845 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
847 | { | 846 | { |
848 | return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, | 847 | return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, |
849 | attrs, __builtin_return_address(0)); | 848 | attrs, __builtin_return_address(0)); |
@@ -851,7 +850,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size, | |||
851 | 850 | ||
852 | static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 851 | static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
853 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 852 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
854 | struct dma_attrs *attrs) | 853 | unsigned long attrs) |
855 | { | 854 | { |
856 | int ret = -ENXIO; | 855 | int ret = -ENXIO; |
857 | #ifdef CONFIG_MMU | 856 | #ifdef CONFIG_MMU |
@@ -879,14 +878,14 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
879 | */ | 878 | */ |
880 | static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 879 | static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
881 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 880 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
882 | struct dma_attrs *attrs) | 881 | unsigned long attrs) |
883 | { | 882 | { |
884 | return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); | 883 | return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); |
885 | } | 884 | } |
886 | 885 | ||
887 | int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 886 | int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
888 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 887 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
889 | struct dma_attrs *attrs) | 888 | unsigned long attrs) |
890 | { | 889 | { |
891 | #ifdef CONFIG_MMU | 890 | #ifdef CONFIG_MMU |
892 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); | 891 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); |
@@ -898,7 +897,7 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
898 | * Free a buffer as defined by the above mapping. | 897 | * Free a buffer as defined by the above mapping. |
899 | */ | 898 | */ |
900 | static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | 899 | static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, |
901 | dma_addr_t handle, struct dma_attrs *attrs, | 900 | dma_addr_t handle, unsigned long attrs, |
902 | bool is_coherent) | 901 | bool is_coherent) |
903 | { | 902 | { |
904 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); | 903 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); |
@@ -908,7 +907,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | |||
908 | .size = PAGE_ALIGN(size), | 907 | .size = PAGE_ALIGN(size), |
909 | .cpu_addr = cpu_addr, | 908 | .cpu_addr = cpu_addr, |
910 | .page = page, | 909 | .page = page, |
911 | .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs), | 910 | .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), |
912 | }; | 911 | }; |
913 | 912 | ||
914 | buf = arm_dma_buffer_find(cpu_addr); | 913 | buf = arm_dma_buffer_find(cpu_addr); |
@@ -920,20 +919,20 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | |||
920 | } | 919 | } |
921 | 920 | ||
922 | void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | 921 | void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, |
923 | dma_addr_t handle, struct dma_attrs *attrs) | 922 | dma_addr_t handle, unsigned long attrs) |
924 | { | 923 | { |
925 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); | 924 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); |
926 | } | 925 | } |
927 | 926 | ||
928 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, | 927 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, |
929 | dma_addr_t handle, struct dma_attrs *attrs) | 928 | dma_addr_t handle, unsigned long attrs) |
930 | { | 929 | { |
931 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); | 930 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); |
932 | } | 931 | } |
933 | 932 | ||
934 | int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, | 933 | int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
935 | void *cpu_addr, dma_addr_t handle, size_t size, | 934 | void *cpu_addr, dma_addr_t handle, size_t size, |
936 | struct dma_attrs *attrs) | 935 | unsigned long attrs) |
937 | { | 936 | { |
938 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); | 937 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); |
939 | int ret; | 938 | int ret; |
@@ -1066,7 +1065,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | |||
1066 | * here. | 1065 | * here. |
1067 | */ | 1066 | */ |
1068 | int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 1067 | int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
1069 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1068 | enum dma_data_direction dir, unsigned long attrs) |
1070 | { | 1069 | { |
1071 | struct dma_map_ops *ops = get_dma_ops(dev); | 1070 | struct dma_map_ops *ops = get_dma_ops(dev); |
1072 | struct scatterlist *s; | 1071 | struct scatterlist *s; |
@@ -1100,7 +1099,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
1100 | * rules concerning calls here are the same as for dma_unmap_single(). | 1099 | * rules concerning calls here are the same as for dma_unmap_single(). |
1101 | */ | 1100 | */ |
1102 | void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 1101 | void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
1103 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1102 | enum dma_data_direction dir, unsigned long attrs) |
1104 | { | 1103 | { |
1105 | struct dma_map_ops *ops = get_dma_ops(dev); | 1104 | struct dma_map_ops *ops = get_dma_ops(dev); |
1106 | struct scatterlist *s; | 1105 | struct scatterlist *s; |
@@ -1273,7 +1272,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping, | |||
1273 | static const int iommu_order_array[] = { 9, 8, 4, 0 }; | 1272 | static const int iommu_order_array[] = { 9, 8, 4, 0 }; |
1274 | 1273 | ||
1275 | static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, | 1274 | static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, |
1276 | gfp_t gfp, struct dma_attrs *attrs, | 1275 | gfp_t gfp, unsigned long attrs, |
1277 | int coherent_flag) | 1276 | int coherent_flag) |
1278 | { | 1277 | { |
1279 | struct page **pages; | 1278 | struct page **pages; |
@@ -1289,7 +1288,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, | |||
1289 | if (!pages) | 1288 | if (!pages) |
1290 | return NULL; | 1289 | return NULL; |
1291 | 1290 | ||
1292 | if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) | 1291 | if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) |
1293 | { | 1292 | { |
1294 | unsigned long order = get_order(size); | 1293 | unsigned long order = get_order(size); |
1295 | struct page *page; | 1294 | struct page *page; |
@@ -1307,7 +1306,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, | |||
1307 | } | 1306 | } |
1308 | 1307 | ||
1309 | /* Go straight to 4K chunks if caller says it's OK. */ | 1308 | /* Go straight to 4K chunks if caller says it's OK. */ |
1310 | if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) | 1309 | if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) |
1311 | order_idx = ARRAY_SIZE(iommu_order_array) - 1; | 1310 | order_idx = ARRAY_SIZE(iommu_order_array) - 1; |
1312 | 1311 | ||
1313 | /* | 1312 | /* |
@@ -1363,12 +1362,12 @@ error: | |||
1363 | } | 1362 | } |
1364 | 1363 | ||
1365 | static int __iommu_free_buffer(struct device *dev, struct page **pages, | 1364 | static int __iommu_free_buffer(struct device *dev, struct page **pages, |
1366 | size_t size, struct dma_attrs *attrs) | 1365 | size_t size, unsigned long attrs) |
1367 | { | 1366 | { |
1368 | int count = size >> PAGE_SHIFT; | 1367 | int count = size >> PAGE_SHIFT; |
1369 | int i; | 1368 | int i; |
1370 | 1369 | ||
1371 | if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { | 1370 | if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { |
1372 | dma_release_from_contiguous(dev, pages[0], count); | 1371 | dma_release_from_contiguous(dev, pages[0], count); |
1373 | } else { | 1372 | } else { |
1374 | for (i = 0; i < count; i++) | 1373 | for (i = 0; i < count; i++) |
@@ -1460,14 +1459,14 @@ static struct page **__atomic_get_pages(void *addr) | |||
1460 | return (struct page **)page; | 1459 | return (struct page **)page; |
1461 | } | 1460 | } |
1462 | 1461 | ||
1463 | static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) | 1462 | static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) |
1464 | { | 1463 | { |
1465 | struct vm_struct *area; | 1464 | struct vm_struct *area; |
1466 | 1465 | ||
1467 | if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) | 1466 | if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) |
1468 | return __atomic_get_pages(cpu_addr); | 1467 | return __atomic_get_pages(cpu_addr); |
1469 | 1468 | ||
1470 | if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) | 1469 | if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) |
1471 | return cpu_addr; | 1470 | return cpu_addr; |
1472 | 1471 | ||
1473 | area = find_vm_area(cpu_addr); | 1472 | area = find_vm_area(cpu_addr); |
@@ -1511,7 +1510,7 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr, | |||
1511 | } | 1510 | } |
1512 | 1511 | ||
1513 | static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, | 1512 | static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, |
1514 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs, | 1513 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs, |
1515 | int coherent_flag) | 1514 | int coherent_flag) |
1516 | { | 1515 | { |
1517 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); | 1516 | pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); |
@@ -1542,7 +1541,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, | |||
1542 | if (*handle == DMA_ERROR_CODE) | 1541 | if (*handle == DMA_ERROR_CODE) |
1543 | goto err_buffer; | 1542 | goto err_buffer; |
1544 | 1543 | ||
1545 | if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) | 1544 | if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) |
1546 | return pages; | 1545 | return pages; |
1547 | 1546 | ||
1548 | addr = __iommu_alloc_remap(pages, size, gfp, prot, | 1547 | addr = __iommu_alloc_remap(pages, size, gfp, prot, |
@@ -1560,20 +1559,20 @@ err_buffer: | |||
1560 | } | 1559 | } |
1561 | 1560 | ||
1562 | static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, | 1561 | static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, |
1563 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 1562 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
1564 | { | 1563 | { |
1565 | return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); | 1564 | return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); |
1566 | } | 1565 | } |
1567 | 1566 | ||
1568 | static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, | 1567 | static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, |
1569 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 1568 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
1570 | { | 1569 | { |
1571 | return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); | 1570 | return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); |
1572 | } | 1571 | } |
1573 | 1572 | ||
1574 | static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, | 1573 | static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, |
1575 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 1574 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
1576 | struct dma_attrs *attrs) | 1575 | unsigned long attrs) |
1577 | { | 1576 | { |
1578 | unsigned long uaddr = vma->vm_start; | 1577 | unsigned long uaddr = vma->vm_start; |
1579 | unsigned long usize = vma->vm_end - vma->vm_start; | 1578 | unsigned long usize = vma->vm_end - vma->vm_start; |
@@ -1603,7 +1602,7 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma | |||
1603 | } | 1602 | } |
1604 | static int arm_iommu_mmap_attrs(struct device *dev, | 1603 | static int arm_iommu_mmap_attrs(struct device *dev, |
1605 | struct vm_area_struct *vma, void *cpu_addr, | 1604 | struct vm_area_struct *vma, void *cpu_addr, |
1606 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | 1605 | dma_addr_t dma_addr, size_t size, unsigned long attrs) |
1607 | { | 1606 | { |
1608 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); | 1607 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); |
1609 | 1608 | ||
@@ -1612,7 +1611,7 @@ static int arm_iommu_mmap_attrs(struct device *dev, | |||
1612 | 1611 | ||
1613 | static int arm_coherent_iommu_mmap_attrs(struct device *dev, | 1612 | static int arm_coherent_iommu_mmap_attrs(struct device *dev, |
1614 | struct vm_area_struct *vma, void *cpu_addr, | 1613 | struct vm_area_struct *vma, void *cpu_addr, |
1615 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | 1614 | dma_addr_t dma_addr, size_t size, unsigned long attrs) |
1616 | { | 1615 | { |
1617 | return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); | 1616 | return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); |
1618 | } | 1617 | } |
@@ -1622,7 +1621,7 @@ static int arm_coherent_iommu_mmap_attrs(struct device *dev, | |||
1622 | * Must not be called with IRQs disabled. | 1621 | * Must not be called with IRQs disabled. |
1623 | */ | 1622 | */ |
1624 | void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | 1623 | void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, |
1625 | dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag) | 1624 | dma_addr_t handle, unsigned long attrs, int coherent_flag) |
1626 | { | 1625 | { |
1627 | struct page **pages; | 1626 | struct page **pages; |
1628 | size = PAGE_ALIGN(size); | 1627 | size = PAGE_ALIGN(size); |
@@ -1638,7 +1637,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
1638 | return; | 1637 | return; |
1639 | } | 1638 | } |
1640 | 1639 | ||
1641 | if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { | 1640 | if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) { |
1642 | dma_common_free_remap(cpu_addr, size, | 1641 | dma_common_free_remap(cpu_addr, size, |
1643 | VM_ARM_DMA_CONSISTENT | VM_USERMAP); | 1642 | VM_ARM_DMA_CONSISTENT | VM_USERMAP); |
1644 | } | 1643 | } |
@@ -1648,20 +1647,20 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
1648 | } | 1647 | } |
1649 | 1648 | ||
1650 | void arm_iommu_free_attrs(struct device *dev, size_t size, | 1649 | void arm_iommu_free_attrs(struct device *dev, size_t size, |
1651 | void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) | 1650 | void *cpu_addr, dma_addr_t handle, unsigned long attrs) |
1652 | { | 1651 | { |
1653 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); | 1652 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); |
1654 | } | 1653 | } |
1655 | 1654 | ||
1656 | void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, | 1655 | void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, |
1657 | void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) | 1656 | void *cpu_addr, dma_addr_t handle, unsigned long attrs) |
1658 | { | 1657 | { |
1659 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); | 1658 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); |
1660 | } | 1659 | } |
1661 | 1660 | ||
1662 | static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, | 1661 | static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, |
1663 | void *cpu_addr, dma_addr_t dma_addr, | 1662 | void *cpu_addr, dma_addr_t dma_addr, |
1664 | size_t size, struct dma_attrs *attrs) | 1663 | size_t size, unsigned long attrs) |
1665 | { | 1664 | { |
1666 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 1665 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
1667 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); | 1666 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); |
@@ -1699,7 +1698,7 @@ static int __dma_direction_to_prot(enum dma_data_direction dir) | |||
1699 | */ | 1698 | */ |
1700 | static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | 1699 | static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, |
1701 | size_t size, dma_addr_t *handle, | 1700 | size_t size, dma_addr_t *handle, |
1702 | enum dma_data_direction dir, struct dma_attrs *attrs, | 1701 | enum dma_data_direction dir, unsigned long attrs, |
1703 | bool is_coherent) | 1702 | bool is_coherent) |
1704 | { | 1703 | { |
1705 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 1704 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
@@ -1720,8 +1719,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | |||
1720 | phys_addr_t phys = page_to_phys(sg_page(s)); | 1719 | phys_addr_t phys = page_to_phys(sg_page(s)); |
1721 | unsigned int len = PAGE_ALIGN(s->offset + s->length); | 1720 | unsigned int len = PAGE_ALIGN(s->offset + s->length); |
1722 | 1721 | ||
1723 | if (!is_coherent && | 1722 | if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
1724 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1725 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); | 1723 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); |
1726 | 1724 | ||
1727 | prot = __dma_direction_to_prot(dir); | 1725 | prot = __dma_direction_to_prot(dir); |
@@ -1742,7 +1740,7 @@ fail: | |||
1742 | } | 1740 | } |
1743 | 1741 | ||
1744 | static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 1742 | static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
1745 | enum dma_data_direction dir, struct dma_attrs *attrs, | 1743 | enum dma_data_direction dir, unsigned long attrs, |
1746 | bool is_coherent) | 1744 | bool is_coherent) |
1747 | { | 1745 | { |
1748 | struct scatterlist *s = sg, *dma = sg, *start = sg; | 1746 | struct scatterlist *s = sg, *dma = sg, *start = sg; |
@@ -1800,7 +1798,7 @@ bad_mapping: | |||
1800 | * obtained via sg_dma_{address,length}. | 1798 | * obtained via sg_dma_{address,length}. |
1801 | */ | 1799 | */ |
1802 | int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, | 1800 | int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, |
1803 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 1801 | int nents, enum dma_data_direction dir, unsigned long attrs) |
1804 | { | 1802 | { |
1805 | return __iommu_map_sg(dev, sg, nents, dir, attrs, true); | 1803 | return __iommu_map_sg(dev, sg, nents, dir, attrs, true); |
1806 | } | 1804 | } |
@@ -1818,14 +1816,14 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, | |||
1818 | * sg_dma_{address,length}. | 1816 | * sg_dma_{address,length}. |
1819 | */ | 1817 | */ |
1820 | int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, | 1818 | int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, |
1821 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 1819 | int nents, enum dma_data_direction dir, unsigned long attrs) |
1822 | { | 1820 | { |
1823 | return __iommu_map_sg(dev, sg, nents, dir, attrs, false); | 1821 | return __iommu_map_sg(dev, sg, nents, dir, attrs, false); |
1824 | } | 1822 | } |
1825 | 1823 | ||
1826 | static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | 1824 | static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, |
1827 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs, | 1825 | int nents, enum dma_data_direction dir, |
1828 | bool is_coherent) | 1826 | unsigned long attrs, bool is_coherent) |
1829 | { | 1827 | { |
1830 | struct scatterlist *s; | 1828 | struct scatterlist *s; |
1831 | int i; | 1829 | int i; |
@@ -1834,8 +1832,7 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
1834 | if (sg_dma_len(s)) | 1832 | if (sg_dma_len(s)) |
1835 | __iommu_remove_mapping(dev, sg_dma_address(s), | 1833 | __iommu_remove_mapping(dev, sg_dma_address(s), |
1836 | sg_dma_len(s)); | 1834 | sg_dma_len(s)); |
1837 | if (!is_coherent && | 1835 | if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
1838 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1839 | __dma_page_dev_to_cpu(sg_page(s), s->offset, | 1836 | __dma_page_dev_to_cpu(sg_page(s), s->offset, |
1840 | s->length, dir); | 1837 | s->length, dir); |
1841 | } | 1838 | } |
@@ -1852,7 +1849,8 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
1852 | * rules concerning calls here are the same as for dma_unmap_single(). | 1849 | * rules concerning calls here are the same as for dma_unmap_single(). |
1853 | */ | 1850 | */ |
1854 | void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | 1851 | void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, |
1855 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 1852 | int nents, enum dma_data_direction dir, |
1853 | unsigned long attrs) | ||
1856 | { | 1854 | { |
1857 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); | 1855 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); |
1858 | } | 1856 | } |
@@ -1868,7 +1866,8 @@ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
1868 | * rules concerning calls here are the same as for dma_unmap_single(). | 1866 | * rules concerning calls here are the same as for dma_unmap_single(). |
1869 | */ | 1867 | */ |
1870 | void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 1868 | void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
1871 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1869 | enum dma_data_direction dir, |
1870 | unsigned long attrs) | ||
1872 | { | 1871 | { |
1873 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); | 1872 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); |
1874 | } | 1873 | } |
@@ -1921,7 +1920,7 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
1921 | */ | 1920 | */ |
1922 | static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, | 1921 | static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, |
1923 | unsigned long offset, size_t size, enum dma_data_direction dir, | 1922 | unsigned long offset, size_t size, enum dma_data_direction dir, |
1924 | struct dma_attrs *attrs) | 1923 | unsigned long attrs) |
1925 | { | 1924 | { |
1926 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 1925 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
1927 | dma_addr_t dma_addr; | 1926 | dma_addr_t dma_addr; |
@@ -1955,9 +1954,9 @@ fail: | |||
1955 | */ | 1954 | */ |
1956 | static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, | 1955 | static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, |
1957 | unsigned long offset, size_t size, enum dma_data_direction dir, | 1956 | unsigned long offset, size_t size, enum dma_data_direction dir, |
1958 | struct dma_attrs *attrs) | 1957 | unsigned long attrs) |
1959 | { | 1958 | { |
1960 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 1959 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
1961 | __dma_page_cpu_to_dev(page, offset, size, dir); | 1960 | __dma_page_cpu_to_dev(page, offset, size, dir); |
1962 | 1961 | ||
1963 | return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); | 1962 | return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); |
@@ -1973,8 +1972,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, | |||
1973 | * Coherent IOMMU aware version of arm_dma_unmap_page() | 1972 | * Coherent IOMMU aware version of arm_dma_unmap_page() |
1974 | */ | 1973 | */ |
1975 | static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, | 1974 | static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, |
1976 | size_t size, enum dma_data_direction dir, | 1975 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
1977 | struct dma_attrs *attrs) | ||
1978 | { | 1976 | { |
1979 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 1977 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
1980 | dma_addr_t iova = handle & PAGE_MASK; | 1978 | dma_addr_t iova = handle & PAGE_MASK; |
@@ -1998,8 +1996,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, | |||
1998 | * IOMMU aware version of arm_dma_unmap_page() | 1996 | * IOMMU aware version of arm_dma_unmap_page() |
1999 | */ | 1997 | */ |
2000 | static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, | 1998 | static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, |
2001 | size_t size, enum dma_data_direction dir, | 1999 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
2002 | struct dma_attrs *attrs) | ||
2003 | { | 2000 | { |
2004 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 2001 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
2005 | dma_addr_t iova = handle & PAGE_MASK; | 2002 | dma_addr_t iova = handle & PAGE_MASK; |
@@ -2010,7 +2007,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, | |||
2010 | if (!iova) | 2007 | if (!iova) |
2011 | return; | 2008 | return; |
2012 | 2009 | ||
2013 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 2010 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
2014 | __dma_page_dev_to_cpu(page, offset, size, dir); | 2011 | __dma_page_dev_to_cpu(page, offset, size, dir); |
2015 | 2012 | ||
2016 | iommu_unmap(mapping->domain, iova, len); | 2013 | iommu_unmap(mapping->domain, iova, len); |
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index c5f9a9e3d1f3..d062f08f5020 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c | |||
@@ -98,11 +98,11 @@ static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle, | |||
98 | 98 | ||
99 | void __xen_dma_map_page(struct device *hwdev, struct page *page, | 99 | void __xen_dma_map_page(struct device *hwdev, struct page *page, |
100 | dma_addr_t dev_addr, unsigned long offset, size_t size, | 100 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
101 | enum dma_data_direction dir, struct dma_attrs *attrs) | 101 | enum dma_data_direction dir, unsigned long attrs) |
102 | { | 102 | { |
103 | if (is_device_dma_coherent(hwdev)) | 103 | if (is_device_dma_coherent(hwdev)) |
104 | return; | 104 | return; |
105 | if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 105 | if (attrs & DMA_ATTR_SKIP_CPU_SYNC) |
106 | return; | 106 | return; |
107 | 107 | ||
108 | __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir); | 108 | __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir); |
@@ -110,12 +110,12 @@ void __xen_dma_map_page(struct device *hwdev, struct page *page, | |||
110 | 110 | ||
111 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 111 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
112 | size_t size, enum dma_data_direction dir, | 112 | size_t size, enum dma_data_direction dir, |
113 | struct dma_attrs *attrs) | 113 | unsigned long attrs) |
114 | 114 | ||
115 | { | 115 | { |
116 | if (is_device_dma_coherent(hwdev)) | 116 | if (is_device_dma_coherent(hwdev)) |
117 | return; | 117 | return; |
118 | if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 118 | if (attrs & DMA_ATTR_SKIP_CPU_SYNC) |
119 | return; | 119 | return; |
120 | 120 | ||
121 | __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); | 121 | __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); |
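Across the hunks in this series the conversion follows one mechanical pattern: the opaque struct dma_attrs * bitmap becomes a plain unsigned long bit-field, so dma_get_attr() lookups turn into direct mask tests. The stand-alone sketch below only illustrates that pattern under stated assumptions; the DMA_ATTR_* bit positions are placeholders for illustration (the real definitions live in the kernel's dma-mapping header), and map_page_demo() is not kernel code.

/* Minimal illustration of the attrs conversion seen in the hunks above. */
#include <stdio.h>

#define DMA_ATTR_WRITE_COMBINE  (1UL << 2)   /* placeholder bit positions */
#define DMA_ATTR_SKIP_CPU_SYNC  (1UL << 5)

static void map_page_demo(unsigned long attrs)
{
        /* Old style: dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs) on a
         * struct dma_attrs *; new style: a plain bitmask test. */
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                printf("would sync CPU caches around the transfer\n");
        else
                printf("caller asked to skip the CPU sync\n");
}

int main(void)
{
        map_page_demo(0);                       /* "no attributes" is now 0, not NULL */
        map_page_demo(DMA_ATTR_SKIP_CPU_SYNC);
        return 0;
}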
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index f6c55afab3e2..c4284c432ae8 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -32,10 +32,10 @@ | |||
32 | 32 | ||
33 | static int swiotlb __read_mostly; | 33 | static int swiotlb __read_mostly; |
34 | 34 | ||
35 | static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, | 35 | static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, |
36 | bool coherent) | 36 | bool coherent) |
37 | { | 37 | { |
38 | if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) | 38 | if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE)) |
39 | return pgprot_writecombine(prot); | 39 | return pgprot_writecombine(prot); |
40 | return prot; | 40 | return prot; |
41 | } | 41 | } |
@@ -91,7 +91,7 @@ static int __free_from_pool(void *start, size_t size) | |||
91 | 91 | ||
92 | static void *__dma_alloc_coherent(struct device *dev, size_t size, | 92 | static void *__dma_alloc_coherent(struct device *dev, size_t size, |
93 | dma_addr_t *dma_handle, gfp_t flags, | 93 | dma_addr_t *dma_handle, gfp_t flags, |
94 | struct dma_attrs *attrs) | 94 | unsigned long attrs) |
95 | { | 95 | { |
96 | if (dev == NULL) { | 96 | if (dev == NULL) { |
97 | WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); | 97 | WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); |
@@ -121,7 +121,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, | |||
121 | 121 | ||
122 | static void __dma_free_coherent(struct device *dev, size_t size, | 122 | static void __dma_free_coherent(struct device *dev, size_t size, |
123 | void *vaddr, dma_addr_t dma_handle, | 123 | void *vaddr, dma_addr_t dma_handle, |
124 | struct dma_attrs *attrs) | 124 | unsigned long attrs) |
125 | { | 125 | { |
126 | bool freed; | 126 | bool freed; |
127 | phys_addr_t paddr = dma_to_phys(dev, dma_handle); | 127 | phys_addr_t paddr = dma_to_phys(dev, dma_handle); |
@@ -140,7 +140,7 @@ static void __dma_free_coherent(struct device *dev, size_t size, | |||
140 | 140 | ||
141 | static void *__dma_alloc(struct device *dev, size_t size, | 141 | static void *__dma_alloc(struct device *dev, size_t size, |
142 | dma_addr_t *dma_handle, gfp_t flags, | 142 | dma_addr_t *dma_handle, gfp_t flags, |
143 | struct dma_attrs *attrs) | 143 | unsigned long attrs) |
144 | { | 144 | { |
145 | struct page *page; | 145 | struct page *page; |
146 | void *ptr, *coherent_ptr; | 146 | void *ptr, *coherent_ptr; |
@@ -188,7 +188,7 @@ no_mem: | |||
188 | 188 | ||
189 | static void __dma_free(struct device *dev, size_t size, | 189 | static void __dma_free(struct device *dev, size_t size, |
190 | void *vaddr, dma_addr_t dma_handle, | 190 | void *vaddr, dma_addr_t dma_handle, |
191 | struct dma_attrs *attrs) | 191 | unsigned long attrs) |
192 | { | 192 | { |
193 | void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); | 193 | void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); |
194 | 194 | ||
@@ -205,7 +205,7 @@ static void __dma_free(struct device *dev, size_t size, | |||
205 | static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, | 205 | static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, |
206 | unsigned long offset, size_t size, | 206 | unsigned long offset, size_t size, |
207 | enum dma_data_direction dir, | 207 | enum dma_data_direction dir, |
208 | struct dma_attrs *attrs) | 208 | unsigned long attrs) |
209 | { | 209 | { |
210 | dma_addr_t dev_addr; | 210 | dma_addr_t dev_addr; |
211 | 211 | ||
@@ -219,7 +219,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, | |||
219 | 219 | ||
220 | static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, | 220 | static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, |
221 | size_t size, enum dma_data_direction dir, | 221 | size_t size, enum dma_data_direction dir, |
222 | struct dma_attrs *attrs) | 222 | unsigned long attrs) |
223 | { | 223 | { |
224 | if (!is_device_dma_coherent(dev)) | 224 | if (!is_device_dma_coherent(dev)) |
225 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | 225 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); |
@@ -228,7 +228,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
228 | 228 | ||
229 | static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | 229 | static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, |
230 | int nelems, enum dma_data_direction dir, | 230 | int nelems, enum dma_data_direction dir, |
231 | struct dma_attrs *attrs) | 231 | unsigned long attrs) |
232 | { | 232 | { |
233 | struct scatterlist *sg; | 233 | struct scatterlist *sg; |
234 | int i, ret; | 234 | int i, ret; |
@@ -245,7 +245,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
245 | static void __swiotlb_unmap_sg_attrs(struct device *dev, | 245 | static void __swiotlb_unmap_sg_attrs(struct device *dev, |
246 | struct scatterlist *sgl, int nelems, | 246 | struct scatterlist *sgl, int nelems, |
247 | enum dma_data_direction dir, | 247 | enum dma_data_direction dir, |
248 | struct dma_attrs *attrs) | 248 | unsigned long attrs) |
249 | { | 249 | { |
250 | struct scatterlist *sg; | 250 | struct scatterlist *sg; |
251 | int i; | 251 | int i; |
@@ -306,7 +306,7 @@ static void __swiotlb_sync_sg_for_device(struct device *dev, | |||
306 | static int __swiotlb_mmap(struct device *dev, | 306 | static int __swiotlb_mmap(struct device *dev, |
307 | struct vm_area_struct *vma, | 307 | struct vm_area_struct *vma, |
308 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 308 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
309 | struct dma_attrs *attrs) | 309 | unsigned long attrs) |
310 | { | 310 | { |
311 | int ret = -ENXIO; | 311 | int ret = -ENXIO; |
312 | unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> | 312 | unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> |
@@ -333,7 +333,7 @@ static int __swiotlb_mmap(struct device *dev, | |||
333 | 333 | ||
334 | static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, | 334 | static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, |
335 | void *cpu_addr, dma_addr_t handle, size_t size, | 335 | void *cpu_addr, dma_addr_t handle, size_t size, |
336 | struct dma_attrs *attrs) | 336 | unsigned long attrs) |
337 | { | 337 | { |
338 | int ret = sg_alloc_table(sgt, 1, GFP_KERNEL); | 338 | int ret = sg_alloc_table(sgt, 1, GFP_KERNEL); |
339 | 339 | ||
@@ -435,21 +435,21 @@ out: | |||
435 | 435 | ||
436 | static void *__dummy_alloc(struct device *dev, size_t size, | 436 | static void *__dummy_alloc(struct device *dev, size_t size, |
437 | dma_addr_t *dma_handle, gfp_t flags, | 437 | dma_addr_t *dma_handle, gfp_t flags, |
438 | struct dma_attrs *attrs) | 438 | unsigned long attrs) |
439 | { | 439 | { |
440 | return NULL; | 440 | return NULL; |
441 | } | 441 | } |
442 | 442 | ||
443 | static void __dummy_free(struct device *dev, size_t size, | 443 | static void __dummy_free(struct device *dev, size_t size, |
444 | void *vaddr, dma_addr_t dma_handle, | 444 | void *vaddr, dma_addr_t dma_handle, |
445 | struct dma_attrs *attrs) | 445 | unsigned long attrs) |
446 | { | 446 | { |
447 | } | 447 | } |
448 | 448 | ||
449 | static int __dummy_mmap(struct device *dev, | 449 | static int __dummy_mmap(struct device *dev, |
450 | struct vm_area_struct *vma, | 450 | struct vm_area_struct *vma, |
451 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 451 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
452 | struct dma_attrs *attrs) | 452 | unsigned long attrs) |
453 | { | 453 | { |
454 | return -ENXIO; | 454 | return -ENXIO; |
455 | } | 455 | } |
@@ -457,20 +457,20 @@ static int __dummy_mmap(struct device *dev, | |||
457 | static dma_addr_t __dummy_map_page(struct device *dev, struct page *page, | 457 | static dma_addr_t __dummy_map_page(struct device *dev, struct page *page, |
458 | unsigned long offset, size_t size, | 458 | unsigned long offset, size_t size, |
459 | enum dma_data_direction dir, | 459 | enum dma_data_direction dir, |
460 | struct dma_attrs *attrs) | 460 | unsigned long attrs) |
461 | { | 461 | { |
462 | return DMA_ERROR_CODE; | 462 | return DMA_ERROR_CODE; |
463 | } | 463 | } |
464 | 464 | ||
465 | static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, | 465 | static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, |
466 | size_t size, enum dma_data_direction dir, | 466 | size_t size, enum dma_data_direction dir, |
467 | struct dma_attrs *attrs) | 467 | unsigned long attrs) |
468 | { | 468 | { |
469 | } | 469 | } |
470 | 470 | ||
471 | static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, | 471 | static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, |
472 | int nelems, enum dma_data_direction dir, | 472 | int nelems, enum dma_data_direction dir, |
473 | struct dma_attrs *attrs) | 473 | unsigned long attrs) |
474 | { | 474 | { |
475 | return 0; | 475 | return 0; |
476 | } | 476 | } |
@@ -478,7 +478,7 @@ static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, | |||
478 | static void __dummy_unmap_sg(struct device *dev, | 478 | static void __dummy_unmap_sg(struct device *dev, |
479 | struct scatterlist *sgl, int nelems, | 479 | struct scatterlist *sgl, int nelems, |
480 | enum dma_data_direction dir, | 480 | enum dma_data_direction dir, |
481 | struct dma_attrs *attrs) | 481 | unsigned long attrs) |
482 | { | 482 | { |
483 | } | 483 | } |
484 | 484 | ||
@@ -553,7 +553,7 @@ static void flush_page(struct device *dev, const void *virt, phys_addr_t phys) | |||
553 | 553 | ||
554 | static void *__iommu_alloc_attrs(struct device *dev, size_t size, | 554 | static void *__iommu_alloc_attrs(struct device *dev, size_t size, |
555 | dma_addr_t *handle, gfp_t gfp, | 555 | dma_addr_t *handle, gfp_t gfp, |
556 | struct dma_attrs *attrs) | 556 | unsigned long attrs) |
557 | { | 557 | { |
558 | bool coherent = is_device_dma_coherent(dev); | 558 | bool coherent = is_device_dma_coherent(dev); |
559 | int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); | 559 | int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); |
@@ -613,7 +613,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
613 | } | 613 | } |
614 | 614 | ||
615 | static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | 615 | static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, |
616 | dma_addr_t handle, struct dma_attrs *attrs) | 616 | dma_addr_t handle, unsigned long attrs) |
617 | { | 617 | { |
618 | size_t iosize = size; | 618 | size_t iosize = size; |
619 | 619 | ||
@@ -629,7 +629,7 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
629 | * Hence how dodgy the below logic looks... | 629 | * Hence how dodgy the below logic looks... |
630 | */ | 630 | */ |
631 | if (__in_atomic_pool(cpu_addr, size)) { | 631 | if (__in_atomic_pool(cpu_addr, size)) { |
632 | iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); | 632 | iommu_dma_unmap_page(dev, handle, iosize, 0, 0); |
633 | __free_from_pool(cpu_addr, size); | 633 | __free_from_pool(cpu_addr, size); |
634 | } else if (is_vmalloc_addr(cpu_addr)){ | 634 | } else if (is_vmalloc_addr(cpu_addr)){ |
635 | struct vm_struct *area = find_vm_area(cpu_addr); | 635 | struct vm_struct *area = find_vm_area(cpu_addr); |
@@ -639,14 +639,14 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | |||
639 | iommu_dma_free(dev, area->pages, iosize, &handle); | 639 | iommu_dma_free(dev, area->pages, iosize, &handle); |
640 | dma_common_free_remap(cpu_addr, size, VM_USERMAP); | 640 | dma_common_free_remap(cpu_addr, size, VM_USERMAP); |
641 | } else { | 641 | } else { |
642 | iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); | 642 | iommu_dma_unmap_page(dev, handle, iosize, 0, 0); |
643 | __free_pages(virt_to_page(cpu_addr), get_order(size)); | 643 | __free_pages(virt_to_page(cpu_addr), get_order(size)); |
644 | } | 644 | } |
645 | } | 645 | } |
646 | 646 | ||
647 | static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, | 647 | static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, |
648 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 648 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
649 | struct dma_attrs *attrs) | 649 | unsigned long attrs) |
650 | { | 650 | { |
651 | struct vm_struct *area; | 651 | struct vm_struct *area; |
652 | int ret; | 652 | int ret; |
@@ -666,7 +666,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, | |||
666 | 666 | ||
667 | static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, | 667 | static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, |
668 | void *cpu_addr, dma_addr_t dma_addr, | 668 | void *cpu_addr, dma_addr_t dma_addr, |
669 | size_t size, struct dma_attrs *attrs) | 669 | size_t size, unsigned long attrs) |
670 | { | 670 | { |
671 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 671 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
672 | struct vm_struct *area = find_vm_area(cpu_addr); | 672 | struct vm_struct *area = find_vm_area(cpu_addr); |
@@ -707,14 +707,14 @@ static void __iommu_sync_single_for_device(struct device *dev, | |||
707 | static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, | 707 | static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, |
708 | unsigned long offset, size_t size, | 708 | unsigned long offset, size_t size, |
709 | enum dma_data_direction dir, | 709 | enum dma_data_direction dir, |
710 | struct dma_attrs *attrs) | 710 | unsigned long attrs) |
711 | { | 711 | { |
712 | bool coherent = is_device_dma_coherent(dev); | 712 | bool coherent = is_device_dma_coherent(dev); |
713 | int prot = dma_direction_to_prot(dir, coherent); | 713 | int prot = dma_direction_to_prot(dir, coherent); |
714 | dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); | 714 | dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); |
715 | 715 | ||
716 | if (!iommu_dma_mapping_error(dev, dev_addr) && | 716 | if (!iommu_dma_mapping_error(dev, dev_addr) && |
717 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 717 | (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
718 | __iommu_sync_single_for_device(dev, dev_addr, size, dir); | 718 | __iommu_sync_single_for_device(dev, dev_addr, size, dir); |
719 | 719 | ||
720 | return dev_addr; | 720 | return dev_addr; |
@@ -722,9 +722,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, | |||
722 | 722 | ||
723 | static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr, | 723 | static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr, |
724 | size_t size, enum dma_data_direction dir, | 724 | size_t size, enum dma_data_direction dir, |
725 | struct dma_attrs *attrs) | 725 | unsigned long attrs) |
726 | { | 726 | { |
727 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 727 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
728 | __iommu_sync_single_for_cpu(dev, dev_addr, size, dir); | 728 | __iommu_sync_single_for_cpu(dev, dev_addr, size, dir); |
729 | 729 | ||
730 | iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs); | 730 | iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs); |
@@ -760,11 +760,11 @@ static void __iommu_sync_sg_for_device(struct device *dev, | |||
760 | 760 | ||
761 | static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | 761 | static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, |
762 | int nelems, enum dma_data_direction dir, | 762 | int nelems, enum dma_data_direction dir, |
763 | struct dma_attrs *attrs) | 763 | unsigned long attrs) |
764 | { | 764 | { |
765 | bool coherent = is_device_dma_coherent(dev); | 765 | bool coherent = is_device_dma_coherent(dev); |
766 | 766 | ||
767 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 767 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
768 | __iommu_sync_sg_for_device(dev, sgl, nelems, dir); | 768 | __iommu_sync_sg_for_device(dev, sgl, nelems, dir); |
769 | 769 | ||
770 | return iommu_dma_map_sg(dev, sgl, nelems, | 770 | return iommu_dma_map_sg(dev, sgl, nelems, |
@@ -774,9 +774,9 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
774 | static void __iommu_unmap_sg_attrs(struct device *dev, | 774 | static void __iommu_unmap_sg_attrs(struct device *dev, |
775 | struct scatterlist *sgl, int nelems, | 775 | struct scatterlist *sgl, int nelems, |
776 | enum dma_data_direction dir, | 776 | enum dma_data_direction dir, |
777 | struct dma_attrs *attrs) | 777 | unsigned long attrs) |
778 | { | 778 | { |
779 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 779 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
780 | __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir); | 780 | __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir); |
781 | 781 | ||
782 | iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs); | 782 | iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs); |
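The arm64 hunks also show the caller side of the conversion: where "no attributes" used to be a NULL struct dma_attrs * (see the iommu_dma_unmap_page() calls above), it is now the literal 0, and several attributes are combined by OR-ing flags together rather than by building up a struct dma_attrs with the old dma_set_attr() helper. A hedged sketch of that calling convention follows, reusing the placeholder bits from the previous example; dummy_map_page() is a hypothetical stand-in, not a kernel interface.

/* Hypothetical op with the post-conversion parameter shape. */
#define DMA_ATTR_WRITE_COMBINE  (1UL << 2)   /* placeholders, as above */
#define DMA_ATTR_SKIP_CPU_SYNC  (1UL << 5)

static unsigned long dummy_map_page(unsigned long offset, unsigned long size,
                                    unsigned long attrs)
{
        /* A real implementation would map the page; here we only report
         * whether the caller requested the CPU sync to be skipped. */
        return (attrs & DMA_ATTR_SKIP_CPU_SYNC) ? size : offset;
}

int main(void)
{
        dummy_map_page(0, 4096, 0);             /* "no attributes" is 0, not NULL */
        dummy_map_page(0, 4096, DMA_ATTR_WRITE_COMBINE | DMA_ATTR_SKIP_CPU_SYNC);
        return 0;
}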
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c index 92cf1fb2b3e6..58610d0df7ed 100644 --- a/arch/avr32/mm/dma-coherent.c +++ b/arch/avr32/mm/dma-coherent.c | |||
@@ -99,7 +99,7 @@ static void __dma_free(struct device *dev, size_t size, | |||
99 | } | 99 | } |
100 | 100 | ||
101 | static void *avr32_dma_alloc(struct device *dev, size_t size, | 101 | static void *avr32_dma_alloc(struct device *dev, size_t size, |
102 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 102 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
103 | { | 103 | { |
104 | struct page *page; | 104 | struct page *page; |
105 | dma_addr_t phys; | 105 | dma_addr_t phys; |
@@ -109,7 +109,7 @@ static void *avr32_dma_alloc(struct device *dev, size_t size, | |||
109 | return NULL; | 109 | return NULL; |
110 | phys = page_to_phys(page); | 110 | phys = page_to_phys(page); |
111 | 111 | ||
112 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { | 112 | if (attrs & DMA_ATTR_WRITE_COMBINE) { |
113 | /* Now, map the page into P3 with write-combining turned on */ | 113 | /* Now, map the page into P3 with write-combining turned on */ |
114 | *handle = phys; | 114 | *handle = phys; |
115 | return __ioremap(phys, size, _PAGE_BUFFER); | 115 | return __ioremap(phys, size, _PAGE_BUFFER); |
@@ -119,11 +119,11 @@ static void *avr32_dma_alloc(struct device *dev, size_t size, | |||
119 | } | 119 | } |
120 | 120 | ||
121 | static void avr32_dma_free(struct device *dev, size_t size, | 121 | static void avr32_dma_free(struct device *dev, size_t size, |
122 | void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) | 122 | void *cpu_addr, dma_addr_t handle, unsigned long attrs) |
123 | { | 123 | { |
124 | struct page *page; | 124 | struct page *page; |
125 | 125 | ||
126 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { | 126 | if (attrs & DMA_ATTR_WRITE_COMBINE) { |
127 | iounmap(cpu_addr); | 127 | iounmap(cpu_addr); |
128 | 128 | ||
129 | page = phys_to_page(handle); | 129 | page = phys_to_page(handle); |
@@ -142,7 +142,7 @@ static void avr32_dma_free(struct device *dev, size_t size, | |||
142 | 142 | ||
143 | static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, | 143 | static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, |
144 | unsigned long offset, size_t size, | 144 | unsigned long offset, size_t size, |
145 | enum dma_data_direction direction, struct dma_attrs *attrs) | 145 | enum dma_data_direction direction, unsigned long attrs) |
146 | { | 146 | { |
147 | void *cpu_addr = page_address(page) + offset; | 147 | void *cpu_addr = page_address(page) + offset; |
148 | 148 | ||
@@ -152,7 +152,7 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, | |||
152 | 152 | ||
153 | static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 153 | static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
154 | int nents, enum dma_data_direction direction, | 154 | int nents, enum dma_data_direction direction, |
155 | struct dma_attrs *attrs) | 155 | unsigned long attrs) |
156 | { | 156 | { |
157 | int i; | 157 | int i; |
158 | struct scatterlist *sg; | 158 | struct scatterlist *sg; |
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index 771afe6e4264..53fbbb61aa86 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c | |||
@@ -79,7 +79,7 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages) | |||
79 | } | 79 | } |
80 | 80 | ||
81 | static void *bfin_dma_alloc(struct device *dev, size_t size, | 81 | static void *bfin_dma_alloc(struct device *dev, size_t size, |
82 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 82 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
83 | { | 83 | { |
84 | void *ret; | 84 | void *ret; |
85 | 85 | ||
@@ -94,7 +94,7 @@ static void *bfin_dma_alloc(struct device *dev, size_t size, | |||
94 | } | 94 | } |
95 | 95 | ||
96 | static void bfin_dma_free(struct device *dev, size_t size, void *vaddr, | 96 | static void bfin_dma_free(struct device *dev, size_t size, void *vaddr, |
97 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 97 | dma_addr_t dma_handle, unsigned long attrs) |
98 | { | 98 | { |
99 | __free_dma_pages((unsigned long)vaddr, get_pages(size)); | 99 | __free_dma_pages((unsigned long)vaddr, get_pages(size)); |
100 | } | 100 | } |
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(__dma_sync); | |||
111 | 111 | ||
112 | static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list, | 112 | static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list, |
113 | int nents, enum dma_data_direction direction, | 113 | int nents, enum dma_data_direction direction, |
114 | struct dma_attrs *attrs) | 114 | unsigned long attrs) |
115 | { | 115 | { |
116 | struct scatterlist *sg; | 116 | struct scatterlist *sg; |
117 | int i; | 117 | int i; |
@@ -139,7 +139,7 @@ static void bfin_dma_sync_sg_for_device(struct device *dev, | |||
139 | 139 | ||
140 | static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page, | 140 | static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page, |
141 | unsigned long offset, size_t size, enum dma_data_direction dir, | 141 | unsigned long offset, size_t size, enum dma_data_direction dir, |
142 | struct dma_attrs *attrs) | 142 | unsigned long attrs) |
143 | { | 143 | { |
144 | dma_addr_t handle = (dma_addr_t)(page_address(page) + offset); | 144 | dma_addr_t handle = (dma_addr_t)(page_address(page) + offset); |
145 | 145 | ||
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index 6b5cd7b0cf32..5717b1e52d96 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h | |||
@@ -26,8 +26,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
26 | 26 | ||
27 | extern void coherent_mem_init(u32 start, u32 size); | 27 | extern void coherent_mem_init(u32 start, u32 size); |
28 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 28 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
29 | gfp_t gfp, struct dma_attrs *attrs); | 29 | gfp_t gfp, unsigned long attrs); |
30 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, | 30 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, |
31 | dma_addr_t dma_handle, struct dma_attrs *attrs); | 31 | dma_addr_t dma_handle, unsigned long attrs); |
32 | 32 | ||
33 | #endif /* _ASM_C6X_DMA_MAPPING_H */ | 33 | #endif /* _ASM_C6X_DMA_MAPPING_H */ |
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c index 8a80f3a250c0..db4a6a301f5e 100644 --- a/arch/c6x/kernel/dma.c +++ b/arch/c6x/kernel/dma.c | |||
@@ -38,7 +38,7 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size, | |||
38 | 38 | ||
39 | static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, | 39 | static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, |
40 | unsigned long offset, size_t size, enum dma_data_direction dir, | 40 | unsigned long offset, size_t size, enum dma_data_direction dir, |
41 | struct dma_attrs *attrs) | 41 | unsigned long attrs) |
42 | { | 42 | { |
43 | dma_addr_t handle = virt_to_phys(page_address(page) + offset); | 43 | dma_addr_t handle = virt_to_phys(page_address(page) + offset); |
44 | 44 | ||
@@ -47,13 +47,13 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, | |||
47 | } | 47 | } |
48 | 48 | ||
49 | static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, | 49 | static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, |
50 | size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) | 50 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
51 | { | 51 | { |
52 | c6x_dma_sync(handle, size, dir); | 52 | c6x_dma_sync(handle, size, dir); |
53 | } | 53 | } |
54 | 54 | ||
55 | static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 55 | static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
56 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 56 | int nents, enum dma_data_direction dir, unsigned long attrs) |
57 | { | 57 | { |
58 | struct scatterlist *sg; | 58 | struct scatterlist *sg; |
59 | int i; | 59 | int i; |
@@ -67,8 +67,7 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
67 | } | 67 | } |
68 | 68 | ||
69 | static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 69 | static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
70 | int nents, enum dma_data_direction dir, | 70 | int nents, enum dma_data_direction dir, unsigned long attrs) |
71 | struct dma_attrs *attrs) | ||
72 | { | 71 | { |
73 | struct scatterlist *sg; | 72 | struct scatterlist *sg; |
74 | int i; | 73 | int i; |
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c index f7ee63af2541..95e38ad27c69 100644 --- a/arch/c6x/mm/dma-coherent.c +++ b/arch/c6x/mm/dma-coherent.c | |||
@@ -74,7 +74,7 @@ static void __free_dma_pages(u32 addr, int order) | |||
74 | * virtual and DMA address for that space. | 74 | * virtual and DMA address for that space. |
75 | */ | 75 | */ |
76 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 76 | void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
77 | gfp_t gfp, struct dma_attrs *attrs) | 77 | gfp_t gfp, unsigned long attrs) |
78 | { | 78 | { |
79 | u32 paddr; | 79 | u32 paddr; |
80 | int order; | 80 | int order; |
@@ -99,7 +99,7 @@ void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
99 | * Free DMA coherent memory as defined by the above mapping. | 99 | * Free DMA coherent memory as defined by the above mapping. |
100 | */ | 100 | */ |
101 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, | 101 | void c6x_dma_free(struct device *dev, size_t size, void *vaddr, |
102 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 102 | dma_addr_t dma_handle, unsigned long attrs) |
103 | { | 103 | { |
104 | int order; | 104 | int order; |
105 | 105 | ||
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c index 8d5efa58cce1..1f0636793f0c 100644 --- a/arch/cris/arch-v32/drivers/pci/dma.c +++ b/arch/cris/arch-v32/drivers/pci/dma.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | static void *v32_dma_alloc(struct device *dev, size_t size, | 19 | static void *v32_dma_alloc(struct device *dev, size_t size, |
20 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 20 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
21 | { | 21 | { |
22 | void *ret; | 22 | void *ret; |
23 | 23 | ||
@@ -37,22 +37,21 @@ static void *v32_dma_alloc(struct device *dev, size_t size, | |||
37 | } | 37 | } |
38 | 38 | ||
39 | static void v32_dma_free(struct device *dev, size_t size, void *vaddr, | 39 | static void v32_dma_free(struct device *dev, size_t size, void *vaddr, |
40 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 40 | dma_addr_t dma_handle, unsigned long attrs) |
41 | { | 41 | { |
42 | free_pages((unsigned long)vaddr, get_order(size)); | 42 | free_pages((unsigned long)vaddr, get_order(size)); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline dma_addr_t v32_dma_map_page(struct device *dev, | 45 | static inline dma_addr_t v32_dma_map_page(struct device *dev, |
46 | struct page *page, unsigned long offset, size_t size, | 46 | struct page *page, unsigned long offset, size_t size, |
47 | enum dma_data_direction direction, | 47 | enum dma_data_direction direction, unsigned long attrs) |
48 | struct dma_attrs *attrs) | ||
49 | { | 48 | { |
50 | return page_to_phys(page) + offset; | 49 | return page_to_phys(page) + offset; |
51 | } | 50 | } |
52 | 51 | ||
53 | static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, | 52 | static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, |
54 | int nents, enum dma_data_direction direction, | 53 | int nents, enum dma_data_direction direction, |
55 | struct dma_attrs *attrs) | 54 | unsigned long attrs) |
56 | { | 55 | { |
57 | printk("Map sg\n"); | 56 | printk("Map sg\n"); |
58 | return nents; | 57 | return nents; |
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c index 082be49b5df0..90f2e4cb33d6 100644 --- a/arch/frv/mb93090-mb00/pci-dma-nommu.c +++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c | |||
@@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(dma_alloc_lock); | |||
35 | static LIST_HEAD(dma_alloc_list); | 35 | static LIST_HEAD(dma_alloc_list); |
36 | 36 | ||
37 | static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, | 37 | static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, |
38 | gfp_t gfp, struct dma_attrs *attrs) | 38 | gfp_t gfp, unsigned long attrs) |
39 | { | 39 | { |
40 | struct dma_alloc_record *new; | 40 | struct dma_alloc_record *new; |
41 | struct list_head *this = &dma_alloc_list; | 41 | struct list_head *this = &dma_alloc_list; |
@@ -86,7 +86,7 @@ static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_ha | |||
86 | } | 86 | } |
87 | 87 | ||
88 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, | 88 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, |
89 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 89 | dma_addr_t dma_handle, unsigned long attrs) |
90 | { | 90 | { |
91 | struct dma_alloc_record *rec; | 91 | struct dma_alloc_record *rec; |
92 | unsigned long flags; | 92 | unsigned long flags; |
@@ -107,7 +107,7 @@ static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, | |||
107 | 107 | ||
108 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 108 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
109 | int nents, enum dma_data_direction direction, | 109 | int nents, enum dma_data_direction direction, |
110 | struct dma_attrs *attrs) | 110 | unsigned long attrs) |
111 | { | 111 | { |
112 | int i; | 112 | int i; |
113 | struct scatterlist *sg; | 113 | struct scatterlist *sg; |
@@ -124,7 +124,7 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
124 | 124 | ||
125 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, | 125 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, |
126 | unsigned long offset, size_t size, | 126 | unsigned long offset, size_t size, |
127 | enum dma_data_direction direction, struct dma_attrs *attrs) | 127 | enum dma_data_direction direction, unsigned long attrs) |
128 | { | 128 | { |
129 | BUG_ON(direction == DMA_NONE); | 129 | BUG_ON(direction == DMA_NONE); |
130 | flush_dcache_page(page); | 130 | flush_dcache_page(page); |
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c index 316b7b65348d..f585745b1abc 100644 --- a/arch/frv/mb93090-mb00/pci-dma.c +++ b/arch/frv/mb93090-mb00/pci-dma.c | |||
@@ -19,8 +19,7 @@ | |||
19 | #include <asm/io.h> | 19 | #include <asm/io.h> |
20 | 20 | ||
21 | static void *frv_dma_alloc(struct device *hwdev, size_t size, | 21 | static void *frv_dma_alloc(struct device *hwdev, size_t size, |
22 | dma_addr_t *dma_handle, gfp_t gfp, | 22 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
23 | struct dma_attrs *attrs) | ||
24 | { | 23 | { |
25 | void *ret; | 24 | void *ret; |
26 | 25 | ||
@@ -32,14 +31,14 @@ static void *frv_dma_alloc(struct device *hwdev, size_t size, | |||
32 | } | 31 | } |
33 | 32 | ||
34 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, | 33 | static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, |
35 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 34 | dma_addr_t dma_handle, unsigned long attrs) |
36 | { | 35 | { |
37 | consistent_free(vaddr); | 36 | consistent_free(vaddr); |
38 | } | 37 | } |
39 | 38 | ||
40 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 39 | static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
41 | int nents, enum dma_data_direction direction, | 40 | int nents, enum dma_data_direction direction, |
42 | struct dma_attrs *attrs) | 41 | unsigned long attrs) |
43 | { | 42 | { |
44 | unsigned long dampr2; | 43 | unsigned long dampr2; |
45 | void *vaddr; | 44 | void *vaddr; |
@@ -69,7 +68,7 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
69 | 68 | ||
70 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, | 69 | static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, |
71 | unsigned long offset, size_t size, | 70 | unsigned long offset, size_t size, |
72 | enum dma_data_direction direction, struct dma_attrs *attrs) | 71 | enum dma_data_direction direction, unsigned long attrs) |
73 | { | 72 | { |
74 | flush_dcache_page(page); | 73 | flush_dcache_page(page); |
75 | return (dma_addr_t) page_to_phys(page) + offset; | 74 | return (dma_addr_t) page_to_phys(page) + offset; |
diff --git a/arch/h8300/kernel/dma.c b/arch/h8300/kernel/dma.c index eeb13d3f2424..3651da045806 100644 --- a/arch/h8300/kernel/dma.c +++ b/arch/h8300/kernel/dma.c | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | static void *dma_alloc(struct device *dev, size_t size, | 13 | static void *dma_alloc(struct device *dev, size_t size, |
14 | dma_addr_t *dma_handle, gfp_t gfp, | 14 | dma_addr_t *dma_handle, gfp_t gfp, |
15 | struct dma_attrs *attrs) | 15 | unsigned long attrs) |
16 | { | 16 | { |
17 | void *ret; | 17 | void *ret; |
18 | 18 | ||
@@ -32,7 +32,7 @@ static void *dma_alloc(struct device *dev, size_t size, | |||
32 | 32 | ||
33 | static void dma_free(struct device *dev, size_t size, | 33 | static void dma_free(struct device *dev, size_t size, |
34 | void *vaddr, dma_addr_t dma_handle, | 34 | void *vaddr, dma_addr_t dma_handle, |
35 | struct dma_attrs *attrs) | 35 | unsigned long attrs) |
36 | 36 | ||
37 | { | 37 | { |
38 | free_pages((unsigned long)vaddr, get_order(size)); | 38 | free_pages((unsigned long)vaddr, get_order(size)); |
@@ -41,14 +41,14 @@ static void dma_free(struct device *dev, size_t size, | |||
41 | static dma_addr_t map_page(struct device *dev, struct page *page, | 41 | static dma_addr_t map_page(struct device *dev, struct page *page, |
42 | unsigned long offset, size_t size, | 42 | unsigned long offset, size_t size, |
43 | enum dma_data_direction direction, | 43 | enum dma_data_direction direction, |
44 | struct dma_attrs *attrs) | 44 | unsigned long attrs) |
45 | { | 45 | { |
46 | return page_to_phys(page) + offset; | 46 | return page_to_phys(page) + offset; |
47 | } | 47 | } |
48 | 48 | ||
49 | static int map_sg(struct device *dev, struct scatterlist *sgl, | 49 | static int map_sg(struct device *dev, struct scatterlist *sgl, |
50 | int nents, enum dma_data_direction direction, | 50 | int nents, enum dma_data_direction direction, |
51 | struct dma_attrs *attrs) | 51 | unsigned long attrs) |
52 | { | 52 | { |
53 | struct scatterlist *sg; | 53 | struct scatterlist *sg; |
54 | int i; | 54 | int i; |
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index aa6203464520..7ef58df909fc 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
27 | #include <linux/scatterlist.h> | 27 | #include <linux/scatterlist.h> |
28 | #include <linux/dma-debug.h> | 28 | #include <linux/dma-debug.h> |
29 | #include <linux/dma-attrs.h> | ||
30 | #include <asm/io.h> | 29 | #include <asm/io.h> |
31 | 30 | ||
32 | struct device; | 31 | struct device; |
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index 9e3ddf792bd3..b9017785fb71 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c | |||
@@ -51,7 +51,7 @@ static struct gen_pool *coherent_pool; | |||
51 | 51 | ||
52 | static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, | 52 | static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, |
53 | dma_addr_t *dma_addr, gfp_t flag, | 53 | dma_addr_t *dma_addr, gfp_t flag, |
54 | struct dma_attrs *attrs) | 54 | unsigned long attrs) |
55 | { | 55 | { |
56 | void *ret; | 56 | void *ret; |
57 | 57 | ||
@@ -84,7 +84,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, | |||
84 | } | 84 | } |
85 | 85 | ||
86 | static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr, | 86 | static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr, |
87 | dma_addr_t dma_addr, struct dma_attrs *attrs) | 87 | dma_addr_t dma_addr, unsigned long attrs) |
88 | { | 88 | { |
89 | gen_pool_free(coherent_pool, (unsigned long) vaddr, size); | 89 | gen_pool_free(coherent_pool, (unsigned long) vaddr, size); |
90 | } | 90 | } |
@@ -105,7 +105,7 @@ static int check_addr(const char *name, struct device *hwdev, | |||
105 | 105 | ||
106 | static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg, | 106 | static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg, |
107 | int nents, enum dma_data_direction dir, | 107 | int nents, enum dma_data_direction dir, |
108 | struct dma_attrs *attrs) | 108 | unsigned long attrs) |
109 | { | 109 | { |
110 | struct scatterlist *s; | 110 | struct scatterlist *s; |
111 | int i; | 111 | int i; |
@@ -172,7 +172,7 @@ static inline void dma_sync(void *addr, size_t size, | |||
172 | static dma_addr_t hexagon_map_page(struct device *dev, struct page *page, | 172 | static dma_addr_t hexagon_map_page(struct device *dev, struct page *page, |
173 | unsigned long offset, size_t size, | 173 | unsigned long offset, size_t size, |
174 | enum dma_data_direction dir, | 174 | enum dma_data_direction dir, |
175 | struct dma_attrs *attrs) | 175 | unsigned long attrs) |
176 | { | 176 | { |
177 | dma_addr_t bus = page_to_phys(page) + offset; | 177 | dma_addr_t bus = page_to_phys(page) + offset; |
178 | WARN_ON(size == 0); | 178 | WARN_ON(size == 0); |
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index a6d6190c9d24..630ee8073899 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c | |||
@@ -919,7 +919,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) | |||
919 | static dma_addr_t sba_map_page(struct device *dev, struct page *page, | 919 | static dma_addr_t sba_map_page(struct device *dev, struct page *page, |
920 | unsigned long poff, size_t size, | 920 | unsigned long poff, size_t size, |
921 | enum dma_data_direction dir, | 921 | enum dma_data_direction dir, |
922 | struct dma_attrs *attrs) | 922 | unsigned long attrs) |
923 | { | 923 | { |
924 | struct ioc *ioc; | 924 | struct ioc *ioc; |
925 | void *addr = page_address(page) + poff; | 925 | void *addr = page_address(page) + poff; |
@@ -1005,7 +1005,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, | |||
1005 | 1005 | ||
1006 | static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, | 1006 | static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, |
1007 | size_t size, enum dma_data_direction dir, | 1007 | size_t size, enum dma_data_direction dir, |
1008 | struct dma_attrs *attrs) | 1008 | unsigned long attrs) |
1009 | { | 1009 | { |
1010 | return sba_map_page(dev, virt_to_page(addr), | 1010 | return sba_map_page(dev, virt_to_page(addr), |
1011 | (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); | 1011 | (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); |
@@ -1046,7 +1046,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) | |||
1046 | * See Documentation/DMA-API-HOWTO.txt | 1046 | * See Documentation/DMA-API-HOWTO.txt |
1047 | */ | 1047 | */ |
1048 | static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, | 1048 | static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, |
1049 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1049 | enum dma_data_direction dir, unsigned long attrs) |
1050 | { | 1050 | { |
1051 | struct ioc *ioc; | 1051 | struct ioc *ioc; |
1052 | #if DELAYED_RESOURCE_CNT > 0 | 1052 | #if DELAYED_RESOURCE_CNT > 0 |
@@ -1115,7 +1115,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, | |||
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | 1117 | void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, |
1118 | enum dma_data_direction dir, struct dma_attrs *attrs) | 1118 | enum dma_data_direction dir, unsigned long attrs) |
1119 | { | 1119 | { |
1120 | sba_unmap_page(dev, iova, size, dir, attrs); | 1120 | sba_unmap_page(dev, iova, size, dir, attrs); |
1121 | } | 1121 | } |
@@ -1130,7 +1130,7 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | |||
1130 | */ | 1130 | */ |
1131 | static void * | 1131 | static void * |
1132 | sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | 1132 | sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, |
1133 | gfp_t flags, struct dma_attrs *attrs) | 1133 | gfp_t flags, unsigned long attrs) |
1134 | { | 1134 | { |
1135 | struct ioc *ioc; | 1135 | struct ioc *ioc; |
1136 | void *addr; | 1136 | void *addr; |
@@ -1175,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
1175 | * device to map single to get an iova mapping. | 1175 | * device to map single to get an iova mapping. |
1176 | */ | 1176 | */ |
1177 | *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, | 1177 | *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, |
1178 | size, 0, NULL); | 1178 | size, 0, 0); |
1179 | 1179 | ||
1180 | return addr; | 1180 | return addr; |
1181 | } | 1181 | } |
@@ -1191,9 +1191,9 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
1191 | * See Documentation/DMA-API-HOWTO.txt | 1191 | * See Documentation/DMA-API-HOWTO.txt |
1192 | */ | 1192 | */ |
1193 | static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, | 1193 | static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, |
1194 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 1194 | dma_addr_t dma_handle, unsigned long attrs) |
1195 | { | 1195 | { |
1196 | sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); | 1196 | sba_unmap_single_attrs(dev, dma_handle, size, 0, 0); |
1197 | free_pages((unsigned long) vaddr, get_order(size)); | 1197 | free_pages((unsigned long) vaddr, get_order(size)); |
1198 | } | 1198 | } |
1199 | 1199 | ||
@@ -1442,7 +1442,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, | |||
1442 | 1442 | ||
1443 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | 1443 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, |
1444 | int nents, enum dma_data_direction dir, | 1444 | int nents, enum dma_data_direction dir, |
1445 | struct dma_attrs *attrs); | 1445 | unsigned long attrs); |
1446 | /** | 1446 | /** |
1447 | * sba_map_sg - map Scatter/Gather list | 1447 | * sba_map_sg - map Scatter/Gather list |
1448 | * @dev: instance of PCI owned by the driver that's asking. | 1448 | * @dev: instance of PCI owned by the driver that's asking. |
@@ -1455,7 +1455,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | |||
1455 | */ | 1455 | */ |
1456 | static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, | 1456 | static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, |
1457 | int nents, enum dma_data_direction dir, | 1457 | int nents, enum dma_data_direction dir, |
1458 | struct dma_attrs *attrs) | 1458 | unsigned long attrs) |
1459 | { | 1459 | { |
1460 | struct ioc *ioc; | 1460 | struct ioc *ioc; |
1461 | int coalesced, filled = 0; | 1461 | int coalesced, filled = 0; |
@@ -1551,7 +1551,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, | |||
1551 | */ | 1551 | */ |
1552 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | 1552 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, |
1553 | int nents, enum dma_data_direction dir, | 1553 | int nents, enum dma_data_direction dir, |
1554 | struct dma_attrs *attrs) | 1554 | unsigned long attrs) |
1555 | { | 1555 | { |
1556 | #ifdef ASSERT_PDIR_SANITY | 1556 | #ifdef ASSERT_PDIR_SANITY |
1557 | struct ioc *ioc; | 1557 | struct ioc *ioc; |
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 9c39bdfc2da8..ed7f09089f12 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h | |||
@@ -22,7 +22,6 @@ struct pci_bus; | |||
22 | struct task_struct; | 22 | struct task_struct; |
23 | struct pci_dev; | 23 | struct pci_dev; |
24 | struct msi_desc; | 24 | struct msi_desc; |
25 | struct dma_attrs; | ||
26 | 25 | ||
27 | typedef void ia64_mv_setup_t (char **); | 26 | typedef void ia64_mv_setup_t (char **); |
28 | typedef void ia64_mv_cpu_init_t (void); | 27 | typedef void ia64_mv_cpu_init_t (void); |
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 939260aeac98..2933208c0285 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c | |||
@@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb); | |||
16 | 16 | ||
17 | static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, | 17 | static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, |
18 | dma_addr_t *dma_handle, gfp_t gfp, | 18 | dma_addr_t *dma_handle, gfp_t gfp, |
19 | struct dma_attrs *attrs) | 19 | unsigned long attrs) |
20 | { | 20 | { |
21 | if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) | 21 | if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) |
22 | gfp |= GFP_DMA; | 22 | gfp |= GFP_DMA; |
@@ -25,7 +25,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, | |||
25 | 25 | ||
26 | static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, | 26 | static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, |
27 | void *vaddr, dma_addr_t dma_addr, | 27 | void *vaddr, dma_addr_t dma_addr, |
28 | struct dma_attrs *attrs) | 28 | unsigned long attrs) |
29 | { | 29 | { |
30 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | 30 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); |
31 | } | 31 | } |
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 8f59907007cb..74c934a997bb 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(sn_dma_set_mask); | |||
77 | */ | 77 | */ |
78 | static void *sn_dma_alloc_coherent(struct device *dev, size_t size, | 78 | static void *sn_dma_alloc_coherent(struct device *dev, size_t size, |
79 | dma_addr_t * dma_handle, gfp_t flags, | 79 | dma_addr_t * dma_handle, gfp_t flags, |
80 | struct dma_attrs *attrs) | 80 | unsigned long attrs) |
81 | { | 81 | { |
82 | void *cpuaddr; | 82 | void *cpuaddr; |
83 | unsigned long phys_addr; | 83 | unsigned long phys_addr; |
@@ -138,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, | |||
138 | * any associated IOMMU mappings. | 138 | * any associated IOMMU mappings. |
139 | */ | 139 | */ |
140 | static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | 140 | static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
141 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 141 | dma_addr_t dma_handle, unsigned long attrs) |
142 | { | 142 | { |
143 | struct pci_dev *pdev = to_pci_dev(dev); | 143 | struct pci_dev *pdev = to_pci_dev(dev); |
144 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 144 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
@@ -176,21 +176,18 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr | |||
176 | static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, | 176 | static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, |
177 | unsigned long offset, size_t size, | 177 | unsigned long offset, size_t size, |
178 | enum dma_data_direction dir, | 178 | enum dma_data_direction dir, |
179 | struct dma_attrs *attrs) | 179 | unsigned long attrs) |
180 | { | 180 | { |
181 | void *cpu_addr = page_address(page) + offset; | 181 | void *cpu_addr = page_address(page) + offset; |
182 | dma_addr_t dma_addr; | 182 | dma_addr_t dma_addr; |
183 | unsigned long phys_addr; | 183 | unsigned long phys_addr; |
184 | struct pci_dev *pdev = to_pci_dev(dev); | 184 | struct pci_dev *pdev = to_pci_dev(dev); |
185 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 185 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
186 | int dmabarr; | ||
187 | |||
188 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); | ||
189 | 186 | ||
190 | BUG_ON(!dev_is_pci(dev)); | 187 | BUG_ON(!dev_is_pci(dev)); |
191 | 188 | ||
192 | phys_addr = __pa(cpu_addr); | 189 | phys_addr = __pa(cpu_addr); |
193 | if (dmabarr) | 190 | if (attrs & DMA_ATTR_WRITE_BARRIER) |
194 | dma_addr = provider->dma_map_consistent(pdev, phys_addr, | 191 | dma_addr = provider->dma_map_consistent(pdev, phys_addr, |
195 | size, SN_DMA_ADDR_PHYS); | 192 | size, SN_DMA_ADDR_PHYS); |
196 | else | 193 | else |
@@ -218,7 +215,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, | |||
218 | */ | 215 | */ |
219 | static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | 216 | static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
220 | size_t size, enum dma_data_direction dir, | 217 | size_t size, enum dma_data_direction dir, |
221 | struct dma_attrs *attrs) | 218 | unsigned long attrs) |
222 | { | 219 | { |
223 | struct pci_dev *pdev = to_pci_dev(dev); | 220 | struct pci_dev *pdev = to_pci_dev(dev); |
224 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 221 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
@@ -240,7 +237,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
240 | */ | 237 | */ |
241 | static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | 238 | static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, |
242 | int nhwentries, enum dma_data_direction dir, | 239 | int nhwentries, enum dma_data_direction dir, |
243 | struct dma_attrs *attrs) | 240 | unsigned long attrs) |
244 | { | 241 | { |
245 | int i; | 242 | int i; |
246 | struct pci_dev *pdev = to_pci_dev(dev); | 243 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -273,16 +270,13 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | |||
273 | */ | 270 | */ |
274 | static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, | 271 | static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, |
275 | int nhwentries, enum dma_data_direction dir, | 272 | int nhwentries, enum dma_data_direction dir, |
276 | struct dma_attrs *attrs) | 273 | unsigned long attrs) |
277 | { | 274 | { |
278 | unsigned long phys_addr; | 275 | unsigned long phys_addr; |
279 | struct scatterlist *saved_sg = sgl, *sg; | 276 | struct scatterlist *saved_sg = sgl, *sg; |
280 | struct pci_dev *pdev = to_pci_dev(dev); | 277 | struct pci_dev *pdev = to_pci_dev(dev); |
281 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 278 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
282 | int i; | 279 | int i; |
283 | int dmabarr; | ||
284 | |||
285 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); | ||
286 | 280 | ||
287 | BUG_ON(!dev_is_pci(dev)); | 281 | BUG_ON(!dev_is_pci(dev)); |
288 | 282 | ||
@@ -292,7 +286,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, | |||
292 | for_each_sg(sgl, sg, nhwentries, i) { | 286 | for_each_sg(sgl, sg, nhwentries, i) { |
293 | dma_addr_t dma_addr; | 287 | dma_addr_t dma_addr; |
294 | phys_addr = SG_ENT_PHYS_ADDRESS(sg); | 288 | phys_addr = SG_ENT_PHYS_ADDRESS(sg); |
295 | if (dmabarr) | 289 | if (attrs & DMA_ATTR_WRITE_BARRIER) |
296 | dma_addr = provider->dma_map_consistent(pdev, | 290 | dma_addr = provider->dma_map_consistent(pdev, |
297 | phys_addr, | 291 | phys_addr, |
298 | sg->length, | 292 | sg->length, |
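Besides the prototype change, the sn2 hunks above show the other mechanical conversion in this series: dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs) collapses to a plain bitwise test, which is why the intermediate dmabarr locals disappear. The equivalent test, written as a standalone helper purely for illustration:

#include <linux/dma-mapping.h>

/* Sketch: attribute queries are now ordinary bit operations on attrs. */
static bool wants_write_barrier(unsigned long attrs)
{
        return attrs & DMA_ATTR_WRITE_BARRIER;
}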
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index cbc78b4117b5..8cf97cbadc91 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) | 19 | #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) |
20 | 20 | ||
21 | static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 21 | static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
22 | gfp_t flag, struct dma_attrs *attrs) | 22 | gfp_t flag, unsigned long attrs) |
23 | { | 23 | { |
24 | struct page *page, **map; | 24 | struct page *page, **map; |
25 | pgprot_t pgprot; | 25 | pgprot_t pgprot; |
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
62 | } | 62 | } |
63 | 63 | ||
64 | static void m68k_dma_free(struct device *dev, size_t size, void *addr, | 64 | static void m68k_dma_free(struct device *dev, size_t size, void *addr, |
65 | dma_addr_t handle, struct dma_attrs *attrs) | 65 | dma_addr_t handle, unsigned long attrs) |
66 | { | 66 | { |
67 | pr_debug("dma_free_coherent: %p, %x\n", addr, handle); | 67 | pr_debug("dma_free_coherent: %p, %x\n", addr, handle); |
68 | vfree(addr); | 68 | vfree(addr); |
@@ -73,7 +73,7 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr, | |||
73 | #include <asm/cacheflush.h> | 73 | #include <asm/cacheflush.h> |
74 | 74 | ||
75 | static void *m68k_dma_alloc(struct device *dev, size_t size, | 75 | static void *m68k_dma_alloc(struct device *dev, size_t size, |
76 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 76 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
77 | { | 77 | { |
78 | void *ret; | 78 | void *ret; |
79 | /* ignore region specifiers */ | 79 | /* ignore region specifiers */ |
@@ -91,7 +91,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, | |||
91 | } | 91 | } |
92 | 92 | ||
93 | static void m68k_dma_free(struct device *dev, size_t size, void *vaddr, | 93 | static void m68k_dma_free(struct device *dev, size_t size, void *vaddr, |
94 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 94 | dma_addr_t dma_handle, unsigned long attrs) |
95 | { | 95 | { |
96 | free_pages((unsigned long)vaddr, get_order(size)); | 96 | free_pages((unsigned long)vaddr, get_order(size)); |
97 | } | 97 | } |
@@ -130,7 +130,7 @@ static void m68k_dma_sync_sg_for_device(struct device *dev, | |||
130 | 130 | ||
131 | static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, | 131 | static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, |
132 | unsigned long offset, size_t size, enum dma_data_direction dir, | 132 | unsigned long offset, size_t size, enum dma_data_direction dir, |
133 | struct dma_attrs *attrs) | 133 | unsigned long attrs) |
134 | { | 134 | { |
135 | dma_addr_t handle = page_to_phys(page) + offset; | 135 | dma_addr_t handle = page_to_phys(page) + offset; |
136 | 136 | ||
@@ -139,7 +139,7 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, | |||
139 | } | 139 | } |
140 | 140 | ||
141 | static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 141 | static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
142 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 142 | int nents, enum dma_data_direction dir, unsigned long attrs) |
143 | { | 143 | { |
144 | int i; | 144 | int i; |
145 | struct scatterlist *sg; | 145 | struct scatterlist *sg; |
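None of these per-arch functions is called directly; each is wired into a struct dma_map_ops whose member function-pointer types were updated in the same series, which is why every implementation has to switch signatures at once. A reduced, hypothetical backend showing the new callback shapes (only two members filled in; the allocation strategy is a placeholder, not the m68k one):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <asm/io.h>

/* Sketch: fictitious backend with new-style alloc/free callbacks. */
static void *example_dma_alloc(struct device *dev, size_t size,
                               dma_addr_t *handle, gfp_t gfp,
                               unsigned long attrs)
{
        void *cpu_addr = (void *)__get_free_pages(gfp, get_order(size));

        if (cpu_addr)
                *handle = virt_to_phys(cpu_addr);
        return cpu_addr;
}

static void example_dma_free(struct device *dev, size_t size, void *cpu_addr,
                             dma_addr_t handle, unsigned long attrs)
{
        free_pages((unsigned long)cpu_addr, get_order(size));
}

static struct dma_map_ops example_dma_ops = {
        .alloc = example_dma_alloc,
        .free  = example_dma_free,
};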
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index e12368d02155..0db31e24c541 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c | |||
@@ -172,7 +172,7 @@ out: | |||
172 | * virtual and bus address for that space. | 172 | * virtual and bus address for that space. |
173 | */ | 173 | */ |
174 | static void *metag_dma_alloc(struct device *dev, size_t size, | 174 | static void *metag_dma_alloc(struct device *dev, size_t size, |
175 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 175 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
176 | { | 176 | { |
177 | struct page *page; | 177 | struct page *page; |
178 | struct metag_vm_region *c; | 178 | struct metag_vm_region *c; |
@@ -268,7 +268,7 @@ no_page: | |||
268 | * free a page as defined by the above mapping. | 268 | * free a page as defined by the above mapping. |
269 | */ | 269 | */ |
270 | static void metag_dma_free(struct device *dev, size_t size, void *vaddr, | 270 | static void metag_dma_free(struct device *dev, size_t size, void *vaddr, |
271 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 271 | dma_addr_t dma_handle, unsigned long attrs) |
272 | { | 272 | { |
273 | struct metag_vm_region *c; | 273 | struct metag_vm_region *c; |
274 | unsigned long flags, addr; | 274 | unsigned long flags, addr; |
@@ -331,13 +331,13 @@ no_area: | |||
331 | 331 | ||
332 | static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 332 | static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
333 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 333 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
334 | struct dma_attrs *attrs) | 334 | unsigned long attrs) |
335 | { | 335 | { |
336 | unsigned long flags, user_size, kern_size; | 336 | unsigned long flags, user_size, kern_size; |
337 | struct metag_vm_region *c; | 337 | struct metag_vm_region *c; |
338 | int ret = -ENXIO; | 338 | int ret = -ENXIO; |
339 | 339 | ||
340 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) | 340 | if (attrs & DMA_ATTR_WRITE_COMBINE) |
341 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | 341 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
342 | else | 342 | else |
343 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 343 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
@@ -482,7 +482,7 @@ static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction) | |||
482 | 482 | ||
483 | static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, | 483 | static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, |
484 | unsigned long offset, size_t size, | 484 | unsigned long offset, size_t size, |
485 | enum dma_data_direction direction, struct dma_attrs *attrs) | 485 | enum dma_data_direction direction, unsigned long attrs) |
486 | { | 486 | { |
487 | dma_sync_for_device((void *)(page_to_phys(page) + offset), size, | 487 | dma_sync_for_device((void *)(page_to_phys(page) + offset), size, |
488 | direction); | 488 | direction); |
@@ -491,14 +491,14 @@ static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, | |||
491 | 491 | ||
492 | static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 492 | static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
493 | size_t size, enum dma_data_direction direction, | 493 | size_t size, enum dma_data_direction direction, |
494 | struct dma_attrs *attrs) | 494 | unsigned long attrs) |
495 | { | 495 | { |
496 | dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); | 496 | dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); |
497 | } | 497 | } |
498 | 498 | ||
499 | static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 499 | static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
500 | int nents, enum dma_data_direction direction, | 500 | int nents, enum dma_data_direction direction, |
501 | struct dma_attrs *attrs) | 501 | unsigned long attrs) |
502 | { | 502 | { |
503 | struct scatterlist *sg; | 503 | struct scatterlist *sg; |
504 | int i; | 504 | int i; |
@@ -516,7 +516,7 @@ static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
516 | 516 | ||
517 | static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 517 | static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
518 | int nhwentries, enum dma_data_direction direction, | 518 | int nhwentries, enum dma_data_direction direction, |
519 | struct dma_attrs *attrs) | 519 | unsigned long attrs) |
520 | { | 520 | { |
521 | struct scatterlist *sg; | 521 | struct scatterlist *sg; |
522 | int i; | 522 | int i; |
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 1884783d15c0..1768d4bdc8d3 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
27 | #include <linux/dma-debug.h> | 27 | #include <linux/dma-debug.h> |
28 | #include <linux/dma-attrs.h> | ||
29 | #include <asm/io.h> | 28 | #include <asm/io.h> |
30 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
31 | 30 | ||
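With struct dma_attrs gone, the <linux/dma-attrs.h> include here and the struct dma_attrs; forward declarations (see the ia64 machvec.h hunk earlier) can simply be dropped; users get everything from <linux/dma-mapping.h>, where the attributes are now defined as individual bits along roughly these lines (illustrative values, not copied from the header):

/* Illustrative only -- the authoritative values live in <linux/dma-mapping.h>. */
#define DMA_ATTR_WRITE_BARRIER   (1UL << 0)
#define DMA_ATTR_WEAK_ORDERING   (1UL << 1)
#define DMA_ATTR_WRITE_COMBINE   (1UL << 2)
#define DMA_ATTR_NON_CONSISTENT  (1UL << 3)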
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index bf4dec229437..ec04dc1e2527 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | static void *dma_direct_alloc_coherent(struct device *dev, size_t size, | 18 | static void *dma_direct_alloc_coherent(struct device *dev, size_t size, |
19 | dma_addr_t *dma_handle, gfp_t flag, | 19 | dma_addr_t *dma_handle, gfp_t flag, |
20 | struct dma_attrs *attrs) | 20 | unsigned long attrs) |
21 | { | 21 | { |
22 | #ifdef NOT_COHERENT_CACHE | 22 | #ifdef NOT_COHERENT_CACHE |
23 | return consistent_alloc(flag, size, dma_handle); | 23 | return consistent_alloc(flag, size, dma_handle); |
@@ -42,7 +42,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size, | |||
42 | 42 | ||
43 | static void dma_direct_free_coherent(struct device *dev, size_t size, | 43 | static void dma_direct_free_coherent(struct device *dev, size_t size, |
44 | void *vaddr, dma_addr_t dma_handle, | 44 | void *vaddr, dma_addr_t dma_handle, |
45 | struct dma_attrs *attrs) | 45 | unsigned long attrs) |
46 | { | 46 | { |
47 | #ifdef NOT_COHERENT_CACHE | 47 | #ifdef NOT_COHERENT_CACHE |
48 | consistent_free(size, vaddr); | 48 | consistent_free(size, vaddr); |
@@ -53,7 +53,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size, | |||
53 | 53 | ||
54 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | 54 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, |
55 | int nents, enum dma_data_direction direction, | 55 | int nents, enum dma_data_direction direction, |
56 | struct dma_attrs *attrs) | 56 | unsigned long attrs) |
57 | { | 57 | { |
58 | struct scatterlist *sg; | 58 | struct scatterlist *sg; |
59 | int i; | 59 | int i; |
@@ -78,7 +78,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, | |||
78 | unsigned long offset, | 78 | unsigned long offset, |
79 | size_t size, | 79 | size_t size, |
80 | enum dma_data_direction direction, | 80 | enum dma_data_direction direction, |
81 | struct dma_attrs *attrs) | 81 | unsigned long attrs) |
82 | { | 82 | { |
83 | __dma_sync(page_to_phys(page) + offset, size, direction); | 83 | __dma_sync(page_to_phys(page) + offset, size, direction); |
84 | return page_to_phys(page) + offset; | 84 | return page_to_phys(page) + offset; |
@@ -88,7 +88,7 @@ static inline void dma_direct_unmap_page(struct device *dev, | |||
88 | dma_addr_t dma_address, | 88 | dma_addr_t dma_address, |
89 | size_t size, | 89 | size_t size, |
90 | enum dma_data_direction direction, | 90 | enum dma_data_direction direction, |
91 | struct dma_attrs *attrs) | 91 | unsigned long attrs) |
92 | { | 92 | { |
93 | /* There is not necessary to do cache cleanup | 93 | /* There is not necessary to do cache cleanup |
94 | * | 94 | * |
@@ -157,7 +157,7 @@ dma_direct_sync_sg_for_device(struct device *dev, | |||
157 | static | 157 | static |
158 | int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | 158 | int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, |
159 | void *cpu_addr, dma_addr_t handle, size_t size, | 159 | void *cpu_addr, dma_addr_t handle, size_t size, |
160 | struct dma_attrs *attrs) | 160 | unsigned long attrs) |
161 | { | 161 | { |
162 | #ifdef CONFIG_MMU | 162 | #ifdef CONFIG_MMU |
163 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 163 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 2cd45f5f9481..fd69528b24fb 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c | |||
@@ -125,7 +125,7 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev, | |||
125 | 125 | ||
126 | static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page, | 126 | static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page, |
127 | unsigned long offset, size_t size, enum dma_data_direction direction, | 127 | unsigned long offset, size_t size, enum dma_data_direction direction, |
128 | struct dma_attrs *attrs) | 128 | unsigned long attrs) |
129 | { | 129 | { |
130 | dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, | 130 | dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, |
131 | direction, attrs); | 131 | direction, attrs); |
@@ -135,7 +135,7 @@ static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page, | |||
135 | } | 135 | } |
136 | 136 | ||
137 | static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg, | 137 | static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg, |
138 | int nents, enum dma_data_direction direction, struct dma_attrs *attrs) | 138 | int nents, enum dma_data_direction direction, unsigned long attrs) |
139 | { | 139 | { |
140 | int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs); | 140 | int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs); |
141 | mb(); | 141 | mb(); |
@@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev, | |||
157 | } | 157 | } |
158 | 158 | ||
159 | static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, | 159 | static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, |
160 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 160 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
161 | { | 161 | { |
162 | void *ret; | 162 | void *ret; |
163 | 163 | ||
@@ -189,7 +189,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, | |||
189 | } | 189 | } |
190 | 190 | ||
191 | static void octeon_dma_free_coherent(struct device *dev, size_t size, | 191 | static void octeon_dma_free_coherent(struct device *dev, size_t size, |
192 | void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) | 192 | void *vaddr, dma_addr_t dma_handle, unsigned long attrs) |
193 | { | 193 | { |
194 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | 194 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); |
195 | } | 195 | } |
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index 9411a4c0bdad..58e7874e9347 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h | |||
@@ -462,7 +462,7 @@ static inline unsigned int mips_cm_max_vp_width(void) | |||
462 | if (mips_cm_revision() >= CM_REV_CM3) | 462 | if (mips_cm_revision() >= CM_REV_CM3) |
463 | return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; | 463 | return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; |
464 | 464 | ||
465 | if (config_enabled(CONFIG_SMP)) | 465 | if (IS_ENABLED(CONFIG_SMP)) |
466 | return smp_num_siblings; | 466 | return smp_num_siblings; |
467 | 467 | ||
468 | return 1; | 468 | return 1; |
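This hunk begins the other theme of the merge, the tree-wide swap of config_enabled() for IS_ENABLED(). Both evaluate to a compile-time constant 0 or 1, so dead branches are still eliminated; IS_ENABLED() is simply the documented kconfig.h interface, and it also counts =m options as enabled, which makes no difference for the boolean options touched here. A minimal sketch of the idiom (the return values are placeholders):

#include <linux/kconfig.h>

/* Sketch: IS_ENABLED() folds to 0 or 1 at compile time, usable in plain C. */
static int example_width(void)
{
        if (IS_ENABLED(CONFIG_SMP))
                return 2;       /* placeholder for the real SMP-derived value */

        return 1;
}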
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 7d44e888134f..70128d3f770a 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h | |||
@@ -159,7 +159,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) | |||
159 | * it better already be global) | 159 | * it better already be global) |
160 | */ | 160 | */ |
161 | if (pte_none(*buddy)) { | 161 | if (pte_none(*buddy)) { |
162 | if (!config_enabled(CONFIG_XPA)) | 162 | if (!IS_ENABLED(CONFIG_XPA)) |
163 | buddy->pte_low |= _PAGE_GLOBAL; | 163 | buddy->pte_low |= _PAGE_GLOBAL; |
164 | buddy->pte_high |= _PAGE_GLOBAL; | 164 | buddy->pte_high |= _PAGE_GLOBAL; |
165 | } | 165 | } |
@@ -172,7 +172,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt | |||
172 | 172 | ||
173 | htw_stop(); | 173 | htw_stop(); |
174 | /* Preserve global status for the pair */ | 174 | /* Preserve global status for the pair */ |
175 | if (config_enabled(CONFIG_XPA)) { | 175 | if (IS_ENABLED(CONFIG_XPA)) { |
176 | if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL) | 176 | if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL) |
177 | null.pte_high = _PAGE_GLOBAL; | 177 | null.pte_high = _PAGE_GLOBAL; |
178 | } else { | 178 | } else { |
@@ -319,7 +319,7 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; } | |||
319 | static inline pte_t pte_wrprotect(pte_t pte) | 319 | static inline pte_t pte_wrprotect(pte_t pte) |
320 | { | 320 | { |
321 | pte.pte_low &= ~_PAGE_WRITE; | 321 | pte.pte_low &= ~_PAGE_WRITE; |
322 | if (!config_enabled(CONFIG_XPA)) | 322 | if (!IS_ENABLED(CONFIG_XPA)) |
323 | pte.pte_low &= ~_PAGE_SILENT_WRITE; | 323 | pte.pte_low &= ~_PAGE_SILENT_WRITE; |
324 | pte.pte_high &= ~_PAGE_SILENT_WRITE; | 324 | pte.pte_high &= ~_PAGE_SILENT_WRITE; |
325 | return pte; | 325 | return pte; |
@@ -328,7 +328,7 @@ static inline pte_t pte_wrprotect(pte_t pte) | |||
328 | static inline pte_t pte_mkclean(pte_t pte) | 328 | static inline pte_t pte_mkclean(pte_t pte) |
329 | { | 329 | { |
330 | pte.pte_low &= ~_PAGE_MODIFIED; | 330 | pte.pte_low &= ~_PAGE_MODIFIED; |
331 | if (!config_enabled(CONFIG_XPA)) | 331 | if (!IS_ENABLED(CONFIG_XPA)) |
332 | pte.pte_low &= ~_PAGE_SILENT_WRITE; | 332 | pte.pte_low &= ~_PAGE_SILENT_WRITE; |
333 | pte.pte_high &= ~_PAGE_SILENT_WRITE; | 333 | pte.pte_high &= ~_PAGE_SILENT_WRITE; |
334 | return pte; | 334 | return pte; |
@@ -337,7 +337,7 @@ static inline pte_t pte_mkclean(pte_t pte) | |||
337 | static inline pte_t pte_mkold(pte_t pte) | 337 | static inline pte_t pte_mkold(pte_t pte) |
338 | { | 338 | { |
339 | pte.pte_low &= ~_PAGE_ACCESSED; | 339 | pte.pte_low &= ~_PAGE_ACCESSED; |
340 | if (!config_enabled(CONFIG_XPA)) | 340 | if (!IS_ENABLED(CONFIG_XPA)) |
341 | pte.pte_low &= ~_PAGE_SILENT_READ; | 341 | pte.pte_low &= ~_PAGE_SILENT_READ; |
342 | pte.pte_high &= ~_PAGE_SILENT_READ; | 342 | pte.pte_high &= ~_PAGE_SILENT_READ; |
343 | return pte; | 343 | return pte; |
@@ -347,7 +347,7 @@ static inline pte_t pte_mkwrite(pte_t pte) | |||
347 | { | 347 | { |
348 | pte.pte_low |= _PAGE_WRITE; | 348 | pte.pte_low |= _PAGE_WRITE; |
349 | if (pte.pte_low & _PAGE_MODIFIED) { | 349 | if (pte.pte_low & _PAGE_MODIFIED) { |
350 | if (!config_enabled(CONFIG_XPA)) | 350 | if (!IS_ENABLED(CONFIG_XPA)) |
351 | pte.pte_low |= _PAGE_SILENT_WRITE; | 351 | pte.pte_low |= _PAGE_SILENT_WRITE; |
352 | pte.pte_high |= _PAGE_SILENT_WRITE; | 352 | pte.pte_high |= _PAGE_SILENT_WRITE; |
353 | } | 353 | } |
@@ -358,7 +358,7 @@ static inline pte_t pte_mkdirty(pte_t pte) | |||
358 | { | 358 | { |
359 | pte.pte_low |= _PAGE_MODIFIED; | 359 | pte.pte_low |= _PAGE_MODIFIED; |
360 | if (pte.pte_low & _PAGE_WRITE) { | 360 | if (pte.pte_low & _PAGE_WRITE) { |
361 | if (!config_enabled(CONFIG_XPA)) | 361 | if (!IS_ENABLED(CONFIG_XPA)) |
362 | pte.pte_low |= _PAGE_SILENT_WRITE; | 362 | pte.pte_low |= _PAGE_SILENT_WRITE; |
363 | pte.pte_high |= _PAGE_SILENT_WRITE; | 363 | pte.pte_high |= _PAGE_SILENT_WRITE; |
364 | } | 364 | } |
@@ -369,7 +369,7 @@ static inline pte_t pte_mkyoung(pte_t pte) | |||
369 | { | 369 | { |
370 | pte.pte_low |= _PAGE_ACCESSED; | 370 | pte.pte_low |= _PAGE_ACCESSED; |
371 | if (!(pte.pte_low & _PAGE_NO_READ)) { | 371 | if (!(pte.pte_low & _PAGE_NO_READ)) { |
372 | if (!config_enabled(CONFIG_XPA)) | 372 | if (!IS_ENABLED(CONFIG_XPA)) |
373 | pte.pte_low |= _PAGE_SILENT_READ; | 373 | pte.pte_low |= _PAGE_SILENT_READ; |
374 | pte.pte_high |= _PAGE_SILENT_READ; | 374 | pte.pte_high |= _PAGE_SILENT_READ; |
375 | } | 375 | } |
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h index 684fb3a12ed3..d886d6f7687a 100644 --- a/arch/mips/include/asm/seccomp.h +++ b/arch/mips/include/asm/seccomp.h | |||
@@ -16,10 +16,10 @@ static inline const int *get_compat_mode1_syscalls(void) | |||
16 | 0, /* null terminated */ | 16 | 0, /* null terminated */ |
17 | }; | 17 | }; |
18 | 18 | ||
19 | if (config_enabled(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS)) | 19 | if (IS_ENABLED(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS)) |
20 | return syscalls_O32; | 20 | return syscalls_O32; |
21 | 21 | ||
22 | if (config_enabled(CONFIG_MIPS32_N32)) | 22 | if (IS_ENABLED(CONFIG_MIPS32_N32)) |
23 | return syscalls_N32; | 23 | return syscalls_N32; |
24 | 24 | ||
25 | BUG(); | 25 | BUG(); |
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 2292373ff11a..82eae1583bcf 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h | |||
@@ -19,8 +19,8 @@ extern struct mips_abi mips_abi_32; | |||
19 | ((ka)->sa.sa_flags & SA_SIGINFO)) | 19 | ((ka)->sa.sa_flags & SA_SIGINFO)) |
20 | #else | 20 | #else |
21 | #define sig_uses_siginfo(ka, abi) \ | 21 | #define sig_uses_siginfo(ka, abi) \ |
22 | (config_enabled(CONFIG_64BIT) ? 1 : \ | 22 | (IS_ENABLED(CONFIG_64BIT) ? 1 : \ |
23 | (config_enabled(CONFIG_TRAD_SIGNALS) ? \ | 23 | (IS_ENABLED(CONFIG_TRAD_SIGNALS) ? \ |
24 | ((ka)->sa.sa_flags & SA_SIGINFO) : 1) ) | 24 | ((ka)->sa.sa_flags & SA_SIGINFO) : 1) ) |
25 | #endif | 25 | #endif |
26 | 26 | ||
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 47bc45a67e9b..d87882513ee3 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h | |||
@@ -99,7 +99,7 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
99 | { | 99 | { |
100 | int ret; | 100 | int ret; |
101 | /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ | 101 | /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ |
102 | if ((config_enabled(CONFIG_32BIT) || | 102 | if ((IS_ENABLED(CONFIG_32BIT) || |
103 | test_tsk_thread_flag(task, TIF_32BIT_REGS)) && | 103 | test_tsk_thread_flag(task, TIF_32BIT_REGS)) && |
104 | (regs->regs[2] == __NR_syscall)) | 104 | (regs->regs[2] == __NR_syscall)) |
105 | i++; | 105 | i++; |
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 7f109d4f64a4..11b965f98d95 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h | |||
@@ -88,7 +88,7 @@ extern u64 __ua_limit; | |||
88 | */ | 88 | */ |
89 | static inline bool eva_kernel_access(void) | 89 | static inline bool eva_kernel_access(void) |
90 | { | 90 | { |
91 | if (!config_enabled(CONFIG_EVA)) | 91 | if (!IS_ENABLED(CONFIG_EVA)) |
92 | return false; | 92 | return false; |
93 | 93 | ||
94 | return segment_eq(get_fs(), get_ds()); | 94 | return segment_eq(get_fs(), get_ds()); |
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c index 0914ef775b5f..6d0152321819 100644 --- a/arch/mips/jz4740/setup.c +++ b/arch/mips/jz4740/setup.c | |||
@@ -75,7 +75,7 @@ void __init device_tree_init(void) | |||
75 | 75 | ||
76 | const char *get_system_type(void) | 76 | const char *get_system_type(void) |
77 | { | 77 | { |
78 | if (config_enabled(CONFIG_MACH_JZ4780)) | 78 | if (IS_ENABLED(CONFIG_MACH_JZ4780)) |
79 | return "JZ4780"; | 79 | return "JZ4780"; |
80 | 80 | ||
81 | return "JZ4740"; | 81 | return "JZ4740"; |
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index 6392dbe504fb..a378e44688f5 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c | |||
@@ -244,7 +244,7 @@ static inline void check_daddi(void) | |||
244 | panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); | 244 | panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); |
245 | } | 245 | } |
246 | 246 | ||
247 | int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1; | 247 | int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1; |
248 | 248 | ||
249 | static inline void check_daddiu(void) | 249 | static inline void check_daddiu(void) |
250 | { | 250 | { |
@@ -314,7 +314,7 @@ static inline void check_daddiu(void) | |||
314 | 314 | ||
315 | void __init check_bugs64_early(void) | 315 | void __init check_bugs64_early(void) |
316 | { | 316 | { |
317 | if (!config_enabled(CONFIG_CPU_MIPSR6)) { | 317 | if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) { |
318 | check_mult_sh(); | 318 | check_mult_sh(); |
319 | check_daddiu(); | 319 | check_daddiu(); |
320 | } | 320 | } |
@@ -322,6 +322,6 @@ void __init check_bugs64_early(void) | |||
322 | 322 | ||
323 | void __init check_bugs64(void) | 323 | void __init check_bugs64(void) |
324 | { | 324 | { |
325 | if (!config_enabled(CONFIG_CPU_MIPSR6)) | 325 | if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) |
326 | check_daddi(); | 326 | check_daddi(); |
327 | } | 327 | } |
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index 891f5ee63983..e6eb7f1f7723 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c | |||
@@ -179,7 +179,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr, | |||
179 | return -ELIBBAD; | 179 | return -ELIBBAD; |
180 | } | 180 | } |
181 | 181 | ||
182 | if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) | 182 | if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) |
183 | return 0; | 183 | return 0; |
184 | 184 | ||
185 | fp_abi = state->fp_abi; | 185 | fp_abi = state->fp_abi; |
@@ -285,7 +285,7 @@ void mips_set_personality_fp(struct arch_elf_state *state) | |||
285 | * not be worried about N32/N64 binaries. | 285 | * not be worried about N32/N64 binaries. |
286 | */ | 286 | */ |
287 | 287 | ||
288 | if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) | 288 | if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) |
289 | return; | 289 | return; |
290 | 290 | ||
291 | switch (state->overall_fp_mode) { | 291 | switch (state->overall_fp_mode) { |
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 760217bbb2fa..659e6d3ae335 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c | |||
@@ -251,7 +251,7 @@ int mips_cm_probe(void) | |||
251 | mips_cm_probe_l2sync(); | 251 | mips_cm_probe_l2sync(); |
252 | 252 | ||
253 | /* determine register width for this CM */ | 253 | /* determine register width for this CM */ |
254 | mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3); | 254 | mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3); |
255 | 255 | ||
256 | for_each_possible_cpu(cpu) | 256 | for_each_possible_cpu(cpu) |
257 | spin_lock_init(&per_cpu(cm_core_lock, cpu)); | 257 | spin_lock_init(&per_cpu(cm_core_lock, cpu)); |
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index 7ff2a557f4aa..43fbadc78d0a 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c | |||
@@ -84,7 +84,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) | |||
84 | (s32)MIPSInst_SIMM(ir); | 84 | (s32)MIPSInst_SIMM(ir); |
85 | return 0; | 85 | return 0; |
86 | case daddiu_op: | 86 | case daddiu_op: |
87 | if (config_enabled(CONFIG_32BIT)) | 87 | if (IS_ENABLED(CONFIG_32BIT)) |
88 | break; | 88 | break; |
89 | 89 | ||
90 | if (MIPSInst_RT(ir)) | 90 | if (MIPSInst_RT(ir)) |
@@ -143,7 +143,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) | |||
143 | (u32)regs->regs[MIPSInst_RT(ir)]); | 143 | (u32)regs->regs[MIPSInst_RT(ir)]); |
144 | return 0; | 144 | return 0; |
145 | case dsll_op: | 145 | case dsll_op: |
146 | if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) | 146 | if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir)) |
147 | break; | 147 | break; |
148 | 148 | ||
149 | if (MIPSInst_RD(ir)) | 149 | if (MIPSInst_RD(ir)) |
@@ -152,7 +152,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) | |||
152 | MIPSInst_FD(ir)); | 152 | MIPSInst_FD(ir)); |
153 | return 0; | 153 | return 0; |
154 | case dsrl_op: | 154 | case dsrl_op: |
155 | if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) | 155 | if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir)) |
156 | break; | 156 | break; |
157 | 157 | ||
158 | if (MIPSInst_RD(ir)) | 158 | if (MIPSInst_RD(ir)) |
@@ -161,7 +161,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) | |||
161 | MIPSInst_FD(ir)); | 161 | MIPSInst_FD(ir)); |
162 | return 0; | 162 | return 0; |
163 | case daddu_op: | 163 | case daddu_op: |
164 | if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) | 164 | if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir)) |
165 | break; | 165 | break; |
166 | 166 | ||
167 | if (MIPSInst_RD(ir)) | 167 | if (MIPSInst_RD(ir)) |
@@ -170,7 +170,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) | |||
170 | (u64)regs->regs[MIPSInst_RT(ir)]; | 170 | (u64)regs->regs[MIPSInst_RT(ir)]; |
171 | return 0; | 171 | return 0; |
172 | case dsubu_op: | 172 | case dsubu_op: |
173 | if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) | 173 | if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir)) |
174 | break; | 174 | break; |
175 | 175 | ||
176 | if (MIPSInst_RD(ir)) | 176 | if (MIPSInst_RD(ir)) |
@@ -498,7 +498,7 @@ static int dmult_func(struct pt_regs *regs, u32 ir) | |||
498 | s64 res; | 498 | s64 res; |
499 | s64 rt, rs; | 499 | s64 rt, rs; |
500 | 500 | ||
501 | if (config_enabled(CONFIG_32BIT)) | 501 | if (IS_ENABLED(CONFIG_32BIT)) |
502 | return SIGILL; | 502 | return SIGILL; |
503 | 503 | ||
504 | rt = regs->regs[MIPSInst_RT(ir)]; | 504 | rt = regs->regs[MIPSInst_RT(ir)]; |
@@ -530,7 +530,7 @@ static int dmultu_func(struct pt_regs *regs, u32 ir) | |||
530 | u64 res; | 530 | u64 res; |
531 | u64 rt, rs; | 531 | u64 rt, rs; |
532 | 532 | ||
533 | if (config_enabled(CONFIG_32BIT)) | 533 | if (IS_ENABLED(CONFIG_32BIT)) |
534 | return SIGILL; | 534 | return SIGILL; |
535 | 535 | ||
536 | rt = regs->regs[MIPSInst_RT(ir)]; | 536 | rt = regs->regs[MIPSInst_RT(ir)]; |
@@ -561,7 +561,7 @@ static int ddiv_func(struct pt_regs *regs, u32 ir) | |||
561 | { | 561 | { |
562 | s64 rt, rs; | 562 | s64 rt, rs; |
563 | 563 | ||
564 | if (config_enabled(CONFIG_32BIT)) | 564 | if (IS_ENABLED(CONFIG_32BIT)) |
565 | return SIGILL; | 565 | return SIGILL; |
566 | 566 | ||
567 | rt = regs->regs[MIPSInst_RT(ir)]; | 567 | rt = regs->regs[MIPSInst_RT(ir)]; |
@@ -586,7 +586,7 @@ static int ddivu_func(struct pt_regs *regs, u32 ir) | |||
586 | { | 586 | { |
587 | u64 rt, rs; | 587 | u64 rt, rs; |
588 | 588 | ||
589 | if (config_enabled(CONFIG_32BIT)) | 589 | if (IS_ENABLED(CONFIG_32BIT)) |
590 | return SIGILL; | 590 | return SIGILL; |
591 | 591 | ||
592 | rt = regs->regs[MIPSInst_RT(ir)]; | 592 | rt = regs->regs[MIPSInst_RT(ir)]; |
@@ -825,7 +825,7 @@ static int dclz_func(struct pt_regs *regs, u32 ir) | |||
825 | u64 res; | 825 | u64 res; |
826 | u64 rs; | 826 | u64 rs; |
827 | 827 | ||
828 | if (config_enabled(CONFIG_32BIT)) | 828 | if (IS_ENABLED(CONFIG_32BIT)) |
829 | return SIGILL; | 829 | return SIGILL; |
830 | 830 | ||
831 | if (!MIPSInst_RD(ir)) | 831 | if (!MIPSInst_RD(ir)) |
@@ -852,7 +852,7 @@ static int dclo_func(struct pt_regs *regs, u32 ir) | |||
852 | u64 res; | 852 | u64 res; |
853 | u64 rs; | 853 | u64 rs; |
854 | 854 | ||
855 | if (config_enabled(CONFIG_32BIT)) | 855 | if (IS_ENABLED(CONFIG_32BIT)) |
856 | return SIGILL; | 856 | return SIGILL; |
857 | 857 | ||
858 | if (!MIPSInst_RD(ir)) | 858 | if (!MIPSInst_RD(ir)) |
@@ -1484,7 +1484,7 @@ fpu_emul: | |||
1484 | break; | 1484 | break; |
1485 | 1485 | ||
1486 | case ldl_op: | 1486 | case ldl_op: |
1487 | if (config_enabled(CONFIG_32BIT)) { | 1487 | if (IS_ENABLED(CONFIG_32BIT)) { |
1488 | err = SIGILL; | 1488 | err = SIGILL; |
1489 | break; | 1489 | break; |
1490 | } | 1490 | } |
@@ -1603,7 +1603,7 @@ fpu_emul: | |||
1603 | break; | 1603 | break; |
1604 | 1604 | ||
1605 | case ldr_op: | 1605 | case ldr_op: |
1606 | if (config_enabled(CONFIG_32BIT)) { | 1606 | if (IS_ENABLED(CONFIG_32BIT)) { |
1607 | err = SIGILL; | 1607 | err = SIGILL; |
1608 | break; | 1608 | break; |
1609 | } | 1609 | } |
@@ -1722,7 +1722,7 @@ fpu_emul: | |||
1722 | break; | 1722 | break; |
1723 | 1723 | ||
1724 | case sdl_op: | 1724 | case sdl_op: |
1725 | if (config_enabled(CONFIG_32BIT)) { | 1725 | if (IS_ENABLED(CONFIG_32BIT)) { |
1726 | err = SIGILL; | 1726 | err = SIGILL; |
1727 | break; | 1727 | break; |
1728 | } | 1728 | } |
@@ -1840,7 +1840,7 @@ fpu_emul: | |||
1840 | break; | 1840 | break; |
1841 | 1841 | ||
1842 | case sdr_op: | 1842 | case sdr_op: |
1843 | if (config_enabled(CONFIG_32BIT)) { | 1843 | if (IS_ENABLED(CONFIG_32BIT)) { |
1844 | err = SIGILL; | 1844 | err = SIGILL; |
1845 | break; | 1845 | break; |
1846 | } | 1846 | } |
@@ -2072,7 +2072,7 @@ fpu_emul: | |||
2072 | break; | 2072 | break; |
2073 | 2073 | ||
2074 | case lld_op: | 2074 | case lld_op: |
2075 | if (config_enabled(CONFIG_32BIT)) { | 2075 | if (IS_ENABLED(CONFIG_32BIT)) { |
2076 | err = SIGILL; | 2076 | err = SIGILL; |
2077 | break; | 2077 | break; |
2078 | } | 2078 | } |
@@ -2133,7 +2133,7 @@ fpu_emul: | |||
2133 | break; | 2133 | break; |
2134 | 2134 | ||
2135 | case scd_op: | 2135 | case scd_op: |
2136 | if (config_enabled(CONFIG_32BIT)) { | 2136 | if (IS_ENABLED(CONFIG_32BIT)) { |
2137 | err = SIGILL; | 2137 | err = SIGILL; |
2138 | break; | 2138 | break; |
2139 | } | 2139 | } |
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index adda3ffb9b78..5b31a9405ebc 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c | |||
@@ -148,7 +148,7 @@ int cps_pm_enter_state(enum cps_pm_state state) | |||
148 | } | 148 | } |
149 | 149 | ||
150 | /* Setup the VPE to run mips_cps_pm_restore when started again */ | 150 | /* Setup the VPE to run mips_cps_pm_restore when started again */ |
151 | if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { | 151 | if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { |
152 | /* Power gating relies upon CPS SMP */ | 152 | /* Power gating relies upon CPS SMP */ |
153 | if (!mips_cps_smp_in_use()) | 153 | if (!mips_cps_smp_in_use()) |
154 | return -EINVAL; | 154 | return -EINVAL; |
@@ -387,7 +387,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) | |||
387 | memset(labels, 0, sizeof(labels)); | 387 | memset(labels, 0, sizeof(labels)); |
388 | memset(relocs, 0, sizeof(relocs)); | 388 | memset(relocs, 0, sizeof(relocs)); |
389 | 389 | ||
390 | if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { | 390 | if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { |
391 | /* Power gating relies upon CPS SMP */ | 391 | /* Power gating relies upon CPS SMP */ |
392 | if (!mips_cps_smp_in_use()) | 392 | if (!mips_cps_smp_in_use()) |
393 | goto out_err; | 393 | goto out_err; |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index ae4231452115..1975cd2f7de6 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -165,7 +165,7 @@ static int save_msa_extcontext(void __user *buf) | |||
165 | * should already have been done when handling scalar FP | 165 | * should already have been done when handling scalar FP |
166 | * context. | 166 | * context. |
167 | */ | 167 | */ |
168 | BUG_ON(config_enabled(CONFIG_EVA)); | 168 | BUG_ON(IS_ENABLED(CONFIG_EVA)); |
169 | 169 | ||
170 | err = __put_user(read_msa_csr(), &msa->csr); | 170 | err = __put_user(read_msa_csr(), &msa->csr); |
171 | err |= _save_msa_all_upper(&msa->wr); | 171 | err |= _save_msa_all_upper(&msa->wr); |
@@ -195,7 +195,7 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size) | |||
195 | unsigned int csr; | 195 | unsigned int csr; |
196 | int i, err; | 196 | int i, err; |
197 | 197 | ||
198 | if (!config_enabled(CONFIG_CPU_HAS_MSA)) | 198 | if (!IS_ENABLED(CONFIG_CPU_HAS_MSA)) |
199 | return SIGSYS; | 199 | return SIGSYS; |
200 | 200 | ||
201 | if (size != sizeof(*msa)) | 201 | if (size != sizeof(*msa)) |
@@ -215,7 +215,7 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size) | |||
215 | * scalar FP context, so FPU & MSA should have already been | 215 | * scalar FP context, so FPU & MSA should have already been |
216 | * disabled whilst handling scalar FP context. | 216 | * disabled whilst handling scalar FP context. |
217 | */ | 217 | */ |
218 | BUG_ON(config_enabled(CONFIG_EVA)); | 218 | BUG_ON(IS_ENABLED(CONFIG_EVA)); |
219 | 219 | ||
220 | write_msa_csr(csr); | 220 | write_msa_csr(csr); |
221 | err |= _restore_msa_all_upper(&msa->wr); | 221 | err |= _restore_msa_all_upper(&msa->wr); |
@@ -315,7 +315,7 @@ int protected_save_fp_context(void __user *sc) | |||
315 | * EVA does not have userland equivalents of ldc1 or sdc1, so | 315 | * EVA does not have userland equivalents of ldc1 or sdc1, so |
316 | * save to the kernel FP context & copy that to userland below. | 316 | * save to the kernel FP context & copy that to userland below. |
317 | */ | 317 | */ |
318 | if (config_enabled(CONFIG_EVA)) | 318 | if (IS_ENABLED(CONFIG_EVA)) |
319 | lose_fpu(1); | 319 | lose_fpu(1); |
320 | 320 | ||
321 | while (1) { | 321 | while (1) { |
@@ -378,7 +378,7 @@ int protected_restore_fp_context(void __user *sc) | |||
378 | * disable the FPU here such that the code below simply copies to | 378 | * disable the FPU here such that the code below simply copies to |
379 | * the kernel FP context. | 379 | * the kernel FP context. |
380 | */ | 380 | */ |
381 | if (config_enabled(CONFIG_EVA)) | 381 | if (IS_ENABLED(CONFIG_EVA)) |
382 | lose_fpu(0); | 382 | lose_fpu(0); |
383 | 383 | ||
384 | while (1) { | 384 | while (1) { |
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 4ed36f288d64..05b3201271b4 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c | |||
@@ -46,8 +46,8 @@ static unsigned core_vpe_count(unsigned core) | |||
46 | if (threads_disabled) | 46 | if (threads_disabled) |
47 | return 1; | 47 | return 1; |
48 | 48 | ||
49 | if ((!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) | 49 | if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) |
50 | && (!config_enabled(CONFIG_CPU_MIPSR6) || !cpu_has_vp)) | 50 | && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp)) |
51 | return 1; | 51 | return 1; |
52 | 52 | ||
53 | mips_cm_lock_other(core, 0); | 53 | mips_cm_lock_other(core, 0); |
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 28b3af73a17b..f1c308dbbc4a 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c | |||
@@ -1025,7 +1025,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
1025 | if (!access_ok(VERIFY_READ, addr, 2)) | 1025 | if (!access_ok(VERIFY_READ, addr, 2)) |
1026 | goto sigbus; | 1026 | goto sigbus; |
1027 | 1027 | ||
1028 | if (config_enabled(CONFIG_EVA)) { | 1028 | if (IS_ENABLED(CONFIG_EVA)) { |
1029 | if (segment_eq(get_fs(), get_ds())) | 1029 | if (segment_eq(get_fs(), get_ds())) |
1030 | LoadHW(addr, value, res); | 1030 | LoadHW(addr, value, res); |
1031 | else | 1031 | else |
@@ -1044,7 +1044,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
1044 | if (!access_ok(VERIFY_READ, addr, 4)) | 1044 | if (!access_ok(VERIFY_READ, addr, 4)) |
1045 | goto sigbus; | 1045 | goto sigbus; |
1046 | 1046 | ||
1047 | if (config_enabled(CONFIG_EVA)) { | 1047 | if (IS_ENABLED(CONFIG_EVA)) { |
1048 | if (segment_eq(get_fs(), get_ds())) | 1048 | if (segment_eq(get_fs(), get_ds())) |
1049 | LoadW(addr, value, res); | 1049 | LoadW(addr, value, res); |
1050 | else | 1050 | else |
@@ -1063,7 +1063,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
1063 | if (!access_ok(VERIFY_READ, addr, 2)) | 1063 | if (!access_ok(VERIFY_READ, addr, 2)) |
1064 | goto sigbus; | 1064 | goto sigbus; |
1065 | 1065 | ||
1066 | if (config_enabled(CONFIG_EVA)) { | 1066 | if (IS_ENABLED(CONFIG_EVA)) { |
1067 | if (segment_eq(get_fs(), get_ds())) | 1067 | if (segment_eq(get_fs(), get_ds())) |
1068 | LoadHWU(addr, value, res); | 1068 | LoadHWU(addr, value, res); |
1069 | else | 1069 | else |
@@ -1131,7 +1131,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
1131 | compute_return_epc(regs); | 1131 | compute_return_epc(regs); |
1132 | value = regs->regs[insn.i_format.rt]; | 1132 | value = regs->regs[insn.i_format.rt]; |
1133 | 1133 | ||
1134 | if (config_enabled(CONFIG_EVA)) { | 1134 | if (IS_ENABLED(CONFIG_EVA)) { |
1135 | if (segment_eq(get_fs(), get_ds())) | 1135 | if (segment_eq(get_fs(), get_ds())) |
1136 | StoreHW(addr, value, res); | 1136 | StoreHW(addr, value, res); |
1137 | else | 1137 | else |
@@ -1151,7 +1151,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
1151 | compute_return_epc(regs); | 1151 | compute_return_epc(regs); |
1152 | value = regs->regs[insn.i_format.rt]; | 1152 | value = regs->regs[insn.i_format.rt]; |
1153 | 1153 | ||
1154 | if (config_enabled(CONFIG_EVA)) { | 1154 | if (IS_ENABLED(CONFIG_EVA)) { |
1155 | if (segment_eq(get_fs(), get_ds())) | 1155 | if (segment_eq(get_fs(), get_ds())) |
1156 | StoreW(addr, value, res); | 1156 | StoreW(addr, value, res); |
1157 | else | 1157 | else |
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index 4ffa6fc81c8f..1a80b6f73ab2 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <dma-coherence.h> | 10 | #include <dma-coherence.h> |
11 | 11 | ||
12 | static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, | 12 | static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, |
13 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 13 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
14 | { | 14 | { |
15 | void *ret; | 15 | void *ret; |
16 | 16 | ||
@@ -41,7 +41,7 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, | |||
41 | } | 41 | } |
42 | 42 | ||
43 | static void loongson_dma_free_coherent(struct device *dev, size_t size, | 43 | static void loongson_dma_free_coherent(struct device *dev, size_t size, |
44 | void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) | 44 | void *vaddr, dma_addr_t dma_handle, unsigned long attrs) |
45 | { | 45 | { |
46 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | 46 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); |
47 | } | 47 | } |
@@ -49,7 +49,7 @@ static void loongson_dma_free_coherent(struct device *dev, size_t size, | |||
49 | static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, | 49 | static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, |
50 | unsigned long offset, size_t size, | 50 | unsigned long offset, size_t size, |
51 | enum dma_data_direction dir, | 51 | enum dma_data_direction dir, |
52 | struct dma_attrs *attrs) | 52 | unsigned long attrs) |
53 | { | 53 | { |
54 | dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, | 54 | dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, |
55 | dir, attrs); | 55 | dir, attrs); |
@@ -59,9 +59,9 @@ static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, | |||
59 | 59 | ||
60 | static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg, | 60 | static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg, |
61 | int nents, enum dma_data_direction dir, | 61 | int nents, enum dma_data_direction dir, |
62 | struct dma_attrs *attrs) | 62 | unsigned long attrs) |
63 | { | 63 | { |
64 | int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL); | 64 | int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0); |
65 | mb(); | 65 | mb(); |
66 | 66 | ||
67 | return r; | 67 | return r; |
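A small knock-on effect is visible in the loongson map_sg path above: call sites that used to pass a NULL struct dma_attrs pointer to mean "no attributes" now pass a literal 0, because the argument is an integer. A sketch of the same forwarding pattern:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

/* Sketch: forward a scatterlist mapping to swiotlb with no attributes (0, not NULL). */
static int example_map_sg(struct device *dev, struct scatterlist *sg,
                          int nents, enum dma_data_direction dir,
                          unsigned long attrs)
{
        return swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
}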
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 6dc07fba187f..92d15e68abb6 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -784,10 +784,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
784 | */ | 784 | */ |
785 | static inline int cop1_64bit(struct pt_regs *xcp) | 785 | static inline int cop1_64bit(struct pt_regs *xcp) |
786 | { | 786 | { |
787 | if (config_enabled(CONFIG_64BIT) && !config_enabled(CONFIG_MIPS32_O32)) | 787 | if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32)) |
788 | return 1; | 788 | return 1; |
789 | else if (config_enabled(CONFIG_32BIT) && | 789 | else if (IS_ENABLED(CONFIG_32BIT) && |
790 | !config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) | 790 | !IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) |
791 | return 0; | 791 | return 0; |
792 | 792 | ||
793 | return !test_thread_flag(TIF_32BIT_FPREGS); | 793 | return !test_thread_flag(TIF_32BIT_FPREGS); |
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index cb557d28cb21..b2eadd6fa9a1 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c | |||
@@ -131,7 +131,7 @@ static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size, | |||
131 | } | 131 | } |
132 | 132 | ||
133 | static void *mips_dma_alloc_coherent(struct device *dev, size_t size, | 133 | static void *mips_dma_alloc_coherent(struct device *dev, size_t size, |
134 | dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 134 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
135 | { | 135 | { |
136 | void *ret; | 136 | void *ret; |
137 | struct page *page = NULL; | 137 | struct page *page = NULL; |
@@ -141,7 +141,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, | |||
141 | * XXX: seems like the coherent and non-coherent implementations could | 141 | * XXX: seems like the coherent and non-coherent implementations could |
142 | * be consolidated. | 142 | * be consolidated. |
143 | */ | 143 | */ |
144 | if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) | 144 | if (attrs & DMA_ATTR_NON_CONSISTENT) |
145 | return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); | 145 | return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); |
146 | 146 | ||
147 | gfp = massage_gfp_flags(dev, gfp); | 147 | gfp = massage_gfp_flags(dev, gfp); |
@@ -176,13 +176,13 @@ static void mips_dma_free_noncoherent(struct device *dev, size_t size, | |||
176 | } | 176 | } |
177 | 177 | ||
178 | static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, | 178 | static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, |
179 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 179 | dma_addr_t dma_handle, unsigned long attrs) |
180 | { | 180 | { |
181 | unsigned long addr = (unsigned long) vaddr; | 181 | unsigned long addr = (unsigned long) vaddr; |
182 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 182 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
183 | struct page *page = NULL; | 183 | struct page *page = NULL; |
184 | 184 | ||
185 | if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { | 185 | if (attrs & DMA_ATTR_NON_CONSISTENT) { |
186 | mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); | 186 | mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); |
187 | return; | 187 | return; |
188 | } | 188 | } |
@@ -200,7 +200,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
200 | 200 | ||
201 | static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, | 201 | static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
202 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 202 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
203 | struct dma_attrs *attrs) | 203 | unsigned long attrs) |
204 | { | 204 | { |
205 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 205 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
206 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 206 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
@@ -214,7 +214,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
214 | 214 | ||
215 | pfn = page_to_pfn(virt_to_page((void *)addr)); | 215 | pfn = page_to_pfn(virt_to_page((void *)addr)); |
216 | 216 | ||
217 | if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) | 217 | if (attrs & DMA_ATTR_WRITE_COMBINE) |
218 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | 218 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
219 | else | 219 | else |
220 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 220 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
@@ -291,7 +291,7 @@ static inline void __dma_sync(struct page *page, | |||
291 | } | 291 | } |
292 | 292 | ||
293 | static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | 293 | static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
294 | size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) | 294 | size_t size, enum dma_data_direction direction, unsigned long attrs) |
295 | { | 295 | { |
296 | if (cpu_needs_post_dma_flush(dev)) | 296 | if (cpu_needs_post_dma_flush(dev)) |
297 | __dma_sync(dma_addr_to_page(dev, dma_addr), | 297 | __dma_sync(dma_addr_to_page(dev, dma_addr), |
@@ -301,7 +301,7 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
301 | } | 301 | } |
302 | 302 | ||
303 | static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 303 | static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
304 | int nents, enum dma_data_direction direction, struct dma_attrs *attrs) | 304 | int nents, enum dma_data_direction direction, unsigned long attrs) |
305 | { | 305 | { |
306 | int i; | 306 | int i; |
307 | struct scatterlist *sg; | 307 | struct scatterlist *sg; |
@@ -322,7 +322,7 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
322 | 322 | ||
323 | static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, | 323 | static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, |
324 | unsigned long offset, size_t size, enum dma_data_direction direction, | 324 | unsigned long offset, size_t size, enum dma_data_direction direction, |
325 | struct dma_attrs *attrs) | 325 | unsigned long attrs) |
326 | { | 326 | { |
327 | if (!plat_device_is_coherent(dev)) | 327 | if (!plat_device_is_coherent(dev)) |
328 | __dma_sync(page, offset, size, direction); | 328 | __dma_sync(page, offset, size, direction); |
@@ -332,7 +332,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, | |||
332 | 332 | ||
333 | static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 333 | static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
334 | int nhwentries, enum dma_data_direction direction, | 334 | int nhwentries, enum dma_data_direction direction, |
335 | struct dma_attrs *attrs) | 335 | unsigned long attrs) |
336 | { | 336 | { |
337 | int i; | 337 | int i; |
338 | struct scatterlist *sg; | 338 | struct scatterlist *sg; |
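The MIPS alloc and mmap paths above also show the attribute bits being consumed directly (DMA_ATTR_NON_CONSISTENT, DMA_ATTR_WRITE_COMBINE). From a driver's point of view the matching producer side might look like this hedged sketch, which uses dma_mmap_attrs() to request a write-combined user mapping of a coherent buffer:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Sketch: export a coherent buffer to userspace with write-combined PTEs. */
static int example_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t handle, size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, handle, size,
                              DMA_ATTR_WRITE_COMBINE);
}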
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 4004b659ce50..ff49b29c2d16 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -1025,7 +1025,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) | |||
1025 | pte_off_odd += offsetof(pte_t, pte_high); | 1025 | pte_off_odd += offsetof(pte_t, pte_high); |
1026 | #endif | 1026 | #endif |
1027 | 1027 | ||
1028 | if (config_enabled(CONFIG_XPA)) { | 1028 | if (IS_ENABLED(CONFIG_XPA)) { |
1029 | uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ | 1029 | uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ |
1030 | UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); | 1030 | UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); |
1031 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); | 1031 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); |
@@ -1643,7 +1643,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, | |||
1643 | unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); | 1643 | unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); |
1644 | unsigned int swmode = mode & ~hwmode; | 1644 | unsigned int swmode = mode & ~hwmode; |
1645 | 1645 | ||
1646 | if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) { | 1646 | if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) { |
1647 | uasm_i_lui(p, scratch, swmode >> 16); | 1647 | uasm_i_lui(p, scratch, swmode >> 16); |
1648 | uasm_i_or(p, pte, pte, scratch); | 1648 | uasm_i_or(p, pte, pte, scratch); |
1649 | BUG_ON(swmode & 0xffff); | 1649 | BUG_ON(swmode & 0xffff); |
@@ -2432,7 +2432,7 @@ static void config_htw_params(void) | |||
2432 | pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT; | 2432 | pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT; |
2433 | 2433 | ||
2434 | /* Set pointer size to size of directory pointers */ | 2434 | /* Set pointer size to size of directory pointers */ |
2435 | if (config_enabled(CONFIG_64BIT)) | 2435 | if (IS_ENABLED(CONFIG_64BIT)) |
2436 | pwsize |= MIPS_PWSIZE_PS_MASK; | 2436 | pwsize |= MIPS_PWSIZE_PS_MASK; |
2437 | /* PTEs may be multiple pointers long (e.g. with XPA) */ | 2437 | /* PTEs may be multiple pointers long (e.g. with XPA) */ |
2438 | pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT) | 2438 | pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT) |
@@ -2448,7 +2448,7 @@ static void config_htw_params(void) | |||
2448 | * the pwctl fields. | 2448 | * the pwctl fields. |
2449 | */ | 2449 | */ |
2450 | config = 1 << MIPS_PWCTL_PWEN_SHIFT; | 2450 | config = 1 << MIPS_PWCTL_PWEN_SHIFT; |
2451 | if (config_enabled(CONFIG_64BIT)) | 2451 | if (IS_ENABLED(CONFIG_64BIT)) |
2452 | config |= MIPS_PWCTL_XU_MASK; | 2452 | config |= MIPS_PWCTL_XU_MASK; |
2453 | write_c0_pwctl(config); | 2453 | write_c0_pwctl(config); |
2454 | pr_info("Hardware Page Table Walker enabled\n"); | 2454 | pr_info("Hardware Page Table Walker enabled\n"); |
@@ -2522,7 +2522,7 @@ void build_tlb_refill_handler(void) | |||
2522 | */ | 2522 | */ |
2523 | static int run_once = 0; | 2523 | static int run_once = 0; |
2524 | 2524 | ||
2525 | if (config_enabled(CONFIG_XPA) && !cpu_has_rixi) | 2525 | if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi) |
2526 | panic("Kernels supporting XPA currently require CPUs with RIXI"); | 2526 | panic("Kernels supporting XPA currently require CPUs with RIXI"); |
2527 | 2527 | ||
2528 | output_pgtable_bits_defines(); | 2528 | output_pgtable_bits_defines(); |
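
Note: the tlbex.c hunks above are part of the tree-wide config_enabled() to IS_ENABLED() conversion. For the bool options touched here the two are equivalent compile-time 0/1 constants; IS_ENABLED() is simply the documented macro from <linux/kconfig.h> (and also covers =m symbols). An illustrative sketch of how such a check folds away at compile time (function name is made up):

#include <linux/kconfig.h>
#include <linux/printk.h>

/* IS_ENABLED() is a constant expression, so the dead branch is removed
 * by the compiler while both sides still get type-checked.
 */
static void example_report_width(void)
{
	if (IS_ENABLED(CONFIG_64BIT))
		pr_info("64-bit page table walker fields enabled\n");
	else
		pr_info("32-bit configuration\n");
}
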
diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c index f7133efc5843..151f4882ec8a 100644 --- a/arch/mips/mti-malta/malta-dtshim.c +++ b/arch/mips/mti-malta/malta-dtshim.c | |||
@@ -31,7 +31,7 @@ static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size) | |||
31 | 31 | ||
32 | entries = 1; | 32 | entries = 1; |
33 | mem_array[0] = cpu_to_be32(PHYS_OFFSET); | 33 | mem_array[0] = cpu_to_be32(PHYS_OFFSET); |
34 | if (config_enabled(CONFIG_EVA)) { | 34 | if (IS_ENABLED(CONFIG_EVA)) { |
35 | /* | 35 | /* |
36 | * The current Malta EVA configuration is "special" in that it | 36 | * The current Malta EVA configuration is "special" in that it |
37 | * always makes use of addresses in the upper half of the 32 bit | 37 | * always makes use of addresses in the upper half of the 32 bit |
@@ -82,7 +82,7 @@ static void __init append_memory(void *fdt, int root_off) | |||
82 | physical_memsize = 32 << 20; | 82 | physical_memsize = 32 << 20; |
83 | } | 83 | } |
84 | 84 | ||
85 | if (config_enabled(CONFIG_CPU_BIG_ENDIAN)) { | 85 | if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) { |
86 | /* | 86 | /* |
87 | * SOC-it swaps, or perhaps doesn't swap, when DMA'ing | 87 | * SOC-it swaps, or perhaps doesn't swap, when DMA'ing |
88 | * the last word of physical memory. | 88 | * the last word of physical memory. |
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index d5f8dae6a797..a47556723b85 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c | |||
@@ -32,7 +32,7 @@ static void free_init_pages_eva_malta(void *begin, void *end) | |||
32 | 32 | ||
33 | void __init fw_meminit(void) | 33 | void __init fw_meminit(void) |
34 | { | 34 | { |
35 | bool eva = config_enabled(CONFIG_EVA); | 35 | bool eva = IS_ENABLED(CONFIG_EVA); |
36 | 36 | ||
37 | free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL; | 37 | free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL; |
38 | } | 38 | } |
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index 33d5ff5069e5..ec5b21678fad 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c | |||
@@ -261,7 +261,7 @@ void __init plat_mem_setup(void) | |||
261 | fdt = malta_dt_shim(fdt); | 261 | fdt = malta_dt_shim(fdt); |
262 | __dt_setup_arch(fdt); | 262 | __dt_setup_arch(fdt); |
263 | 263 | ||
264 | if (config_enabled(CONFIG_EVA)) | 264 | if (IS_ENABLED(CONFIG_EVA)) |
265 | /* EVA has already been configured in mach-malta/kernel-init.h */ | 265 | /* EVA has already been configured in mach-malta/kernel-init.h */ |
266 | pr_info("Enhanced Virtual Addressing (EVA) activated\n"); | 266 | pr_info("Enhanced Virtual Addressing (EVA) activated\n"); |
267 | 267 | ||
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 1a8c96035716..d1b7bd09253a 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c | |||
@@ -426,7 +426,7 @@ static inline void emit_load_ptr(unsigned int dst, unsigned int src, | |||
426 | static inline void emit_load_func(unsigned int reg, ptr imm, | 426 | static inline void emit_load_func(unsigned int reg, ptr imm, |
427 | struct jit_ctx *ctx) | 427 | struct jit_ctx *ctx) |
428 | { | 428 | { |
429 | if (config_enabled(CONFIG_64BIT)) { | 429 | if (IS_ENABLED(CONFIG_64BIT)) { |
430 | /* At this point imm is always 64-bit */ | 430 | /* At this point imm is always 64-bit */ |
431 | emit_load_imm(r_tmp, (u64)imm >> 32, ctx); | 431 | emit_load_imm(r_tmp, (u64)imm >> 32, ctx); |
432 | emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ | 432 | emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ |
@@ -516,7 +516,7 @@ static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx) | |||
516 | static inline u16 align_sp(unsigned int num) | 516 | static inline u16 align_sp(unsigned int num) |
517 | { | 517 | { |
518 | /* Double word alignment for 32-bit, quadword for 64-bit */ | 518 | /* Double word alignment for 32-bit, quadword for 64-bit */ |
519 | unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8; | 519 | unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8; |
520 | num = (num + (align - 1)) & -align; | 520 | num = (num + (align - 1)) & -align; |
521 | return num; | 521 | return num; |
522 | } | 522 | } |
diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c index 3758715d4ab6..0630693bec2a 100644 --- a/arch/mips/netlogic/common/nlm-dma.c +++ b/arch/mips/netlogic/common/nlm-dma.c | |||
@@ -45,7 +45,7 @@ | |||
45 | static char *nlm_swiotlb; | 45 | static char *nlm_swiotlb; |
46 | 46 | ||
47 | static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, | 47 | static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, |
48 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 48 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
49 | { | 49 | { |
50 | /* ignore region specifiers */ | 50 | /* ignore region specifiers */ |
51 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); | 51 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); |
@@ -62,7 +62,7 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, | |||
62 | } | 62 | } |
63 | 63 | ||
64 | static void nlm_dma_free_coherent(struct device *dev, size_t size, | 64 | static void nlm_dma_free_coherent(struct device *dev, size_t size, |
65 | void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) | 65 | void *vaddr, dma_addr_t dma_handle, unsigned long attrs) |
66 | { | 66 | { |
67 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | 67 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); |
68 | } | 68 | } |
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c index 8842394cb49a..4f4b9029f0ea 100644 --- a/arch/mn10300/mm/dma-alloc.c +++ b/arch/mn10300/mm/dma-alloc.c | |||
@@ -21,7 +21,7 @@ | |||
21 | static unsigned long pci_sram_allocated = 0xbc000000; | 21 | static unsigned long pci_sram_allocated = 0xbc000000; |
22 | 22 | ||
23 | static void *mn10300_dma_alloc(struct device *dev, size_t size, | 23 | static void *mn10300_dma_alloc(struct device *dev, size_t size, |
24 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 24 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
25 | { | 25 | { |
26 | unsigned long addr; | 26 | unsigned long addr; |
27 | void *ret; | 27 | void *ret; |
@@ -63,7 +63,7 @@ done: | |||
63 | } | 63 | } |
64 | 64 | ||
65 | static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, | 65 | static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, |
66 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 66 | dma_addr_t dma_handle, unsigned long attrs) |
67 | { | 67 | { |
68 | unsigned long addr = (unsigned long) vaddr & ~0x20000000; | 68 | unsigned long addr = (unsigned long) vaddr & ~0x20000000; |
69 | 69 | ||
@@ -75,7 +75,7 @@ static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, | |||
75 | 75 | ||
76 | static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 76 | static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
77 | int nents, enum dma_data_direction direction, | 77 | int nents, enum dma_data_direction direction, |
78 | struct dma_attrs *attrs) | 78 | unsigned long attrs) |
79 | { | 79 | { |
80 | struct scatterlist *sg; | 80 | struct scatterlist *sg; |
81 | int i; | 81 | int i; |
@@ -92,7 +92,7 @@ static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
92 | 92 | ||
93 | static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page, | 93 | static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page, |
94 | unsigned long offset, size_t size, | 94 | unsigned long offset, size_t size, |
95 | enum dma_data_direction direction, struct dma_attrs *attrs) | 95 | enum dma_data_direction direction, unsigned long attrs) |
96 | { | 96 | { |
97 | return page_to_bus(page) + offset; | 97 | return page_to_bus(page) + offset; |
98 | } | 98 | } |
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index 90422c367ed3..d800fad87896 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c | |||
@@ -59,7 +59,7 @@ static inline void __dma_sync_for_cpu(void *vaddr, size_t size, | |||
59 | } | 59 | } |
60 | 60 | ||
61 | static void *nios2_dma_alloc(struct device *dev, size_t size, | 61 | static void *nios2_dma_alloc(struct device *dev, size_t size, |
62 | dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) | 62 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
63 | { | 63 | { |
64 | void *ret; | 64 | void *ret; |
65 | 65 | ||
@@ -84,7 +84,7 @@ static void *nios2_dma_alloc(struct device *dev, size_t size, | |||
84 | } | 84 | } |
85 | 85 | ||
86 | static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, | 86 | static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, |
87 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 87 | dma_addr_t dma_handle, unsigned long attrs) |
88 | { | 88 | { |
89 | unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); | 89 | unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); |
90 | 90 | ||
@@ -93,7 +93,7 @@ static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, | |||
93 | 93 | ||
94 | static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, | 94 | static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, |
95 | int nents, enum dma_data_direction direction, | 95 | int nents, enum dma_data_direction direction, |
96 | struct dma_attrs *attrs) | 96 | unsigned long attrs) |
97 | { | 97 | { |
98 | int i; | 98 | int i; |
99 | 99 | ||
@@ -113,7 +113,7 @@ static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
113 | static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, | 113 | static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, |
114 | unsigned long offset, size_t size, | 114 | unsigned long offset, size_t size, |
115 | enum dma_data_direction direction, | 115 | enum dma_data_direction direction, |
116 | struct dma_attrs *attrs) | 116 | unsigned long attrs) |
117 | { | 117 | { |
118 | void *addr = page_address(page) + offset; | 118 | void *addr = page_address(page) + offset; |
119 | 119 | ||
@@ -123,14 +123,14 @@ static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, | |||
123 | 123 | ||
124 | static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 124 | static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
125 | size_t size, enum dma_data_direction direction, | 125 | size_t size, enum dma_data_direction direction, |
126 | struct dma_attrs *attrs) | 126 | unsigned long attrs) |
127 | { | 127 | { |
128 | __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); | 128 | __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); |
129 | } | 129 | } |
130 | 130 | ||
131 | static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | 131 | static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
132 | int nhwentries, enum dma_data_direction direction, | 132 | int nhwentries, enum dma_data_direction direction, |
133 | struct dma_attrs *attrs) | 133 | unsigned long attrs) |
134 | { | 134 | { |
135 | void *addr; | 135 | void *addr; |
136 | int i; | 136 | int i; |
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 0b77ddb1ee07..140c99140649 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-mapping.h> |
23 | #include <linux/dma-debug.h> | 23 | #include <linux/dma-debug.h> |
24 | #include <linux/export.h> | 24 | #include <linux/export.h> |
25 | #include <linux/dma-attrs.h> | ||
26 | 25 | ||
27 | #include <asm/cpuinfo.h> | 26 | #include <asm/cpuinfo.h> |
28 | #include <asm/spr_defs.h> | 27 | #include <asm/spr_defs.h> |
@@ -83,7 +82,7 @@ page_clear_nocache(pte_t *pte, unsigned long addr, | |||
83 | static void * | 82 | static void * |
84 | or1k_dma_alloc(struct device *dev, size_t size, | 83 | or1k_dma_alloc(struct device *dev, size_t size, |
85 | dma_addr_t *dma_handle, gfp_t gfp, | 84 | dma_addr_t *dma_handle, gfp_t gfp, |
86 | struct dma_attrs *attrs) | 85 | unsigned long attrs) |
87 | { | 86 | { |
88 | unsigned long va; | 87 | unsigned long va; |
89 | void *page; | 88 | void *page; |
@@ -101,7 +100,7 @@ or1k_dma_alloc(struct device *dev, size_t size, | |||
101 | 100 | ||
102 | va = (unsigned long)page; | 101 | va = (unsigned long)page; |
103 | 102 | ||
104 | if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { | 103 | if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { |
105 | /* | 104 | /* |
106 | * We need to iterate through the pages, clearing the dcache for | 105 | * We need to iterate through the pages, clearing the dcache for |
107 | * them and setting the cache-inhibit bit. | 106 | * them and setting the cache-inhibit bit. |
@@ -117,7 +116,7 @@ or1k_dma_alloc(struct device *dev, size_t size, | |||
117 | 116 | ||
118 | static void | 117 | static void |
119 | or1k_dma_free(struct device *dev, size_t size, void *vaddr, | 118 | or1k_dma_free(struct device *dev, size_t size, void *vaddr, |
120 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 119 | dma_addr_t dma_handle, unsigned long attrs) |
121 | { | 120 | { |
122 | unsigned long va = (unsigned long)vaddr; | 121 | unsigned long va = (unsigned long)vaddr; |
123 | struct mm_walk walk = { | 122 | struct mm_walk walk = { |
@@ -125,7 +124,7 @@ or1k_dma_free(struct device *dev, size_t size, void *vaddr, | |||
125 | .mm = &init_mm | 124 | .mm = &init_mm |
126 | }; | 125 | }; |
127 | 126 | ||
128 | if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { | 127 | if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { |
129 | /* walk_page_range shouldn't be able to fail here */ | 128 | /* walk_page_range shouldn't be able to fail here */ |
130 | WARN_ON(walk_page_range(va, va + size, &walk)); | 129 | WARN_ON(walk_page_range(va, va + size, &walk)); |
131 | } | 130 | } |
@@ -137,7 +136,7 @@ static dma_addr_t | |||
137 | or1k_map_page(struct device *dev, struct page *page, | 136 | or1k_map_page(struct device *dev, struct page *page, |
138 | unsigned long offset, size_t size, | 137 | unsigned long offset, size_t size, |
139 | enum dma_data_direction dir, | 138 | enum dma_data_direction dir, |
140 | struct dma_attrs *attrs) | 139 | unsigned long attrs) |
141 | { | 140 | { |
142 | unsigned long cl; | 141 | unsigned long cl; |
143 | dma_addr_t addr = page_to_phys(page) + offset; | 142 | dma_addr_t addr = page_to_phys(page) + offset; |
@@ -170,7 +169,7 @@ or1k_map_page(struct device *dev, struct page *page, | |||
170 | static void | 169 | static void |
171 | or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, | 170 | or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, |
172 | size_t size, enum dma_data_direction dir, | 171 | size_t size, enum dma_data_direction dir, |
173 | struct dma_attrs *attrs) | 172 | unsigned long attrs) |
174 | { | 173 | { |
175 | /* Nothing special to do here... */ | 174 | /* Nothing special to do here... */ |
176 | } | 175 | } |
@@ -178,14 +177,14 @@ or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, | |||
178 | static int | 177 | static int |
179 | or1k_map_sg(struct device *dev, struct scatterlist *sg, | 178 | or1k_map_sg(struct device *dev, struct scatterlist *sg, |
180 | int nents, enum dma_data_direction dir, | 179 | int nents, enum dma_data_direction dir, |
181 | struct dma_attrs *attrs) | 180 | unsigned long attrs) |
182 | { | 181 | { |
183 | struct scatterlist *s; | 182 | struct scatterlist *s; |
184 | int i; | 183 | int i; |
185 | 184 | ||
186 | for_each_sg(sg, s, nents, i) { | 185 | for_each_sg(sg, s, nents, i) { |
187 | s->dma_address = or1k_map_page(dev, sg_page(s), s->offset, | 186 | s->dma_address = or1k_map_page(dev, sg_page(s), s->offset, |
188 | s->length, dir, NULL); | 187 | s->length, dir, 0); |
189 | } | 188 | } |
190 | 189 | ||
191 | return nents; | 190 | return nents; |
@@ -194,13 +193,13 @@ or1k_map_sg(struct device *dev, struct scatterlist *sg, | |||
194 | static void | 193 | static void |
195 | or1k_unmap_sg(struct device *dev, struct scatterlist *sg, | 194 | or1k_unmap_sg(struct device *dev, struct scatterlist *sg, |
196 | int nents, enum dma_data_direction dir, | 195 | int nents, enum dma_data_direction dir, |
197 | struct dma_attrs *attrs) | 196 | unsigned long attrs) |
198 | { | 197 | { |
199 | struct scatterlist *s; | 198 | struct scatterlist *s; |
200 | int i; | 199 | int i; |
201 | 200 | ||
202 | for_each_sg(sg, s, nents, i) { | 201 | for_each_sg(sg, s, nents, i) { |
203 | or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL); | 202 | or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0); |
204 | } | 203 | } |
205 | } | 204 | } |
206 | 205 | ||
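
Note: the openrisc hunks above also show the two caller-side consequences of the attrs change: internal callers that used to pass NULL now pass 0 (the empty attribute set), and "attribute not requested" tests become an explicit mask against the flags word. A small illustrative sketch of the latter (helper name is not from this patch):

#include <linux/dma-mapping.h>

/* DMA_ATTR_NON_CONSISTENT means the caller manages coherency itself,
 * so the default path (bit clear) is the one that does cache maintenance.
 */
static bool example_needs_consistent_alloc(unsigned long attrs)
{
	return (attrs & DMA_ATTR_NON_CONSISTENT) == 0;
}
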
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index a27e4928bf73..02d9ed0f3949 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c | |||
@@ -414,7 +414,7 @@ pcxl_dma_init(void) | |||
414 | __initcall(pcxl_dma_init); | 414 | __initcall(pcxl_dma_init); |
415 | 415 | ||
416 | static void *pa11_dma_alloc(struct device *dev, size_t size, | 416 | static void *pa11_dma_alloc(struct device *dev, size_t size, |
417 | dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) | 417 | dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) |
418 | { | 418 | { |
419 | unsigned long vaddr; | 419 | unsigned long vaddr; |
420 | unsigned long paddr; | 420 | unsigned long paddr; |
@@ -441,7 +441,7 @@ static void *pa11_dma_alloc(struct device *dev, size_t size, | |||
441 | } | 441 | } |
442 | 442 | ||
443 | static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, | 443 | static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, |
444 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 444 | dma_addr_t dma_handle, unsigned long attrs) |
445 | { | 445 | { |
446 | int order; | 446 | int order; |
447 | 447 | ||
@@ -454,7 +454,7 @@ static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, | |||
454 | 454 | ||
455 | static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, | 455 | static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, |
456 | unsigned long offset, size_t size, | 456 | unsigned long offset, size_t size, |
457 | enum dma_data_direction direction, struct dma_attrs *attrs) | 457 | enum dma_data_direction direction, unsigned long attrs) |
458 | { | 458 | { |
459 | void *addr = page_address(page) + offset; | 459 | void *addr = page_address(page) + offset; |
460 | BUG_ON(direction == DMA_NONE); | 460 | BUG_ON(direction == DMA_NONE); |
@@ -465,7 +465,7 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, | |||
465 | 465 | ||
466 | static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, | 466 | static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, |
467 | size_t size, enum dma_data_direction direction, | 467 | size_t size, enum dma_data_direction direction, |
468 | struct dma_attrs *attrs) | 468 | unsigned long attrs) |
469 | { | 469 | { |
470 | BUG_ON(direction == DMA_NONE); | 470 | BUG_ON(direction == DMA_NONE); |
471 | 471 | ||
@@ -484,7 +484,7 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, | |||
484 | 484 | ||
485 | static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 485 | static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
486 | int nents, enum dma_data_direction direction, | 486 | int nents, enum dma_data_direction direction, |
487 | struct dma_attrs *attrs) | 487 | unsigned long attrs) |
488 | { | 488 | { |
489 | int i; | 489 | int i; |
490 | struct scatterlist *sg; | 490 | struct scatterlist *sg; |
@@ -503,7 +503,7 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
503 | 503 | ||
504 | static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 504 | static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
505 | int nents, enum dma_data_direction direction, | 505 | int nents, enum dma_data_direction direction, |
506 | struct dma_attrs *attrs) | 506 | unsigned long attrs) |
507 | { | 507 | { |
508 | int i; | 508 | int i; |
509 | struct scatterlist *sg; | 509 | struct scatterlist *sg; |
@@ -577,11 +577,11 @@ struct dma_map_ops pcxl_dma_ops = { | |||
577 | }; | 577 | }; |
578 | 578 | ||
579 | static void *pcx_dma_alloc(struct device *dev, size_t size, | 579 | static void *pcx_dma_alloc(struct device *dev, size_t size, |
580 | dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) | 580 | dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) |
581 | { | 581 | { |
582 | void *addr; | 582 | void *addr; |
583 | 583 | ||
584 | if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) | 584 | if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) |
585 | return NULL; | 585 | return NULL; |
586 | 586 | ||
587 | addr = (void *)__get_free_pages(flag, get_order(size)); | 587 | addr = (void *)__get_free_pages(flag, get_order(size)); |
@@ -592,7 +592,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size, | |||
592 | } | 592 | } |
593 | 593 | ||
594 | static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, | 594 | static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, |
595 | dma_addr_t iova, struct dma_attrs *attrs) | 595 | dma_addr_t iova, unsigned long attrs) |
596 | { | 596 | { |
597 | free_pages((unsigned long)vaddr, get_order(size)); | 597 | free_pages((unsigned long)vaddr, get_order(size)); |
598 | return; | 598 | return; |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 77816acd4fd9..84e3f8dd5e4f 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -13,7 +13,6 @@ | |||
13 | /* need struct page definitions */ | 13 | /* need struct page definitions */ |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/scatterlist.h> | 15 | #include <linux/scatterlist.h> |
16 | #include <linux/dma-attrs.h> | ||
17 | #include <linux/dma-debug.h> | 16 | #include <linux/dma-debug.h> |
18 | #include <asm/io.h> | 17 | #include <asm/io.h> |
19 | #include <asm/swiotlb.h> | 18 | #include <asm/swiotlb.h> |
@@ -25,14 +24,14 @@ | |||
25 | /* Some dma direct funcs must be visible for use in other dma_ops */ | 24 | /* Some dma direct funcs must be visible for use in other dma_ops */ |
26 | extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, | 25 | extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, |
27 | dma_addr_t *dma_handle, gfp_t flag, | 26 | dma_addr_t *dma_handle, gfp_t flag, |
28 | struct dma_attrs *attrs); | 27 | unsigned long attrs); |
29 | extern void __dma_direct_free_coherent(struct device *dev, size_t size, | 28 | extern void __dma_direct_free_coherent(struct device *dev, size_t size, |
30 | void *vaddr, dma_addr_t dma_handle, | 29 | void *vaddr, dma_addr_t dma_handle, |
31 | struct dma_attrs *attrs); | 30 | unsigned long attrs); |
32 | extern int dma_direct_mmap_coherent(struct device *dev, | 31 | extern int dma_direct_mmap_coherent(struct device *dev, |
33 | struct vm_area_struct *vma, | 32 | struct vm_area_struct *vma, |
34 | void *cpu_addr, dma_addr_t handle, | 33 | void *cpu_addr, dma_addr_t handle, |
35 | size_t size, struct dma_attrs *attrs); | 34 | size_t size, unsigned long attrs); |
36 | 35 | ||
37 | #ifdef CONFIG_NOT_COHERENT_CACHE | 36 | #ifdef CONFIG_NOT_COHERENT_CACHE |
38 | /* | 37 | /* |
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index f49a72a9062d..2c1d50792944 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h | |||
@@ -53,7 +53,7 @@ struct iommu_table_ops { | |||
53 | long index, long npages, | 53 | long index, long npages, |
54 | unsigned long uaddr, | 54 | unsigned long uaddr, |
55 | enum dma_data_direction direction, | 55 | enum dma_data_direction direction, |
56 | struct dma_attrs *attrs); | 56 | unsigned long attrs); |
57 | #ifdef CONFIG_IOMMU_API | 57 | #ifdef CONFIG_IOMMU_API |
58 | /* | 58 | /* |
59 | * Exchanges existing TCE with new TCE plus direction bits; | 59 | * Exchanges existing TCE with new TCE plus direction bits; |
@@ -248,12 +248,12 @@ extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, | |||
248 | struct scatterlist *sglist, int nelems, | 248 | struct scatterlist *sglist, int nelems, |
249 | unsigned long mask, | 249 | unsigned long mask, |
250 | enum dma_data_direction direction, | 250 | enum dma_data_direction direction, |
251 | struct dma_attrs *attrs); | 251 | unsigned long attrs); |
252 | extern void ppc_iommu_unmap_sg(struct iommu_table *tbl, | 252 | extern void ppc_iommu_unmap_sg(struct iommu_table *tbl, |
253 | struct scatterlist *sglist, | 253 | struct scatterlist *sglist, |
254 | int nelems, | 254 | int nelems, |
255 | enum dma_data_direction direction, | 255 | enum dma_data_direction direction, |
256 | struct dma_attrs *attrs); | 256 | unsigned long attrs); |
257 | 257 | ||
258 | extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, | 258 | extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, |
259 | size_t size, dma_addr_t *dma_handle, | 259 | size_t size, dma_addr_t *dma_handle, |
@@ -264,10 +264,10 @@ extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, | |||
264 | struct page *page, unsigned long offset, | 264 | struct page *page, unsigned long offset, |
265 | size_t size, unsigned long mask, | 265 | size_t size, unsigned long mask, |
266 | enum dma_data_direction direction, | 266 | enum dma_data_direction direction, |
267 | struct dma_attrs *attrs); | 267 | unsigned long attrs); |
268 | extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, | 268 | extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, |
269 | size_t size, enum dma_data_direction direction, | 269 | size_t size, enum dma_data_direction direction, |
270 | struct dma_attrs *attrs); | 270 | unsigned long attrs); |
271 | 271 | ||
272 | extern void iommu_init_early_pSeries(void); | 272 | extern void iommu_init_early_pSeries(void); |
273 | extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops); | 273 | extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops); |
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index 47e155f15433..9af103a23975 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | 15 | ||
16 | #include <asm/feature-fixups.h> | 16 | #include <asm/feature-fixups.h> |
17 | #include <asm/asm-compat.h> | ||
17 | 18 | ||
18 | #define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG) | 19 | #define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG) |
19 | #define JUMP_LABEL_NOP_SIZE 4 | 20 | #define JUMP_LABEL_NOP_SIZE 4 |
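
Note: the one-line powerpc change above makes asm/jump_label.h self-sufficient; the added include appears to be needed because JUMP_ENTRY_TYPE expands through stringify_in_c(), which lives in asm/asm-compat.h and was previously only pulled in indirectly. With the header standalone, static keys can be used from any translation unit; a minimal, illustrative usage sketch (names made up):

#include <linux/jump_label.h>
#include <linux/printk.h>

/* The key defaults to false, so the branch below is patched out until
 * something calls static_branch_enable().
 */
static DEFINE_STATIC_KEY_FALSE(example_key);

void example_hot_path(void)
{
	if (static_branch_unlikely(&example_key))
		pr_info("rarely-taken path\n");
}

void example_enable(void)
{
	static_branch_enable(&example_key);
}
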
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 41a7d9d49a5a..fb7cbaa37658 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, | 19 | static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, |
20 | dma_addr_t *dma_handle, gfp_t flag, | 20 | dma_addr_t *dma_handle, gfp_t flag, |
21 | struct dma_attrs *attrs) | 21 | unsigned long attrs) |
22 | { | 22 | { |
23 | return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, | 23 | return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, |
24 | dma_handle, dev->coherent_dma_mask, flag, | 24 | dma_handle, dev->coherent_dma_mask, flag, |
@@ -27,7 +27,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, | |||
27 | 27 | ||
28 | static void dma_iommu_free_coherent(struct device *dev, size_t size, | 28 | static void dma_iommu_free_coherent(struct device *dev, size_t size, |
29 | void *vaddr, dma_addr_t dma_handle, | 29 | void *vaddr, dma_addr_t dma_handle, |
30 | struct dma_attrs *attrs) | 30 | unsigned long attrs) |
31 | { | 31 | { |
32 | iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); | 32 | iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); |
33 | } | 33 | } |
@@ -40,7 +40,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size, | |||
40 | static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, | 40 | static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, |
41 | unsigned long offset, size_t size, | 41 | unsigned long offset, size_t size, |
42 | enum dma_data_direction direction, | 42 | enum dma_data_direction direction, |
43 | struct dma_attrs *attrs) | 43 | unsigned long attrs) |
44 | { | 44 | { |
45 | return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, | 45 | return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, |
46 | size, device_to_mask(dev), direction, attrs); | 46 | size, device_to_mask(dev), direction, attrs); |
@@ -49,7 +49,7 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, | |||
49 | 49 | ||
50 | static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, | 50 | static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, |
51 | size_t size, enum dma_data_direction direction, | 51 | size_t size, enum dma_data_direction direction, |
52 | struct dma_attrs *attrs) | 52 | unsigned long attrs) |
53 | { | 53 | { |
54 | iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, | 54 | iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, |
55 | attrs); | 55 | attrs); |
@@ -58,7 +58,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, | |||
58 | 58 | ||
59 | static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, | 59 | static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, |
60 | int nelems, enum dma_data_direction direction, | 60 | int nelems, enum dma_data_direction direction, |
61 | struct dma_attrs *attrs) | 61 | unsigned long attrs) |
62 | { | 62 | { |
63 | return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems, | 63 | return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems, |
64 | device_to_mask(dev), direction, attrs); | 64 | device_to_mask(dev), direction, attrs); |
@@ -66,7 +66,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, | |||
66 | 66 | ||
67 | static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, | 67 | static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, |
68 | int nelems, enum dma_data_direction direction, | 68 | int nelems, enum dma_data_direction direction, |
69 | struct dma_attrs *attrs) | 69 | unsigned long attrs) |
70 | { | 70 | { |
71 | ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, | 71 | ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, |
72 | direction, attrs); | 72 | direction, attrs); |
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 3f1472a78f39..e64a6016fba7 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
@@ -64,7 +64,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask) | |||
64 | 64 | ||
65 | void *__dma_direct_alloc_coherent(struct device *dev, size_t size, | 65 | void *__dma_direct_alloc_coherent(struct device *dev, size_t size, |
66 | dma_addr_t *dma_handle, gfp_t flag, | 66 | dma_addr_t *dma_handle, gfp_t flag, |
67 | struct dma_attrs *attrs) | 67 | unsigned long attrs) |
68 | { | 68 | { |
69 | void *ret; | 69 | void *ret; |
70 | #ifdef CONFIG_NOT_COHERENT_CACHE | 70 | #ifdef CONFIG_NOT_COHERENT_CACHE |
@@ -121,7 +121,7 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size, | |||
121 | 121 | ||
122 | void __dma_direct_free_coherent(struct device *dev, size_t size, | 122 | void __dma_direct_free_coherent(struct device *dev, size_t size, |
123 | void *vaddr, dma_addr_t dma_handle, | 123 | void *vaddr, dma_addr_t dma_handle, |
124 | struct dma_attrs *attrs) | 124 | unsigned long attrs) |
125 | { | 125 | { |
126 | #ifdef CONFIG_NOT_COHERENT_CACHE | 126 | #ifdef CONFIG_NOT_COHERENT_CACHE |
127 | __dma_free_coherent(size, vaddr); | 127 | __dma_free_coherent(size, vaddr); |
@@ -132,7 +132,7 @@ void __dma_direct_free_coherent(struct device *dev, size_t size, | |||
132 | 132 | ||
133 | static void *dma_direct_alloc_coherent(struct device *dev, size_t size, | 133 | static void *dma_direct_alloc_coherent(struct device *dev, size_t size, |
134 | dma_addr_t *dma_handle, gfp_t flag, | 134 | dma_addr_t *dma_handle, gfp_t flag, |
135 | struct dma_attrs *attrs) | 135 | unsigned long attrs) |
136 | { | 136 | { |
137 | struct iommu_table *iommu; | 137 | struct iommu_table *iommu; |
138 | 138 | ||
@@ -156,7 +156,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size, | |||
156 | 156 | ||
157 | static void dma_direct_free_coherent(struct device *dev, size_t size, | 157 | static void dma_direct_free_coherent(struct device *dev, size_t size, |
158 | void *vaddr, dma_addr_t dma_handle, | 158 | void *vaddr, dma_addr_t dma_handle, |
159 | struct dma_attrs *attrs) | 159 | unsigned long attrs) |
160 | { | 160 | { |
161 | struct iommu_table *iommu; | 161 | struct iommu_table *iommu; |
162 | 162 | ||
@@ -177,7 +177,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size, | |||
177 | 177 | ||
178 | int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | 178 | int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, |
179 | void *cpu_addr, dma_addr_t handle, size_t size, | 179 | void *cpu_addr, dma_addr_t handle, size_t size, |
180 | struct dma_attrs *attrs) | 180 | unsigned long attrs) |
181 | { | 181 | { |
182 | unsigned long pfn; | 182 | unsigned long pfn; |
183 | 183 | ||
@@ -195,7 +195,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | |||
195 | 195 | ||
196 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | 196 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, |
197 | int nents, enum dma_data_direction direction, | 197 | int nents, enum dma_data_direction direction, |
198 | struct dma_attrs *attrs) | 198 | unsigned long attrs) |
199 | { | 199 | { |
200 | struct scatterlist *sg; | 200 | struct scatterlist *sg; |
201 | int i; | 201 | int i; |
@@ -211,7 +211,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | |||
211 | 211 | ||
212 | static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, | 212 | static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, |
213 | int nents, enum dma_data_direction direction, | 213 | int nents, enum dma_data_direction direction, |
214 | struct dma_attrs *attrs) | 214 | unsigned long attrs) |
215 | { | 215 | { |
216 | } | 216 | } |
217 | 217 | ||
@@ -232,7 +232,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, | |||
232 | unsigned long offset, | 232 | unsigned long offset, |
233 | size_t size, | 233 | size_t size, |
234 | enum dma_data_direction dir, | 234 | enum dma_data_direction dir, |
235 | struct dma_attrs *attrs) | 235 | unsigned long attrs) |
236 | { | 236 | { |
237 | BUG_ON(dir == DMA_NONE); | 237 | BUG_ON(dir == DMA_NONE); |
238 | __dma_sync_page(page, offset, size, dir); | 238 | __dma_sync_page(page, offset, size, dir); |
@@ -243,7 +243,7 @@ static inline void dma_direct_unmap_page(struct device *dev, | |||
243 | dma_addr_t dma_address, | 243 | dma_addr_t dma_address, |
244 | size_t size, | 244 | size_t size, |
245 | enum dma_data_direction direction, | 245 | enum dma_data_direction direction, |
246 | struct dma_attrs *attrs) | 246 | unsigned long attrs) |
247 | { | 247 | { |
248 | } | 248 | } |
249 | 249 | ||
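
Note: for drivers, the visible effect of the powerpc (and other arch) conversions above is confined to the _attrs entry points: the trailing argument is an unsigned long that ORs DMA_ATTR_* flags together, with 0 meaning "no attributes". A caller-side sketch (buffer and function names illustrative):

#include <linux/dma-mapping.h>

/* Attributes are now OR-ed flags, not a struct dma_attrs that had to be
 * declared and initialised separately by the caller.
 */
static dma_addr_t example_map(struct device *dev, void *buf, size_t len)
{
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				    DMA_ATTR_WEAK_ORDERING);
}
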
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index a89f4f7a66bd..c1ca9282f4a0 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
@@ -65,7 +65,7 @@ static void *ibmebus_alloc_coherent(struct device *dev, | |||
65 | size_t size, | 65 | size_t size, |
66 | dma_addr_t *dma_handle, | 66 | dma_addr_t *dma_handle, |
67 | gfp_t flag, | 67 | gfp_t flag, |
68 | struct dma_attrs *attrs) | 68 | unsigned long attrs) |
69 | { | 69 | { |
70 | void *mem; | 70 | void *mem; |
71 | 71 | ||
@@ -78,7 +78,7 @@ static void *ibmebus_alloc_coherent(struct device *dev, | |||
78 | static void ibmebus_free_coherent(struct device *dev, | 78 | static void ibmebus_free_coherent(struct device *dev, |
79 | size_t size, void *vaddr, | 79 | size_t size, void *vaddr, |
80 | dma_addr_t dma_handle, | 80 | dma_addr_t dma_handle, |
81 | struct dma_attrs *attrs) | 81 | unsigned long attrs) |
82 | { | 82 | { |
83 | kfree(vaddr); | 83 | kfree(vaddr); |
84 | } | 84 | } |
@@ -88,7 +88,7 @@ static dma_addr_t ibmebus_map_page(struct device *dev, | |||
88 | unsigned long offset, | 88 | unsigned long offset, |
89 | size_t size, | 89 | size_t size, |
90 | enum dma_data_direction direction, | 90 | enum dma_data_direction direction, |
91 | struct dma_attrs *attrs) | 91 | unsigned long attrs) |
92 | { | 92 | { |
93 | return (dma_addr_t)(page_address(page) + offset); | 93 | return (dma_addr_t)(page_address(page) + offset); |
94 | } | 94 | } |
@@ -97,7 +97,7 @@ static void ibmebus_unmap_page(struct device *dev, | |||
97 | dma_addr_t dma_addr, | 97 | dma_addr_t dma_addr, |
98 | size_t size, | 98 | size_t size, |
99 | enum dma_data_direction direction, | 99 | enum dma_data_direction direction, |
100 | struct dma_attrs *attrs) | 100 | unsigned long attrs) |
101 | { | 101 | { |
102 | return; | 102 | return; |
103 | } | 103 | } |
@@ -105,7 +105,7 @@ static void ibmebus_unmap_page(struct device *dev, | |||
105 | static int ibmebus_map_sg(struct device *dev, | 105 | static int ibmebus_map_sg(struct device *dev, |
106 | struct scatterlist *sgl, | 106 | struct scatterlist *sgl, |
107 | int nents, enum dma_data_direction direction, | 107 | int nents, enum dma_data_direction direction, |
108 | struct dma_attrs *attrs) | 108 | unsigned long attrs) |
109 | { | 109 | { |
110 | struct scatterlist *sg; | 110 | struct scatterlist *sg; |
111 | int i; | 111 | int i; |
@@ -121,7 +121,7 @@ static int ibmebus_map_sg(struct device *dev, | |||
121 | static void ibmebus_unmap_sg(struct device *dev, | 121 | static void ibmebus_unmap_sg(struct device *dev, |
122 | struct scatterlist *sg, | 122 | struct scatterlist *sg, |
123 | int nents, enum dma_data_direction direction, | 123 | int nents, enum dma_data_direction direction, |
124 | struct dma_attrs *attrs) | 124 | unsigned long attrs) |
125 | { | 125 | { |
126 | return; | 126 | return; |
127 | } | 127 | } |
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index a8e3490b54e3..37d6e741be82 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -307,7 +307,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, | |||
307 | void *page, unsigned int npages, | 307 | void *page, unsigned int npages, |
308 | enum dma_data_direction direction, | 308 | enum dma_data_direction direction, |
309 | unsigned long mask, unsigned int align_order, | 309 | unsigned long mask, unsigned int align_order, |
310 | struct dma_attrs *attrs) | 310 | unsigned long attrs) |
311 | { | 311 | { |
312 | unsigned long entry; | 312 | unsigned long entry; |
313 | dma_addr_t ret = DMA_ERROR_CODE; | 313 | dma_addr_t ret = DMA_ERROR_CODE; |
@@ -431,7 +431,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, | |||
431 | int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, | 431 | int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, |
432 | struct scatterlist *sglist, int nelems, | 432 | struct scatterlist *sglist, int nelems, |
433 | unsigned long mask, enum dma_data_direction direction, | 433 | unsigned long mask, enum dma_data_direction direction, |
434 | struct dma_attrs *attrs) | 434 | unsigned long attrs) |
435 | { | 435 | { |
436 | dma_addr_t dma_next = 0, dma_addr; | 436 | dma_addr_t dma_next = 0, dma_addr; |
437 | struct scatterlist *s, *outs, *segstart; | 437 | struct scatterlist *s, *outs, *segstart; |
@@ -574,7 +574,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, | |||
574 | 574 | ||
575 | void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, | 575 | void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, |
576 | int nelems, enum dma_data_direction direction, | 576 | int nelems, enum dma_data_direction direction, |
577 | struct dma_attrs *attrs) | 577 | unsigned long attrs) |
578 | { | 578 | { |
579 | struct scatterlist *sg; | 579 | struct scatterlist *sg; |
580 | 580 | ||
@@ -753,7 +753,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) | |||
753 | dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, | 753 | dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, |
754 | struct page *page, unsigned long offset, size_t size, | 754 | struct page *page, unsigned long offset, size_t size, |
755 | unsigned long mask, enum dma_data_direction direction, | 755 | unsigned long mask, enum dma_data_direction direction, |
756 | struct dma_attrs *attrs) | 756 | unsigned long attrs) |
757 | { | 757 | { |
758 | dma_addr_t dma_handle = DMA_ERROR_CODE; | 758 | dma_addr_t dma_handle = DMA_ERROR_CODE; |
759 | void *vaddr; | 759 | void *vaddr; |
@@ -790,7 +790,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, | |||
790 | 790 | ||
791 | void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, | 791 | void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, |
792 | size_t size, enum dma_data_direction direction, | 792 | size_t size, enum dma_data_direction direction, |
793 | struct dma_attrs *attrs) | 793 | unsigned long attrs) |
794 | { | 794 | { |
795 | unsigned int npages; | 795 | unsigned int npages; |
796 | 796 | ||
@@ -845,7 +845,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, | |||
845 | nio_pages = size >> tbl->it_page_shift; | 845 | nio_pages = size >> tbl->it_page_shift; |
846 | io_order = get_iommu_order(size, tbl); | 846 | io_order = get_iommu_order(size, tbl); |
847 | mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, | 847 | mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, |
848 | mask >> tbl->it_page_shift, io_order, NULL); | 848 | mask >> tbl->it_page_shift, io_order, 0); |
849 | if (mapping == DMA_ERROR_CODE) { | 849 | if (mapping == DMA_ERROR_CODE) { |
850 | free_pages((unsigned long)ret, order); | 850 | free_pages((unsigned long)ret, order); |
851 | return NULL; | 851 | return NULL; |
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 8d7358f3a273..b3813ddb2fb4 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -482,7 +482,7 @@ static void vio_cmo_balance(struct work_struct *work) | |||
482 | 482 | ||
483 | static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, | 483 | static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, |
484 | dma_addr_t *dma_handle, gfp_t flag, | 484 | dma_addr_t *dma_handle, gfp_t flag, |
485 | struct dma_attrs *attrs) | 485 | unsigned long attrs) |
486 | { | 486 | { |
487 | struct vio_dev *viodev = to_vio_dev(dev); | 487 | struct vio_dev *viodev = to_vio_dev(dev); |
488 | void *ret; | 488 | void *ret; |
@@ -503,7 +503,7 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, | |||
503 | 503 | ||
504 | static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, | 504 | static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, |
505 | void *vaddr, dma_addr_t dma_handle, | 505 | void *vaddr, dma_addr_t dma_handle, |
506 | struct dma_attrs *attrs) | 506 | unsigned long attrs) |
507 | { | 507 | { |
508 | struct vio_dev *viodev = to_vio_dev(dev); | 508 | struct vio_dev *viodev = to_vio_dev(dev); |
509 | 509 | ||
@@ -515,7 +515,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, | |||
515 | static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, | 515 | static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, |
516 | unsigned long offset, size_t size, | 516 | unsigned long offset, size_t size, |
517 | enum dma_data_direction direction, | 517 | enum dma_data_direction direction, |
518 | struct dma_attrs *attrs) | 518 | unsigned long attrs) |
519 | { | 519 | { |
520 | struct vio_dev *viodev = to_vio_dev(dev); | 520 | struct vio_dev *viodev = to_vio_dev(dev); |
521 | struct iommu_table *tbl; | 521 | struct iommu_table *tbl; |
@@ -539,7 +539,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, | |||
539 | static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, | 539 | static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, |
540 | size_t size, | 540 | size_t size, |
541 | enum dma_data_direction direction, | 541 | enum dma_data_direction direction, |
542 | struct dma_attrs *attrs) | 542 | unsigned long attrs) |
543 | { | 543 | { |
544 | struct vio_dev *viodev = to_vio_dev(dev); | 544 | struct vio_dev *viodev = to_vio_dev(dev); |
545 | struct iommu_table *tbl; | 545 | struct iommu_table *tbl; |
@@ -552,7 +552,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, | |||
552 | 552 | ||
553 | static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, | 553 | static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, |
554 | int nelems, enum dma_data_direction direction, | 554 | int nelems, enum dma_data_direction direction, |
555 | struct dma_attrs *attrs) | 555 | unsigned long attrs) |
556 | { | 556 | { |
557 | struct vio_dev *viodev = to_vio_dev(dev); | 557 | struct vio_dev *viodev = to_vio_dev(dev); |
558 | struct iommu_table *tbl; | 558 | struct iommu_table *tbl; |
@@ -588,7 +588,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, | |||
588 | static void vio_dma_iommu_unmap_sg(struct device *dev, | 588 | static void vio_dma_iommu_unmap_sg(struct device *dev, |
589 | struct scatterlist *sglist, int nelems, | 589 | struct scatterlist *sglist, int nelems, |
590 | enum dma_data_direction direction, | 590 | enum dma_data_direction direction, |
591 | struct dma_attrs *attrs) | 591 | unsigned long attrs) |
592 | { | 592 | { |
593 | struct vio_dev *viodev = to_vio_dev(dev); | 593 | struct vio_dev *viodev = to_vio_dev(dev); |
594 | struct iommu_table *tbl; | 594 | struct iommu_table *tbl; |
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 9027d7c48507..f7d1a4953ea0 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c | |||
@@ -166,7 +166,7 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, | |||
166 | 166 | ||
167 | static int tce_build_cell(struct iommu_table *tbl, long index, long npages, | 167 | static int tce_build_cell(struct iommu_table *tbl, long index, long npages, |
168 | unsigned long uaddr, enum dma_data_direction direction, | 168 | unsigned long uaddr, enum dma_data_direction direction, |
169 | struct dma_attrs *attrs) | 169 | unsigned long attrs) |
170 | { | 170 | { |
171 | int i; | 171 | int i; |
172 | unsigned long *io_pte, base_pte; | 172 | unsigned long *io_pte, base_pte; |
@@ -193,7 +193,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages, | |||
193 | base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | | 193 | base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | |
194 | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); | 194 | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); |
195 | #endif | 195 | #endif |
196 | if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))) | 196 | if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING)) |
197 | base_pte &= ~CBE_IOPTE_SO_RW; | 197 | base_pte &= ~CBE_IOPTE_SO_RW; |
198 | 198 | ||
199 | io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); | 199 | io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); |
@@ -526,7 +526,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, | |||
526 | 526 | ||
527 | __set_bit(0, window->table.it_map); | 527 | __set_bit(0, window->table.it_map); |
528 | tce_build_cell(&window->table, window->table.it_offset, 1, | 528 | tce_build_cell(&window->table, window->table.it_offset, 1, |
529 | (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL); | 529 | (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); |
530 | 530 | ||
531 | return window; | 531 | return window; |
532 | } | 532 | } |
@@ -572,7 +572,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev) | |||
572 | 572 | ||
573 | static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, | 573 | static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, |
574 | dma_addr_t *dma_handle, gfp_t flag, | 574 | dma_addr_t *dma_handle, gfp_t flag, |
575 | struct dma_attrs *attrs) | 575 | unsigned long attrs) |
576 | { | 576 | { |
577 | if (iommu_fixed_is_weak) | 577 | if (iommu_fixed_is_weak) |
578 | return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), | 578 | return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), |
@@ -586,7 +586,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, | |||
586 | 586 | ||
587 | static void dma_fixed_free_coherent(struct device *dev, size_t size, | 587 | static void dma_fixed_free_coherent(struct device *dev, size_t size, |
588 | void *vaddr, dma_addr_t dma_handle, | 588 | void *vaddr, dma_addr_t dma_handle, |
589 | struct dma_attrs *attrs) | 589 | unsigned long attrs) |
590 | { | 590 | { |
591 | if (iommu_fixed_is_weak) | 591 | if (iommu_fixed_is_weak) |
592 | iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, | 592 | iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, |
@@ -598,9 +598,9 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size, | |||
598 | static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, | 598 | static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, |
599 | unsigned long offset, size_t size, | 599 | unsigned long offset, size_t size, |
600 | enum dma_data_direction direction, | 600 | enum dma_data_direction direction, |
601 | struct dma_attrs *attrs) | 601 | unsigned long attrs) |
602 | { | 602 | { |
603 | if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) | 603 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) |
604 | return dma_direct_ops.map_page(dev, page, offset, size, | 604 | return dma_direct_ops.map_page(dev, page, offset, size, |
605 | direction, attrs); | 605 | direction, attrs); |
606 | else | 606 | else |
@@ -611,9 +611,9 @@ static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, | |||
611 | 611 | ||
612 | static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, | 612 | static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, |
613 | size_t size, enum dma_data_direction direction, | 613 | size_t size, enum dma_data_direction direction, |
614 | struct dma_attrs *attrs) | 614 | unsigned long attrs) |
615 | { | 615 | { |
616 | if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) | 616 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) |
617 | dma_direct_ops.unmap_page(dev, dma_addr, size, direction, | 617 | dma_direct_ops.unmap_page(dev, dma_addr, size, direction, |
618 | attrs); | 618 | attrs); |
619 | else | 619 | else |
@@ -623,9 +623,9 @@ static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
623 | 623 | ||
624 | static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, | 624 | static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, |
625 | int nents, enum dma_data_direction direction, | 625 | int nents, enum dma_data_direction direction, |
626 | struct dma_attrs *attrs) | 626 | unsigned long attrs) |
627 | { | 627 | { |
628 | if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) | 628 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) |
629 | return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs); | 629 | return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs); |
630 | else | 630 | else |
631 | return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg, | 631 | return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg, |
@@ -635,9 +635,9 @@ static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, | |||
635 | 635 | ||
636 | static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, | 636 | static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, |
637 | int nents, enum dma_data_direction direction, | 637 | int nents, enum dma_data_direction direction, |
638 | struct dma_attrs *attrs) | 638 | unsigned long attrs) |
639 | { | 639 | { |
640 | if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) | 640 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) |
641 | dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs); | 641 | dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs); |
642 | else | 642 | else |
643 | ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, | 643 | ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, |
@@ -1162,7 +1162,7 @@ static int __init setup_iommu_fixed(char *str) | |||
1162 | pciep = of_find_node_by_type(NULL, "pcie-endpoint"); | 1162 | pciep = of_find_node_by_type(NULL, "pcie-endpoint"); |
1163 | 1163 | ||
1164 | if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) | 1164 | if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) |
1165 | iommu_fixed_is_weak = 1; | 1165 | iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING; |
1166 | 1166 | ||
1167 | of_node_put(pciep); | 1167 | of_node_put(pciep); |
1168 | 1168 | ||
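Note: the cell fixed-mapping hunks above show the pattern used throughout this series for boolean attribute tests: dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs) becomes a plain bitmask test, and iommu_fixed_is_weak is now seeded with DMA_ATTR_WEAK_ORDERING itself rather than 1, so the equality comparison against the masked value still holds in both the set and the clear case. A minimal sketch of the equivalent check, with a hypothetical helper name:

#include <linux/dma-mapping.h>

/* Hypothetical helper: true when the caller asked for weak ordering.
 * Under the reworked API this is just a bit test on the attrs word. */
static inline bool wants_weak_ordering(unsigned long attrs)
{
	return attrs & DMA_ATTR_WEAK_ORDERING;
}

Because DMA_ATTR_WEAK_ORDERING is a single bit, iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING) is true exactly when the kernel-wide setting and the per-mapping request agree.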
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 43dd3fb514e0..309d9ccccd50 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c | |||
@@ -88,7 +88,7 @@ static int iommu_table_iobmap_inited; | |||
88 | static int iobmap_build(struct iommu_table *tbl, long index, | 88 | static int iobmap_build(struct iommu_table *tbl, long index, |
89 | long npages, unsigned long uaddr, | 89 | long npages, unsigned long uaddr, |
90 | enum dma_data_direction direction, | 90 | enum dma_data_direction direction, |
91 | struct dma_attrs *attrs) | 91 | unsigned long attrs) |
92 | { | 92 | { |
93 | u32 *ip; | 93 | u32 *ip; |
94 | u32 rpn; | 94 | u32 rpn; |
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 4383a5ff82ba..00e1a0195c78 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c | |||
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(pnv_pci_get_npu_dev); | |||
73 | 73 | ||
74 | static void *dma_npu_alloc(struct device *dev, size_t size, | 74 | static void *dma_npu_alloc(struct device *dev, size_t size, |
75 | dma_addr_t *dma_handle, gfp_t flag, | 75 | dma_addr_t *dma_handle, gfp_t flag, |
76 | struct dma_attrs *attrs) | 76 | unsigned long attrs) |
77 | { | 77 | { |
78 | NPU_DMA_OP_UNSUPPORTED(); | 78 | NPU_DMA_OP_UNSUPPORTED(); |
79 | return NULL; | 79 | return NULL; |
@@ -81,7 +81,7 @@ static void *dma_npu_alloc(struct device *dev, size_t size, | |||
81 | 81 | ||
82 | static void dma_npu_free(struct device *dev, size_t size, | 82 | static void dma_npu_free(struct device *dev, size_t size, |
83 | void *vaddr, dma_addr_t dma_handle, | 83 | void *vaddr, dma_addr_t dma_handle, |
84 | struct dma_attrs *attrs) | 84 | unsigned long attrs) |
85 | { | 85 | { |
86 | NPU_DMA_OP_UNSUPPORTED(); | 86 | NPU_DMA_OP_UNSUPPORTED(); |
87 | } | 87 | } |
@@ -89,7 +89,7 @@ static void dma_npu_free(struct device *dev, size_t size, | |||
89 | static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, | 89 | static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, |
90 | unsigned long offset, size_t size, | 90 | unsigned long offset, size_t size, |
91 | enum dma_data_direction direction, | 91 | enum dma_data_direction direction, |
92 | struct dma_attrs *attrs) | 92 | unsigned long attrs) |
93 | { | 93 | { |
94 | NPU_DMA_OP_UNSUPPORTED(); | 94 | NPU_DMA_OP_UNSUPPORTED(); |
95 | return 0; | 95 | return 0; |
@@ -97,7 +97,7 @@ static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, | |||
97 | 97 | ||
98 | static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist, | 98 | static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist, |
99 | int nelems, enum dma_data_direction direction, | 99 | int nelems, enum dma_data_direction direction, |
100 | struct dma_attrs *attrs) | 100 | unsigned long attrs) |
101 | { | 101 | { |
102 | NPU_DMA_OP_UNSUPPORTED(); | 102 | NPU_DMA_OP_UNSUPPORTED(); |
103 | return 0; | 103 | return 0; |
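Note: the npu stubs above give the full shape of the reworked callbacks: every struct dma_map_ops hook that used to take a struct dma_attrs * now ends in unsigned long attrs. A sketch of a do-nothing ops table under the new signatures; the function and table names are hypothetical, while the member names are the real dma_map_ops fields:

#include <linux/dma-mapping.h>

static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp,
			   unsigned long attrs)
{
	return NULL;		/* allocation elided in this sketch */
}

static void example_free(struct device *dev, size_t size, void *vaddr,
			 dma_addr_t dma_handle, unsigned long attrs)
{
}

static struct dma_map_ops example_dma_ops = {
	.alloc	= example_alloc,
	.free	= example_free,
};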
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 891fc4a453df..6b9528307f62 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -1806,7 +1806,7 @@ static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, | |||
1806 | static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, | 1806 | static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, |
1807 | long npages, unsigned long uaddr, | 1807 | long npages, unsigned long uaddr, |
1808 | enum dma_data_direction direction, | 1808 | enum dma_data_direction direction, |
1809 | struct dma_attrs *attrs) | 1809 | unsigned long attrs) |
1810 | { | 1810 | { |
1811 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, | 1811 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, |
1812 | attrs); | 1812 | attrs); |
@@ -1950,7 +1950,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, | |||
1950 | static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, | 1950 | static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, |
1951 | long npages, unsigned long uaddr, | 1951 | long npages, unsigned long uaddr, |
1952 | enum dma_data_direction direction, | 1952 | enum dma_data_direction direction, |
1953 | struct dma_attrs *attrs) | 1953 | unsigned long attrs) |
1954 | { | 1954 | { |
1955 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, | 1955 | int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, |
1956 | attrs); | 1956 | attrs); |
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 6701dd5ded20..a21d831c1114 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -704,7 +704,7 @@ static __be64 *pnv_tce(struct iommu_table *tbl, long idx) | |||
704 | 704 | ||
705 | int pnv_tce_build(struct iommu_table *tbl, long index, long npages, | 705 | int pnv_tce_build(struct iommu_table *tbl, long index, long npages, |
706 | unsigned long uaddr, enum dma_data_direction direction, | 706 | unsigned long uaddr, enum dma_data_direction direction, |
707 | struct dma_attrs *attrs) | 707 | unsigned long attrs) |
708 | { | 708 | { |
709 | u64 proto_tce = iommu_direction_to_tce_perm(direction); | 709 | u64 proto_tce = iommu_direction_to_tce_perm(direction); |
710 | u64 rpn = __pa(uaddr) >> tbl->it_page_shift; | 710 | u64 rpn = __pa(uaddr) >> tbl->it_page_shift; |
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index d088d4f06116..e64df7894d6e 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -181,7 +181,7 @@ struct pnv_phb { | |||
181 | extern struct pci_ops pnv_pci_ops; | 181 | extern struct pci_ops pnv_pci_ops; |
182 | extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, | 182 | extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, |
183 | unsigned long uaddr, enum dma_data_direction direction, | 183 | unsigned long uaddr, enum dma_data_direction direction, |
184 | struct dma_attrs *attrs); | 184 | unsigned long attrs); |
185 | extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages); | 185 | extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages); |
186 | extern int pnv_tce_xchg(struct iommu_table *tbl, long index, | 186 | extern int pnv_tce_xchg(struct iommu_table *tbl, long index, |
187 | unsigned long *hpa, enum dma_data_direction *direction); | 187 | unsigned long *hpa, enum dma_data_direction *direction); |
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 5606fe36faf2..8af1c15aef85 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c | |||
@@ -516,7 +516,7 @@ core_initcall(ps3_system_bus_init); | |||
516 | */ | 516 | */ |
517 | static void * ps3_alloc_coherent(struct device *_dev, size_t size, | 517 | static void * ps3_alloc_coherent(struct device *_dev, size_t size, |
518 | dma_addr_t *dma_handle, gfp_t flag, | 518 | dma_addr_t *dma_handle, gfp_t flag, |
519 | struct dma_attrs *attrs) | 519 | unsigned long attrs) |
520 | { | 520 | { |
521 | int result; | 521 | int result; |
522 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 522 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
@@ -553,7 +553,7 @@ clean_none: | |||
553 | } | 553 | } |
554 | 554 | ||
555 | static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, | 555 | static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, |
556 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 556 | dma_addr_t dma_handle, unsigned long attrs) |
557 | { | 557 | { |
558 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 558 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
559 | 559 | ||
@@ -569,7 +569,7 @@ static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, | |||
569 | 569 | ||
570 | static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, | 570 | static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, |
571 | unsigned long offset, size_t size, enum dma_data_direction direction, | 571 | unsigned long offset, size_t size, enum dma_data_direction direction, |
572 | struct dma_attrs *attrs) | 572 | unsigned long attrs) |
573 | { | 573 | { |
574 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 574 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
575 | int result; | 575 | int result; |
@@ -592,7 +592,7 @@ static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, | |||
592 | static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, | 592 | static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, |
593 | unsigned long offset, size_t size, | 593 | unsigned long offset, size_t size, |
594 | enum dma_data_direction direction, | 594 | enum dma_data_direction direction, |
595 | struct dma_attrs *attrs) | 595 | unsigned long attrs) |
596 | { | 596 | { |
597 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 597 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
598 | int result; | 598 | int result; |
@@ -626,7 +626,7 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, | |||
626 | } | 626 | } |
627 | 627 | ||
628 | static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, | 628 | static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, |
629 | size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) | 629 | size_t size, enum dma_data_direction direction, unsigned long attrs) |
630 | { | 630 | { |
631 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | 631 | struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); |
632 | int result; | 632 | int result; |
@@ -640,7 +640,7 @@ static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, | |||
640 | } | 640 | } |
641 | 641 | ||
642 | static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, | 642 | static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, |
643 | int nents, enum dma_data_direction direction, struct dma_attrs *attrs) | 643 | int nents, enum dma_data_direction direction, unsigned long attrs) |
644 | { | 644 | { |
645 | #if defined(CONFIG_PS3_DYNAMIC_DMA) | 645 | #if defined(CONFIG_PS3_DYNAMIC_DMA) |
646 | BUG_ON("do"); | 646 | BUG_ON("do"); |
@@ -670,14 +670,14 @@ static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, | |||
670 | static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg, | 670 | static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg, |
671 | int nents, | 671 | int nents, |
672 | enum dma_data_direction direction, | 672 | enum dma_data_direction direction, |
673 | struct dma_attrs *attrs) | 673 | unsigned long attrs) |
674 | { | 674 | { |
675 | BUG(); | 675 | BUG(); |
676 | return 0; | 676 | return 0; |
677 | } | 677 | } |
678 | 678 | ||
679 | static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, | 679 | static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, |
680 | int nents, enum dma_data_direction direction, struct dma_attrs *attrs) | 680 | int nents, enum dma_data_direction direction, unsigned long attrs) |
681 | { | 681 | { |
682 | #if defined(CONFIG_PS3_DYNAMIC_DMA) | 682 | #if defined(CONFIG_PS3_DYNAMIC_DMA) |
683 | BUG_ON("do"); | 683 | BUG_ON("do"); |
@@ -686,7 +686,7 @@ static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, | |||
686 | 686 | ||
687 | static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg, | 687 | static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg, |
688 | int nents, enum dma_data_direction direction, | 688 | int nents, enum dma_data_direction direction, |
689 | struct dma_attrs *attrs) | 689 | unsigned long attrs) |
690 | { | 690 | { |
691 | BUG(); | 691 | BUG(); |
692 | } | 692 | } |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 770a753b52c9..0024e451bb36 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -123,7 +123,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group, | |||
123 | static int tce_build_pSeries(struct iommu_table *tbl, long index, | 123 | static int tce_build_pSeries(struct iommu_table *tbl, long index, |
124 | long npages, unsigned long uaddr, | 124 | long npages, unsigned long uaddr, |
125 | enum dma_data_direction direction, | 125 | enum dma_data_direction direction, |
126 | struct dma_attrs *attrs) | 126 | unsigned long attrs) |
127 | { | 127 | { |
128 | u64 proto_tce; | 128 | u64 proto_tce; |
129 | __be64 *tcep, *tces; | 129 | __be64 *tcep, *tces; |
@@ -173,7 +173,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); | |||
173 | static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, | 173 | static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, |
174 | long npages, unsigned long uaddr, | 174 | long npages, unsigned long uaddr, |
175 | enum dma_data_direction direction, | 175 | enum dma_data_direction direction, |
176 | struct dma_attrs *attrs) | 176 | unsigned long attrs) |
177 | { | 177 | { |
178 | u64 rc = 0; | 178 | u64 rc = 0; |
179 | u64 proto_tce, tce; | 179 | u64 proto_tce, tce; |
@@ -216,7 +216,7 @@ static DEFINE_PER_CPU(__be64 *, tce_page); | |||
216 | static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | 216 | static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, |
217 | long npages, unsigned long uaddr, | 217 | long npages, unsigned long uaddr, |
218 | enum dma_data_direction direction, | 218 | enum dma_data_direction direction, |
219 | struct dma_attrs *attrs) | 219 | unsigned long attrs) |
220 | { | 220 | { |
221 | u64 rc = 0; | 221 | u64 rc = 0; |
222 | u64 proto_tce; | 222 | u64 proto_tce; |
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 26904f4879ec..3573d54b2770 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c | |||
@@ -185,7 +185,7 @@ static void dart_flush(struct iommu_table *tbl) | |||
185 | static int dart_build(struct iommu_table *tbl, long index, | 185 | static int dart_build(struct iommu_table *tbl, long index, |
186 | long npages, unsigned long uaddr, | 186 | long npages, unsigned long uaddr, |
187 | enum dma_data_direction direction, | 187 | enum dma_data_direction direction, |
188 | struct dma_attrs *attrs) | 188 | unsigned long attrs) |
189 | { | 189 | { |
190 | unsigned int *dp, *orig_dp; | 190 | unsigned int *dp, *orig_dp; |
191 | unsigned int rpn; | 191 | unsigned int rpn; |
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 3249b7464889..ffaba07f50ab 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/scatterlist.h> | 7 | #include <linux/scatterlist.h> |
8 | #include <linux/dma-attrs.h> | ||
9 | #include <linux/dma-debug.h> | 8 | #include <linux/dma-debug.h> |
10 | #include <linux/io.h> | 9 | #include <linux/io.h> |
11 | 10 | ||
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 070f1ae5cfad..7297fce9bf80 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c | |||
@@ -286,7 +286,7 @@ static inline void zpci_err_dma(unsigned long rc, unsigned long addr) | |||
286 | static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, | 286 | static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, |
287 | unsigned long offset, size_t size, | 287 | unsigned long offset, size_t size, |
288 | enum dma_data_direction direction, | 288 | enum dma_data_direction direction, |
289 | struct dma_attrs *attrs) | 289 | unsigned long attrs) |
290 | { | 290 | { |
291 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); | 291 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
292 | unsigned long nr_pages, iommu_page_index; | 292 | unsigned long nr_pages, iommu_page_index; |
@@ -332,7 +332,7 @@ out_err: | |||
332 | 332 | ||
333 | static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, | 333 | static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, |
334 | size_t size, enum dma_data_direction direction, | 334 | size_t size, enum dma_data_direction direction, |
335 | struct dma_attrs *attrs) | 335 | unsigned long attrs) |
336 | { | 336 | { |
337 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); | 337 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
338 | unsigned long iommu_page_index; | 338 | unsigned long iommu_page_index; |
@@ -355,7 +355,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, | |||
355 | 355 | ||
356 | static void *s390_dma_alloc(struct device *dev, size_t size, | 356 | static void *s390_dma_alloc(struct device *dev, size_t size, |
357 | dma_addr_t *dma_handle, gfp_t flag, | 357 | dma_addr_t *dma_handle, gfp_t flag, |
358 | struct dma_attrs *attrs) | 358 | unsigned long attrs) |
359 | { | 359 | { |
360 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); | 360 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
361 | struct page *page; | 361 | struct page *page; |
@@ -370,7 +370,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, | |||
370 | pa = page_to_phys(page); | 370 | pa = page_to_phys(page); |
371 | memset((void *) pa, 0, size); | 371 | memset((void *) pa, 0, size); |
372 | 372 | ||
373 | map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, NULL); | 373 | map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0); |
374 | if (dma_mapping_error(dev, map)) { | 374 | if (dma_mapping_error(dev, map)) { |
375 | free_pages(pa, get_order(size)); | 375 | free_pages(pa, get_order(size)); |
376 | return NULL; | 376 | return NULL; |
@@ -384,19 +384,19 @@ static void *s390_dma_alloc(struct device *dev, size_t size, | |||
384 | 384 | ||
385 | static void s390_dma_free(struct device *dev, size_t size, | 385 | static void s390_dma_free(struct device *dev, size_t size, |
386 | void *pa, dma_addr_t dma_handle, | 386 | void *pa, dma_addr_t dma_handle, |
387 | struct dma_attrs *attrs) | 387 | unsigned long attrs) |
388 | { | 388 | { |
389 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); | 389 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
390 | 390 | ||
391 | size = PAGE_ALIGN(size); | 391 | size = PAGE_ALIGN(size); |
392 | atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); | 392 | atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); |
393 | s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); | 393 | s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0); |
394 | free_pages((unsigned long) pa, get_order(size)); | 394 | free_pages((unsigned long) pa, get_order(size)); |
395 | } | 395 | } |
396 | 396 | ||
397 | static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, | 397 | static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, |
398 | int nr_elements, enum dma_data_direction dir, | 398 | int nr_elements, enum dma_data_direction dir, |
399 | struct dma_attrs *attrs) | 399 | unsigned long attrs) |
400 | { | 400 | { |
401 | int mapped_elements = 0; | 401 | int mapped_elements = 0; |
402 | struct scatterlist *s; | 402 | struct scatterlist *s; |
@@ -405,7 +405,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
405 | for_each_sg(sg, s, nr_elements, i) { | 405 | for_each_sg(sg, s, nr_elements, i) { |
406 | struct page *page = sg_page(s); | 406 | struct page *page = sg_page(s); |
407 | s->dma_address = s390_dma_map_pages(dev, page, s->offset, | 407 | s->dma_address = s390_dma_map_pages(dev, page, s->offset, |
408 | s->length, dir, NULL); | 408 | s->length, dir, 0); |
409 | if (!dma_mapping_error(dev, s->dma_address)) { | 409 | if (!dma_mapping_error(dev, s->dma_address)) { |
410 | s->dma_length = s->length; | 410 | s->dma_length = s->length; |
411 | mapped_elements++; | 411 | mapped_elements++; |
@@ -419,7 +419,7 @@ unmap: | |||
419 | for_each_sg(sg, s, mapped_elements, i) { | 419 | for_each_sg(sg, s, mapped_elements, i) { |
420 | if (s->dma_address) | 420 | if (s->dma_address) |
421 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, | 421 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, |
422 | dir, NULL); | 422 | dir, 0); |
423 | s->dma_address = 0; | 423 | s->dma_address = 0; |
424 | s->dma_length = 0; | 424 | s->dma_length = 0; |
425 | } | 425 | } |
@@ -429,13 +429,14 @@ unmap: | |||
429 | 429 | ||
430 | static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | 430 | static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
431 | int nr_elements, enum dma_data_direction dir, | 431 | int nr_elements, enum dma_data_direction dir, |
432 | struct dma_attrs *attrs) | 432 | unsigned long attrs) |
433 | { | 433 | { |
434 | struct scatterlist *s; | 434 | struct scatterlist *s; |
435 | int i; | 435 | int i; |
436 | 436 | ||
437 | for_each_sg(sg, s, nr_elements, i) { | 437 | for_each_sg(sg, s, nr_elements, i) { |
438 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL); | 438 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, |
439 | 0); | ||
439 | s->dma_address = 0; | 440 | s->dma_address = 0; |
440 | s->dma_length = 0; | 441 | s->dma_length = 0; |
441 | } | 442 | } |
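Note: the s390 hunks also show the caller side of the conversion: internal users that previously passed NULL for "no attributes" now pass the literal 0, and attribute combinations are ordinary bitwise ORs of the DMA_ATTR_* flags. A hedged sketch of a driver-style caller; the device and buffer are assumed to exist, the flag names are the real ones from <linux/dma-mapping.h>:

#include <linux/dma-mapping.h>

static dma_addr_t example_map(struct device *dev, void *buf, size_t len)
{
	/* "No attributes" is now the literal 0 instead of a NULL
	 * struct dma_attrs pointer; real attributes would be OR-ed
	 * DMA_ATTR_* bits, e.g. DMA_ATTR_WEAK_ORDERING. */
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0);
}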
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index e11cf0c8206b..0052ad40e86d 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h | |||
@@ -17,9 +17,9 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
17 | /* arch/sh/mm/consistent.c */ | 17 | /* arch/sh/mm/consistent.c */ |
18 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 18 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
19 | dma_addr_t *dma_addr, gfp_t flag, | 19 | dma_addr_t *dma_addr, gfp_t flag, |
20 | struct dma_attrs *attrs); | 20 | unsigned long attrs); |
21 | extern void dma_generic_free_coherent(struct device *dev, size_t size, | 21 | extern void dma_generic_free_coherent(struct device *dev, size_t size, |
22 | void *vaddr, dma_addr_t dma_handle, | 22 | void *vaddr, dma_addr_t dma_handle, |
23 | struct dma_attrs *attrs); | 23 | unsigned long attrs); |
24 | 24 | ||
25 | #endif /* __ASM_SH_DMA_MAPPING_H */ | 25 | #endif /* __ASM_SH_DMA_MAPPING_H */ |
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 5b0bfcda6d0b..eadb669a7329 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c | |||
@@ -13,7 +13,7 @@ | |||
13 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | 13 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, |
14 | unsigned long offset, size_t size, | 14 | unsigned long offset, size_t size, |
15 | enum dma_data_direction dir, | 15 | enum dma_data_direction dir, |
16 | struct dma_attrs *attrs) | 16 | unsigned long attrs) |
17 | { | 17 | { |
18 | dma_addr_t addr = page_to_phys(page) + offset; | 18 | dma_addr_t addr = page_to_phys(page) + offset; |
19 | 19 | ||
@@ -25,7 +25,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | |||
25 | 25 | ||
26 | static int nommu_map_sg(struct device *dev, struct scatterlist *sg, | 26 | static int nommu_map_sg(struct device *dev, struct scatterlist *sg, |
27 | int nents, enum dma_data_direction dir, | 27 | int nents, enum dma_data_direction dir, |
28 | struct dma_attrs *attrs) | 28 | unsigned long attrs) |
29 | { | 29 | { |
30 | struct scatterlist *s; | 30 | struct scatterlist *s; |
31 | int i; | 31 | int i; |
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index b81d9dbf9fef..92b6976fde59 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c | |||
@@ -34,7 +34,7 @@ fs_initcall(dma_init); | |||
34 | 34 | ||
35 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 35 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
36 | dma_addr_t *dma_handle, gfp_t gfp, | 36 | dma_addr_t *dma_handle, gfp_t gfp, |
37 | struct dma_attrs *attrs) | 37 | unsigned long attrs) |
38 | { | 38 | { |
39 | void *ret, *ret_nocache; | 39 | void *ret, *ret_nocache; |
40 | int order = get_order(size); | 40 | int order = get_order(size); |
@@ -66,7 +66,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
66 | 66 | ||
67 | void dma_generic_free_coherent(struct device *dev, size_t size, | 67 | void dma_generic_free_coherent(struct device *dev, size_t size, |
68 | void *vaddr, dma_addr_t dma_handle, | 68 | void *vaddr, dma_addr_t dma_handle, |
69 | struct dma_attrs *attrs) | 69 | unsigned long attrs) |
70 | { | 70 | { |
71 | int order = get_order(size); | 71 | int order = get_order(size); |
72 | unsigned long pfn = dma_handle >> PAGE_SHIFT; | 72 | unsigned long pfn = dma_handle >> PAGE_SHIFT; |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 37686828c3d9..5c615abff030 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -196,7 +196,7 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx) | |||
196 | 196 | ||
197 | static void *dma_4u_alloc_coherent(struct device *dev, size_t size, | 197 | static void *dma_4u_alloc_coherent(struct device *dev, size_t size, |
198 | dma_addr_t *dma_addrp, gfp_t gfp, | 198 | dma_addr_t *dma_addrp, gfp_t gfp, |
199 | struct dma_attrs *attrs) | 199 | unsigned long attrs) |
200 | { | 200 | { |
201 | unsigned long order, first_page; | 201 | unsigned long order, first_page; |
202 | struct iommu *iommu; | 202 | struct iommu *iommu; |
@@ -245,7 +245,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size, | |||
245 | 245 | ||
246 | static void dma_4u_free_coherent(struct device *dev, size_t size, | 246 | static void dma_4u_free_coherent(struct device *dev, size_t size, |
247 | void *cpu, dma_addr_t dvma, | 247 | void *cpu, dma_addr_t dvma, |
248 | struct dma_attrs *attrs) | 248 | unsigned long attrs) |
249 | { | 249 | { |
250 | struct iommu *iommu; | 250 | struct iommu *iommu; |
251 | unsigned long order, npages; | 251 | unsigned long order, npages; |
@@ -263,7 +263,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, | |||
263 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, | 263 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, |
264 | unsigned long offset, size_t sz, | 264 | unsigned long offset, size_t sz, |
265 | enum dma_data_direction direction, | 265 | enum dma_data_direction direction, |
266 | struct dma_attrs *attrs) | 266 | unsigned long attrs) |
267 | { | 267 | { |
268 | struct iommu *iommu; | 268 | struct iommu *iommu; |
269 | struct strbuf *strbuf; | 269 | struct strbuf *strbuf; |
@@ -385,7 +385,7 @@ do_flush_sync: | |||
385 | 385 | ||
386 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | 386 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, |
387 | size_t sz, enum dma_data_direction direction, | 387 | size_t sz, enum dma_data_direction direction, |
388 | struct dma_attrs *attrs) | 388 | unsigned long attrs) |
389 | { | 389 | { |
390 | struct iommu *iommu; | 390 | struct iommu *iommu; |
391 | struct strbuf *strbuf; | 391 | struct strbuf *strbuf; |
@@ -431,7 +431,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
431 | 431 | ||
432 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, | 432 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, |
433 | int nelems, enum dma_data_direction direction, | 433 | int nelems, enum dma_data_direction direction, |
434 | struct dma_attrs *attrs) | 434 | unsigned long attrs) |
435 | { | 435 | { |
436 | struct scatterlist *s, *outs, *segstart; | 436 | struct scatterlist *s, *outs, *segstart; |
437 | unsigned long flags, handle, prot, ctx; | 437 | unsigned long flags, handle, prot, ctx; |
@@ -607,7 +607,7 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) | |||
607 | 607 | ||
608 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, | 608 | static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, |
609 | int nelems, enum dma_data_direction direction, | 609 | int nelems, enum dma_data_direction direction, |
610 | struct dma_attrs *attrs) | 610 | unsigned long attrs) |
611 | { | 611 | { |
612 | unsigned long flags, ctx; | 612 | unsigned long flags, ctx; |
613 | struct scatterlist *sg; | 613 | struct scatterlist *sg; |
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index ffd5ff4678cf..2344103414d1 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -260,7 +260,7 @@ EXPORT_SYMBOL(sbus_set_sbus64); | |||
260 | */ | 260 | */ |
261 | static void *sbus_alloc_coherent(struct device *dev, size_t len, | 261 | static void *sbus_alloc_coherent(struct device *dev, size_t len, |
262 | dma_addr_t *dma_addrp, gfp_t gfp, | 262 | dma_addr_t *dma_addrp, gfp_t gfp, |
263 | struct dma_attrs *attrs) | 263 | unsigned long attrs) |
264 | { | 264 | { |
265 | struct platform_device *op = to_platform_device(dev); | 265 | struct platform_device *op = to_platform_device(dev); |
266 | unsigned long len_total = PAGE_ALIGN(len); | 266 | unsigned long len_total = PAGE_ALIGN(len); |
@@ -315,7 +315,7 @@ err_nopages: | |||
315 | } | 315 | } |
316 | 316 | ||
317 | static void sbus_free_coherent(struct device *dev, size_t n, void *p, | 317 | static void sbus_free_coherent(struct device *dev, size_t n, void *p, |
318 | dma_addr_t ba, struct dma_attrs *attrs) | 318 | dma_addr_t ba, unsigned long attrs) |
319 | { | 319 | { |
320 | struct resource *res; | 320 | struct resource *res; |
321 | struct page *pgv; | 321 | struct page *pgv; |
@@ -355,7 +355,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p, | |||
355 | static dma_addr_t sbus_map_page(struct device *dev, struct page *page, | 355 | static dma_addr_t sbus_map_page(struct device *dev, struct page *page, |
356 | unsigned long offset, size_t len, | 356 | unsigned long offset, size_t len, |
357 | enum dma_data_direction dir, | 357 | enum dma_data_direction dir, |
358 | struct dma_attrs *attrs) | 358 | unsigned long attrs) |
359 | { | 359 | { |
360 | void *va = page_address(page) + offset; | 360 | void *va = page_address(page) + offset; |
361 | 361 | ||
@@ -371,20 +371,20 @@ static dma_addr_t sbus_map_page(struct device *dev, struct page *page, | |||
371 | } | 371 | } |
372 | 372 | ||
373 | static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, | 373 | static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, |
374 | enum dma_data_direction dir, struct dma_attrs *attrs) | 374 | enum dma_data_direction dir, unsigned long attrs) |
375 | { | 375 | { |
376 | mmu_release_scsi_one(dev, ba, n); | 376 | mmu_release_scsi_one(dev, ba, n); |
377 | } | 377 | } |
378 | 378 | ||
379 | static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, | 379 | static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, |
380 | enum dma_data_direction dir, struct dma_attrs *attrs) | 380 | enum dma_data_direction dir, unsigned long attrs) |
381 | { | 381 | { |
382 | mmu_get_scsi_sgl(dev, sg, n); | 382 | mmu_get_scsi_sgl(dev, sg, n); |
383 | return n; | 383 | return n; |
384 | } | 384 | } |
385 | 385 | ||
386 | static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, | 386 | static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, |
387 | enum dma_data_direction dir, struct dma_attrs *attrs) | 387 | enum dma_data_direction dir, unsigned long attrs) |
388 | { | 388 | { |
389 | mmu_release_scsi_sgl(dev, sg, n); | 389 | mmu_release_scsi_sgl(dev, sg, n); |
390 | } | 390 | } |
@@ -429,7 +429,7 @@ arch_initcall(sparc_register_ioport); | |||
429 | */ | 429 | */ |
430 | static void *pci32_alloc_coherent(struct device *dev, size_t len, | 430 | static void *pci32_alloc_coherent(struct device *dev, size_t len, |
431 | dma_addr_t *pba, gfp_t gfp, | 431 | dma_addr_t *pba, gfp_t gfp, |
432 | struct dma_attrs *attrs) | 432 | unsigned long attrs) |
433 | { | 433 | { |
434 | unsigned long len_total = PAGE_ALIGN(len); | 434 | unsigned long len_total = PAGE_ALIGN(len); |
435 | void *va; | 435 | void *va; |
@@ -482,7 +482,7 @@ err_nopages: | |||
482 | * past this call are illegal. | 482 | * past this call are illegal. |
483 | */ | 483 | */ |
484 | static void pci32_free_coherent(struct device *dev, size_t n, void *p, | 484 | static void pci32_free_coherent(struct device *dev, size_t n, void *p, |
485 | dma_addr_t ba, struct dma_attrs *attrs) | 485 | dma_addr_t ba, unsigned long attrs) |
486 | { | 486 | { |
487 | struct resource *res; | 487 | struct resource *res; |
488 | 488 | ||
@@ -518,14 +518,14 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p, | |||
518 | static dma_addr_t pci32_map_page(struct device *dev, struct page *page, | 518 | static dma_addr_t pci32_map_page(struct device *dev, struct page *page, |
519 | unsigned long offset, size_t size, | 519 | unsigned long offset, size_t size, |
520 | enum dma_data_direction dir, | 520 | enum dma_data_direction dir, |
521 | struct dma_attrs *attrs) | 521 | unsigned long attrs) |
522 | { | 522 | { |
523 | /* IIep is write-through, not flushing. */ | 523 | /* IIep is write-through, not flushing. */ |
524 | return page_to_phys(page) + offset; | 524 | return page_to_phys(page) + offset; |
525 | } | 525 | } |
526 | 526 | ||
527 | static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, | 527 | static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, |
528 | enum dma_data_direction dir, struct dma_attrs *attrs) | 528 | enum dma_data_direction dir, unsigned long attrs) |
529 | { | 529 | { |
530 | if (dir != PCI_DMA_TODEVICE) | 530 | if (dir != PCI_DMA_TODEVICE) |
531 | dma_make_coherent(ba, PAGE_ALIGN(size)); | 531 | dma_make_coherent(ba, PAGE_ALIGN(size)); |
@@ -548,7 +548,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, | |||
548 | */ | 548 | */ |
549 | static int pci32_map_sg(struct device *device, struct scatterlist *sgl, | 549 | static int pci32_map_sg(struct device *device, struct scatterlist *sgl, |
550 | int nents, enum dma_data_direction dir, | 550 | int nents, enum dma_data_direction dir, |
551 | struct dma_attrs *attrs) | 551 | unsigned long attrs) |
552 | { | 552 | { |
553 | struct scatterlist *sg; | 553 | struct scatterlist *sg; |
554 | int n; | 554 | int n; |
@@ -567,7 +567,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl, | |||
567 | */ | 567 | */ |
568 | static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, | 568 | static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, |
569 | int nents, enum dma_data_direction dir, | 569 | int nents, enum dma_data_direction dir, |
570 | struct dma_attrs *attrs) | 570 | unsigned long attrs) |
571 | { | 571 | { |
572 | struct scatterlist *sg; | 572 | struct scatterlist *sg; |
573 | int n; | 573 | int n; |
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 836e8cef47e2..61c6f935accc 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -130,7 +130,7 @@ static inline long iommu_batch_end(void) | |||
130 | 130 | ||
131 | static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | 131 | static void *dma_4v_alloc_coherent(struct device *dev, size_t size, |
132 | dma_addr_t *dma_addrp, gfp_t gfp, | 132 | dma_addr_t *dma_addrp, gfp_t gfp, |
133 | struct dma_attrs *attrs) | 133 | unsigned long attrs) |
134 | { | 134 | { |
135 | unsigned long flags, order, first_page, npages, n; | 135 | unsigned long flags, order, first_page, npages, n; |
136 | struct iommu *iommu; | 136 | struct iommu *iommu; |
@@ -213,7 +213,7 @@ static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, | |||
213 | } | 213 | } |
214 | 214 | ||
215 | static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | 215 | static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, |
216 | dma_addr_t dvma, struct dma_attrs *attrs) | 216 | dma_addr_t dvma, unsigned long attrs) |
217 | { | 217 | { |
218 | struct pci_pbm_info *pbm; | 218 | struct pci_pbm_info *pbm; |
219 | struct iommu *iommu; | 219 | struct iommu *iommu; |
@@ -235,7 +235,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
235 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | 235 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, |
236 | unsigned long offset, size_t sz, | 236 | unsigned long offset, size_t sz, |
237 | enum dma_data_direction direction, | 237 | enum dma_data_direction direction, |
238 | struct dma_attrs *attrs) | 238 | unsigned long attrs) |
239 | { | 239 | { |
240 | struct iommu *iommu; | 240 | struct iommu *iommu; |
241 | unsigned long flags, npages, oaddr; | 241 | unsigned long flags, npages, oaddr; |
@@ -294,7 +294,7 @@ iommu_map_fail: | |||
294 | 294 | ||
295 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | 295 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, |
296 | size_t sz, enum dma_data_direction direction, | 296 | size_t sz, enum dma_data_direction direction, |
297 | struct dma_attrs *attrs) | 297 | unsigned long attrs) |
298 | { | 298 | { |
299 | struct pci_pbm_info *pbm; | 299 | struct pci_pbm_info *pbm; |
300 | struct iommu *iommu; | 300 | struct iommu *iommu; |
@@ -322,7 +322,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
322 | 322 | ||
323 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | 323 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, |
324 | int nelems, enum dma_data_direction direction, | 324 | int nelems, enum dma_data_direction direction, |
325 | struct dma_attrs *attrs) | 325 | unsigned long attrs) |
326 | { | 326 | { |
327 | struct scatterlist *s, *outs, *segstart; | 327 | struct scatterlist *s, *outs, *segstart; |
328 | unsigned long flags, handle, prot; | 328 | unsigned long flags, handle, prot; |
@@ -466,7 +466,7 @@ iommu_map_failed: | |||
466 | 466 | ||
467 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | 467 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, |
468 | int nelems, enum dma_data_direction direction, | 468 | int nelems, enum dma_data_direction direction, |
469 | struct dma_attrs *attrs) | 469 | unsigned long attrs) |
470 | { | 470 | { |
471 | struct pci_pbm_info *pbm; | 471 | struct pci_pbm_info *pbm; |
472 | struct scatterlist *sg; | 472 | struct scatterlist *sg; |
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 7d02b1fef025..d79b3b734245 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S | |||
@@ -150,6 +150,13 @@ SECTIONS | |||
150 | } | 150 | } |
151 | PERCPU_SECTION(SMP_CACHE_BYTES) | 151 | PERCPU_SECTION(SMP_CACHE_BYTES) |
152 | 152 | ||
153 | #ifdef CONFIG_JUMP_LABEL | ||
154 | . = ALIGN(PAGE_SIZE); | ||
155 | .exit.text : { | ||
156 | EXIT_TEXT | ||
157 | } | ||
158 | #endif | ||
159 | |||
153 | . = ALIGN(PAGE_SIZE); | 160 | . = ALIGN(PAGE_SIZE); |
154 | __init_end = .; | 161 | __init_end = .; |
155 | BSS_SECTION(0, 0, 0) | 162 | BSS_SECTION(0, 0, 0) |
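Note: the sparc linker-script hunk above (and the tile one further down) exists because a static key used in built-in __exit code plants a __jump_table entry that points into .exit.text; if that text were discarded at link time the entry would dangle, so it is now kept when CONFIG_JUMP_LABEL is set. A minimal sketch of the kind of code that triggers this, with a hypothetical key name:

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(example_key);	/* hypothetical key */

static void __exit example_exit(void)
{
	/* This branch emits a __jump_table entry whose code address
	 * lives in .exit.text, even for built-in (non-module) code. */
	if (static_branch_unlikely(&example_key))
		pr_info("example key was enabled\n");
}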
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index b6bc0547a4f6..09bb774b39cd 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | static void *tile_dma_alloc_coherent(struct device *dev, size_t size, | 35 | static void *tile_dma_alloc_coherent(struct device *dev, size_t size, |
36 | dma_addr_t *dma_handle, gfp_t gfp, | 36 | dma_addr_t *dma_handle, gfp_t gfp, |
37 | struct dma_attrs *attrs) | 37 | unsigned long attrs) |
38 | { | 38 | { |
39 | u64 dma_mask = (dev && dev->coherent_dma_mask) ? | 39 | u64 dma_mask = (dev && dev->coherent_dma_mask) ? |
40 | dev->coherent_dma_mask : DMA_BIT_MASK(32); | 40 | dev->coherent_dma_mask : DMA_BIT_MASK(32); |
@@ -78,7 +78,7 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size, | |||
78 | */ | 78 | */ |
79 | static void tile_dma_free_coherent(struct device *dev, size_t size, | 79 | static void tile_dma_free_coherent(struct device *dev, size_t size, |
80 | void *vaddr, dma_addr_t dma_handle, | 80 | void *vaddr, dma_addr_t dma_handle, |
81 | struct dma_attrs *attrs) | 81 | unsigned long attrs) |
82 | { | 82 | { |
83 | homecache_free_pages((unsigned long)vaddr, get_order(size)); | 83 | homecache_free_pages((unsigned long)vaddr, get_order(size)); |
84 | } | 84 | } |
@@ -202,7 +202,7 @@ static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size, | |||
202 | 202 | ||
203 | static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 203 | static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
204 | int nents, enum dma_data_direction direction, | 204 | int nents, enum dma_data_direction direction, |
205 | struct dma_attrs *attrs) | 205 | unsigned long attrs) |
206 | { | 206 | { |
207 | struct scatterlist *sg; | 207 | struct scatterlist *sg; |
208 | int i; | 208 | int i; |
@@ -224,7 +224,7 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
224 | 224 | ||
225 | static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | 225 | static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
226 | int nents, enum dma_data_direction direction, | 226 | int nents, enum dma_data_direction direction, |
227 | struct dma_attrs *attrs) | 227 | unsigned long attrs) |
228 | { | 228 | { |
229 | struct scatterlist *sg; | 229 | struct scatterlist *sg; |
230 | int i; | 230 | int i; |
@@ -240,7 +240,7 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
240 | static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, | 240 | static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, |
241 | unsigned long offset, size_t size, | 241 | unsigned long offset, size_t size, |
242 | enum dma_data_direction direction, | 242 | enum dma_data_direction direction, |
243 | struct dma_attrs *attrs) | 243 | unsigned long attrs) |
244 | { | 244 | { |
245 | BUG_ON(!valid_dma_direction(direction)); | 245 | BUG_ON(!valid_dma_direction(direction)); |
246 | 246 | ||
@@ -252,7 +252,7 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, | |||
252 | 252 | ||
253 | static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 253 | static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
254 | size_t size, enum dma_data_direction direction, | 254 | size_t size, enum dma_data_direction direction, |
255 | struct dma_attrs *attrs) | 255 | unsigned long attrs) |
256 | { | 256 | { |
257 | BUG_ON(!valid_dma_direction(direction)); | 257 | BUG_ON(!valid_dma_direction(direction)); |
258 | 258 | ||
@@ -343,7 +343,7 @@ EXPORT_SYMBOL(tile_dma_map_ops); | |||
343 | 343 | ||
344 | static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, | 344 | static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, |
345 | dma_addr_t *dma_handle, gfp_t gfp, | 345 | dma_addr_t *dma_handle, gfp_t gfp, |
346 | struct dma_attrs *attrs) | 346 | unsigned long attrs) |
347 | { | 347 | { |
348 | int node = dev_to_node(dev); | 348 | int node = dev_to_node(dev); |
349 | int order = get_order(size); | 349 | int order = get_order(size); |
@@ -368,14 +368,14 @@ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, | |||
368 | */ | 368 | */ |
369 | static void tile_pci_dma_free_coherent(struct device *dev, size_t size, | 369 | static void tile_pci_dma_free_coherent(struct device *dev, size_t size, |
370 | void *vaddr, dma_addr_t dma_handle, | 370 | void *vaddr, dma_addr_t dma_handle, |
371 | struct dma_attrs *attrs) | 371 | unsigned long attrs) |
372 | { | 372 | { |
373 | homecache_free_pages((unsigned long)vaddr, get_order(size)); | 373 | homecache_free_pages((unsigned long)vaddr, get_order(size)); |
374 | } | 374 | } |
375 | 375 | ||
376 | static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, | 376 | static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, |
377 | int nents, enum dma_data_direction direction, | 377 | int nents, enum dma_data_direction direction, |
378 | struct dma_attrs *attrs) | 378 | unsigned long attrs) |
379 | { | 379 | { |
380 | struct scatterlist *sg; | 380 | struct scatterlist *sg; |
381 | int i; | 381 | int i; |
@@ -400,7 +400,7 @@ static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, | |||
400 | static void tile_pci_dma_unmap_sg(struct device *dev, | 400 | static void tile_pci_dma_unmap_sg(struct device *dev, |
401 | struct scatterlist *sglist, int nents, | 401 | struct scatterlist *sglist, int nents, |
402 | enum dma_data_direction direction, | 402 | enum dma_data_direction direction, |
403 | struct dma_attrs *attrs) | 403 | unsigned long attrs) |
404 | { | 404 | { |
405 | struct scatterlist *sg; | 405 | struct scatterlist *sg; |
406 | int i; | 406 | int i; |
@@ -416,7 +416,7 @@ static void tile_pci_dma_unmap_sg(struct device *dev, | |||
416 | static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, | 416 | static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, |
417 | unsigned long offset, size_t size, | 417 | unsigned long offset, size_t size, |
418 | enum dma_data_direction direction, | 418 | enum dma_data_direction direction, |
419 | struct dma_attrs *attrs) | 419 | unsigned long attrs) |
420 | { | 420 | { |
421 | BUG_ON(!valid_dma_direction(direction)); | 421 | BUG_ON(!valid_dma_direction(direction)); |
422 | 422 | ||
@@ -429,7 +429,7 @@ static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, | |||
429 | static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 429 | static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, |
430 | size_t size, | 430 | size_t size, |
431 | enum dma_data_direction direction, | 431 | enum dma_data_direction direction, |
432 | struct dma_attrs *attrs) | 432 | unsigned long attrs) |
433 | { | 433 | { |
434 | BUG_ON(!valid_dma_direction(direction)); | 434 | BUG_ON(!valid_dma_direction(direction)); |
435 | 435 | ||
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(gx_pci_dma_map_ops); | |||
531 | #ifdef CONFIG_SWIOTLB | 531 | #ifdef CONFIG_SWIOTLB |
532 | static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, | 532 | static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, |
533 | dma_addr_t *dma_handle, gfp_t gfp, | 533 | dma_addr_t *dma_handle, gfp_t gfp, |
534 | struct dma_attrs *attrs) | 534 | unsigned long attrs) |
535 | { | 535 | { |
536 | gfp |= GFP_DMA; | 536 | gfp |= GFP_DMA; |
537 | return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); | 537 | return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); |
@@ -539,7 +539,7 @@ static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, | |||
539 | 539 | ||
540 | static void tile_swiotlb_free_coherent(struct device *dev, size_t size, | 540 | static void tile_swiotlb_free_coherent(struct device *dev, size_t size, |
541 | void *vaddr, dma_addr_t dma_addr, | 541 | void *vaddr, dma_addr_t dma_addr, |
542 | struct dma_attrs *attrs) | 542 | unsigned long attrs) |
543 | { | 543 | { |
544 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | 544 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); |
545 | } | 545 | } |
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S index 378f5d8d1ec8..9d449caf8910 100644 --- a/arch/tile/kernel/vmlinux.lds.S +++ b/arch/tile/kernel/vmlinux.lds.S | |||
@@ -60,6 +60,18 @@ SECTIONS | |||
60 | /* "Init" is divided into two areas with very different virtual addresses. */ | 60 | /* "Init" is divided into two areas with very different virtual addresses. */ |
61 | INIT_TEXT_SECTION(PAGE_SIZE) | 61 | INIT_TEXT_SECTION(PAGE_SIZE) |
62 | 62 | ||
63 | /* | ||
64 | * Some things, like the __jump_table, may contain symbol references | ||
65 | * to __exit text, so include such text in the final image if so. | ||
66 | * In that case we also override the _einittext from INIT_TEXT_SECTION. | ||
67 | */ | ||
68 | #ifdef CONFIG_JUMP_LABEL | ||
69 | .exit.text : { | ||
70 | EXIT_TEXT | ||
71 | _einittext = .; | ||
72 | } | ||
73 | #endif | ||
74 | |||
63 | /* Now we skip back to PAGE_OFFSET for the data. */ | 75 | /* Now we skip back to PAGE_OFFSET for the data. */ |
64 | . = (. - TEXT_OFFSET + PAGE_OFFSET); | 76 | . = (. - TEXT_OFFSET + PAGE_OFFSET); |
65 | #undef LOAD_OFFSET | 77 | #undef LOAD_OFFSET |
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c index 16c08b2143a7..3e9f6489ba38 100644 --- a/arch/unicore32/mm/dma-swiotlb.c +++ b/arch/unicore32/mm/dma-swiotlb.c | |||
@@ -19,14 +19,14 @@ | |||
19 | 19 | ||
20 | static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size, | 20 | static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size, |
21 | dma_addr_t *dma_handle, gfp_t flags, | 21 | dma_addr_t *dma_handle, gfp_t flags, |
22 | struct dma_attrs *attrs) | 22 | unsigned long attrs) |
23 | { | 23 | { |
24 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); | 24 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); |
25 | } | 25 | } |
26 | 26 | ||
27 | static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, | 27 | static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, |
28 | void *vaddr, dma_addr_t dma_addr, | 28 | void *vaddr, dma_addr_t dma_addr, |
29 | struct dma_attrs *attrs) | 29 | unsigned long attrs) |
30 | { | 30 | { |
31 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | 31 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); |
32 | } | 32 | } |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 3a27b93e6261..44461626830e 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/kmemcheck.h> | 9 | #include <linux/kmemcheck.h> |
10 | #include <linux/scatterlist.h> | 10 | #include <linux/scatterlist.h> |
11 | #include <linux/dma-debug.h> | 11 | #include <linux/dma-debug.h> |
12 | #include <linux/dma-attrs.h> | ||
13 | #include <asm/io.h> | 12 | #include <asm/io.h> |
14 | #include <asm/swiotlb.h> | 13 | #include <asm/swiotlb.h> |
15 | #include <linux/dma-contiguous.h> | 14 | #include <linux/dma-contiguous.h> |
@@ -48,11 +47,11 @@ extern int dma_supported(struct device *hwdev, u64 mask); | |||
48 | 47 | ||
49 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 48 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
50 | dma_addr_t *dma_addr, gfp_t flag, | 49 | dma_addr_t *dma_addr, gfp_t flag, |
51 | struct dma_attrs *attrs); | 50 | unsigned long attrs); |
52 | 51 | ||
53 | extern void dma_generic_free_coherent(struct device *dev, size_t size, | 52 | extern void dma_generic_free_coherent(struct device *dev, size_t size, |
54 | void *vaddr, dma_addr_t dma_addr, | 53 | void *vaddr, dma_addr_t dma_addr, |
55 | struct dma_attrs *attrs); | 54 | unsigned long attrs); |
56 | 55 | ||
57 | #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ | 56 | #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ |
58 | extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); | 57 | extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); |
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index fea7724141a0..e7f155c3045e 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h | |||
@@ -344,8 +344,8 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm, | |||
344 | */ | 344 | */ |
345 | static inline int mmap_is_ia32(void) | 345 | static inline int mmap_is_ia32(void) |
346 | { | 346 | { |
347 | return config_enabled(CONFIG_X86_32) || | 347 | return IS_ENABLED(CONFIG_X86_32) || |
348 | (config_enabled(CONFIG_COMPAT) && | 348 | (IS_ENABLED(CONFIG_COMPAT) && |
349 | test_thread_flag(TIF_ADDR32)); | 349 | test_thread_flag(TIF_ADDR32)); |
350 | } | 350 | } |
351 | 351 | ||
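Note: the x86 hunks replace the old config_enabled() helper with IS_ENABLED(), the public interface from <linux/kconfig.h>: it evaluates to a compile-time constant (1 when the option is =y or =m, 0 otherwise), so it works in ordinary C conditionals and lets the compiler drop the dead branch. For the boolean options touched here the result is unchanged. A small sketch with a hypothetical helper:

#include <linux/kconfig.h>
#include <linux/types.h>

/* Hypothetical helper: compiles to a constant, so the unused branch
 * in any caller is eliminated while the code is still type-checked. */
static inline bool example_is_32bit_build(void)
{
	return IS_ENABLED(CONFIG_X86_32);
}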
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 116b58347501..2737366ea583 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h | |||
@@ -137,9 +137,9 @@ static inline int copy_fregs_to_user(struct fregs_state __user *fx) | |||
137 | 137 | ||
138 | static inline int copy_fxregs_to_user(struct fxregs_state __user *fx) | 138 | static inline int copy_fxregs_to_user(struct fxregs_state __user *fx) |
139 | { | 139 | { |
140 | if (config_enabled(CONFIG_X86_32)) | 140 | if (IS_ENABLED(CONFIG_X86_32)) |
141 | return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx)); | 141 | return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx)); |
142 | else if (config_enabled(CONFIG_AS_FXSAVEQ)) | 142 | else if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) |
143 | return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx)); | 143 | return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx)); |
144 | 144 | ||
145 | /* See comment in copy_fxregs_to_kernel() below. */ | 145 | /* See comment in copy_fxregs_to_kernel() below. */ |
@@ -150,10 +150,10 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx) | |||
150 | { | 150 | { |
151 | int err; | 151 | int err; |
152 | 152 | ||
153 | if (config_enabled(CONFIG_X86_32)) { | 153 | if (IS_ENABLED(CONFIG_X86_32)) { |
154 | err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); | 154 | err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); |
155 | } else { | 155 | } else { |
156 | if (config_enabled(CONFIG_AS_FXSAVEQ)) { | 156 | if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) { |
157 | err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); | 157 | err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); |
158 | } else { | 158 | } else { |
159 | /* See comment in copy_fxregs_to_kernel() below. */ | 159 | /* See comment in copy_fxregs_to_kernel() below. */ |
@@ -166,9 +166,9 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx) | |||
166 | 166 | ||
167 | static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) | 167 | static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) |
168 | { | 168 | { |
169 | if (config_enabled(CONFIG_X86_32)) | 169 | if (IS_ENABLED(CONFIG_X86_32)) |
170 | return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); | 170 | return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); |
171 | else if (config_enabled(CONFIG_AS_FXSAVEQ)) | 171 | else if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) |
172 | return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); | 172 | return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); |
173 | 173 | ||
174 | /* See comment in copy_fxregs_to_kernel() below. */ | 174 | /* See comment in copy_fxregs_to_kernel() below. */ |
@@ -190,9 +190,9 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx) | |||
190 | 190 | ||
191 | static inline void copy_fxregs_to_kernel(struct fpu *fpu) | 191 | static inline void copy_fxregs_to_kernel(struct fpu *fpu) |
192 | { | 192 | { |
193 | if (config_enabled(CONFIG_X86_32)) | 193 | if (IS_ENABLED(CONFIG_X86_32)) |
194 | asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave)); | 194 | asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave)); |
195 | else if (config_enabled(CONFIG_AS_FXSAVEQ)) | 195 | else if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) |
196 | asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave)); | 196 | asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave)); |
197 | else { | 197 | else { |
198 | /* Using "rex64; fxsave %0" is broken because, if the memory | 198 | /* Using "rex64; fxsave %0" is broken because, if the memory |
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 396348196aa7..d8abfcf524d1 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -155,7 +155,7 @@ static inline void arch_exit_mmap(struct mm_struct *mm) | |||
155 | #ifdef CONFIG_X86_64 | 155 | #ifdef CONFIG_X86_64 |
156 | static inline bool is_64bit_mm(struct mm_struct *mm) | 156 | static inline bool is_64bit_mm(struct mm_struct *mm) |
157 | { | 157 | { |
158 | return !config_enabled(CONFIG_IA32_EMULATION) || | 158 | return !IS_ENABLED(CONFIG_IA32_EMULATION) || |
159 | !(mm->context.ia32_compat == TIF_IA32); | 159 | !(mm->context.ia32_compat == TIF_IA32); |
160 | } | 160 | } |
161 | #else | 161 | #else |
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index ab05d73e2bb7..d2f69b9ff732 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h | |||
@@ -31,9 +31,9 @@ static inline void dma_mark_clean(void *addr, size_t size) {} | |||
31 | 31 | ||
32 | extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 32 | extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
33 | dma_addr_t *dma_handle, gfp_t flags, | 33 | dma_addr_t *dma_handle, gfp_t flags, |
34 | struct dma_attrs *attrs); | 34 | unsigned long attrs); |
35 | extern void x86_swiotlb_free_coherent(struct device *dev, size_t size, | 35 | extern void x86_swiotlb_free_coherent(struct device *dev, size_t size, |
36 | void *vaddr, dma_addr_t dma_addr, | 36 | void *vaddr, dma_addr_t dma_addr, |
37 | struct dma_attrs *attrs); | 37 | unsigned long attrs); |
38 | 38 | ||
39 | #endif /* _ASM_X86_SWIOTLB_H */ | 39 | #endif /* _ASM_X86_SWIOTLB_H */ |
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h index acd844c017d3..f02f025ff988 100644 --- a/arch/x86/include/asm/xen/page-coherent.h +++ b/arch/x86/include/asm/xen/page-coherent.h | |||
@@ -2,12 +2,11 @@ | |||
2 | #define _ASM_X86_XEN_PAGE_COHERENT_H | 2 | #define _ASM_X86_XEN_PAGE_COHERENT_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | #include <linux/dma-attrs.h> | ||
6 | #include <linux/dma-mapping.h> | 5 | #include <linux/dma-mapping.h> |
7 | 6 | ||
8 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | 7 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, |
9 | dma_addr_t *dma_handle, gfp_t flags, | 8 | dma_addr_t *dma_handle, gfp_t flags, |
10 | struct dma_attrs *attrs) | 9 | unsigned long attrs) |
11 | { | 10 | { |
12 | void *vstart = (void*)__get_free_pages(flags, get_order(size)); | 11 | void *vstart = (void*)__get_free_pages(flags, get_order(size)); |
13 | *dma_handle = virt_to_phys(vstart); | 12 | *dma_handle = virt_to_phys(vstart); |
@@ -16,18 +15,18 @@ static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | |||
16 | 15 | ||
17 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | 16 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, |
18 | void *cpu_addr, dma_addr_t dma_handle, | 17 | void *cpu_addr, dma_addr_t dma_handle, |
19 | struct dma_attrs *attrs) | 18 | unsigned long attrs) |
20 | { | 19 | { |
21 | free_pages((unsigned long) cpu_addr, get_order(size)); | 20 | free_pages((unsigned long) cpu_addr, get_order(size)); |
22 | } | 21 | } |
23 | 22 | ||
24 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | 23 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, |
25 | dma_addr_t dev_addr, unsigned long offset, size_t size, | 24 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
26 | enum dma_data_direction dir, struct dma_attrs *attrs) { } | 25 | enum dma_data_direction dir, unsigned long attrs) { } |
27 | 26 | ||
28 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 27 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
29 | size_t size, enum dma_data_direction dir, | 28 | size_t size, enum dma_data_direction dir, |
30 | struct dma_attrs *attrs) { } | 29 | unsigned long attrs) { } |
31 | 30 | ||
32 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | 31 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, |
33 | dma_addr_t handle, size_t size, enum dma_data_direction dir) { } | 32 | dma_addr_t handle, size_t size, enum dma_data_direction dir) { } |
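[Editor's note] The dma_attrs hunks above change every dma_map_ops callback so that attributes arrive as a plain unsigned long bitmask of DMA_ATTR_* flags instead of a struct dma_attrs pointer; 0 means "no attributes". A minimal sketch of a ->map_page implementation after the conversion, for a hypothetical "foo" bus driver (the cache-maintenance helper is stubbed out and is not from this commit):

	#include <linux/dma-mapping.h>

	/* hypothetical cache-maintenance helper, stubbed for the sketch */
	static void foo_sync_for_device(struct device *dev, dma_addr_t addr,
					size_t size, enum dma_data_direction dir)
	{
	}

	static dma_addr_t foo_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
	{
		dma_addr_t handle = page_to_phys(page) + offset;

		/* attributes are plain bits now: test with '&', not dma_get_attr() */
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			foo_sync_for_device(dev, handle, size, dir);

		return handle;
	}
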
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 42d27a62a404..63ff468a7986 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c | |||
@@ -241,7 +241,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, | |||
241 | static dma_addr_t gart_map_page(struct device *dev, struct page *page, | 241 | static dma_addr_t gart_map_page(struct device *dev, struct page *page, |
242 | unsigned long offset, size_t size, | 242 | unsigned long offset, size_t size, |
243 | enum dma_data_direction dir, | 243 | enum dma_data_direction dir, |
244 | struct dma_attrs *attrs) | 244 | unsigned long attrs) |
245 | { | 245 | { |
246 | unsigned long bus; | 246 | unsigned long bus; |
247 | phys_addr_t paddr = page_to_phys(page) + offset; | 247 | phys_addr_t paddr = page_to_phys(page) + offset; |
@@ -263,7 +263,7 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page, | |||
263 | */ | 263 | */ |
264 | static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, | 264 | static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, |
265 | size_t size, enum dma_data_direction dir, | 265 | size_t size, enum dma_data_direction dir, |
266 | struct dma_attrs *attrs) | 266 | unsigned long attrs) |
267 | { | 267 | { |
268 | unsigned long iommu_page; | 268 | unsigned long iommu_page; |
269 | int npages; | 269 | int npages; |
@@ -285,7 +285,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
285 | * Wrapper for pci_unmap_single working with scatterlists. | 285 | * Wrapper for pci_unmap_single working with scatterlists. |
286 | */ | 286 | */ |
287 | static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 287 | static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
288 | enum dma_data_direction dir, struct dma_attrs *attrs) | 288 | enum dma_data_direction dir, unsigned long attrs) |
289 | { | 289 | { |
290 | struct scatterlist *s; | 290 | struct scatterlist *s; |
291 | int i; | 291 | int i; |
@@ -293,7 +293,7 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
293 | for_each_sg(sg, s, nents, i) { | 293 | for_each_sg(sg, s, nents, i) { |
294 | if (!s->dma_length || !s->length) | 294 | if (!s->dma_length || !s->length) |
295 | break; | 295 | break; |
296 | gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL); | 296 | gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0); |
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
@@ -315,7 +315,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, | |||
315 | addr = dma_map_area(dev, addr, s->length, dir, 0); | 315 | addr = dma_map_area(dev, addr, s->length, dir, 0); |
316 | if (addr == bad_dma_addr) { | 316 | if (addr == bad_dma_addr) { |
317 | if (i > 0) | 317 | if (i > 0) |
318 | gart_unmap_sg(dev, sg, i, dir, NULL); | 318 | gart_unmap_sg(dev, sg, i, dir, 0); |
319 | nents = 0; | 319 | nents = 0; |
320 | sg[0].dma_length = 0; | 320 | sg[0].dma_length = 0; |
321 | break; | 321 | break; |
@@ -386,7 +386,7 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems, | |||
386 | * Merge chunks that have page aligned sizes into a continuous mapping. | 386 | * Merge chunks that have page aligned sizes into a continuous mapping. |
387 | */ | 387 | */ |
388 | static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 388 | static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
389 | enum dma_data_direction dir, struct dma_attrs *attrs) | 389 | enum dma_data_direction dir, unsigned long attrs) |
390 | { | 390 | { |
391 | struct scatterlist *s, *ps, *start_sg, *sgmap; | 391 | struct scatterlist *s, *ps, *start_sg, *sgmap; |
392 | int need = 0, nextneed, i, out, start; | 392 | int need = 0, nextneed, i, out, start; |
@@ -456,7 +456,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
456 | 456 | ||
457 | error: | 457 | error: |
458 | flush_gart(); | 458 | flush_gart(); |
459 | gart_unmap_sg(dev, sg, out, dir, NULL); | 459 | gart_unmap_sg(dev, sg, out, dir, 0); |
460 | 460 | ||
461 | /* When it was forced or merged try again in a dumb way */ | 461 | /* When it was forced or merged try again in a dumb way */ |
462 | if (force_iommu || iommu_merge) { | 462 | if (force_iommu || iommu_merge) { |
@@ -476,7 +476,7 @@ error: | |||
476 | /* allocate and map a coherent mapping */ | 476 | /* allocate and map a coherent mapping */ |
477 | static void * | 477 | static void * |
478 | gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, | 478 | gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, |
479 | gfp_t flag, struct dma_attrs *attrs) | 479 | gfp_t flag, unsigned long attrs) |
480 | { | 480 | { |
481 | dma_addr_t paddr; | 481 | dma_addr_t paddr; |
482 | unsigned long align_mask; | 482 | unsigned long align_mask; |
@@ -508,9 +508,9 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, | |||
508 | /* free a coherent mapping */ | 508 | /* free a coherent mapping */ |
509 | static void | 509 | static void |
510 | gart_free_coherent(struct device *dev, size_t size, void *vaddr, | 510 | gart_free_coherent(struct device *dev, size_t size, void *vaddr, |
511 | dma_addr_t dma_addr, struct dma_attrs *attrs) | 511 | dma_addr_t dma_addr, unsigned long attrs) |
512 | { | 512 | { |
513 | gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); | 513 | gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); |
514 | dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); | 514 | dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); |
515 | } | 515 | } |
516 | 516 | ||
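[Editor's note] As the amd_gart_64.c hunks show, internal callers that previously passed a NULL struct dma_attrs pointer now pass 0, and any real attributes are built by OR-ing DMA_ATTR_* flags into the argument. A hypothetical streaming-mapping caller under the new calling convention (names are illustrative, not from this commit):

	#include <linux/dma-mapping.h>

	static int foo_map_buffer(struct device *dev, void *buf, size_t len,
				  dma_addr_t *handle)
	{
		*handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
					       DMA_ATTR_WEAK_ORDERING); /* or 0 for none */
		if (dma_mapping_error(dev, *handle))
			return -ENOMEM;
		return 0;
	}
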
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 7943d38c57ca..20abd912f0e4 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -147,7 +147,7 @@ static int force_enable_local_apic __initdata; | |||
147 | */ | 147 | */ |
148 | static int __init parse_lapic(char *arg) | 148 | static int __init parse_lapic(char *arg) |
149 | { | 149 | { |
150 | if (config_enabled(CONFIG_X86_32) && !arg) | 150 | if (IS_ENABLED(CONFIG_X86_32) && !arg) |
151 | force_enable_local_apic = 1; | 151 | force_enable_local_apic = 1; |
152 | else if (arg && !strncmp(arg, "notscdeadline", 13)) | 152 | else if (arg && !strncmp(arg, "notscdeadline", 13)) |
153 | setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); | 153 | setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); |
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index a5e400afc563..6066d945c40e 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -523,7 +523,7 @@ static int apic_set_affinity(struct irq_data *irq_data, | |||
523 | struct apic_chip_data *data = irq_data->chip_data; | 523 | struct apic_chip_data *data = irq_data->chip_data; |
524 | int err, irq = irq_data->irq; | 524 | int err, irq = irq_data->irq; |
525 | 525 | ||
526 | if (!config_enabled(CONFIG_SMP)) | 526 | if (!IS_ENABLED(CONFIG_SMP)) |
527 | return -EPERM; | 527 | return -EPERM; |
528 | 528 | ||
529 | if (!cpumask_intersects(dest, cpu_online_mask)) | 529 | if (!cpumask_intersects(dest, cpu_online_mask)) |
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 9e231d88bb33..a184c210efba 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c | |||
@@ -159,8 +159,8 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) | |||
159 | struct task_struct *tsk = current; | 159 | struct task_struct *tsk = current; |
160 | int ia32_fxstate = (buf != buf_fx); | 160 | int ia32_fxstate = (buf != buf_fx); |
161 | 161 | ||
162 | ia32_fxstate &= (config_enabled(CONFIG_X86_32) || | 162 | ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) || |
163 | config_enabled(CONFIG_IA32_EMULATION)); | 163 | IS_ENABLED(CONFIG_IA32_EMULATION)); |
164 | 164 | ||
165 | if (!access_ok(VERIFY_WRITE, buf, size)) | 165 | if (!access_ok(VERIFY_WRITE, buf, size)) |
166 | return -EACCES; | 166 | return -EACCES; |
@@ -268,8 +268,8 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) | |||
268 | u64 xfeatures = 0; | 268 | u64 xfeatures = 0; |
269 | int fx_only = 0; | 269 | int fx_only = 0; |
270 | 270 | ||
271 | ia32_fxstate &= (config_enabled(CONFIG_X86_32) || | 271 | ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) || |
272 | config_enabled(CONFIG_IA32_EMULATION)); | 272 | IS_ENABLED(CONFIG_IA32_EMULATION)); |
273 | 273 | ||
274 | if (!buf) { | 274 | if (!buf) { |
275 | fpu__clear(fpu); | 275 | fpu__clear(fpu); |
@@ -416,8 +416,8 @@ void fpu__init_prepare_fx_sw_frame(void) | |||
416 | fx_sw_reserved.xfeatures = xfeatures_mask; | 416 | fx_sw_reserved.xfeatures = xfeatures_mask; |
417 | fx_sw_reserved.xstate_size = fpu_user_xstate_size; | 417 | fx_sw_reserved.xstate_size = fpu_user_xstate_size; |
418 | 418 | ||
419 | if (config_enabled(CONFIG_IA32_EMULATION) || | 419 | if (IS_ENABLED(CONFIG_IA32_EMULATION) || |
420 | config_enabled(CONFIG_X86_32)) { | 420 | IS_ENABLED(CONFIG_X86_32)) { |
421 | int fsave_header_size = sizeof(struct fregs_state); | 421 | int fsave_header_size = sizeof(struct fregs_state); |
422 | 422 | ||
423 | fx_sw_reserved_ia32 = fx_sw_reserved; | 423 | fx_sw_reserved_ia32 = fx_sw_reserved; |
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 833b1d329c47..5d400ba1349d 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c | |||
@@ -340,7 +340,7 @@ static inline struct iommu_table *find_iommu_table(struct device *dev) | |||
340 | 340 | ||
341 | static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, | 341 | static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, |
342 | int nelems,enum dma_data_direction dir, | 342 | int nelems,enum dma_data_direction dir, |
343 | struct dma_attrs *attrs) | 343 | unsigned long attrs) |
344 | { | 344 | { |
345 | struct iommu_table *tbl = find_iommu_table(dev); | 345 | struct iommu_table *tbl = find_iommu_table(dev); |
346 | struct scatterlist *s; | 346 | struct scatterlist *s; |
@@ -364,7 +364,7 @@ static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
364 | 364 | ||
365 | static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | 365 | static int calgary_map_sg(struct device *dev, struct scatterlist *sg, |
366 | int nelems, enum dma_data_direction dir, | 366 | int nelems, enum dma_data_direction dir, |
367 | struct dma_attrs *attrs) | 367 | unsigned long attrs) |
368 | { | 368 | { |
369 | struct iommu_table *tbl = find_iommu_table(dev); | 369 | struct iommu_table *tbl = find_iommu_table(dev); |
370 | struct scatterlist *s; | 370 | struct scatterlist *s; |
@@ -396,7 +396,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
396 | 396 | ||
397 | return nelems; | 397 | return nelems; |
398 | error: | 398 | error: |
399 | calgary_unmap_sg(dev, sg, nelems, dir, NULL); | 399 | calgary_unmap_sg(dev, sg, nelems, dir, 0); |
400 | for_each_sg(sg, s, nelems, i) { | 400 | for_each_sg(sg, s, nelems, i) { |
401 | sg->dma_address = DMA_ERROR_CODE; | 401 | sg->dma_address = DMA_ERROR_CODE; |
402 | sg->dma_length = 0; | 402 | sg->dma_length = 0; |
@@ -407,7 +407,7 @@ error: | |||
407 | static dma_addr_t calgary_map_page(struct device *dev, struct page *page, | 407 | static dma_addr_t calgary_map_page(struct device *dev, struct page *page, |
408 | unsigned long offset, size_t size, | 408 | unsigned long offset, size_t size, |
409 | enum dma_data_direction dir, | 409 | enum dma_data_direction dir, |
410 | struct dma_attrs *attrs) | 410 | unsigned long attrs) |
411 | { | 411 | { |
412 | void *vaddr = page_address(page) + offset; | 412 | void *vaddr = page_address(page) + offset; |
413 | unsigned long uaddr; | 413 | unsigned long uaddr; |
@@ -422,7 +422,7 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page, | |||
422 | 422 | ||
423 | static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, | 423 | static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, |
424 | size_t size, enum dma_data_direction dir, | 424 | size_t size, enum dma_data_direction dir, |
425 | struct dma_attrs *attrs) | 425 | unsigned long attrs) |
426 | { | 426 | { |
427 | struct iommu_table *tbl = find_iommu_table(dev); | 427 | struct iommu_table *tbl = find_iommu_table(dev); |
428 | unsigned int npages; | 428 | unsigned int npages; |
@@ -432,7 +432,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
432 | } | 432 | } |
433 | 433 | ||
434 | static void* calgary_alloc_coherent(struct device *dev, size_t size, | 434 | static void* calgary_alloc_coherent(struct device *dev, size_t size, |
435 | dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) | 435 | dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) |
436 | { | 436 | { |
437 | void *ret = NULL; | 437 | void *ret = NULL; |
438 | dma_addr_t mapping; | 438 | dma_addr_t mapping; |
@@ -466,7 +466,7 @@ error: | |||
466 | 466 | ||
467 | static void calgary_free_coherent(struct device *dev, size_t size, | 467 | static void calgary_free_coherent(struct device *dev, size_t size, |
468 | void *vaddr, dma_addr_t dma_handle, | 468 | void *vaddr, dma_addr_t dma_handle, |
469 | struct dma_attrs *attrs) | 469 | unsigned long attrs) |
470 | { | 470 | { |
471 | unsigned int npages; | 471 | unsigned int npages; |
472 | struct iommu_table *tbl = find_iommu_table(dev); | 472 | struct iommu_table *tbl = find_iommu_table(dev); |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 6ba014c61d62..d30c37750765 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -77,7 +77,7 @@ void __init pci_iommu_alloc(void) | |||
77 | } | 77 | } |
78 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, | 78 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
79 | dma_addr_t *dma_addr, gfp_t flag, | 79 | dma_addr_t *dma_addr, gfp_t flag, |
80 | struct dma_attrs *attrs) | 80 | unsigned long attrs) |
81 | { | 81 | { |
82 | unsigned long dma_mask; | 82 | unsigned long dma_mask; |
83 | struct page *page; | 83 | struct page *page; |
@@ -120,7 +120,7 @@ again: | |||
120 | } | 120 | } |
121 | 121 | ||
122 | void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, | 122 | void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, |
123 | dma_addr_t dma_addr, struct dma_attrs *attrs) | 123 | dma_addr_t dma_addr, unsigned long attrs) |
124 | { | 124 | { |
125 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 125 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
126 | struct page *page = virt_to_page(vaddr); | 126 | struct page *page = virt_to_page(vaddr); |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index da15918d1c81..00e71ce396a8 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -28,7 +28,7 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | |||
28 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | 28 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, |
29 | unsigned long offset, size_t size, | 29 | unsigned long offset, size_t size, |
30 | enum dma_data_direction dir, | 30 | enum dma_data_direction dir, |
31 | struct dma_attrs *attrs) | 31 | unsigned long attrs) |
32 | { | 32 | { |
33 | dma_addr_t bus = page_to_phys(page) + offset; | 33 | dma_addr_t bus = page_to_phys(page) + offset; |
34 | WARN_ON(size == 0); | 34 | WARN_ON(size == 0); |
@@ -55,7 +55,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | |||
55 | */ | 55 | */ |
56 | static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, | 56 | static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, |
57 | int nents, enum dma_data_direction dir, | 57 | int nents, enum dma_data_direction dir, |
58 | struct dma_attrs *attrs) | 58 | unsigned long attrs) |
59 | { | 59 | { |
60 | struct scatterlist *s; | 60 | struct scatterlist *s; |
61 | int i; | 61 | int i; |
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 5069ef560d83..b47edb8f5256 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c | |||
@@ -16,7 +16,7 @@ int swiotlb __read_mostly; | |||
16 | 16 | ||
17 | void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 17 | void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
18 | dma_addr_t *dma_handle, gfp_t flags, | 18 | dma_addr_t *dma_handle, gfp_t flags, |
19 | struct dma_attrs *attrs) | 19 | unsigned long attrs) |
20 | { | 20 | { |
21 | void *vaddr; | 21 | void *vaddr; |
22 | 22 | ||
@@ -37,7 +37,7 @@ void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
37 | 37 | ||
38 | void x86_swiotlb_free_coherent(struct device *dev, size_t size, | 38 | void x86_swiotlb_free_coherent(struct device *dev, size_t size, |
39 | void *vaddr, dma_addr_t dma_addr, | 39 | void *vaddr, dma_addr_t dma_addr, |
40 | struct dma_attrs *attrs) | 40 | unsigned long attrs) |
41 | { | 41 | { |
42 | if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr))) | 42 | if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr))) |
43 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); | 43 | swiotlb_free_coherent(dev, size, vaddr, dma_addr); |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 22cc2f9f8aec..99f285b512db 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -146,7 +146,7 @@ static int restore_sigcontext(struct pt_regs *regs, | |||
146 | buf = (void __user *)buf_val; | 146 | buf = (void __user *)buf_val; |
147 | } get_user_catch(err); | 147 | } get_user_catch(err); |
148 | 148 | ||
149 | err |= fpu__restore_sig(buf, config_enabled(CONFIG_X86_32)); | 149 | err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32)); |
150 | 150 | ||
151 | force_iret(); | 151 | force_iret(); |
152 | 152 | ||
@@ -245,14 +245,14 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | |||
245 | struct fpu *fpu = ¤t->thread.fpu; | 245 | struct fpu *fpu = ¤t->thread.fpu; |
246 | 246 | ||
247 | /* redzone */ | 247 | /* redzone */ |
248 | if (config_enabled(CONFIG_X86_64)) | 248 | if (IS_ENABLED(CONFIG_X86_64)) |
249 | sp -= 128; | 249 | sp -= 128; |
250 | 250 | ||
251 | /* This is the X/Open sanctioned signal stack switching. */ | 251 | /* This is the X/Open sanctioned signal stack switching. */ |
252 | if (ka->sa.sa_flags & SA_ONSTACK) { | 252 | if (ka->sa.sa_flags & SA_ONSTACK) { |
253 | if (sas_ss_flags(sp) == 0) | 253 | if (sas_ss_flags(sp) == 0) |
254 | sp = current->sas_ss_sp + current->sas_ss_size; | 254 | sp = current->sas_ss_sp + current->sas_ss_size; |
255 | } else if (config_enabled(CONFIG_X86_32) && | 255 | } else if (IS_ENABLED(CONFIG_X86_32) && |
256 | !onsigstack && | 256 | !onsigstack && |
257 | (regs->ss & 0xffff) != __USER_DS && | 257 | (regs->ss & 0xffff) != __USER_DS && |
258 | !(ka->sa.sa_flags & SA_RESTORER) && | 258 | !(ka->sa.sa_flags & SA_RESTORER) && |
@@ -262,7 +262,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | |||
262 | } | 262 | } |
263 | 263 | ||
264 | if (fpu->fpstate_active) { | 264 | if (fpu->fpstate_active) { |
265 | sp = fpu__alloc_mathframe(sp, config_enabled(CONFIG_X86_32), | 265 | sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32), |
266 | &buf_fx, &math_size); | 266 | &buf_fx, &math_size); |
267 | *fpstate = (void __user *)sp; | 267 | *fpstate = (void __user *)sp; |
268 | } | 268 | } |
@@ -662,18 +662,18 @@ badframe: | |||
662 | 662 | ||
663 | static inline int is_ia32_compat_frame(void) | 663 | static inline int is_ia32_compat_frame(void) |
664 | { | 664 | { |
665 | return config_enabled(CONFIG_IA32_EMULATION) && | 665 | return IS_ENABLED(CONFIG_IA32_EMULATION) && |
666 | test_thread_flag(TIF_IA32); | 666 | test_thread_flag(TIF_IA32); |
667 | } | 667 | } |
668 | 668 | ||
669 | static inline int is_ia32_frame(void) | 669 | static inline int is_ia32_frame(void) |
670 | { | 670 | { |
671 | return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame(); | 671 | return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(); |
672 | } | 672 | } |
673 | 673 | ||
674 | static inline int is_x32_frame(void) | 674 | static inline int is_x32_frame(void) |
675 | { | 675 | { |
676 | return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32); | 676 | return IS_ENABLED(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32); |
677 | } | 677 | } |
678 | 678 | ||
679 | static int | 679 | static int |
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 5ceda85b8687..052c1cb76305 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c | |||
@@ -169,7 +169,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev, | |||
169 | size_t size, | 169 | size_t size, |
170 | dma_addr_t *dma_handle, | 170 | dma_addr_t *dma_handle, |
171 | gfp_t flags, | 171 | gfp_t flags, |
172 | struct dma_attrs *attrs) | 172 | unsigned long attrs) |
173 | { | 173 | { |
174 | void *vaddr; | 174 | void *vaddr; |
175 | 175 | ||
diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c index e88b4176260f..b814ca675131 100644 --- a/arch/x86/pci/vmd.c +++ b/arch/x86/pci/vmd.c | |||
@@ -274,14 +274,14 @@ static struct dma_map_ops *vmd_dma_ops(struct device *dev) | |||
274 | } | 274 | } |
275 | 275 | ||
276 | static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, | 276 | static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, |
277 | gfp_t flag, struct dma_attrs *attrs) | 277 | gfp_t flag, unsigned long attrs) |
278 | { | 278 | { |
279 | return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, | 279 | return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, |
280 | attrs); | 280 | attrs); |
281 | } | 281 | } |
282 | 282 | ||
283 | static void vmd_free(struct device *dev, size_t size, void *vaddr, | 283 | static void vmd_free(struct device *dev, size_t size, void *vaddr, |
284 | dma_addr_t addr, struct dma_attrs *attrs) | 284 | dma_addr_t addr, unsigned long attrs) |
285 | { | 285 | { |
286 | return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, | 286 | return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, |
287 | attrs); | 287 | attrs); |
@@ -289,7 +289,7 @@ static void vmd_free(struct device *dev, size_t size, void *vaddr, | |||
289 | 289 | ||
290 | static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, | 290 | static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, |
291 | void *cpu_addr, dma_addr_t addr, size_t size, | 291 | void *cpu_addr, dma_addr_t addr, size_t size, |
292 | struct dma_attrs *attrs) | 292 | unsigned long attrs) |
293 | { | 293 | { |
294 | return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, | 294 | return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, |
295 | size, attrs); | 295 | size, attrs); |
@@ -297,7 +297,7 @@ static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, | |||
297 | 297 | ||
298 | static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, | 298 | static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, |
299 | void *cpu_addr, dma_addr_t addr, size_t size, | 299 | void *cpu_addr, dma_addr_t addr, size_t size, |
300 | struct dma_attrs *attrs) | 300 | unsigned long attrs) |
301 | { | 301 | { |
302 | return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, | 302 | return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, |
303 | addr, size, attrs); | 303 | addr, size, attrs); |
@@ -306,26 +306,26 @@ static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, | |||
306 | static dma_addr_t vmd_map_page(struct device *dev, struct page *page, | 306 | static dma_addr_t vmd_map_page(struct device *dev, struct page *page, |
307 | unsigned long offset, size_t size, | 307 | unsigned long offset, size_t size, |
308 | enum dma_data_direction dir, | 308 | enum dma_data_direction dir, |
309 | struct dma_attrs *attrs) | 309 | unsigned long attrs) |
310 | { | 310 | { |
311 | return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, | 311 | return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, |
312 | dir, attrs); | 312 | dir, attrs); |
313 | } | 313 | } |
314 | 314 | ||
315 | static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, | 315 | static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, |
316 | enum dma_data_direction dir, struct dma_attrs *attrs) | 316 | enum dma_data_direction dir, unsigned long attrs) |
317 | { | 317 | { |
318 | vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); | 318 | vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); |
319 | } | 319 | } |
320 | 320 | ||
321 | static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 321 | static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
322 | enum dma_data_direction dir, struct dma_attrs *attrs) | 322 | enum dma_data_direction dir, unsigned long attrs) |
323 | { | 323 | { |
324 | return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); | 324 | return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); |
325 | } | 325 | } |
326 | 326 | ||
327 | static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 327 | static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
328 | enum dma_data_direction dir, struct dma_attrs *attrs) | 328 | enum dma_data_direction dir, unsigned long attrs) |
329 | { | 329 | { |
330 | vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); | 330 | vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); |
331 | } | 331 | } |
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index cd66698348ca..1e68806d6695 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c | |||
@@ -142,7 +142,7 @@ static void xtensa_sync_sg_for_device(struct device *dev, | |||
142 | 142 | ||
143 | static void *xtensa_dma_alloc(struct device *dev, size_t size, | 143 | static void *xtensa_dma_alloc(struct device *dev, size_t size, |
144 | dma_addr_t *handle, gfp_t flag, | 144 | dma_addr_t *handle, gfp_t flag, |
145 | struct dma_attrs *attrs) | 145 | unsigned long attrs) |
146 | { | 146 | { |
147 | unsigned long ret; | 147 | unsigned long ret; |
148 | unsigned long uncached = 0; | 148 | unsigned long uncached = 0; |
@@ -171,7 +171,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size, | |||
171 | } | 171 | } |
172 | 172 | ||
173 | static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, | 173 | static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, |
174 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 174 | dma_addr_t dma_handle, unsigned long attrs) |
175 | { | 175 | { |
176 | unsigned long addr = (unsigned long)vaddr + | 176 | unsigned long addr = (unsigned long)vaddr + |
177 | XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; | 177 | XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; |
@@ -185,7 +185,7 @@ static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, | |||
185 | static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, | 185 | static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, |
186 | unsigned long offset, size_t size, | 186 | unsigned long offset, size_t size, |
187 | enum dma_data_direction dir, | 187 | enum dma_data_direction dir, |
188 | struct dma_attrs *attrs) | 188 | unsigned long attrs) |
189 | { | 189 | { |
190 | dma_addr_t dma_handle = page_to_phys(page) + offset; | 190 | dma_addr_t dma_handle = page_to_phys(page) + offset; |
191 | 191 | ||
@@ -195,14 +195,14 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, | |||
195 | 195 | ||
196 | static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, | 196 | static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, |
197 | size_t size, enum dma_data_direction dir, | 197 | size_t size, enum dma_data_direction dir, |
198 | struct dma_attrs *attrs) | 198 | unsigned long attrs) |
199 | { | 199 | { |
200 | xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); | 200 | xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); |
201 | } | 201 | } |
202 | 202 | ||
203 | static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, | 203 | static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, |
204 | int nents, enum dma_data_direction dir, | 204 | int nents, enum dma_data_direction dir, |
205 | struct dma_attrs *attrs) | 205 | unsigned long attrs) |
206 | { | 206 | { |
207 | struct scatterlist *s; | 207 | struct scatterlist *s; |
208 | int i; | 208 | int i; |
@@ -217,7 +217,7 @@ static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, | |||
217 | static void xtensa_unmap_sg(struct device *dev, | 217 | static void xtensa_unmap_sg(struct device *dev, |
218 | struct scatterlist *sg, int nents, | 218 | struct scatterlist *sg, int nents, |
219 | enum dma_data_direction dir, | 219 | enum dma_data_direction dir, |
220 | struct dma_attrs *attrs) | 220 | unsigned long attrs) |
221 | { | 221 | { |
222 | struct scatterlist *s; | 222 | struct scatterlist *s; |
223 | int i; | 223 | int i; |
diff --git a/block/Kconfig b/block/Kconfig index 0363cd731320..161491d0a879 100644 --- a/block/Kconfig +++ b/block/Kconfig | |||
@@ -88,19 +88,6 @@ config BLK_DEV_INTEGRITY | |||
88 | T10/SCSI Data Integrity Field or the T13/ATA External Path | 88 | T10/SCSI Data Integrity Field or the T13/ATA External Path |
89 | Protection. If in doubt, say N. | 89 | Protection. If in doubt, say N. |
90 | 90 | ||
91 | config BLK_DEV_DAX | ||
92 | bool "Block device DAX support" | ||
93 | depends on FS_DAX | ||
94 | depends on BROKEN | ||
95 | help | ||
96 | When DAX support is available (CONFIG_FS_DAX) raw block | ||
97 | devices can also support direct userspace access to the | ||
98 | storage capacity via MMAP(2) similar to a file on a | ||
99 | DAX-enabled filesystem. However, the DAX I/O-path disables | ||
100 | some standard I/O-statistics, and the MMAP(2) path has some | ||
101 | operational differences due to bypassing the page | ||
102 | cache. If in doubt, say N. | ||
103 | |||
104 | config BLK_DEV_THROTTLING | 91 | config BLK_DEV_THROTTLING |
105 | bool "Block layer bio throttling support" | 92 | bool "Block layer bio throttling support" |
106 | depends on BLK_CGROUP=y | 93 | depends on BLK_CGROUP=y |
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c index b6eb875d4af3..62aa3cf09b4d 100644 --- a/drivers/firmware/broadcom/bcm47xx_sprom.c +++ b/drivers/firmware/broadcom/bcm47xx_sprom.c | |||
@@ -669,7 +669,7 @@ static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out) | |||
669 | case BCMA_HOSTTYPE_PCI: | 669 | case BCMA_HOSTTYPE_PCI: |
670 | memset(out, 0, sizeof(struct ssb_sprom)); | 670 | memset(out, 0, sizeof(struct ssb_sprom)); |
671 | /* On BCM47XX all PCI buses share the same domain */ | 671 | /* On BCM47XX all PCI buses share the same domain */ |
672 | if (config_enabled(CONFIG_BCM47XX)) | 672 | if (IS_ENABLED(CONFIG_BCM47XX)) |
673 | snprintf(buf, sizeof(buf), "pci/%u/%u/", | 673 | snprintf(buf, sizeof(buf), "pci/%u/%u/", |
674 | bus->host_pci->bus->number + 1, | 674 | bus->host_pci->bus->number + 1, |
675 | PCI_SLOT(bus->host_pci->devfn)); | 675 | PCI_SLOT(bus->host_pci->devfn)); |
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index c9b9fdf6cfbb..d61410299ec0 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig | |||
@@ -21,6 +21,7 @@ config FPGA_MGR_SOCFPGA | |||
21 | 21 | ||
22 | config FPGA_MGR_ZYNQ_FPGA | 22 | config FPGA_MGR_ZYNQ_FPGA |
23 | tristate "Xilinx Zynq FPGA" | 23 | tristate "Xilinx Zynq FPGA" |
24 | depends on HAS_DMA | ||
24 | help | 25 | help |
25 | FPGA manager driver support for Xilinx Zynq FPGAs. | 26 | FPGA manager driver support for Xilinx Zynq FPGAs. |
26 | 27 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index fb49443bfd32..4cfb39d543b4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c | |||
@@ -52,7 +52,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info, | |||
52 | 52 | ||
53 | ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie, | 53 | ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie, |
54 | exynos_gem->dma_addr, exynos_gem->size, | 54 | exynos_gem->dma_addr, exynos_gem->size, |
55 | &exynos_gem->dma_attrs); | 55 | exynos_gem->dma_attrs); |
56 | if (ret < 0) { | 56 | if (ret < 0) { |
57 | DRM_ERROR("failed to mmap.\n"); | 57 | DRM_ERROR("failed to mmap.\n"); |
58 | return ret; | 58 | return ret; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 8564c3da0d22..4bf00f57ffe8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/workqueue.h> | 18 | #include <linux/workqueue.h> |
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/dma-attrs.h> | ||
21 | #include <linux/of.h> | 20 | #include <linux/of.h> |
22 | 21 | ||
23 | #include <drm/drmP.h> | 22 | #include <drm/drmP.h> |
@@ -235,7 +234,7 @@ struct g2d_data { | |||
235 | struct mutex cmdlist_mutex; | 234 | struct mutex cmdlist_mutex; |
236 | dma_addr_t cmdlist_pool; | 235 | dma_addr_t cmdlist_pool; |
237 | void *cmdlist_pool_virt; | 236 | void *cmdlist_pool_virt; |
238 | struct dma_attrs cmdlist_dma_attrs; | 237 | unsigned long cmdlist_dma_attrs; |
239 | 238 | ||
240 | /* runqueue*/ | 239 | /* runqueue*/ |
241 | struct g2d_runqueue_node *runqueue_node; | 240 | struct g2d_runqueue_node *runqueue_node; |
@@ -256,13 +255,12 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) | |||
256 | int ret; | 255 | int ret; |
257 | struct g2d_buf_info *buf_info; | 256 | struct g2d_buf_info *buf_info; |
258 | 257 | ||
259 | init_dma_attrs(&g2d->cmdlist_dma_attrs); | 258 | g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE; |
260 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); | ||
261 | 259 | ||
262 | g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev), | 260 | g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev), |
263 | G2D_CMDLIST_POOL_SIZE, | 261 | G2D_CMDLIST_POOL_SIZE, |
264 | &g2d->cmdlist_pool, GFP_KERNEL, | 262 | &g2d->cmdlist_pool, GFP_KERNEL, |
265 | &g2d->cmdlist_dma_attrs); | 263 | g2d->cmdlist_dma_attrs); |
266 | if (!g2d->cmdlist_pool_virt) { | 264 | if (!g2d->cmdlist_pool_virt) { |
267 | dev_err(dev, "failed to allocate dma memory\n"); | 265 | dev_err(dev, "failed to allocate dma memory\n"); |
268 | return -ENOMEM; | 266 | return -ENOMEM; |
@@ -295,7 +293,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) | |||
295 | err: | 293 | err: |
296 | dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, | 294 | dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, |
297 | g2d->cmdlist_pool_virt, | 295 | g2d->cmdlist_pool_virt, |
298 | g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); | 296 | g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); |
299 | return ret; | 297 | return ret; |
300 | } | 298 | } |
301 | 299 | ||
@@ -309,7 +307,7 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d) | |||
309 | dma_free_attrs(to_dma_dev(subdrv->drm_dev), | 307 | dma_free_attrs(to_dma_dev(subdrv->drm_dev), |
310 | G2D_CMDLIST_POOL_SIZE, | 308 | G2D_CMDLIST_POOL_SIZE, |
311 | g2d->cmdlist_pool_virt, | 309 | g2d->cmdlist_pool_virt, |
312 | g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); | 310 | g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); |
313 | } | 311 | } |
314 | } | 312 | } |
315 | 313 | ||
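[Editor's note] The exynos g2d hunks above illustrate the driver-side half of the conversion: attribute sets are no longer constructed with init_dma_attrs()/dma_set_attr(), the flags are simply OR-ed into an unsigned long that can live directly in driver state and is passed by value (no more "&attrs"). A sketch with a hypothetical device-private structure, not taken from this commit:

	#include <linux/dma-mapping.h>

	struct foo_dev {
		void *cmd_virt;
		dma_addr_t cmd_dma;
		unsigned long cmd_attrs;	/* was: struct dma_attrs */
	};

	static int foo_alloc_cmd_pool(struct device *dev, struct foo_dev *fd,
				      size_t size)
	{
		fd->cmd_attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING;

		/* attrs is passed by value; the same word is reused for free */
		fd->cmd_virt = dma_alloc_attrs(dev, size, &fd->cmd_dma,
					       GFP_KERNEL, fd->cmd_attrs);
		return fd->cmd_virt ? 0 : -ENOMEM;
	}

The same pattern covers the DEFINE_DMA_ATTRS() locals converted in msm_drv.c and the multi-flag initialisation in the nouveau gk20a instmem code further down.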
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index cdf9f1af4347..f2ae72ba7d5a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -24,7 +24,7 @@ | |||
24 | static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) | 24 | static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) |
25 | { | 25 | { |
26 | struct drm_device *dev = exynos_gem->base.dev; | 26 | struct drm_device *dev = exynos_gem->base.dev; |
27 | enum dma_attr attr; | 27 | unsigned long attr; |
28 | unsigned int nr_pages; | 28 | unsigned int nr_pages; |
29 | struct sg_table sgt; | 29 | struct sg_table sgt; |
30 | int ret = -ENOMEM; | 30 | int ret = -ENOMEM; |
@@ -34,7 +34,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) | |||
34 | return 0; | 34 | return 0; |
35 | } | 35 | } |
36 | 36 | ||
37 | init_dma_attrs(&exynos_gem->dma_attrs); | 37 | exynos_gem->dma_attrs = 0; |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * if EXYNOS_BO_CONTIG, fully physically contiguous memory | 40 | * if EXYNOS_BO_CONTIG, fully physically contiguous memory |
@@ -42,7 +42,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) | |||
42 | * as possible. | 42 | * as possible. |
43 | */ | 43 | */ |
44 | if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG)) | 44 | if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG)) |
45 | dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs); | 45 | exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS; |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping | 48 | * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping |
@@ -54,8 +54,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) | |||
54 | else | 54 | else |
55 | attr = DMA_ATTR_NON_CONSISTENT; | 55 | attr = DMA_ATTR_NON_CONSISTENT; |
56 | 56 | ||
57 | dma_set_attr(attr, &exynos_gem->dma_attrs); | 57 | exynos_gem->dma_attrs |= attr; |
58 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs); | 58 | exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; |
59 | 59 | ||
60 | nr_pages = exynos_gem->size >> PAGE_SHIFT; | 60 | nr_pages = exynos_gem->size >> PAGE_SHIFT; |
61 | 61 | ||
@@ -67,7 +67,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) | |||
67 | 67 | ||
68 | exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size, | 68 | exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size, |
69 | &exynos_gem->dma_addr, GFP_KERNEL, | 69 | &exynos_gem->dma_addr, GFP_KERNEL, |
70 | &exynos_gem->dma_attrs); | 70 | exynos_gem->dma_attrs); |
71 | if (!exynos_gem->cookie) { | 71 | if (!exynos_gem->cookie) { |
72 | DRM_ERROR("failed to allocate buffer.\n"); | 72 | DRM_ERROR("failed to allocate buffer.\n"); |
73 | goto err_free; | 73 | goto err_free; |
@@ -75,7 +75,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) | |||
75 | 75 | ||
76 | ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie, | 76 | ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie, |
77 | exynos_gem->dma_addr, exynos_gem->size, | 77 | exynos_gem->dma_addr, exynos_gem->size, |
78 | &exynos_gem->dma_attrs); | 78 | exynos_gem->dma_attrs); |
79 | if (ret < 0) { | 79 | if (ret < 0) { |
80 | DRM_ERROR("failed to get sgtable.\n"); | 80 | DRM_ERROR("failed to get sgtable.\n"); |
81 | goto err_dma_free; | 81 | goto err_dma_free; |
@@ -99,7 +99,7 @@ err_sgt_free: | |||
99 | sg_free_table(&sgt); | 99 | sg_free_table(&sgt); |
100 | err_dma_free: | 100 | err_dma_free: |
101 | dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, | 101 | dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, |
102 | exynos_gem->dma_addr, &exynos_gem->dma_attrs); | 102 | exynos_gem->dma_addr, exynos_gem->dma_attrs); |
103 | err_free: | 103 | err_free: |
104 | drm_free_large(exynos_gem->pages); | 104 | drm_free_large(exynos_gem->pages); |
105 | 105 | ||
@@ -120,7 +120,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem) | |||
120 | 120 | ||
121 | dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, | 121 | dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, |
122 | (dma_addr_t)exynos_gem->dma_addr, | 122 | (dma_addr_t)exynos_gem->dma_addr, |
123 | &exynos_gem->dma_attrs); | 123 | exynos_gem->dma_attrs); |
124 | 124 | ||
125 | drm_free_large(exynos_gem->pages); | 125 | drm_free_large(exynos_gem->pages); |
126 | } | 126 | } |
@@ -346,7 +346,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, | |||
346 | 346 | ||
347 | ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie, | 347 | ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie, |
348 | exynos_gem->dma_addr, exynos_gem->size, | 348 | exynos_gem->dma_addr, exynos_gem->size, |
349 | &exynos_gem->dma_attrs); | 349 | exynos_gem->dma_attrs); |
350 | if (ret < 0) { | 350 | if (ret < 0) { |
351 | DRM_ERROR("failed to mmap.\n"); | 351 | DRM_ERROR("failed to mmap.\n"); |
352 | return ret; | 352 | return ret; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 78100742281d..df7c543d6558 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -50,7 +50,7 @@ struct exynos_drm_gem { | |||
50 | void *cookie; | 50 | void *cookie; |
51 | void __iomem *kvaddr; | 51 | void __iomem *kvaddr; |
52 | dma_addr_t dma_addr; | 52 | dma_addr_t dma_addr; |
53 | struct dma_attrs dma_attrs; | 53 | unsigned long dma_attrs; |
54 | struct page **pages; | 54 | struct page **pages; |
55 | struct sg_table *sgt; | 55 | struct sg_table *sgt; |
56 | }; | 56 | }; |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index fa2ec0cd00e8..7abc550ebc00 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c | |||
@@ -54,15 +54,14 @@ struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, | |||
54 | 54 | ||
55 | obj = &mtk_gem->base; | 55 | obj = &mtk_gem->base; |
56 | 56 | ||
57 | init_dma_attrs(&mtk_gem->dma_attrs); | 57 | mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; |
58 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs); | ||
59 | 58 | ||
60 | if (!alloc_kmap) | 59 | if (!alloc_kmap) |
61 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs); | 60 | mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; |
62 | 61 | ||
63 | mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, | 62 | mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, |
64 | &mtk_gem->dma_addr, GFP_KERNEL, | 63 | &mtk_gem->dma_addr, GFP_KERNEL, |
65 | &mtk_gem->dma_attrs); | 64 | mtk_gem->dma_attrs); |
66 | if (!mtk_gem->cookie) { | 65 | if (!mtk_gem->cookie) { |
67 | DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); | 66 | DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); |
68 | ret = -ENOMEM; | 67 | ret = -ENOMEM; |
@@ -93,7 +92,7 @@ void mtk_drm_gem_free_object(struct drm_gem_object *obj) | |||
93 | drm_prime_gem_destroy(obj, mtk_gem->sg); | 92 | drm_prime_gem_destroy(obj, mtk_gem->sg); |
94 | else | 93 | else |
95 | dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, | 94 | dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, |
96 | mtk_gem->dma_addr, &mtk_gem->dma_attrs); | 95 | mtk_gem->dma_addr, mtk_gem->dma_attrs); |
97 | 96 | ||
98 | /* release file pointer to gem object. */ | 97 | /* release file pointer to gem object. */ |
99 | drm_gem_object_release(obj); | 98 | drm_gem_object_release(obj); |
@@ -173,7 +172,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, | |||
173 | vma->vm_pgoff = 0; | 172 | vma->vm_pgoff = 0; |
174 | 173 | ||
175 | ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, | 174 | ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, |
176 | mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs); | 175 | mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); |
177 | if (ret) | 176 | if (ret) |
178 | drm_gem_vm_close(vma); | 177 | drm_gem_vm_close(vma); |
179 | 178 | ||
@@ -224,7 +223,7 @@ struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) | |||
224 | 223 | ||
225 | ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, | 224 | ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, |
226 | mtk_gem->dma_addr, obj->size, | 225 | mtk_gem->dma_addr, obj->size, |
227 | &mtk_gem->dma_attrs); | 226 | mtk_gem->dma_attrs); |
228 | if (ret) { | 227 | if (ret) { |
229 | DRM_ERROR("failed to allocate sgt, %d\n", ret); | 228 | DRM_ERROR("failed to allocate sgt, %d\n", ret); |
230 | kfree(sgt); | 229 | kfree(sgt); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index 3a2a5624a1cb..2752718fa5b2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h | |||
@@ -35,7 +35,7 @@ struct mtk_drm_gem_obj { | |||
35 | void *cookie; | 35 | void *cookie; |
36 | void *kvaddr; | 36 | void *kvaddr; |
37 | dma_addr_t dma_addr; | 37 | dma_addr_t dma_addr; |
38 | struct dma_attrs dma_attrs; | 38 | unsigned long dma_attrs; |
39 | struct sg_table *sg; | 39 | struct sg_table *sg; |
40 | }; | 40 | }; |
41 | 41 | ||
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 26f859ec24b3..8a0237008f74 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -238,11 +238,10 @@ static int msm_drm_uninit(struct device *dev) | |||
238 | } | 238 | } |
239 | 239 | ||
240 | if (priv->vram.paddr) { | 240 | if (priv->vram.paddr) { |
241 | DEFINE_DMA_ATTRS(attrs); | 241 | unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING; |
242 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); | ||
243 | drm_mm_takedown(&priv->vram.mm); | 242 | drm_mm_takedown(&priv->vram.mm); |
244 | dma_free_attrs(dev, priv->vram.size, NULL, | 243 | dma_free_attrs(dev, priv->vram.size, NULL, |
245 | priv->vram.paddr, &attrs); | 244 | priv->vram.paddr, attrs); |
246 | } | 245 | } |
247 | 246 | ||
248 | component_unbind_all(dev, ddev); | 247 | component_unbind_all(dev, ddev); |
@@ -310,21 +309,21 @@ static int msm_init_vram(struct drm_device *dev) | |||
310 | } | 309 | } |
311 | 310 | ||
312 | if (size) { | 311 | if (size) { |
313 | DEFINE_DMA_ATTRS(attrs); | 312 | unsigned long attrs = 0; |
314 | void *p; | 313 | void *p; |
315 | 314 | ||
316 | priv->vram.size = size; | 315 | priv->vram.size = size; |
317 | 316 | ||
318 | drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); | 317 | drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); |
319 | 318 | ||
320 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); | 319 | attrs |= DMA_ATTR_NO_KERNEL_MAPPING; |
321 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | 320 | attrs |= DMA_ATTR_WRITE_COMBINE; |
322 | 321 | ||
323 | /* note that for no-kernel-mapping, the vaddr returned | 322 | /* note that for no-kernel-mapping, the vaddr returned |
324 | * is bogus, but non-null if allocation succeeded: | 323 | * is bogus, but non-null if allocation succeeded: |
325 | */ | 324 | */ |
326 | p = dma_alloc_attrs(dev->dev, size, | 325 | p = dma_alloc_attrs(dev->dev, size, |
327 | &priv->vram.paddr, GFP_KERNEL, &attrs); | 326 | &priv->vram.paddr, GFP_KERNEL, attrs); |
328 | if (!p) { | 327 | if (!p) { |
329 | dev_err(dev->dev, "failed to allocate VRAM\n"); | 328 | dev_err(dev->dev, "failed to allocate VRAM\n"); |
330 | priv->vram.paddr = 0; | 329 | priv->vram.paddr = 0; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c index 6b8f2a19b2d9..a6a7fa0d7679 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | |||
@@ -109,7 +109,7 @@ struct gk20a_instmem { | |||
109 | u16 iommu_bit; | 109 | u16 iommu_bit; |
110 | 110 | ||
111 | /* Only used by DMA API */ | 111 | /* Only used by DMA API */ |
112 | struct dma_attrs attrs; | 112 | unsigned long attrs; |
113 | }; | 113 | }; |
114 | #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) | 114 | #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) |
115 | 115 | ||
@@ -293,7 +293,7 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory) | |||
293 | goto out; | 293 | goto out; |
294 | 294 | ||
295 | dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr, | 295 | dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr, |
296 | node->handle, &imem->attrs); | 296 | node->handle, imem->attrs); |
297 | 297 | ||
298 | out: | 298 | out: |
299 | return node; | 299 | return node; |
@@ -386,7 +386,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, | |||
386 | 386 | ||
387 | node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, | 387 | node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, |
388 | &node->handle, GFP_KERNEL, | 388 | &node->handle, GFP_KERNEL, |
389 | &imem->attrs); | 389 | imem->attrs); |
390 | if (!node->base.vaddr) { | 390 | if (!node->base.vaddr) { |
391 | nvkm_error(subdev, "cannot allocate DMA memory\n"); | 391 | nvkm_error(subdev, "cannot allocate DMA memory\n"); |
392 | return -ENOMEM; | 392 | return -ENOMEM; |
@@ -597,10 +597,9 @@ gk20a_instmem_new(struct nvkm_device *device, int index, | |||
597 | 597 | ||
598 | nvkm_info(&imem->base.subdev, "using IOMMU\n"); | 598 | nvkm_info(&imem->base.subdev, "using IOMMU\n"); |
599 | } else { | 599 | } else { |
600 | init_dma_attrs(&imem->attrs); | 600 | imem->attrs = DMA_ATTR_NON_CONSISTENT | |
601 | dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs); | 601 | DMA_ATTR_WEAK_ORDERING | |
602 | dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs); | 602 | DMA_ATTR_WRITE_COMBINE; |
603 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs); | ||
604 | 603 | ||
605 | nvkm_info(&imem->base.subdev, "using DMA API\n"); | 604 | nvkm_info(&imem->base.subdev, "using DMA API\n"); |
606 | } | 605 | } |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 059e902f872d..b70f9423379c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c | |||
@@ -17,8 +17,6 @@ | |||
17 | #include <drm/drm_gem.h> | 17 | #include <drm/drm_gem.h> |
18 | #include <drm/drm_vma_manager.h> | 18 | #include <drm/drm_vma_manager.h> |
19 | 19 | ||
20 | #include <linux/dma-attrs.h> | ||
21 | |||
22 | #include "rockchip_drm_drv.h" | 20 | #include "rockchip_drm_drv.h" |
23 | #include "rockchip_drm_gem.h" | 21 | #include "rockchip_drm_gem.h" |
24 | 22 | ||
@@ -28,15 +26,14 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, | |||
28 | struct drm_gem_object *obj = &rk_obj->base; | 26 | struct drm_gem_object *obj = &rk_obj->base; |
29 | struct drm_device *drm = obj->dev; | 27 | struct drm_device *drm = obj->dev; |
30 | 28 | ||
31 | init_dma_attrs(&rk_obj->dma_attrs); | 29 | rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE; |
32 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs); | ||
33 | 30 | ||
34 | if (!alloc_kmap) | 31 | if (!alloc_kmap) |
35 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs); | 32 | rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; |
36 | 33 | ||
37 | rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, | 34 | rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, |
38 | &rk_obj->dma_addr, GFP_KERNEL, | 35 | &rk_obj->dma_addr, GFP_KERNEL, |
39 | &rk_obj->dma_attrs); | 36 | rk_obj->dma_attrs); |
40 | if (!rk_obj->kvaddr) { | 37 | if (!rk_obj->kvaddr) { |
41 | DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size); | 38 | DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size); |
42 | return -ENOMEM; | 39 | return -ENOMEM; |
@@ -51,7 +48,7 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) | |||
51 | struct drm_device *drm = obj->dev; | 48 | struct drm_device *drm = obj->dev; |
52 | 49 | ||
53 | dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr, | 50 | dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr, |
54 | &rk_obj->dma_attrs); | 51 | rk_obj->dma_attrs); |
55 | } | 52 | } |
56 | 53 | ||
57 | static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, | 54 | static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, |
@@ -70,7 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, | |||
70 | vma->vm_pgoff = 0; | 67 | vma->vm_pgoff = 0; |
71 | 68 | ||
72 | ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, | 69 | ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, |
73 | obj->size, &rk_obj->dma_attrs); | 70 | obj->size, rk_obj->dma_attrs); |
74 | if (ret) | 71 | if (ret) |
75 | drm_gem_vm_close(vma); | 72 | drm_gem_vm_close(vma); |
76 | 73 | ||
@@ -262,7 +259,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) | |||
262 | 259 | ||
263 | ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, | 260 | ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, |
264 | rk_obj->dma_addr, obj->size, | 261 | rk_obj->dma_addr, obj->size, |
265 | &rk_obj->dma_attrs); | 262 | rk_obj->dma_attrs); |
266 | if (ret) { | 263 | if (ret) { |
267 | DRM_ERROR("failed to allocate sgt, %d\n", ret); | 264 | DRM_ERROR("failed to allocate sgt, %d\n", ret); |
268 | kfree(sgt); | 265 | kfree(sgt); |
@@ -276,7 +273,7 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) | |||
276 | { | 273 | { |
277 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); | 274 | struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); |
278 | 275 | ||
279 | if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs)) | 276 | if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) |
280 | return NULL; | 277 | return NULL; |
281 | 278 | ||
282 | return rk_obj->kvaddr; | 279 | return rk_obj->kvaddr; |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index ad22618473a4..18b3488db4ec 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h | |||
@@ -23,7 +23,7 @@ struct rockchip_gem_object { | |||
23 | 23 | ||
24 | void *kvaddr; | 24 | void *kvaddr; |
25 | dma_addr_t dma_addr; | 25 | dma_addr_t dma_addr; |
26 | struct dma_attrs dma_attrs; | 26 | unsigned long dma_attrs; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); | 29 | struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); |
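The rockchip conversion shows the remaining pieces of the same pattern: structures that used to embed a struct dma_attrs now store the unsigned long directly, optional flags are accumulated with |=, and a flag is queried with a bitwise AND instead of dma_get_attr(). Condensed sketch of that shape (names mirror the hunk but are illustrative only):

	unsigned long dma_attrs = DMA_ATTR_WRITE_COMBINE;	/* field was: struct dma_attrs */

	if (!alloc_kmap)
		dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;	/* was: dma_set_attr() */

	if (dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)		/* was: dma_get_attr() */
		return NULL;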
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index fe4d2e1a8b58..c68746ce6624 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/export.h> | 38 | #include <linux/export.h> |
39 | #include <linux/hugetlb.h> | 39 | #include <linux/hugetlb.h> |
40 | #include <linux/dma-attrs.h> | ||
41 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
42 | #include <rdma/ib_umem_odp.h> | 41 | #include <rdma/ib_umem_odp.h> |
43 | 42 | ||
@@ -92,12 +91,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
92 | unsigned long npages; | 91 | unsigned long npages; |
93 | int ret; | 92 | int ret; |
94 | int i; | 93 | int i; |
95 | DEFINE_DMA_ATTRS(attrs); | 94 | unsigned long dma_attrs = 0; |
96 | struct scatterlist *sg, *sg_list_start; | 95 | struct scatterlist *sg, *sg_list_start; |
97 | int need_release = 0; | 96 | int need_release = 0; |
98 | 97 | ||
99 | if (dmasync) | 98 | if (dmasync) |
100 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | 99 | dma_attrs |= DMA_ATTR_WRITE_BARRIER; |
101 | 100 | ||
102 | if (!size) | 101 | if (!size) |
103 | return ERR_PTR(-EINVAL); | 102 | return ERR_PTR(-EINVAL); |
@@ -215,7 +214,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
215 | umem->sg_head.sgl, | 214 | umem->sg_head.sgl, |
216 | umem->npages, | 215 | umem->npages, |
217 | DMA_BIDIRECTIONAL, | 216 | DMA_BIDIRECTIONAL, |
218 | &attrs); | 217 | dma_attrs); |
219 | 218 | ||
220 | if (umem->nmap <= 0) { | 219 | if (umem->nmap <= 0) { |
221 | ret = -ENOMEM; | 220 | ret = -ENOMEM; |
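ib_umem_get() is the case of a caller that only sometimes sets an attribute: DEFINE_DMA_ATTRS(attrs) becomes a zero-initialized unsigned long, and zero simply means "no attributes". That is also why the plain, non-_attrs wrappers stay source-compatible; they are, roughly, the zero-attribute case. A sketch of the idea, not the literal header:

	/* approximately what the dma-mapping header provides after this series */
	#define dma_map_sg(d, s, n, r)		dma_map_sg_attrs(d, s, n, r, 0)
	#define dma_unmap_sg(d, s, n, r)	dma_unmap_sg_attrs(d, s, n, r, 0)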
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 33c177ba93be..96de97a46079 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -2375,7 +2375,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, | |||
2375 | static dma_addr_t map_page(struct device *dev, struct page *page, | 2375 | static dma_addr_t map_page(struct device *dev, struct page *page, |
2376 | unsigned long offset, size_t size, | 2376 | unsigned long offset, size_t size, |
2377 | enum dma_data_direction dir, | 2377 | enum dma_data_direction dir, |
2378 | struct dma_attrs *attrs) | 2378 | unsigned long attrs) |
2379 | { | 2379 | { |
2380 | phys_addr_t paddr = page_to_phys(page) + offset; | 2380 | phys_addr_t paddr = page_to_phys(page) + offset; |
2381 | struct protection_domain *domain; | 2381 | struct protection_domain *domain; |
@@ -2398,7 +2398,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, | |||
2398 | * The exported unmap_single function for dma_ops. | 2398 | * The exported unmap_single function for dma_ops. |
2399 | */ | 2399 | */ |
2400 | static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | 2400 | static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, |
2401 | enum dma_data_direction dir, struct dma_attrs *attrs) | 2401 | enum dma_data_direction dir, unsigned long attrs) |
2402 | { | 2402 | { |
2403 | struct protection_domain *domain; | 2403 | struct protection_domain *domain; |
2404 | struct dma_ops_domain *dma_dom; | 2404 | struct dma_ops_domain *dma_dom; |
@@ -2444,7 +2444,7 @@ static int sg_num_pages(struct device *dev, | |||
2444 | */ | 2444 | */ |
2445 | static int map_sg(struct device *dev, struct scatterlist *sglist, | 2445 | static int map_sg(struct device *dev, struct scatterlist *sglist, |
2446 | int nelems, enum dma_data_direction direction, | 2446 | int nelems, enum dma_data_direction direction, |
2447 | struct dma_attrs *attrs) | 2447 | unsigned long attrs) |
2448 | { | 2448 | { |
2449 | int mapped_pages = 0, npages = 0, prot = 0, i; | 2449 | int mapped_pages = 0, npages = 0, prot = 0, i; |
2450 | struct protection_domain *domain; | 2450 | struct protection_domain *domain; |
@@ -2525,7 +2525,7 @@ out_err: | |||
2525 | */ | 2525 | */ |
2526 | static void unmap_sg(struct device *dev, struct scatterlist *sglist, | 2526 | static void unmap_sg(struct device *dev, struct scatterlist *sglist, |
2527 | int nelems, enum dma_data_direction dir, | 2527 | int nelems, enum dma_data_direction dir, |
2528 | struct dma_attrs *attrs) | 2528 | unsigned long attrs) |
2529 | { | 2529 | { |
2530 | struct protection_domain *domain; | 2530 | struct protection_domain *domain; |
2531 | struct dma_ops_domain *dma_dom; | 2531 | struct dma_ops_domain *dma_dom; |
@@ -2548,7 +2548,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
2548 | */ | 2548 | */ |
2549 | static void *alloc_coherent(struct device *dev, size_t size, | 2549 | static void *alloc_coherent(struct device *dev, size_t size, |
2550 | dma_addr_t *dma_addr, gfp_t flag, | 2550 | dma_addr_t *dma_addr, gfp_t flag, |
2551 | struct dma_attrs *attrs) | 2551 | unsigned long attrs) |
2552 | { | 2552 | { |
2553 | u64 dma_mask = dev->coherent_dma_mask; | 2553 | u64 dma_mask = dev->coherent_dma_mask; |
2554 | struct protection_domain *domain; | 2554 | struct protection_domain *domain; |
@@ -2604,7 +2604,7 @@ out_free: | |||
2604 | */ | 2604 | */ |
2605 | static void free_coherent(struct device *dev, size_t size, | 2605 | static void free_coherent(struct device *dev, size_t size, |
2606 | void *virt_addr, dma_addr_t dma_addr, | 2606 | void *virt_addr, dma_addr_t dma_addr, |
2607 | struct dma_attrs *attrs) | 2607 | unsigned long attrs) |
2608 | { | 2608 | { |
2609 | struct protection_domain *domain; | 2609 | struct protection_domain *domain; |
2610 | struct dma_ops_domain *dma_dom; | 2610 | struct dma_ops_domain *dma_dom; |
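For arch and IOMMU code the change is mostly mechanical: every struct dma_map_ops implementation swaps struct dma_attrs * for unsigned long in its callbacks, exactly as the amd_iommu hunks above do. A sketch of what an implementer now provides (foo_* are placeholder names, and the helper calls are hypothetical):

	static void *foo_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp,
			       unsigned long attrs)	/* was: struct dma_attrs *attrs */
	{
		/* attrs is now just a bitmask of DMA_ATTR_* flags */
		if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
			return foo_alloc_unmapped(dev, size, dma_handle, gfp);
		return foo_alloc_mapped(dev, size, dma_handle, gfp);
	}

	static const struct dma_map_ops foo_dma_ops = {
		.alloc = foo_alloc,
		/* .free, .map_page, .map_sg, ... take the same unsigned long attrs */
	};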
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index ea5a9ebf0f78..08a1e2f3690f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
@@ -286,7 +286,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size, | |||
286 | * or NULL on failure. | 286 | * or NULL on failure. |
287 | */ | 287 | */ |
288 | struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | 288 | struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, |
289 | struct dma_attrs *attrs, int prot, dma_addr_t *handle, | 289 | unsigned long attrs, int prot, dma_addr_t *handle, |
290 | void (*flush_page)(struct device *, const void *, phys_addr_t)) | 290 | void (*flush_page)(struct device *, const void *, phys_addr_t)) |
291 | { | 291 | { |
292 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 292 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
@@ -306,7 +306,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
306 | } else { | 306 | } else { |
307 | size = ALIGN(size, min_size); | 307 | size = ALIGN(size, min_size); |
308 | } | 308 | } |
309 | if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) | 309 | if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) |
310 | alloc_sizes = min_size; | 310 | alloc_sizes = min_size; |
311 | 311 | ||
312 | count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 312 | count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
@@ -400,7 +400,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, | |||
400 | } | 400 | } |
401 | 401 | ||
402 | void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, | 402 | void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, |
403 | enum dma_data_direction dir, struct dma_attrs *attrs) | 403 | enum dma_data_direction dir, unsigned long attrs) |
404 | { | 404 | { |
405 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); | 405 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); |
406 | } | 406 | } |
@@ -560,7 +560,7 @@ out_restore_sg: | |||
560 | } | 560 | } |
561 | 561 | ||
562 | void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 562 | void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
563 | enum dma_data_direction dir, struct dma_attrs *attrs) | 563 | enum dma_data_direction dir, unsigned long attrs) |
564 | { | 564 | { |
565 | /* | 565 | /* |
566 | * The scatterlist segments are mapped into a single | 566 | * The scatterlist segments are mapped into a single |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index afbaa2c69a59..ebb5bf3ddbd9 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -3552,7 +3552,7 @@ error: | |||
3552 | static dma_addr_t intel_map_page(struct device *dev, struct page *page, | 3552 | static dma_addr_t intel_map_page(struct device *dev, struct page *page, |
3553 | unsigned long offset, size_t size, | 3553 | unsigned long offset, size_t size, |
3554 | enum dma_data_direction dir, | 3554 | enum dma_data_direction dir, |
3555 | struct dma_attrs *attrs) | 3555 | unsigned long attrs) |
3556 | { | 3556 | { |
3557 | return __intel_map_single(dev, page_to_phys(page) + offset, size, | 3557 | return __intel_map_single(dev, page_to_phys(page) + offset, size, |
3558 | dir, *dev->dma_mask); | 3558 | dir, *dev->dma_mask); |
@@ -3711,14 +3711,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) | |||
3711 | 3711 | ||
3712 | static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | 3712 | static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, |
3713 | size_t size, enum dma_data_direction dir, | 3713 | size_t size, enum dma_data_direction dir, |
3714 | struct dma_attrs *attrs) | 3714 | unsigned long attrs) |
3715 | { | 3715 | { |
3716 | intel_unmap(dev, dev_addr, size); | 3716 | intel_unmap(dev, dev_addr, size); |
3717 | } | 3717 | } |
3718 | 3718 | ||
3719 | static void *intel_alloc_coherent(struct device *dev, size_t size, | 3719 | static void *intel_alloc_coherent(struct device *dev, size_t size, |
3720 | dma_addr_t *dma_handle, gfp_t flags, | 3720 | dma_addr_t *dma_handle, gfp_t flags, |
3721 | struct dma_attrs *attrs) | 3721 | unsigned long attrs) |
3722 | { | 3722 | { |
3723 | struct page *page = NULL; | 3723 | struct page *page = NULL; |
3724 | int order; | 3724 | int order; |
@@ -3764,7 +3764,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size, | |||
3764 | } | 3764 | } |
3765 | 3765 | ||
3766 | static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, | 3766 | static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, |
3767 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 3767 | dma_addr_t dma_handle, unsigned long attrs) |
3768 | { | 3768 | { |
3769 | int order; | 3769 | int order; |
3770 | struct page *page = virt_to_page(vaddr); | 3770 | struct page *page = virt_to_page(vaddr); |
@@ -3779,7 +3779,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
3779 | 3779 | ||
3780 | static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, | 3780 | static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, |
3781 | int nelems, enum dma_data_direction dir, | 3781 | int nelems, enum dma_data_direction dir, |
3782 | struct dma_attrs *attrs) | 3782 | unsigned long attrs) |
3783 | { | 3783 | { |
3784 | dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK; | 3784 | dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK; |
3785 | unsigned long nrpages = 0; | 3785 | unsigned long nrpages = 0; |
@@ -3808,7 +3808,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
3808 | } | 3808 | } |
3809 | 3809 | ||
3810 | static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, | 3810 | static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, |
3811 | enum dma_data_direction dir, struct dma_attrs *attrs) | 3811 | enum dma_data_direction dir, unsigned long attrs) |
3812 | { | 3812 | { |
3813 | int i; | 3813 | int i; |
3814 | struct dmar_domain *domain; | 3814 | struct dmar_domain *domain; |
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 3786d0f21972..c5f33c3bd228 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -359,7 +359,7 @@ static void gic_handle_shared_int(bool chained) | |||
359 | pending_reg += gic_reg_step; | 359 | pending_reg += gic_reg_step; |
360 | intrmask_reg += gic_reg_step; | 360 | intrmask_reg += gic_reg_step; |
361 | 361 | ||
362 | if (!config_enabled(CONFIG_64BIT) || mips_cm_is64) | 362 | if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64) |
363 | continue; | 363 | continue; |
364 | 364 | ||
365 | pending[i] |= (u64)gic_read(pending_reg) << 32; | 365 | pending[i] |= (u64)gic_read(pending_reg) << 32; |
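The second theme of the series starts here: config_enabled(CONFIG_FOO) is replaced tree-wide by IS_ENABLED(CONFIG_FOO). Both expand to a compile-time 0 or 1, so dead branches are still discarded by the compiler; the difference is that IS_ENABLED() is also true for =m while config_enabled() was only true for =y, which makes no difference for boolean options like the ones converted here. Trivial usage sketch:

	/* still a compile-time constant; for a bool symbol the two macros are equivalent */
	if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
		continue;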
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index 09c39346167f..ffe88bc6b813 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c | |||
@@ -3378,20 +3378,28 @@ static int cxd2841er_tune_tc(struct dvb_frontend *fe, | |||
3378 | ret = cxd2841er_get_carrier_offset_i( | 3378 | ret = cxd2841er_get_carrier_offset_i( |
3379 | priv, p->bandwidth_hz, | 3379 | priv, p->bandwidth_hz, |
3380 | &carrier_offset); | 3380 | &carrier_offset); |
3381 | if (ret) | ||
3382 | return ret; | ||
3381 | break; | 3383 | break; |
3382 | case SYS_DVBT: | 3384 | case SYS_DVBT: |
3383 | ret = cxd2841er_get_carrier_offset_t( | 3385 | ret = cxd2841er_get_carrier_offset_t( |
3384 | priv, p->bandwidth_hz, | 3386 | priv, p->bandwidth_hz, |
3385 | &carrier_offset); | 3387 | &carrier_offset); |
3388 | if (ret) | ||
3389 | return ret; | ||
3386 | break; | 3390 | break; |
3387 | case SYS_DVBT2: | 3391 | case SYS_DVBT2: |
3388 | ret = cxd2841er_get_carrier_offset_t2( | 3392 | ret = cxd2841er_get_carrier_offset_t2( |
3389 | priv, p->bandwidth_hz, | 3393 | priv, p->bandwidth_hz, |
3390 | &carrier_offset); | 3394 | &carrier_offset); |
3395 | if (ret) | ||
3396 | return ret; | ||
3391 | break; | 3397 | break; |
3392 | case SYS_DVBC_ANNEX_A: | 3398 | case SYS_DVBC_ANNEX_A: |
3393 | ret = cxd2841er_get_carrier_offset_c( | 3399 | ret = cxd2841er_get_carrier_offset_c( |
3394 | priv, &carrier_offset); | 3400 | priv, &carrier_offset); |
3401 | if (ret) | ||
3402 | return ret; | ||
3395 | break; | 3403 | break; |
3396 | default: | 3404 | default: |
3397 | dev_dbg(&priv->i2c->dev, | 3405 | dev_dbg(&priv->i2c->dev, |
@@ -3399,8 +3407,6 @@ static int cxd2841er_tune_tc(struct dvb_frontend *fe, | |||
3399 | __func__, priv->system); | 3407 | __func__, priv->system); |
3400 | return -EINVAL; | 3408 | return -EINVAL; |
3401 | } | 3409 | } |
3402 | if (ret) | ||
3403 | return ret; | ||
3404 | dev_dbg(&priv->i2c->dev, "%s(): carrier offset %d\n", | 3410 | dev_dbg(&priv->i2c->dev, "%s(): carrier offset %d\n", |
3405 | __func__, carrier_offset); | 3411 | __func__, carrier_offset); |
3406 | p->frequency += carrier_offset; | 3412 | p->frequency += carrier_offset; |
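The cxd2841er hunk is the one non-mechanical fix in this batch: checking ret inside each case (and dropping the single check after the switch) gives gcc a control flow it can follow, presumably silencing a misleading "may be used uninitialized" warning for the carrier offset. The exact diagnostic is not quoted in the log; the shape of the fix, with placeholder names, is:

	int ret;
	int offset;			/* only written on success */

	switch (sys) {
	case FOO:
		ret = get_offset_foo(&offset);
		if (ret)		/* check per case ... */
			return ret;
		break;
	default:
		return -EINVAL;
	}
	use(offset);			/* ... so the compiler can prove offset is set here */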
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c index e277b7c23516..c7806ecda2dd 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c | |||
@@ -246,7 +246,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev) | |||
246 | struct video_device *vfd_enc; | 246 | struct video_device *vfd_enc; |
247 | struct resource *res; | 247 | struct resource *res; |
248 | int i, j, ret; | 248 | int i, j, ret; |
249 | DEFINE_DMA_ATTRS(attrs); | ||
250 | 249 | ||
251 | dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); | 250 | dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); |
252 | if (!dev) | 251 | if (!dev) |
@@ -378,9 +377,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev) | |||
378 | goto err_enc_reg; | 377 | goto err_enc_reg; |
379 | } | 378 | } |
380 | 379 | ||
381 | /* Avoid the iommu eat big hunks */ | ||
382 | dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs); | ||
383 | |||
384 | mtk_v4l2_debug(0, "encoder registered as /dev/video%d", | 380 | mtk_v4l2_debug(0, "encoder registered as /dev/video%d", |
385 | vfd_enc->num); | 381 | vfd_enc->num); |
386 | 382 | ||
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c index 3df66d11c795..b7892f3efd98 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-hw.c +++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c | |||
@@ -430,14 +430,11 @@ int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp) | |||
430 | */ | 430 | */ |
431 | void bdisp_hw_free_nodes(struct bdisp_ctx *ctx) | 431 | void bdisp_hw_free_nodes(struct bdisp_ctx *ctx) |
432 | { | 432 | { |
433 | if (ctx && ctx->node[0]) { | 433 | if (ctx && ctx->node[0]) |
434 | DEFINE_DMA_ATTRS(attrs); | ||
435 | |||
436 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | ||
437 | dma_free_attrs(ctx->bdisp_dev->dev, | 434 | dma_free_attrs(ctx->bdisp_dev->dev, |
438 | sizeof(struct bdisp_node) * MAX_NB_NODE, | 435 | sizeof(struct bdisp_node) * MAX_NB_NODE, |
439 | ctx->node[0], ctx->node_paddr[0], &attrs); | 436 | ctx->node[0], ctx->node_paddr[0], |
440 | } | 437 | DMA_ATTR_WRITE_COMBINE); |
441 | } | 438 | } |
442 | 439 | ||
443 | /** | 440 | /** |
@@ -455,12 +452,10 @@ int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx) | |||
455 | unsigned int i, node_size = sizeof(struct bdisp_node); | 452 | unsigned int i, node_size = sizeof(struct bdisp_node); |
456 | void *base; | 453 | void *base; |
457 | dma_addr_t paddr; | 454 | dma_addr_t paddr; |
458 | DEFINE_DMA_ATTRS(attrs); | ||
459 | 455 | ||
460 | /* Allocate all the nodes within a single memory page */ | 456 | /* Allocate all the nodes within a single memory page */ |
461 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | ||
462 | base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr, | 457 | base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr, |
463 | GFP_KERNEL | GFP_DMA, &attrs); | 458 | GFP_KERNEL | GFP_DMA, DMA_ATTR_WRITE_COMBINE); |
464 | if (!base) { | 459 | if (!base) { |
465 | dev_err(dev, "%s no mem\n", __func__); | 460 | dev_err(dev, "%s no mem\n", __func__); |
466 | return -ENOMEM; | 461 | return -ENOMEM; |
@@ -493,13 +488,9 @@ void bdisp_hw_free_filters(struct device *dev) | |||
493 | { | 488 | { |
494 | int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); | 489 | int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); |
495 | 490 | ||
496 | if (bdisp_h_filter[0].virt) { | 491 | if (bdisp_h_filter[0].virt) |
497 | DEFINE_DMA_ATTRS(attrs); | ||
498 | |||
499 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | ||
500 | dma_free_attrs(dev, size, bdisp_h_filter[0].virt, | 492 | dma_free_attrs(dev, size, bdisp_h_filter[0].virt, |
501 | bdisp_h_filter[0].paddr, &attrs); | 493 | bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE); |
502 | } | ||
503 | } | 494 | } |
504 | 495 | ||
505 | /** | 496 | /** |
@@ -516,12 +507,11 @@ int bdisp_hw_alloc_filters(struct device *dev) | |||
516 | unsigned int i, size; | 507 | unsigned int i, size; |
517 | void *base; | 508 | void *base; |
518 | dma_addr_t paddr; | 509 | dma_addr_t paddr; |
519 | DEFINE_DMA_ATTRS(attrs); | ||
520 | 510 | ||
521 | /* Allocate all the filters within a single memory page */ | 511 | /* Allocate all the filters within a single memory page */ |
522 | size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); | 512 | size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); |
523 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | 513 | base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, |
524 | base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs); | 514 | DMA_ATTR_WRITE_COMBINE); |
525 | if (!base) | 515 | if (!base) |
526 | return -ENOMEM; | 516 | return -ENOMEM; |
527 | 517 | ||
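The bdisp hunks show the simplest outcome of the conversion: when only one attribute is needed, the local variable disappears entirely and the DMA_ATTR_* constant goes straight into the call. Sketch of the resulting call site (arguments as in the driver above):

	/* no more DEFINE_DMA_ATTRS()/dma_set_attr() boilerplate for a single flag */
	base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
			       DMA_ATTR_WRITE_COMBINE);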
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 863f658a3fa1..b09b2c9b6b63 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c | |||
@@ -27,7 +27,7 @@ struct vb2_dc_buf { | |||
27 | unsigned long size; | 27 | unsigned long size; |
28 | void *cookie; | 28 | void *cookie; |
29 | dma_addr_t dma_addr; | 29 | dma_addr_t dma_addr; |
30 | struct dma_attrs attrs; | 30 | unsigned long attrs; |
31 | enum dma_data_direction dma_dir; | 31 | enum dma_data_direction dma_dir; |
32 | struct sg_table *dma_sgt; | 32 | struct sg_table *dma_sgt; |
33 | struct frame_vector *vec; | 33 | struct frame_vector *vec; |
@@ -130,12 +130,12 @@ static void vb2_dc_put(void *buf_priv) | |||
130 | kfree(buf->sgt_base); | 130 | kfree(buf->sgt_base); |
131 | } | 131 | } |
132 | dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr, | 132 | dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr, |
133 | &buf->attrs); | 133 | buf->attrs); |
134 | put_device(buf->dev); | 134 | put_device(buf->dev); |
135 | kfree(buf); | 135 | kfree(buf); |
136 | } | 136 | } |
137 | 137 | ||
138 | static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs, | 138 | static void *vb2_dc_alloc(struct device *dev, unsigned long attrs, |
139 | unsigned long size, enum dma_data_direction dma_dir, | 139 | unsigned long size, enum dma_data_direction dma_dir, |
140 | gfp_t gfp_flags) | 140 | gfp_t gfp_flags) |
141 | { | 141 | { |
@@ -146,16 +146,16 @@ static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs, | |||
146 | return ERR_PTR(-ENOMEM); | 146 | return ERR_PTR(-ENOMEM); |
147 | 147 | ||
148 | if (attrs) | 148 | if (attrs) |
149 | buf->attrs = *attrs; | 149 | buf->attrs = attrs; |
150 | buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr, | 150 | buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr, |
151 | GFP_KERNEL | gfp_flags, &buf->attrs); | 151 | GFP_KERNEL | gfp_flags, buf->attrs); |
152 | if (!buf->cookie) { | 152 | if (!buf->cookie) { |
153 | dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size); | 153 | dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size); |
154 | kfree(buf); | 154 | kfree(buf); |
155 | return ERR_PTR(-ENOMEM); | 155 | return ERR_PTR(-ENOMEM); |
156 | } | 156 | } |
157 | 157 | ||
158 | if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs)) | 158 | if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) |
159 | buf->vaddr = buf->cookie; | 159 | buf->vaddr = buf->cookie; |
160 | 160 | ||
161 | /* Prevent the device from being released while the buffer is used */ | 161 | /* Prevent the device from being released while the buffer is used */ |
@@ -189,7 +189,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) | |||
189 | vma->vm_pgoff = 0; | 189 | vma->vm_pgoff = 0; |
190 | 190 | ||
191 | ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, | 191 | ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, |
192 | buf->dma_addr, buf->size, &buf->attrs); | 192 | buf->dma_addr, buf->size, buf->attrs); |
193 | 193 | ||
194 | if (ret) { | 194 | if (ret) { |
195 | pr_err("Remapping memory failed, error: %d\n", ret); | 195 | pr_err("Remapping memory failed, error: %d\n", ret); |
@@ -372,7 +372,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf) | |||
372 | } | 372 | } |
373 | 373 | ||
374 | ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr, | 374 | ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr, |
375 | buf->size, &buf->attrs); | 375 | buf->size, buf->attrs); |
376 | if (ret < 0) { | 376 | if (ret < 0) { |
377 | dev_err(buf->dev, "failed to get scatterlist from DMA API\n"); | 377 | dev_err(buf->dev, "failed to get scatterlist from DMA API\n"); |
378 | kfree(sgt); | 378 | kfree(sgt); |
@@ -421,15 +421,12 @@ static void vb2_dc_put_userptr(void *buf_priv) | |||
421 | struct page **pages; | 421 | struct page **pages; |
422 | 422 | ||
423 | if (sgt) { | 423 | if (sgt) { |
424 | DEFINE_DMA_ATTRS(attrs); | ||
425 | |||
426 | dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); | ||
427 | /* | 424 | /* |
428 | * No need to sync to CPU, it's already synced to the CPU | 425 | * No need to sync to CPU, it's already synced to the CPU |
429 | * since the finish() memop will have been called before this. | 426 | * since the finish() memop will have been called before this. |
430 | */ | 427 | */ |
431 | dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, | 428 | dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, |
432 | buf->dma_dir, &attrs); | 429 | buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); |
433 | pages = frame_vector_pages(buf->vec); | 430 | pages = frame_vector_pages(buf->vec); |
434 | /* sgt should exist only if vector contains pages... */ | 431 | /* sgt should exist only if vector contains pages... */ |
435 | BUG_ON(IS_ERR(pages)); | 432 | BUG_ON(IS_ERR(pages)); |
@@ -484,9 +481,6 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, | |||
484 | struct sg_table *sgt; | 481 | struct sg_table *sgt; |
485 | unsigned long contig_size; | 482 | unsigned long contig_size; |
486 | unsigned long dma_align = dma_get_cache_alignment(); | 483 | unsigned long dma_align = dma_get_cache_alignment(); |
487 | DEFINE_DMA_ATTRS(attrs); | ||
488 | |||
489 | dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); | ||
490 | 484 | ||
491 | /* Only cache aligned DMA transfers are reliable */ | 485 | /* Only cache aligned DMA transfers are reliable */ |
492 | if (!IS_ALIGNED(vaddr | size, dma_align)) { | 486 | if (!IS_ALIGNED(vaddr | size, dma_align)) { |
@@ -548,7 +542,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, | |||
548 | * prepare() memop is called. | 542 | * prepare() memop is called. |
549 | */ | 543 | */ |
550 | sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, | 544 | sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, |
551 | buf->dma_dir, &attrs); | 545 | buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); |
552 | if (sgt->nents <= 0) { | 546 | if (sgt->nents <= 0) { |
553 | pr_err("failed to map scatterlist\n"); | 547 | pr_err("failed to map scatterlist\n"); |
554 | ret = -EIO; | 548 | ret = -EIO; |
@@ -572,7 +566,7 @@ out: | |||
572 | 566 | ||
573 | fail_map_sg: | 567 | fail_map_sg: |
574 | dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, | 568 | dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, |
575 | buf->dma_dir, &attrs); | 569 | buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); |
576 | 570 | ||
577 | fail_sgt_init: | 571 | fail_sgt_init: |
578 | sg_free_table(sgt); | 572 | sg_free_table(sgt); |
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index a39db8a6db7a..bd82d709ee82 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c | |||
@@ -95,7 +95,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, | |||
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | 97 | ||
98 | static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_attrs, | 98 | static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs, |
99 | unsigned long size, enum dma_data_direction dma_dir, | 99 | unsigned long size, enum dma_data_direction dma_dir, |
100 | gfp_t gfp_flags) | 100 | gfp_t gfp_flags) |
101 | { | 101 | { |
@@ -103,9 +103,6 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at | |||
103 | struct sg_table *sgt; | 103 | struct sg_table *sgt; |
104 | int ret; | 104 | int ret; |
105 | int num_pages; | 105 | int num_pages; |
106 | DEFINE_DMA_ATTRS(attrs); | ||
107 | |||
108 | dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); | ||
109 | 106 | ||
110 | if (WARN_ON(dev == NULL)) | 107 | if (WARN_ON(dev == NULL)) |
111 | return NULL; | 108 | return NULL; |
@@ -144,7 +141,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at | |||
144 | * prepare() memop is called. | 141 | * prepare() memop is called. |
145 | */ | 142 | */ |
146 | sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, | 143 | sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, |
147 | buf->dma_dir, &attrs); | 144 | buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); |
148 | if (!sgt->nents) | 145 | if (!sgt->nents) |
149 | goto fail_map; | 146 | goto fail_map; |
150 | 147 | ||
@@ -179,13 +176,10 @@ static void vb2_dma_sg_put(void *buf_priv) | |||
179 | int i = buf->num_pages; | 176 | int i = buf->num_pages; |
180 | 177 | ||
181 | if (atomic_dec_and_test(&buf->refcount)) { | 178 | if (atomic_dec_and_test(&buf->refcount)) { |
182 | DEFINE_DMA_ATTRS(attrs); | ||
183 | |||
184 | dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); | ||
185 | dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, | 179 | dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, |
186 | buf->num_pages); | 180 | buf->num_pages); |
187 | dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, | 181 | dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, |
188 | buf->dma_dir, &attrs); | 182 | buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); |
189 | if (buf->vaddr) | 183 | if (buf->vaddr) |
190 | vm_unmap_ram(buf->vaddr, buf->num_pages); | 184 | vm_unmap_ram(buf->vaddr, buf->num_pages); |
191 | sg_free_table(buf->dma_sgt); | 185 | sg_free_table(buf->dma_sgt); |
@@ -228,10 +222,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr, | |||
228 | { | 222 | { |
229 | struct vb2_dma_sg_buf *buf; | 223 | struct vb2_dma_sg_buf *buf; |
230 | struct sg_table *sgt; | 224 | struct sg_table *sgt; |
231 | DEFINE_DMA_ATTRS(attrs); | ||
232 | struct frame_vector *vec; | 225 | struct frame_vector *vec; |
233 | 226 | ||
234 | dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); | ||
235 | buf = kzalloc(sizeof *buf, GFP_KERNEL); | 227 | buf = kzalloc(sizeof *buf, GFP_KERNEL); |
236 | if (!buf) | 228 | if (!buf) |
237 | return NULL; | 229 | return NULL; |
@@ -262,7 +254,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr, | |||
262 | * prepare() memop is called. | 254 | * prepare() memop is called. |
263 | */ | 255 | */ |
264 | sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, | 256 | sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, |
265 | buf->dma_dir, &attrs); | 257 | buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); |
266 | if (!sgt->nents) | 258 | if (!sgt->nents) |
267 | goto userptr_fail_map; | 259 | goto userptr_fail_map; |
268 | 260 | ||
@@ -286,14 +278,11 @@ static void vb2_dma_sg_put_userptr(void *buf_priv) | |||
286 | struct vb2_dma_sg_buf *buf = buf_priv; | 278 | struct vb2_dma_sg_buf *buf = buf_priv; |
287 | struct sg_table *sgt = &buf->sg_table; | 279 | struct sg_table *sgt = &buf->sg_table; |
288 | int i = buf->num_pages; | 280 | int i = buf->num_pages; |
289 | DEFINE_DMA_ATTRS(attrs); | ||
290 | |||
291 | dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); | ||
292 | 281 | ||
293 | dprintk(1, "%s: Releasing userspace buffer of %d pages\n", | 282 | dprintk(1, "%s: Releasing userspace buffer of %d pages\n", |
294 | __func__, buf->num_pages); | 283 | __func__, buf->num_pages); |
295 | dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir, | 284 | dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir, |
296 | &attrs); | 285 | DMA_ATTR_SKIP_CPU_SYNC); |
297 | if (buf->vaddr) | 286 | if (buf->vaddr) |
298 | vm_unmap_ram(buf->vaddr, buf->num_pages); | 287 | vm_unmap_ram(buf->vaddr, buf->num_pages); |
299 | sg_free_table(buf->dma_sgt); | 288 | sg_free_table(buf->dma_sgt); |
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index 7e8a07ed8d82..c2820a6e164d 100644 --- a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c | |||
@@ -33,7 +33,7 @@ struct vb2_vmalloc_buf { | |||
33 | 33 | ||
34 | static void vb2_vmalloc_put(void *buf_priv); | 34 | static void vb2_vmalloc_put(void *buf_priv); |
35 | 35 | ||
36 | static void *vb2_vmalloc_alloc(struct device *dev, const struct dma_attrs *attrs, | 36 | static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs, |
37 | unsigned long size, enum dma_data_direction dma_dir, | 37 | unsigned long size, enum dma_data_direction dma_dir, |
38 | gfp_t gfp_flags) | 38 | gfp_t gfp_flags) |
39 | { | 39 | { |
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index e047efd83f57..9599d732aff3 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c | |||
@@ -38,7 +38,7 @@ static inline struct mic_device *vpdev_to_mdev(struct device *dev) | |||
38 | static dma_addr_t | 38 | static dma_addr_t |
39 | _mic_dma_map_page(struct device *dev, struct page *page, | 39 | _mic_dma_map_page(struct device *dev, struct page *page, |
40 | unsigned long offset, size_t size, | 40 | unsigned long offset, size_t size, |
41 | enum dma_data_direction dir, struct dma_attrs *attrs) | 41 | enum dma_data_direction dir, unsigned long attrs) |
42 | { | 42 | { |
43 | void *va = phys_to_virt(page_to_phys(page)) + offset; | 43 | void *va = phys_to_virt(page_to_phys(page)) + offset; |
44 | struct mic_device *mdev = vpdev_to_mdev(dev); | 44 | struct mic_device *mdev = vpdev_to_mdev(dev); |
@@ -48,7 +48,7 @@ _mic_dma_map_page(struct device *dev, struct page *page, | |||
48 | 48 | ||
49 | static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | 49 | static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
50 | size_t size, enum dma_data_direction dir, | 50 | size_t size, enum dma_data_direction dir, |
51 | struct dma_attrs *attrs) | 51 | unsigned long attrs) |
52 | { | 52 | { |
53 | struct mic_device *mdev = vpdev_to_mdev(dev); | 53 | struct mic_device *mdev = vpdev_to_mdev(dev); |
54 | 54 | ||
@@ -144,7 +144,7 @@ static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) | |||
144 | 144 | ||
145 | static void *__mic_dma_alloc(struct device *dev, size_t size, | 145 | static void *__mic_dma_alloc(struct device *dev, size_t size, |
146 | dma_addr_t *dma_handle, gfp_t gfp, | 146 | dma_addr_t *dma_handle, gfp_t gfp, |
147 | struct dma_attrs *attrs) | 147 | unsigned long attrs) |
148 | { | 148 | { |
149 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); | 149 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); |
150 | struct mic_device *mdev = scdev_to_mdev(scdev); | 150 | struct mic_device *mdev = scdev_to_mdev(scdev); |
@@ -164,7 +164,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size, | |||
164 | } | 164 | } |
165 | 165 | ||
166 | static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, | 166 | static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, |
167 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 167 | dma_addr_t dma_handle, unsigned long attrs) |
168 | { | 168 | { |
169 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); | 169 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); |
170 | struct mic_device *mdev = scdev_to_mdev(scdev); | 170 | struct mic_device *mdev = scdev_to_mdev(scdev); |
@@ -176,7 +176,7 @@ static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, | |||
176 | static dma_addr_t | 176 | static dma_addr_t |
177 | __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, | 177 | __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, |
178 | size_t size, enum dma_data_direction dir, | 178 | size_t size, enum dma_data_direction dir, |
179 | struct dma_attrs *attrs) | 179 | unsigned long attrs) |
180 | { | 180 | { |
181 | void *va = phys_to_virt(page_to_phys(page)) + offset; | 181 | void *va = phys_to_virt(page_to_phys(page)) + offset; |
182 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); | 182 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); |
@@ -188,7 +188,7 @@ __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, | |||
188 | static void | 188 | static void |
189 | __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | 189 | __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
190 | size_t size, enum dma_data_direction dir, | 190 | size_t size, enum dma_data_direction dir, |
191 | struct dma_attrs *attrs) | 191 | unsigned long attrs) |
192 | { | 192 | { |
193 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); | 193 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); |
194 | struct mic_device *mdev = scdev_to_mdev(scdev); | 194 | struct mic_device *mdev = scdev_to_mdev(scdev); |
@@ -198,7 +198,7 @@ __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | |||
198 | 198 | ||
199 | static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg, | 199 | static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg, |
200 | int nents, enum dma_data_direction dir, | 200 | int nents, enum dma_data_direction dir, |
201 | struct dma_attrs *attrs) | 201 | unsigned long attrs) |
202 | { | 202 | { |
203 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); | 203 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); |
204 | struct mic_device *mdev = scdev_to_mdev(scdev); | 204 | struct mic_device *mdev = scdev_to_mdev(scdev); |
@@ -229,7 +229,7 @@ err: | |||
229 | static void __mic_dma_unmap_sg(struct device *dev, | 229 | static void __mic_dma_unmap_sg(struct device *dev, |
230 | struct scatterlist *sg, int nents, | 230 | struct scatterlist *sg, int nents, |
231 | enum dma_data_direction dir, | 231 | enum dma_data_direction dir, |
232 | struct dma_attrs *attrs) | 232 | unsigned long attrs) |
233 | { | 233 | { |
234 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); | 234 | struct scif_hw_dev *scdev = dev_get_drvdata(dev); |
235 | struct mic_device *mdev = scdev_to_mdev(scdev); | 235 | struct mic_device *mdev = scdev_to_mdev(scdev); |
@@ -327,7 +327,7 @@ static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev) | |||
327 | static dma_addr_t | 327 | static dma_addr_t |
328 | mic_dma_map_page(struct device *dev, struct page *page, | 328 | mic_dma_map_page(struct device *dev, struct page *page, |
329 | unsigned long offset, size_t size, enum dma_data_direction dir, | 329 | unsigned long offset, size_t size, enum dma_data_direction dir, |
330 | struct dma_attrs *attrs) | 330 | unsigned long attrs) |
331 | { | 331 | { |
332 | void *va = phys_to_virt(page_to_phys(page)) + offset; | 332 | void *va = phys_to_virt(page_to_phys(page)) + offset; |
333 | struct mic_device *mdev = dev_get_drvdata(dev->parent); | 333 | struct mic_device *mdev = dev_get_drvdata(dev->parent); |
@@ -338,7 +338,7 @@ mic_dma_map_page(struct device *dev, struct page *page, | |||
338 | static void | 338 | static void |
339 | mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | 339 | mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
340 | size_t size, enum dma_data_direction dir, | 340 | size_t size, enum dma_data_direction dir, |
341 | struct dma_attrs *attrs) | 341 | unsigned long attrs) |
342 | { | 342 | { |
343 | struct mic_device *mdev = dev_get_drvdata(dev->parent); | 343 | struct mic_device *mdev = dev_get_drvdata(dev->parent); |
344 | mic_unmap_single(mdev, dma_addr, size); | 344 | mic_unmap_single(mdev, dma_addr, size); |
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 845dd27d9f41..377947580203 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c | |||
@@ -122,7 +122,7 @@ static int bcm47xxpart_parse(struct mtd_info *master, | |||
122 | for (offset = 0; offset <= master->size - blocksize; | 122 | for (offset = 0; offset <= master->size - blocksize; |
123 | offset += blocksize) { | 123 | offset += blocksize) { |
124 | /* Nothing more in higher memory on BCM47XX (MIPS) */ | 124 | /* Nothing more in higher memory on BCM47XX (MIPS) */ |
125 | if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000) | 125 | if (IS_ENABLED(CONFIG_BCM47XX) && offset >= 0x2000000) |
126 | break; | 126 | break; |
127 | 127 | ||
128 | if (curr_part >= BCM47XXPART_MAX_PARTS) { | 128 | if (curr_part >= BCM47XXPART_MAX_PARTS) { |
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 355e1ae665f9..8f0fd41dfd4b 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c | |||
@@ -139,11 +139,11 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar) | |||
139 | ar->id.subsystem_vendor, ar->id.subsystem_device); | 139 | ar->id.subsystem_vendor, ar->id.subsystem_device); |
140 | 140 | ||
141 | ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n", | 141 | ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n", |
142 | config_enabled(CONFIG_ATH10K_DEBUG), | 142 | IS_ENABLED(CONFIG_ATH10K_DEBUG), |
143 | config_enabled(CONFIG_ATH10K_DEBUGFS), | 143 | IS_ENABLED(CONFIG_ATH10K_DEBUGFS), |
144 | config_enabled(CONFIG_ATH10K_TRACING), | 144 | IS_ENABLED(CONFIG_ATH10K_TRACING), |
145 | config_enabled(CONFIG_ATH10K_DFS_CERTIFIED), | 145 | IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED), |
146 | config_enabled(CONFIG_NL80211_TESTMODE)); | 146 | IS_ENABLED(CONFIG_NL80211_TESTMODE)); |
147 | 147 | ||
148 | firmware = ar->normal_mode_fw.fw_file.firmware; | 148 | firmware = ar->normal_mode_fw.fw_file.firmware; |
149 | if (firmware) | 149 | if (firmware) |
@@ -2424,7 +2424,7 @@ int ath10k_debug_register(struct ath10k *ar) | |||
2424 | debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR, | 2424 | debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR, |
2425 | ar->debug.debugfs_phy, ar, &fops_nf_cal_period); | 2425 | ar->debug.debugfs_phy, ar, &fops_nf_cal_period); |
2426 | 2426 | ||
2427 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { | 2427 | if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { |
2428 | debugfs_create_file("dfs_simulate_radar", S_IWUSR, | 2428 | debugfs_create_file("dfs_simulate_radar", S_IWUSR, |
2429 | ar->debug.debugfs_phy, ar, | 2429 | ar->debug.debugfs_phy, ar, |
2430 | &fops_simulate_radar); | 2430 | &fops_simulate_radar); |
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index fb8e38df9446..0bbd0a00edcc 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
@@ -3039,7 +3039,7 @@ static void ath10k_regd_update(struct ath10k *ar) | |||
3039 | 3039 | ||
3040 | regpair = ar->ath_common.regulatory.regpair; | 3040 | regpair = ar->ath_common.regulatory.regpair; |
3041 | 3041 | ||
3042 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { | 3042 | if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { |
3043 | nl_dfs_reg = ar->dfs_detector->region; | 3043 | nl_dfs_reg = ar->dfs_detector->region; |
3044 | wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); | 3044 | wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); |
3045 | } else { | 3045 | } else { |
@@ -3068,7 +3068,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, | |||
3068 | 3068 | ||
3069 | ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); | 3069 | ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); |
3070 | 3070 | ||
3071 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { | 3071 | if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { |
3072 | ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", | 3072 | ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", |
3073 | request->dfs_region); | 3073 | request->dfs_region); |
3074 | result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, | 3074 | result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, |
@@ -7955,7 +7955,7 @@ int ath10k_mac_register(struct ath10k *ar) | |||
7955 | if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) | 7955 | if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) |
7956 | ar->hw->netdev_features = NETIF_F_HW_CSUM; | 7956 | ar->hw->netdev_features = NETIF_F_HW_CSUM; |
7957 | 7957 | ||
7958 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { | 7958 | if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { |
7959 | /* Init ath dfs pattern detector */ | 7959 | /* Init ath dfs pattern detector */ |
7960 | ar->ath_common.debug_mask = ATH_DBG_DFS; | 7960 | ar->ath_common.debug_mask = ATH_DBG_DFS; |
7961 | ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, | 7961 | ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, |
@@ -8003,7 +8003,7 @@ err_unregister: | |||
8003 | ieee80211_unregister_hw(ar->hw); | 8003 | ieee80211_unregister_hw(ar->hw); |
8004 | 8004 | ||
8005 | err_dfs_detector_exit: | 8005 | err_dfs_detector_exit: |
8006 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) | 8006 | if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) |
8007 | ar->dfs_detector->exit(ar->dfs_detector); | 8007 | ar->dfs_detector->exit(ar->dfs_detector); |
8008 | 8008 | ||
8009 | err_free: | 8009 | err_free: |
@@ -8018,7 +8018,7 @@ void ath10k_mac_unregister(struct ath10k *ar) | |||
8018 | { | 8018 | { |
8019 | ieee80211_unregister_hw(ar->hw); | 8019 | ieee80211_unregister_hw(ar->hw); |
8020 | 8020 | ||
8021 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) | 8021 | if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) |
8022 | ar->dfs_detector->exit(ar->dfs_detector); | 8022 | ar->dfs_detector->exit(ar->dfs_detector); |
8023 | 8023 | ||
8024 | kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); | 8024 | kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); |
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 169cd2e783eb..d2462886b75c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c | |||
@@ -3704,7 +3704,7 @@ void ath10k_wmi_event_dfs(struct ath10k *ar, | |||
3704 | phyerr->tsf_timestamp, tsf, buf_len); | 3704 | phyerr->tsf_timestamp, tsf, buf_len); |
3705 | 3705 | ||
3706 | /* Skip event if DFS disabled */ | 3706 | /* Skip event if DFS disabled */ |
3707 | if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) | 3707 | if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) |
3708 | return; | 3708 | return; |
3709 | 3709 | ||
3710 | ATH10K_DFS_STAT_INC(ar, pulses_total); | 3710 | ATH10K_DFS_STAT_INC(ar, pulses_total); |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 4ad6284fc37d..72e2ec67768d 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c | |||
@@ -3881,7 +3881,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) | |||
3881 | BIT(NL80211_IFTYPE_P2P_CLIENT); | 3881 | BIT(NL80211_IFTYPE_P2P_CLIENT); |
3882 | } | 3882 | } |
3883 | 3883 | ||
3884 | if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) && | 3884 | if (IS_ENABLED(CONFIG_ATH6KL_REGDOMAIN) && |
3885 | test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) { | 3885 | test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) { |
3886 | wiphy->reg_notifier = ath6kl_cfg80211_reg_notify; | 3886 | wiphy->reg_notifier = ath6kl_cfg80211_reg_notify; |
3887 | ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS; | 3887 | ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS; |
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index a8762711ad74..e2512d5bc0e1 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c | |||
@@ -731,7 +731,7 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common, | |||
731 | struct ath_hw *ah = spec_priv->ah; | 731 | struct ath_hw *ah = spec_priv->ah; |
732 | u32 rxfilter; | 732 | u32 rxfilter; |
733 | 733 | ||
734 | if (config_enabled(CONFIG_ATH9K_TX99)) | 734 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) |
735 | return; | 735 | return; |
736 | 736 | ||
737 | if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { | 737 | if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { |
@@ -806,7 +806,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file, | |||
806 | char buf[32]; | 806 | char buf[32]; |
807 | ssize_t len; | 807 | ssize_t len; |
808 | 808 | ||
809 | if (config_enabled(CONFIG_ATH9K_TX99)) | 809 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) |
810 | return -EOPNOTSUPP; | 810 | return -EOPNOTSUPP; |
811 | 811 | ||
812 | len = min(count, sizeof(buf) - 1); | 812 | len = min(count, sizeof(buf) - 1); |
@@ -1072,7 +1072,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = { | |||
1072 | 1072 | ||
1073 | void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) | 1073 | void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) |
1074 | { | 1074 | { |
1075 | if (config_enabled(CONFIG_ATH9K_DEBUGFS)) { | 1075 | if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) { |
1076 | relay_close(spec_priv->rfs_chan_spec_scan); | 1076 | relay_close(spec_priv->rfs_chan_spec_scan); |
1077 | spec_priv->rfs_chan_spec_scan = NULL; | 1077 | spec_priv->rfs_chan_spec_scan = NULL; |
1078 | } | 1078 | } |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index edc74fca60aa..cfa3fe82ade3 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -843,7 +843,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) | |||
843 | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | | 843 | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | |
844 | NL80211_FEATURE_P2P_GO_CTWIN; | 844 | NL80211_FEATURE_P2P_GO_CTWIN; |
845 | 845 | ||
846 | if (!config_enabled(CONFIG_ATH9K_TX99)) { | 846 | if (!IS_ENABLED(CONFIG_ATH9K_TX99)) { |
847 | hw->wiphy->interface_modes = | 847 | hw->wiphy->interface_modes = |
848 | BIT(NL80211_IFTYPE_P2P_GO) | | 848 | BIT(NL80211_IFTYPE_P2P_GO) | |
849 | BIT(NL80211_IFTYPE_P2P_CLIENT) | | 849 | BIT(NL80211_IFTYPE_P2P_CLIENT) | |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 7594650f214f..a394622c9022 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -1250,7 +1250,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, | |||
1250 | 1250 | ||
1251 | mutex_lock(&sc->mutex); | 1251 | mutex_lock(&sc->mutex); |
1252 | 1252 | ||
1253 | if (config_enabled(CONFIG_ATH9K_TX99)) { | 1253 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) { |
1254 | if (sc->cur_chan->nvifs >= 1) { | 1254 | if (sc->cur_chan->nvifs >= 1) { |
1255 | mutex_unlock(&sc->mutex); | 1255 | mutex_unlock(&sc->mutex); |
1256 | return -EOPNOTSUPP; | 1256 | return -EOPNOTSUPP; |
@@ -1300,7 +1300,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, | |||
1300 | 1300 | ||
1301 | mutex_lock(&sc->mutex); | 1301 | mutex_lock(&sc->mutex); |
1302 | 1302 | ||
1303 | if (config_enabled(CONFIG_ATH9K_TX99)) { | 1303 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) { |
1304 | mutex_unlock(&sc->mutex); | 1304 | mutex_unlock(&sc->mutex); |
1305 | return -EOPNOTSUPP; | 1305 | return -EOPNOTSUPP; |
1306 | } | 1306 | } |
@@ -1360,7 +1360,7 @@ static void ath9k_enable_ps(struct ath_softc *sc) | |||
1360 | struct ath_hw *ah = sc->sc_ah; | 1360 | struct ath_hw *ah = sc->sc_ah; |
1361 | struct ath_common *common = ath9k_hw_common(ah); | 1361 | struct ath_common *common = ath9k_hw_common(ah); |
1362 | 1362 | ||
1363 | if (config_enabled(CONFIG_ATH9K_TX99)) | 1363 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) |
1364 | return; | 1364 | return; |
1365 | 1365 | ||
1366 | sc->ps_enabled = true; | 1366 | sc->ps_enabled = true; |
@@ -1379,7 +1379,7 @@ static void ath9k_disable_ps(struct ath_softc *sc) | |||
1379 | struct ath_hw *ah = sc->sc_ah; | 1379 | struct ath_hw *ah = sc->sc_ah; |
1380 | struct ath_common *common = ath9k_hw_common(ah); | 1380 | struct ath_common *common = ath9k_hw_common(ah); |
1381 | 1381 | ||
1382 | if (config_enabled(CONFIG_ATH9K_TX99)) | 1382 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) |
1383 | return; | 1383 | return; |
1384 | 1384 | ||
1385 | sc->ps_enabled = false; | 1385 | sc->ps_enabled = false; |
@@ -1953,7 +1953,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, | |||
1953 | struct ieee80211_channel *chan; | 1953 | struct ieee80211_channel *chan; |
1954 | int pos; | 1954 | int pos; |
1955 | 1955 | ||
1956 | if (config_enabled(CONFIG_ATH9K_TX99)) | 1956 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) |
1957 | return -EOPNOTSUPP; | 1957 | return -EOPNOTSUPP; |
1958 | 1958 | ||
1959 | spin_lock_bh(&common->cc_lock); | 1959 | spin_lock_bh(&common->cc_lock); |
@@ -2003,7 +2003,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, | |||
2003 | struct ath_softc *sc = hw->priv; | 2003 | struct ath_softc *sc = hw->priv; |
2004 | struct ath_hw *ah = sc->sc_ah; | 2004 | struct ath_hw *ah = sc->sc_ah; |
2005 | 2005 | ||
2006 | if (config_enabled(CONFIG_ATH9K_TX99)) | 2006 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) |
2007 | return; | 2007 | return; |
2008 | 2008 | ||
2009 | mutex_lock(&sc->mutex); | 2009 | mutex_lock(&sc->mutex); |
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 32160fca876a..669734252664 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -377,7 +377,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) | |||
377 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 377 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
378 | u32 rfilt; | 378 | u32 rfilt; |
379 | 379 | ||
380 | if (config_enabled(CONFIG_ATH9K_TX99)) | 380 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) |
381 | return 0; | 381 | return 0; |
382 | 382 | ||
383 | rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST | 383 | rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST |
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 2303ef96299d..2f8136d50f78 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c | |||
@@ -352,7 +352,7 @@ dfs_pattern_detector_init(struct ath_common *common, | |||
352 | { | 352 | { |
353 | struct dfs_pattern_detector *dpd; | 353 | struct dfs_pattern_detector *dpd; |
354 | 354 | ||
355 | if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS)) | 355 | if (!IS_ENABLED(CONFIG_CFG80211_CERTIFICATION_ONUS)) |
356 | return NULL; | 356 | return NULL; |
357 | 357 | ||
358 | dpd = kmalloc(sizeof(*dpd), GFP_KERNEL); | 358 | dpd = kmalloc(sizeof(*dpd), GFP_KERNEL); |
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 7e15ed9ed31f..f8506037736f 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c | |||
@@ -116,7 +116,7 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = { | |||
116 | 116 | ||
117 | static bool dynamic_country_user_possible(struct ath_regulatory *reg) | 117 | static bool dynamic_country_user_possible(struct ath_regulatory *reg) |
118 | { | 118 | { |
119 | if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING)) | 119 | if (IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING)) |
120 | return true; | 120 | return true; |
121 | 121 | ||
122 | switch (reg->country_code) { | 122 | switch (reg->country_code) { |
@@ -188,7 +188,7 @@ static bool dynamic_country_user_possible(struct ath_regulatory *reg) | |||
188 | 188 | ||
189 | static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg) | 189 | static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg) |
190 | { | 190 | { |
191 | if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) | 191 | if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) |
192 | return false; | 192 | return false; |
193 | if (!dynamic_country_user_possible(reg)) | 193 | if (!dynamic_country_user_possible(reg)) |
194 | return false; | 194 | return false; |
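All of the ath9k and ath hunks above make the same mechanical substitution: the internal config_enabled() helper is replaced by IS_ENABLED(), the documented kconfig.h interface, which evaluates to 1 when the given symbol is built in or modular and to 0 otherwise, while keeping both branches visible to the compiler. A minimal sketch of the resulting pattern (the helper function is illustrative; only CONFIG_ATH9K_TX99 comes from the patch):

    #include <linux/kconfig.h>

    /* The disabled branch is still compiled and type-checked, then eliminated. */
    static bool demo_tx99_active(void)
    {
            if (IS_ENABLED(CONFIG_ATH9K_TX99))
                    return true;    /* TX99 support compiled in */
            return false;
    }

The same idiom also works in constant initializers, as the pci/ecam.c hunk below shows with per_bus_mapping = !IS_ENABLED(CONFIG_64BIT).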
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index e24b05996a1b..3ed6238f8f6e 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -790,7 +790,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, | |||
790 | static dma_addr_t | 790 | static dma_addr_t |
791 | ccio_map_page(struct device *dev, struct page *page, unsigned long offset, | 791 | ccio_map_page(struct device *dev, struct page *page, unsigned long offset, |
792 | size_t size, enum dma_data_direction direction, | 792 | size_t size, enum dma_data_direction direction, |
793 | struct dma_attrs *attrs) | 793 | unsigned long attrs) |
794 | { | 794 | { |
795 | return ccio_map_single(dev, page_address(page) + offset, size, | 795 | return ccio_map_single(dev, page_address(page) + offset, size, |
796 | direction); | 796 | direction); |
@@ -806,7 +806,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset, | |||
806 | */ | 806 | */ |
807 | static void | 807 | static void |
808 | ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, | 808 | ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, |
809 | enum dma_data_direction direction, struct dma_attrs *attrs) | 809 | enum dma_data_direction direction, unsigned long attrs) |
810 | { | 810 | { |
811 | struct ioc *ioc; | 811 | struct ioc *ioc; |
812 | unsigned long flags; | 812 | unsigned long flags; |
@@ -844,7 +844,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, | |||
844 | */ | 844 | */ |
845 | static void * | 845 | static void * |
846 | ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, | 846 | ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, |
847 | struct dma_attrs *attrs) | 847 | unsigned long attrs) |
848 | { | 848 | { |
849 | void *ret; | 849 | void *ret; |
850 | #if 0 | 850 | #if 0 |
@@ -878,9 +878,9 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, | |||
878 | */ | 878 | */ |
879 | static void | 879 | static void |
880 | ccio_free(struct device *dev, size_t size, void *cpu_addr, | 880 | ccio_free(struct device *dev, size_t size, void *cpu_addr, |
881 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 881 | dma_addr_t dma_handle, unsigned long attrs) |
882 | { | 882 | { |
883 | ccio_unmap_page(dev, dma_handle, size, 0, NULL); | 883 | ccio_unmap_page(dev, dma_handle, size, 0, 0); |
884 | free_pages((unsigned long)cpu_addr, get_order(size)); | 884 | free_pages((unsigned long)cpu_addr, get_order(size)); |
885 | } | 885 | } |
886 | 886 | ||
@@ -907,7 +907,7 @@ ccio_free(struct device *dev, size_t size, void *cpu_addr, | |||
907 | */ | 907 | */ |
908 | static int | 908 | static int |
909 | ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | 909 | ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, |
910 | enum dma_data_direction direction, struct dma_attrs *attrs) | 910 | enum dma_data_direction direction, unsigned long attrs) |
911 | { | 911 | { |
912 | struct ioc *ioc; | 912 | struct ioc *ioc; |
913 | int coalesced, filled = 0; | 913 | int coalesced, filled = 0; |
@@ -984,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
984 | */ | 984 | */ |
985 | static void | 985 | static void |
986 | ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, | 986 | ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, |
987 | enum dma_data_direction direction, struct dma_attrs *attrs) | 987 | enum dma_data_direction direction, unsigned long attrs) |
988 | { | 988 | { |
989 | struct ioc *ioc; | 989 | struct ioc *ioc; |
990 | 990 | ||
@@ -1004,7 +1004,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
1004 | ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; | 1004 | ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; |
1005 | #endif | 1005 | #endif |
1006 | ccio_unmap_page(dev, sg_dma_address(sglist), | 1006 | ccio_unmap_page(dev, sg_dma_address(sglist), |
1007 | sg_dma_len(sglist), direction, NULL); | 1007 | sg_dma_len(sglist), direction, 0); |
1008 | ++sglist; | 1008 | ++sglist; |
1009 | } | 1009 | } |
1010 | 1010 | ||
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 42ec4600b7e4..151b86b6d2e2 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -783,7 +783,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, | |||
783 | static dma_addr_t | 783 | static dma_addr_t |
784 | sba_map_page(struct device *dev, struct page *page, unsigned long offset, | 784 | sba_map_page(struct device *dev, struct page *page, unsigned long offset, |
785 | size_t size, enum dma_data_direction direction, | 785 | size_t size, enum dma_data_direction direction, |
786 | struct dma_attrs *attrs) | 786 | unsigned long attrs) |
787 | { | 787 | { |
788 | return sba_map_single(dev, page_address(page) + offset, size, | 788 | return sba_map_single(dev, page_address(page) + offset, size, |
789 | direction); | 789 | direction); |
@@ -801,7 +801,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset, | |||
801 | */ | 801 | */ |
802 | static void | 802 | static void |
803 | sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, | 803 | sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, |
804 | enum dma_data_direction direction, struct dma_attrs *attrs) | 804 | enum dma_data_direction direction, unsigned long attrs) |
805 | { | 805 | { |
806 | struct ioc *ioc; | 806 | struct ioc *ioc; |
807 | #if DELAYED_RESOURCE_CNT > 0 | 807 | #if DELAYED_RESOURCE_CNT > 0 |
@@ -876,7 +876,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, | |||
876 | * See Documentation/DMA-API-HOWTO.txt | 876 | * See Documentation/DMA-API-HOWTO.txt |
877 | */ | 877 | */ |
878 | static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, | 878 | static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, |
879 | gfp_t gfp, struct dma_attrs *attrs) | 879 | gfp_t gfp, unsigned long attrs) |
880 | { | 880 | { |
881 | void *ret; | 881 | void *ret; |
882 | 882 | ||
@@ -908,9 +908,9 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle | |||
908 | */ | 908 | */ |
909 | static void | 909 | static void |
910 | sba_free(struct device *hwdev, size_t size, void *vaddr, | 910 | sba_free(struct device *hwdev, size_t size, void *vaddr, |
911 | dma_addr_t dma_handle, struct dma_attrs *attrs) | 911 | dma_addr_t dma_handle, unsigned long attrs) |
912 | { | 912 | { |
913 | sba_unmap_page(hwdev, dma_handle, size, 0, NULL); | 913 | sba_unmap_page(hwdev, dma_handle, size, 0, 0); |
914 | free_pages((unsigned long) vaddr, get_order(size)); | 914 | free_pages((unsigned long) vaddr, get_order(size)); |
915 | } | 915 | } |
916 | 916 | ||
@@ -943,7 +943,7 @@ int dump_run_sg = 0; | |||
943 | */ | 943 | */ |
944 | static int | 944 | static int |
945 | sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | 945 | sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, |
946 | enum dma_data_direction direction, struct dma_attrs *attrs) | 946 | enum dma_data_direction direction, unsigned long attrs) |
947 | { | 947 | { |
948 | struct ioc *ioc; | 948 | struct ioc *ioc; |
949 | int coalesced, filled = 0; | 949 | int coalesced, filled = 0; |
@@ -1026,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
1026 | */ | 1026 | */ |
1027 | static void | 1027 | static void |
1028 | sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, | 1028 | sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, |
1029 | enum dma_data_direction direction, struct dma_attrs *attrs) | 1029 | enum dma_data_direction direction, unsigned long attrs) |
1030 | { | 1030 | { |
1031 | struct ioc *ioc; | 1031 | struct ioc *ioc; |
1032 | #ifdef ASSERT_PDIR_SANITY | 1032 | #ifdef ASSERT_PDIR_SANITY |
@@ -1051,7 +1051,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, | |||
1051 | while (sg_dma_len(sglist) && nents--) { | 1051 | while (sg_dma_len(sglist) && nents--) { |
1052 | 1052 | ||
1053 | sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), | 1053 | sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), |
1054 | direction, NULL); | 1054 | direction, 0); |
1055 | #ifdef SBA_COLLECT_STATS | 1055 | #ifdef SBA_COLLECT_STATS |
1056 | ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; | 1056 | ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; |
1057 | ioc->usingle_calls--; /* kluge since call is unmap_sg() */ | 1057 | ioc->usingle_calls--; /* kluge since call is unmap_sg() */ |
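In both parisc IOMMU drivers the change is purely mechanical: every dma_map_ops callback swaps its trailing struct dma_attrs * parameter for an unsigned long bitmask, and internal callers that used to pass NULL now pass 0. A hedged sketch of what a converted callback pair looks like (names and bodies are illustrative, not the real ccio/sba code):

    #include <linux/dma-mapping.h>

    /* After the conversion, attrs is a plain bitmask; 0 means "no attributes". */
    static dma_addr_t demo_map_page(struct device *dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction dir,
                                    unsigned long attrs)
    {
            return page_to_phys(page) + offset;     /* stand-in for the real IOVA setup */
    }

    static void demo_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
                                enum dma_data_direction dir, unsigned long attrs)
    {
            /* nothing to tear down in this sketch */
    }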
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 66e0d718472f..43ed08dd8b01 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c | |||
@@ -27,7 +27,7 @@ | |||
27 | * since we have enough virtual address range available. On 32-bit, we | 27 | * since we have enough virtual address range available. On 32-bit, we |
28 | * ioremap the config space for each bus individually. | 28 | * ioremap the config space for each bus individually. |
29 | */ | 29 | */ |
30 | static const bool per_bus_mapping = !config_enabled(CONFIG_64BIT); | 30 | static const bool per_bus_mapping = !IS_ENABLED(CONFIG_64BIT); |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * Create a PCI config space window | 33 | * Create a PCI config space window |
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 24791886219a..2a1b2c7d8f2c 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c | |||
@@ -349,13 +349,12 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc, | |||
349 | 349 | ||
350 | static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) | 350 | static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) |
351 | { | 351 | { |
352 | DEFINE_DMA_ATTRS(attrs); | 352 | unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; |
353 | dma_addr_t phys; | 353 | dma_addr_t phys; |
354 | void *ptr; | 354 | void *ptr; |
355 | int ret; | 355 | int ret; |
356 | 356 | ||
357 | dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs); | 357 | ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs); |
358 | ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs); | ||
359 | if (!ptr) { | 358 | if (!ptr) { |
360 | dev_err(qproc->dev, "failed to allocate mdt buffer\n"); | 359 | dev_err(qproc->dev, "failed to allocate mdt buffer\n"); |
361 | return -ENOMEM; | 360 | return -ENOMEM; |
@@ -372,7 +371,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) | |||
372 | else if (ret < 0) | 371 | else if (ret < 0) |
373 | dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); | 372 | dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); |
374 | 373 | ||
375 | dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs); | 374 | dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs); |
376 | 375 | ||
377 | return ret < 0 ? ret : 0; | 376 | return ret < 0 ? ret : 0; |
378 | } | 377 | } |
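The qcom_q6v5_pil.c hunk shows the call-site half of the conversion: instead of declaring a struct dma_attrs on the stack with DEFINE_DMA_ATTRS() and populating it via dma_set_attr(), the caller now builds an unsigned long mask up front and passes it by value to dma_alloc_attrs()/dma_free_attrs(). Roughly, as a sketch in which the device, size and DMA handle are assumed to come from the surrounding driver:

    /* Before:
     *      DEFINE_DMA_ATTRS(attrs);
     *      dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
     *      ptr = dma_alloc_attrs(dev, size, &phys, GFP_KERNEL, &attrs);
     * After:
     */
    static void *demo_alloc_contiguous(struct device *dev, size_t size,
                                       dma_addr_t *phys)
    {
            unsigned long attrs = DMA_ATTR_FORCE_CONTIGUOUS;

            return dma_alloc_attrs(dev, size, phys, GFP_KERNEL, attrs);
    }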
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index 1519d2ca7705..73137f4aac20 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c | |||
@@ -54,7 +54,7 @@ struct ar933x_uart_port { | |||
54 | 54 | ||
55 | static inline bool ar933x_uart_console_enabled(void) | 55 | static inline bool ar933x_uart_console_enabled(void) |
56 | { | 56 | { |
57 | return config_enabled(CONFIG_SERIAL_AR933X_CONSOLE); | 57 | return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE); |
58 | } | 58 | } |
59 | 59 | ||
60 | static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, | 60 | static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, |
@@ -636,7 +636,7 @@ static int ar933x_uart_probe(struct platform_device *pdev) | |||
636 | int ret; | 636 | int ret; |
637 | 637 | ||
638 | np = pdev->dev.of_node; | 638 | np = pdev->dev.of_node; |
639 | if (config_enabled(CONFIG_OF) && np) { | 639 | if (IS_ENABLED(CONFIG_OF) && np) { |
640 | id = of_alias_get_id(np, "serial"); | 640 | id = of_alias_get_id(np, "serial"); |
641 | if (id < 0) { | 641 | if (id < 0) { |
642 | dev_err(&pdev->dev, "unable to get alias id, err=%d\n", | 642 | dev_err(&pdev->dev, "unable to get alias id, err=%d\n", |
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c index 2fb90cb6803f..1d7c012f09db 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c | |||
@@ -1332,7 +1332,7 @@ static void omapfb_free_fbmem(struct fb_info *fbi) | |||
1332 | } | 1332 | } |
1333 | 1333 | ||
1334 | dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle, | 1334 | dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle, |
1335 | &rg->attrs); | 1335 | rg->attrs); |
1336 | 1336 | ||
1337 | rg->token = NULL; | 1337 | rg->token = NULL; |
1338 | rg->vaddr = NULL; | 1338 | rg->vaddr = NULL; |
@@ -1370,7 +1370,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, | |||
1370 | struct omapfb2_device *fbdev = ofbi->fbdev; | 1370 | struct omapfb2_device *fbdev = ofbi->fbdev; |
1371 | struct omapfb2_mem_region *rg; | 1371 | struct omapfb2_mem_region *rg; |
1372 | void *token; | 1372 | void *token; |
1373 | DEFINE_DMA_ATTRS(attrs); | 1373 | unsigned long attrs; |
1374 | dma_addr_t dma_handle; | 1374 | dma_addr_t dma_handle; |
1375 | int r; | 1375 | int r; |
1376 | 1376 | ||
@@ -1386,15 +1386,15 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, | |||
1386 | 1386 | ||
1387 | size = PAGE_ALIGN(size); | 1387 | size = PAGE_ALIGN(size); |
1388 | 1388 | ||
1389 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | 1389 | attrs = DMA_ATTR_WRITE_COMBINE; |
1390 | 1390 | ||
1391 | if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) | 1391 | if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) |
1392 | dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); | 1392 | attrs |= DMA_ATTR_NO_KERNEL_MAPPING; |
1393 | 1393 | ||
1394 | DBG("allocating %lu bytes for fb %d\n", size, ofbi->id); | 1394 | DBG("allocating %lu bytes for fb %d\n", size, ofbi->id); |
1395 | 1395 | ||
1396 | token = dma_alloc_attrs(fbdev->dev, size, &dma_handle, | 1396 | token = dma_alloc_attrs(fbdev->dev, size, &dma_handle, |
1397 | GFP_KERNEL, &attrs); | 1397 | GFP_KERNEL, attrs); |
1398 | 1398 | ||
1399 | if (token == NULL) { | 1399 | if (token == NULL) { |
1400 | dev_err(fbdev->dev, "failed to allocate framebuffer\n"); | 1400 | dev_err(fbdev->dev, "failed to allocate framebuffer\n"); |
@@ -1408,7 +1408,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, | |||
1408 | r = omap_vrfb_request_ctx(&rg->vrfb); | 1408 | r = omap_vrfb_request_ctx(&rg->vrfb); |
1409 | if (r) { | 1409 | if (r) { |
1410 | dma_free_attrs(fbdev->dev, size, token, dma_handle, | 1410 | dma_free_attrs(fbdev->dev, size, token, dma_handle, |
1411 | &attrs); | 1411 | attrs); |
1412 | dev_err(fbdev->dev, "vrfb create ctx failed\n"); | 1412 | dev_err(fbdev->dev, "vrfb create ctx failed\n"); |
1413 | return r; | 1413 | return r; |
1414 | } | 1414 | } |
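Because attributes are now just bits, the omapfb hunk composes them with a bitwise OR and keeps the final value in the region descriptor (rg->attrs), so exactly the same mask is handed back to dma_free_attrs() later. A minimal sketch of that pattern (want_vrfb and the saved_attrs out-parameter are illustrative, not omapfb fields):

    static void *demo_fb_alloc(struct device *dev, size_t size,
                               dma_addr_t *handle, bool want_vrfb,
                               unsigned long *saved_attrs)
    {
            unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

            if (want_vrfb)
                    attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

            *saved_attrs = attrs;   /* reused verbatim at free time */
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
    }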
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h index bcb9ff4a607d..555487d6dbea 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb.h +++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h | |||
@@ -28,7 +28,6 @@ | |||
28 | #endif | 28 | #endif |
29 | 29 | ||
30 | #include <linux/rwsem.h> | 30 | #include <linux/rwsem.h> |
31 | #include <linux/dma-attrs.h> | ||
32 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
33 | 32 | ||
34 | #include <video/omapfb_dss.h> | 33 | #include <video/omapfb_dss.h> |
@@ -51,7 +50,7 @@ extern bool omapfb_debug; | |||
51 | 50 | ||
52 | struct omapfb2_mem_region { | 51 | struct omapfb2_mem_region { |
53 | int id; | 52 | int id; |
54 | struct dma_attrs attrs; | 53 | unsigned long attrs; |
55 | void *token; | 54 | void *token; |
56 | dma_addr_t dma_handle; | 55 | dma_addr_t dma_handle; |
57 | u32 paddr; | 56 | u32 paddr; |
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 7399782c0998..87e6035c9e81 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
@@ -294,7 +294,7 @@ error: | |||
294 | void * | 294 | void * |
295 | xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 295 | xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
296 | dma_addr_t *dma_handle, gfp_t flags, | 296 | dma_addr_t *dma_handle, gfp_t flags, |
297 | struct dma_attrs *attrs) | 297 | unsigned long attrs) |
298 | { | 298 | { |
299 | void *ret; | 299 | void *ret; |
300 | int order = get_order(size); | 300 | int order = get_order(size); |
@@ -346,7 +346,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent); | |||
346 | 346 | ||
347 | void | 347 | void |
348 | xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | 348 | xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
349 | dma_addr_t dev_addr, struct dma_attrs *attrs) | 349 | dma_addr_t dev_addr, unsigned long attrs) |
350 | { | 350 | { |
351 | int order = get_order(size); | 351 | int order = get_order(size); |
352 | phys_addr_t phys; | 352 | phys_addr_t phys; |
@@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent); | |||
378 | dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, | 378 | dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, |
379 | unsigned long offset, size_t size, | 379 | unsigned long offset, size_t size, |
380 | enum dma_data_direction dir, | 380 | enum dma_data_direction dir, |
381 | struct dma_attrs *attrs) | 381 | unsigned long attrs) |
382 | { | 382 | { |
383 | phys_addr_t map, phys = page_to_phys(page) + offset; | 383 | phys_addr_t map, phys = page_to_phys(page) + offset; |
384 | dma_addr_t dev_addr = xen_phys_to_bus(phys); | 384 | dma_addr_t dev_addr = xen_phys_to_bus(phys); |
@@ -434,7 +434,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page); | |||
434 | */ | 434 | */ |
435 | static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | 435 | static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, |
436 | size_t size, enum dma_data_direction dir, | 436 | size_t size, enum dma_data_direction dir, |
437 | struct dma_attrs *attrs) | 437 | unsigned long attrs) |
438 | { | 438 | { |
439 | phys_addr_t paddr = xen_bus_to_phys(dev_addr); | 439 | phys_addr_t paddr = xen_bus_to_phys(dev_addr); |
440 | 440 | ||
@@ -462,7 +462,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | |||
462 | 462 | ||
463 | void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, | 463 | void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, |
464 | size_t size, enum dma_data_direction dir, | 464 | size_t size, enum dma_data_direction dir, |
465 | struct dma_attrs *attrs) | 465 | unsigned long attrs) |
466 | { | 466 | { |
467 | xen_unmap_single(hwdev, dev_addr, size, dir, attrs); | 467 | xen_unmap_single(hwdev, dev_addr, size, dir, attrs); |
468 | } | 468 | } |
@@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device); | |||
538 | int | 538 | int |
539 | xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 539 | xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
540 | int nelems, enum dma_data_direction dir, | 540 | int nelems, enum dma_data_direction dir, |
541 | struct dma_attrs *attrs) | 541 | unsigned long attrs) |
542 | { | 542 | { |
543 | struct scatterlist *sg; | 543 | struct scatterlist *sg; |
544 | int i; | 544 | int i; |
@@ -599,7 +599,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs); | |||
599 | void | 599 | void |
600 | xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 600 | xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
601 | int nelems, enum dma_data_direction dir, | 601 | int nelems, enum dma_data_direction dir, |
602 | struct dma_attrs *attrs) | 602 | unsigned long attrs) |
603 | { | 603 | { |
604 | struct scatterlist *sg; | 604 | struct scatterlist *sg; |
605 | int i; | 605 | int i; |
diff --git a/fs/block_dev.c b/fs/block_dev.c index ada42cf42d06..2033a3f91d58 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1275,11 +1275,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1275 | bdev->bd_disk = disk; | 1275 | bdev->bd_disk = disk; |
1276 | bdev->bd_queue = disk->queue; | 1276 | bdev->bd_queue = disk->queue; |
1277 | bdev->bd_contains = bdev; | 1277 | bdev->bd_contains = bdev; |
1278 | if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && | 1278 | bdev->bd_inode->i_flags = 0; |
1279 | blk_queue_dax(disk->queue)) | ||
1280 | bdev->bd_inode->i_flags = S_DAX; | ||
1281 | else | ||
1282 | bdev->bd_inode->i_flags = 0; | ||
1283 | 1279 | ||
1284 | if (!partno) { | 1280 | if (!partno) { |
1285 | ret = -ENXIO; | 1281 | ret = -ENXIO; |
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 27bfc0b631a9..598bc999f4c2 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h | |||
@@ -266,13 +266,12 @@ static inline int bitmap_equal(const unsigned long *src1, | |||
266 | const unsigned long *src2, unsigned int nbits) | 266 | const unsigned long *src2, unsigned int nbits) |
267 | { | 267 | { |
268 | if (small_const_nbits(nbits)) | 268 | if (small_const_nbits(nbits)) |
269 | return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); | 269 | return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); |
270 | #ifdef CONFIG_S390 | 270 | #ifdef CONFIG_S390 |
271 | else if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0) | 271 | if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0) |
272 | return !memcmp(src1, src2, nbits / 8); | 272 | return !memcmp(src1, src2, nbits / 8); |
273 | #endif | 273 | #endif |
274 | else | 274 | return __bitmap_equal(src1, src2, nbits); |
275 | return __bitmap_equal(src1, src2, nbits); | ||
276 | } | 275 | } |
277 | 276 | ||
278 | static inline int bitmap_intersects(const unsigned long *src1, | 277 | static inline int bitmap_intersects(const unsigned long *src1, |
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h deleted file mode 100644 index 5246239a4953..000000000000 --- a/include/linux/dma-attrs.h +++ /dev/null | |||
@@ -1,71 +0,0 @@ | |||
1 | #ifndef _DMA_ATTR_H | ||
2 | #define _DMA_ATTR_H | ||
3 | |||
4 | #include <linux/bitmap.h> | ||
5 | #include <linux/bitops.h> | ||
6 | #include <linux/bug.h> | ||
7 | |||
8 | /** | ||
9 | * an enum dma_attr represents an attribute associated with a DMA | ||
10 | * mapping. The semantics of each attribute should be defined in | ||
11 | * Documentation/DMA-attributes.txt. | ||
12 | */ | ||
13 | enum dma_attr { | ||
14 | DMA_ATTR_WRITE_BARRIER, | ||
15 | DMA_ATTR_WEAK_ORDERING, | ||
16 | DMA_ATTR_WRITE_COMBINE, | ||
17 | DMA_ATTR_NON_CONSISTENT, | ||
18 | DMA_ATTR_NO_KERNEL_MAPPING, | ||
19 | DMA_ATTR_SKIP_CPU_SYNC, | ||
20 | DMA_ATTR_FORCE_CONTIGUOUS, | ||
21 | DMA_ATTR_ALLOC_SINGLE_PAGES, | ||
22 | DMA_ATTR_MAX, | ||
23 | }; | ||
24 | |||
25 | #define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX) | ||
26 | |||
27 | /** | ||
28 | * struct dma_attrs - an opaque container for DMA attributes | ||
29 | * @flags - bitmask representing a collection of enum dma_attr | ||
30 | */ | ||
31 | struct dma_attrs { | ||
32 | unsigned long flags[__DMA_ATTRS_LONGS]; | ||
33 | }; | ||
34 | |||
35 | #define DEFINE_DMA_ATTRS(x) \ | ||
36 | struct dma_attrs x = { \ | ||
37 | .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \ | ||
38 | } | ||
39 | |||
40 | static inline void init_dma_attrs(struct dma_attrs *attrs) | ||
41 | { | ||
42 | bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); | ||
43 | } | ||
44 | |||
45 | /** | ||
46 | * dma_set_attr - set a specific attribute | ||
47 | * @attr: attribute to set | ||
48 | * @attrs: struct dma_attrs (may be NULL) | ||
49 | */ | ||
50 | static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) | ||
51 | { | ||
52 | if (attrs == NULL) | ||
53 | return; | ||
54 | BUG_ON(attr >= DMA_ATTR_MAX); | ||
55 | __set_bit(attr, attrs->flags); | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * dma_get_attr - check for a specific attribute | ||
60 | * @attr: attribute to set | ||
61 | * @attrs: struct dma_attrs (may be NULL) | ||
62 | */ | ||
63 | static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) | ||
64 | { | ||
65 | if (attrs == NULL) | ||
66 | return 0; | ||
67 | BUG_ON(attr >= DMA_ATTR_MAX); | ||
68 | return test_bit(attr, attrs->flags); | ||
69 | } | ||
70 | |||
71 | #endif /* _DMA_ATTR_H */ | ||
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 8443bbb5c071..81c5c8d167ad 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h | |||
@@ -39,7 +39,7 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); | |||
39 | * the arch code to take care of attributes and cache maintenance | 39 | * the arch code to take care of attributes and cache maintenance |
40 | */ | 40 | */ |
41 | struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | 41 | struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, |
42 | struct dma_attrs *attrs, int prot, dma_addr_t *handle, | 42 | unsigned long attrs, int prot, dma_addr_t *handle, |
43 | void (*flush_page)(struct device *, const void *, phys_addr_t)); | 43 | void (*flush_page)(struct device *, const void *, phys_addr_t)); |
44 | void iommu_dma_free(struct device *dev, struct page **pages, size_t size, | 44 | void iommu_dma_free(struct device *dev, struct page **pages, size_t size, |
45 | dma_addr_t *handle); | 45 | dma_addr_t *handle); |
@@ -56,9 +56,9 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
56 | * directly as DMA mapping callbacks for simplicity | 56 | * directly as DMA mapping callbacks for simplicity |
57 | */ | 57 | */ |
58 | void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, | 58 | void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, |
59 | enum dma_data_direction dir, struct dma_attrs *attrs); | 59 | enum dma_data_direction dir, unsigned long attrs); |
60 | void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 60 | void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
61 | enum dma_data_direction dir, struct dma_attrs *attrs); | 61 | enum dma_data_direction dir, unsigned long attrs); |
62 | int iommu_dma_supported(struct device *dev, u64 mask); | 62 | int iommu_dma_supported(struct device *dev, u64 mask); |
63 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); | 63 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); |
64 | 64 | ||
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 71c1b215ef66..66533e18276c 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -5,13 +5,58 @@ | |||
5 | #include <linux/string.h> | 5 | #include <linux/string.h> |
6 | #include <linux/device.h> | 6 | #include <linux/device.h> |
7 | #include <linux/err.h> | 7 | #include <linux/err.h> |
8 | #include <linux/dma-attrs.h> | ||
9 | #include <linux/dma-debug.h> | 8 | #include <linux/dma-debug.h> |
10 | #include <linux/dma-direction.h> | 9 | #include <linux/dma-direction.h> |
11 | #include <linux/scatterlist.h> | 10 | #include <linux/scatterlist.h> |
12 | #include <linux/kmemcheck.h> | 11 | #include <linux/kmemcheck.h> |
13 | #include <linux/bug.h> | 12 | #include <linux/bug.h> |
14 | 13 | ||
14 | /** | ||
15 | * List of possible attributes associated with a DMA mapping. The semantics | ||
16 | * of each attribute should be defined in Documentation/DMA-attributes.txt. | ||
17 | * | ||
18 | * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute | ||
19 | * forces all pending DMA writes to complete. | ||
20 | */ | ||
21 | #define DMA_ATTR_WRITE_BARRIER (1UL << 0) | ||
22 | /* | ||
23 | * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping | ||
24 | * may be weakly ordered, that is that reads and writes may pass each other. | ||
25 | */ | ||
26 | #define DMA_ATTR_WEAK_ORDERING (1UL << 1) | ||
27 | /* | ||
28 | * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be | ||
29 | * buffered to improve performance. | ||
30 | */ | ||
31 | #define DMA_ATTR_WRITE_COMBINE (1UL << 2) | ||
32 | /* | ||
33 | * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either | ||
34 | * consistent or non-consistent memory as it sees fit. | ||
35 | */ | ||
36 | #define DMA_ATTR_NON_CONSISTENT (1UL << 3) | ||
37 | /* | ||
38 | * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel | ||
39 | * virtual mapping for the allocated buffer. | ||
40 | */ | ||
41 | #define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4) | ||
42 | /* | ||
43 | * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of | ||
44 | * the CPU cache for the given buffer assuming that it has been already | ||
45 | * transferred to 'device' domain. | ||
46 | */ | ||
47 | #define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5) | ||
48 | /* | ||
49 | * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer | ||
50 | * in physical memory. | ||
51 | */ | ||
52 | #define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6) | ||
53 | /* | ||
54 | * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem | ||
55 | * that it's probably not worth the time to try to allocate memory to in a way | ||
56 | * that gives better TLB efficiency. | ||
57 | */ | ||
58 | #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7) | ||
59 | |||
15 | /* | 60 | /* |
16 | * A dma_addr_t can hold any valid DMA or bus address for the platform. | 61 | * A dma_addr_t can hold any valid DMA or bus address for the platform. |
17 | * It can be given to a device to use as a DMA source or target. A CPU cannot | 62 | * It can be given to a device to use as a DMA source or target. A CPU cannot |
@@ -21,34 +66,35 @@ | |||
21 | struct dma_map_ops { | 66 | struct dma_map_ops { |
22 | void* (*alloc)(struct device *dev, size_t size, | 67 | void* (*alloc)(struct device *dev, size_t size, |
23 | dma_addr_t *dma_handle, gfp_t gfp, | 68 | dma_addr_t *dma_handle, gfp_t gfp, |
24 | struct dma_attrs *attrs); | 69 | unsigned long attrs); |
25 | void (*free)(struct device *dev, size_t size, | 70 | void (*free)(struct device *dev, size_t size, |
26 | void *vaddr, dma_addr_t dma_handle, | 71 | void *vaddr, dma_addr_t dma_handle, |
27 | struct dma_attrs *attrs); | 72 | unsigned long attrs); |
28 | int (*mmap)(struct device *, struct vm_area_struct *, | 73 | int (*mmap)(struct device *, struct vm_area_struct *, |
29 | void *, dma_addr_t, size_t, struct dma_attrs *attrs); | 74 | void *, dma_addr_t, size_t, |
75 | unsigned long attrs); | ||
30 | 76 | ||
31 | int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *, | 77 | int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *, |
32 | dma_addr_t, size_t, struct dma_attrs *attrs); | 78 | dma_addr_t, size_t, unsigned long attrs); |
33 | 79 | ||
34 | dma_addr_t (*map_page)(struct device *dev, struct page *page, | 80 | dma_addr_t (*map_page)(struct device *dev, struct page *page, |
35 | unsigned long offset, size_t size, | 81 | unsigned long offset, size_t size, |
36 | enum dma_data_direction dir, | 82 | enum dma_data_direction dir, |
37 | struct dma_attrs *attrs); | 83 | unsigned long attrs); |
38 | void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, | 84 | void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, |
39 | size_t size, enum dma_data_direction dir, | 85 | size_t size, enum dma_data_direction dir, |
40 | struct dma_attrs *attrs); | 86 | unsigned long attrs); |
41 | /* | 87 | /* |
42 | * map_sg returns 0 on error and a value > 0 on success. | 88 | * map_sg returns 0 on error and a value > 0 on success. |
43 | * It should never return a value < 0. | 89 | * It should never return a value < 0. |
44 | */ | 90 | */ |
45 | int (*map_sg)(struct device *dev, struct scatterlist *sg, | 91 | int (*map_sg)(struct device *dev, struct scatterlist *sg, |
46 | int nents, enum dma_data_direction dir, | 92 | int nents, enum dma_data_direction dir, |
47 | struct dma_attrs *attrs); | 93 | unsigned long attrs); |
48 | void (*unmap_sg)(struct device *dev, | 94 | void (*unmap_sg)(struct device *dev, |
49 | struct scatterlist *sg, int nents, | 95 | struct scatterlist *sg, int nents, |
50 | enum dma_data_direction dir, | 96 | enum dma_data_direction dir, |
51 | struct dma_attrs *attrs); | 97 | unsigned long attrs); |
52 | void (*sync_single_for_cpu)(struct device *dev, | 98 | void (*sync_single_for_cpu)(struct device *dev, |
53 | dma_addr_t dma_handle, size_t size, | 99 | dma_addr_t dma_handle, size_t size, |
54 | enum dma_data_direction dir); | 100 | enum dma_data_direction dir); |
@@ -123,7 +169,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) | |||
123 | static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, | 169 | static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, |
124 | size_t size, | 170 | size_t size, |
125 | enum dma_data_direction dir, | 171 | enum dma_data_direction dir, |
126 | struct dma_attrs *attrs) | 172 | unsigned long attrs) |
127 | { | 173 | { |
128 | struct dma_map_ops *ops = get_dma_ops(dev); | 174 | struct dma_map_ops *ops = get_dma_ops(dev); |
129 | dma_addr_t addr; | 175 | dma_addr_t addr; |
@@ -142,7 +188,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, | |||
142 | static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, | 188 | static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, |
143 | size_t size, | 189 | size_t size, |
144 | enum dma_data_direction dir, | 190 | enum dma_data_direction dir, |
145 | struct dma_attrs *attrs) | 191 | unsigned long attrs) |
146 | { | 192 | { |
147 | struct dma_map_ops *ops = get_dma_ops(dev); | 193 | struct dma_map_ops *ops = get_dma_ops(dev); |
148 | 194 | ||
@@ -158,7 +204,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, | |||
158 | */ | 204 | */ |
159 | static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, | 205 | static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, |
160 | int nents, enum dma_data_direction dir, | 206 | int nents, enum dma_data_direction dir, |
161 | struct dma_attrs *attrs) | 207 | unsigned long attrs) |
162 | { | 208 | { |
163 | struct dma_map_ops *ops = get_dma_ops(dev); | 209 | struct dma_map_ops *ops = get_dma_ops(dev); |
164 | int i, ents; | 210 | int i, ents; |
@@ -176,7 +222,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, | |||
176 | 222 | ||
177 | static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, | 223 | static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, |
178 | int nents, enum dma_data_direction dir, | 224 | int nents, enum dma_data_direction dir, |
179 | struct dma_attrs *attrs) | 225 | unsigned long attrs) |
180 | { | 226 | { |
181 | struct dma_map_ops *ops = get_dma_ops(dev); | 227 | struct dma_map_ops *ops = get_dma_ops(dev); |
182 | 228 | ||
@@ -195,7 +241,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | |||
195 | 241 | ||
196 | kmemcheck_mark_initialized(page_address(page) + offset, size); | 242 | kmemcheck_mark_initialized(page_address(page) + offset, size); |
197 | BUG_ON(!valid_dma_direction(dir)); | 243 | BUG_ON(!valid_dma_direction(dir)); |
198 | addr = ops->map_page(dev, page, offset, size, dir, NULL); | 244 | addr = ops->map_page(dev, page, offset, size, dir, 0); |
199 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); | 245 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); |
200 | 246 | ||
201 | return addr; | 247 | return addr; |
@@ -208,7 +254,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | |||
208 | 254 | ||
209 | BUG_ON(!valid_dma_direction(dir)); | 255 | BUG_ON(!valid_dma_direction(dir)); |
210 | if (ops->unmap_page) | 256 | if (ops->unmap_page) |
211 | ops->unmap_page(dev, addr, size, dir, NULL); | 257 | ops->unmap_page(dev, addr, size, dir, 0); |
212 | debug_dma_unmap_page(dev, addr, size, dir, false); | 258 | debug_dma_unmap_page(dev, addr, size, dir, false); |
213 | } | 259 | } |
214 | 260 | ||
@@ -289,10 +335,10 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
289 | 335 | ||
290 | } | 336 | } |
291 | 337 | ||
292 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) | 338 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) |
293 | #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) | 339 | #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) |
294 | #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) | 340 | #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) |
295 | #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) | 341 | #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) |
296 | 342 | ||
297 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | 343 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, |
298 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | 344 | void *cpu_addr, dma_addr_t dma_addr, size_t size); |
@@ -321,7 +367,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); | |||
321 | */ | 367 | */ |
322 | static inline int | 368 | static inline int |
323 | dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, | 369 | dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, |
324 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | 370 | dma_addr_t dma_addr, size_t size, unsigned long attrs) |
325 | { | 371 | { |
326 | struct dma_map_ops *ops = get_dma_ops(dev); | 372 | struct dma_map_ops *ops = get_dma_ops(dev); |
327 | BUG_ON(!ops); | 373 | BUG_ON(!ops); |
@@ -330,7 +376,7 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, | |||
330 | return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); | 376 | return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); |
331 | } | 377 | } |
332 | 378 | ||
333 | #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) | 379 | #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) |
334 | 380 | ||
335 | int | 381 | int |
336 | dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | 382 | dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, |
@@ -338,7 +384,8 @@ dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | |||
338 | 384 | ||
339 | static inline int | 385 | static inline int |
340 | dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, | 386 | dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, |
341 | dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) | 387 | dma_addr_t dma_addr, size_t size, |
388 | unsigned long attrs) | ||
342 | { | 389 | { |
343 | struct dma_map_ops *ops = get_dma_ops(dev); | 390 | struct dma_map_ops *ops = get_dma_ops(dev); |
344 | BUG_ON(!ops); | 391 | BUG_ON(!ops); |
@@ -348,7 +395,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, | |||
348 | return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); | 395 | return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); |
349 | } | 396 | } |
350 | 397 | ||
351 | #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) | 398 | #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) |
352 | 399 | ||
353 | #ifndef arch_dma_alloc_attrs | 400 | #ifndef arch_dma_alloc_attrs |
354 | #define arch_dma_alloc_attrs(dev, flag) (true) | 401 | #define arch_dma_alloc_attrs(dev, flag) (true) |
@@ -356,7 +403,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, | |||
356 | 403 | ||
357 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | 404 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, |
358 | dma_addr_t *dma_handle, gfp_t flag, | 405 | dma_addr_t *dma_handle, gfp_t flag, |
359 | struct dma_attrs *attrs) | 406 | unsigned long attrs) |
360 | { | 407 | { |
361 | struct dma_map_ops *ops = get_dma_ops(dev); | 408 | struct dma_map_ops *ops = get_dma_ops(dev); |
362 | void *cpu_addr; | 409 | void *cpu_addr; |
@@ -378,7 +425,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, | |||
378 | 425 | ||
379 | static inline void dma_free_attrs(struct device *dev, size_t size, | 426 | static inline void dma_free_attrs(struct device *dev, size_t size, |
380 | void *cpu_addr, dma_addr_t dma_handle, | 427 | void *cpu_addr, dma_addr_t dma_handle, |
381 | struct dma_attrs *attrs) | 428 | unsigned long attrs) |
382 | { | 429 | { |
383 | struct dma_map_ops *ops = get_dma_ops(dev); | 430 | struct dma_map_ops *ops = get_dma_ops(dev); |
384 | 431 | ||
@@ -398,31 +445,27 @@ static inline void dma_free_attrs(struct device *dev, size_t size, | |||
398 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | 445 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
399 | dma_addr_t *dma_handle, gfp_t flag) | 446 | dma_addr_t *dma_handle, gfp_t flag) |
400 | { | 447 | { |
401 | return dma_alloc_attrs(dev, size, dma_handle, flag, NULL); | 448 | return dma_alloc_attrs(dev, size, dma_handle, flag, 0); |
402 | } | 449 | } |
403 | 450 | ||
404 | static inline void dma_free_coherent(struct device *dev, size_t size, | 451 | static inline void dma_free_coherent(struct device *dev, size_t size, |
405 | void *cpu_addr, dma_addr_t dma_handle) | 452 | void *cpu_addr, dma_addr_t dma_handle) |
406 | { | 453 | { |
407 | return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); | 454 | return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); |
408 | } | 455 | } |
409 | 456 | ||
410 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, | 457 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, |
411 | dma_addr_t *dma_handle, gfp_t gfp) | 458 | dma_addr_t *dma_handle, gfp_t gfp) |
412 | { | 459 | { |
413 | DEFINE_DMA_ATTRS(attrs); | 460 | return dma_alloc_attrs(dev, size, dma_handle, gfp, |
414 | 461 | DMA_ATTR_NON_CONSISTENT); | |
415 | dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); | ||
416 | return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); | ||
417 | } | 462 | } |
418 | 463 | ||
419 | static inline void dma_free_noncoherent(struct device *dev, size_t size, | 464 | static inline void dma_free_noncoherent(struct device *dev, size_t size, |
420 | void *cpu_addr, dma_addr_t dma_handle) | 465 | void *cpu_addr, dma_addr_t dma_handle) |
421 | { | 466 | { |
422 | DEFINE_DMA_ATTRS(attrs); | 467 | dma_free_attrs(dev, size, cpu_addr, dma_handle, |
423 | 468 | DMA_ATTR_NON_CONSISTENT); | |
424 | dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); | ||
425 | dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); | ||
426 | } | 469 | } |
427 | 470 | ||
428 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 471 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
@@ -646,9 +689,8 @@ static inline void dmam_release_declared_memory(struct device *dev) | |||
646 | static inline void *dma_alloc_wc(struct device *dev, size_t size, | 689 | static inline void *dma_alloc_wc(struct device *dev, size_t size, |
647 | dma_addr_t *dma_addr, gfp_t gfp) | 690 | dma_addr_t *dma_addr, gfp_t gfp) |
648 | { | 691 | { |
649 | DEFINE_DMA_ATTRS(attrs); | 692 | return dma_alloc_attrs(dev, size, dma_addr, gfp, |
650 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | 693 | DMA_ATTR_WRITE_COMBINE); |
651 | return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs); | ||
652 | } | 694 | } |
653 | #ifndef dma_alloc_writecombine | 695 | #ifndef dma_alloc_writecombine |
654 | #define dma_alloc_writecombine dma_alloc_wc | 696 | #define dma_alloc_writecombine dma_alloc_wc |
@@ -657,9 +699,8 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size, | |||
657 | static inline void dma_free_wc(struct device *dev, size_t size, | 699 | static inline void dma_free_wc(struct device *dev, size_t size, |
658 | void *cpu_addr, dma_addr_t dma_addr) | 700 | void *cpu_addr, dma_addr_t dma_addr) |
659 | { | 701 | { |
660 | DEFINE_DMA_ATTRS(attrs); | 702 | return dma_free_attrs(dev, size, cpu_addr, dma_addr, |
661 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | 703 | DMA_ATTR_WRITE_COMBINE); |
662 | return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs); | ||
663 | } | 704 | } |
664 | #ifndef dma_free_writecombine | 705 | #ifndef dma_free_writecombine |
665 | #define dma_free_writecombine dma_free_wc | 706 | #define dma_free_writecombine dma_free_wc |
@@ -670,9 +711,8 @@ static inline int dma_mmap_wc(struct device *dev, | |||
670 | void *cpu_addr, dma_addr_t dma_addr, | 711 | void *cpu_addr, dma_addr_t dma_addr, |
671 | size_t size) | 712 | size_t size) |
672 | { | 713 | { |
673 | DEFINE_DMA_ATTRS(attrs); | 714 | return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, |
674 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | 715 | DMA_ATTR_WRITE_COMBINE); |
675 | return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); | ||
676 | } | 716 | } |
677 | #ifndef dma_mmap_writecombine | 717 | #ifndef dma_mmap_writecombine |
678 | #define dma_mmap_writecombine dma_mmap_wc | 718 | #define dma_mmap_writecombine dma_mmap_wc |
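With dma-attrs.h gone, the old dma_get_attr() helper disappears as well; dma_map_ops implementations and arch code are converted elsewhere in the series to test the bits directly. A hedged illustration of the implementation-side idiom (the function below is made up for the example):

    /* Attribute checks inside a dma_map_ops method become plain bit tests. */
    static void demo_unmap(struct device *dev, dma_addr_t handle, size_t size,
                           enum dma_data_direction dir, unsigned long attrs)
    {
            /* Previously: if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) ... */
            if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                    ;       /* a real implementation would sync CPU caches here */
    }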
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 4f1bbc68cd1b..546d68057e3b 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef _DYNAMIC_DEBUG_H | 1 | #ifndef _DYNAMIC_DEBUG_H |
2 | #define _DYNAMIC_DEBUG_H | 2 | #define _DYNAMIC_DEBUG_H |
3 | 3 | ||
4 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) | ||
5 | #include <linux/jump_label.h> | ||
6 | #endif | ||
7 | |||
4 | /* | 8 | /* |
5 | * An instance of this structure is created in a special | 9 | * An instance of this structure is created in a special |
6 | * ELF section at every dynamic debug callsite. At runtime, | 10 | * ELF section at every dynamic debug callsite. At runtime, |
@@ -33,6 +37,12 @@ struct _ddebug { | |||
33 | #define _DPRINTK_FLAGS_DEFAULT 0 | 37 | #define _DPRINTK_FLAGS_DEFAULT 0 |
34 | #endif | 38 | #endif |
35 | unsigned int flags:8; | 39 | unsigned int flags:8; |
40 | #ifdef HAVE_JUMP_LABEL | ||
41 | union { | ||
42 | struct static_key_true dd_key_true; | ||
43 | struct static_key_false dd_key_false; | ||
44 | } key; | ||
45 | #endif | ||
36 | } __attribute__((aligned(8))); | 46 | } __attribute__((aligned(8))); |
37 | 47 | ||
38 | 48 | ||
@@ -60,7 +70,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, | |||
60 | const struct net_device *dev, | 70 | const struct net_device *dev, |
61 | const char *fmt, ...); | 71 | const char *fmt, ...); |
62 | 72 | ||
63 | #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ | 73 | #define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \ |
64 | static struct _ddebug __aligned(8) \ | 74 | static struct _ddebug __aligned(8) \ |
65 | __attribute__((section("__verbose"))) name = { \ | 75 | __attribute__((section("__verbose"))) name = { \ |
66 | .modname = KBUILD_MODNAME, \ | 76 | .modname = KBUILD_MODNAME, \ |
@@ -68,13 +78,51 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, | |||
68 | .filename = __FILE__, \ | 78 | .filename = __FILE__, \ |
69 | .format = (fmt), \ | 79 | .format = (fmt), \ |
70 | .lineno = __LINE__, \ | 80 | .lineno = __LINE__, \ |
71 | .flags = _DPRINTK_FLAGS_DEFAULT, \ | 81 | .flags = _DPRINTK_FLAGS_DEFAULT, \ |
82 | dd_key_init(key, init) \ | ||
72 | } | 83 | } |
73 | 84 | ||
85 | #ifdef HAVE_JUMP_LABEL | ||
86 | |||
87 | #define dd_key_init(key, init) key = (init) | ||
88 | |||
89 | #ifdef DEBUG | ||
90 | #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ | ||
91 | DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \ | ||
92 | (STATIC_KEY_TRUE_INIT)) | ||
93 | |||
94 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ | ||
95 | static_branch_likely(&descriptor.key.dd_key_true) | ||
96 | #else | ||
97 | #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ | ||
98 | DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \ | ||
99 | (STATIC_KEY_FALSE_INIT)) | ||
100 | |||
101 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ | ||
102 | static_branch_unlikely(&descriptor.key.dd_key_false) | ||
103 | #endif | ||
104 | |||
105 | #else | ||
106 | |||
107 | #define dd_key_init(key, init) | ||
108 | |||
109 | #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ | ||
110 | DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0) | ||
111 | |||
112 | #ifdef DEBUG | ||
113 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ | ||
114 | likely(descriptor.flags & _DPRINTK_FLAGS_PRINT) | ||
115 | #else | ||
116 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ | ||
117 | unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) | ||
118 | #endif | ||
119 | |||
120 | #endif | ||
121 | |||
74 | #define dynamic_pr_debug(fmt, ...) \ | 122 | #define dynamic_pr_debug(fmt, ...) \ |
75 | do { \ | 123 | do { \ |
76 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 124 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
77 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ | 125 | if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ |
78 | __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ | 126 | __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ |
79 | ##__VA_ARGS__); \ | 127 | ##__VA_ARGS__); \ |
80 | } while (0) | 128 | } while (0) |
@@ -82,7 +130,7 @@ do { \ | |||
82 | #define dynamic_dev_dbg(dev, fmt, ...) \ | 130 | #define dynamic_dev_dbg(dev, fmt, ...) \ |
83 | do { \ | 131 | do { \ |
84 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 132 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
85 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ | 133 | if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ |
86 | __dynamic_dev_dbg(&descriptor, dev, fmt, \ | 134 | __dynamic_dev_dbg(&descriptor, dev, fmt, \ |
87 | ##__VA_ARGS__); \ | 135 | ##__VA_ARGS__); \ |
88 | } while (0) | 136 | } while (0) |
@@ -90,7 +138,7 @@ do { \ | |||
90 | #define dynamic_netdev_dbg(dev, fmt, ...) \ | 138 | #define dynamic_netdev_dbg(dev, fmt, ...) \ |
91 | do { \ | 139 | do { \ |
92 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 140 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
93 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ | 141 | if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ |
94 | __dynamic_netdev_dbg(&descriptor, dev, fmt, \ | 142 | __dynamic_netdev_dbg(&descriptor, dev, fmt, \ |
95 | ##__VA_ARGS__); \ | 143 | ##__VA_ARGS__); \ |
96 | } while (0) | 144 | } while (0) |
@@ -100,7 +148,7 @@ do { \ | |||
100 | do { \ | 148 | do { \ |
101 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \ | 149 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \ |
102 | __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\ | 150 | __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\ |
103 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ | 151 | if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ |
104 | print_hex_dump(KERN_DEBUG, prefix_str, \ | 152 | print_hex_dump(KERN_DEBUG, prefix_str, \ |
105 | prefix_type, rowsize, groupsize, \ | 153 | prefix_type, rowsize, groupsize, \ |
106 | buf, len, ascii); \ | 154 | buf, len, ascii); \ |
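The net effect of the dynamic_debug changes: when the compiler supports asm goto and CONFIG_JUMP_LABEL is set, each debug call site carries a static key inside its _ddebug descriptor, and DYNAMIC_DEBUG_BRANCH() compiles to a runtime-patched jump/no-op instead of a load-and-test of descriptor.flags; enabling a callsite through the dynamic_debug control file flips the key. A simplified sketch of what a call site boils down to (not the literal macro expansion):

    #include <linux/dynamic_debug.h>

    static void demo(int x)
    {
            DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "x=%d\n");

            /* With HAVE_JUMP_LABEL this is a patchable branch via the embedded
             * static key; without it, the old flags test is used.
             */
            if (DYNAMIC_DEBUG_BRANCH(descriptor))
                    __dynamic_pr_debug(&descriptor, "x=%d\n", x);
    }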
diff --git a/include/linux/fence.h b/include/linux/fence.h index 523ea3fbbddd..8cc719a63728 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h | |||
@@ -358,7 +358,7 @@ u64 fence_context_alloc(unsigned num); | |||
358 | #define FENCE_TRACE(f, fmt, args...) \ | 358 | #define FENCE_TRACE(f, fmt, args...) \ |
359 | do { \ | 359 | do { \ |
360 | struct fence *__ff = (f); \ | 360 | struct fence *__ff = (f); \ |
361 | if (config_enabled(CONFIG_FENCE_TRACE)) \ | 361 | if (IS_ENABLED(CONFIG_FENCE_TRACE)) \ |
362 | pr_info("f %llu#%u: " fmt, \ | 362 | pr_info("f %llu#%u: " fmt, \ |
363 | __ff->context, __ff->seqno, ##args); \ | 363 | __ff->context, __ff->seqno, ##args); \ |
364 | } while (0) | 364 | } while (0) |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 68904469fba1..661af564fae8 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -76,7 +76,6 @@ | |||
76 | 76 | ||
77 | #include <linux/types.h> | 77 | #include <linux/types.h> |
78 | #include <linux/compiler.h> | 78 | #include <linux/compiler.h> |
79 | #include <linux/bug.h> | ||
80 | 79 | ||
81 | extern bool static_key_initialized; | 80 | extern bool static_key_initialized; |
82 | 81 | ||
@@ -115,20 +114,8 @@ enum jump_label_type { | |||
115 | 114 | ||
116 | struct module; | 115 | struct module; |
117 | 116 | ||
118 | #include <linux/atomic.h> | ||
119 | |||
120 | #ifdef HAVE_JUMP_LABEL | 117 | #ifdef HAVE_JUMP_LABEL |
121 | 118 | ||
122 | static inline int static_key_count(struct static_key *key) | ||
123 | { | ||
124 | /* | ||
125 | * -1 means the first static_key_slow_inc() is in progress. | ||
126 | * static_key_enabled() must return true, so return 1 here. | ||
127 | */ | ||
128 | int n = atomic_read(&key->enabled); | ||
129 | return n >= 0 ? n : 1; | ||
130 | } | ||
131 | |||
132 | #define JUMP_TYPE_FALSE 0UL | 119 | #define JUMP_TYPE_FALSE 0UL |
133 | #define JUMP_TYPE_TRUE 1UL | 120 | #define JUMP_TYPE_TRUE 1UL |
134 | #define JUMP_TYPE_MASK 1UL | 121 | #define JUMP_TYPE_MASK 1UL |
@@ -157,16 +144,29 @@ extern int jump_label_text_reserved(void *start, void *end); | |||
157 | extern void static_key_slow_inc(struct static_key *key); | 144 | extern void static_key_slow_inc(struct static_key *key); |
158 | extern void static_key_slow_dec(struct static_key *key); | 145 | extern void static_key_slow_dec(struct static_key *key); |
159 | extern void jump_label_apply_nops(struct module *mod); | 146 | extern void jump_label_apply_nops(struct module *mod); |
147 | extern int static_key_count(struct static_key *key); | ||
148 | extern void static_key_enable(struct static_key *key); | ||
149 | extern void static_key_disable(struct static_key *key); | ||
160 | 150 | ||
151 | /* | ||
152 | * We should be using ATOMIC_INIT() for initializing .enabled, but | ||
153 | * the inclusion of atomic.h is problematic for inclusion of jump_label.h | ||
154 | * in 'low-level' headers. Thus, we are initializing .enabled with a | ||
155 | * raw value, but have added a BUILD_BUG_ON() to catch any issues in | ||
156 | * jump_label_init() see: kernel/jump_label.c. | ||
157 | */ | ||
161 | #define STATIC_KEY_INIT_TRUE \ | 158 | #define STATIC_KEY_INIT_TRUE \ |
162 | { .enabled = ATOMIC_INIT(1), \ | 159 | { .enabled = { 1 }, \ |
163 | .entries = (void *)JUMP_TYPE_TRUE } | 160 | .entries = (void *)JUMP_TYPE_TRUE } |
164 | #define STATIC_KEY_INIT_FALSE \ | 161 | #define STATIC_KEY_INIT_FALSE \ |
165 | { .enabled = ATOMIC_INIT(0), \ | 162 | { .enabled = { 0 }, \ |
166 | .entries = (void *)JUMP_TYPE_FALSE } | 163 | .entries = (void *)JUMP_TYPE_FALSE } |
167 | 164 | ||
168 | #else /* !HAVE_JUMP_LABEL */ | 165 | #else /* !HAVE_JUMP_LABEL */ |
169 | 166 | ||
167 | #include <linux/atomic.h> | ||
168 | #include <linux/bug.h> | ||
169 | |||
170 | static inline int static_key_count(struct static_key *key) | 170 | static inline int static_key_count(struct static_key *key) |
171 | { | 171 | { |
172 | return atomic_read(&key->enabled); | 172 | return atomic_read(&key->enabled); |
@@ -216,14 +216,6 @@ static inline int jump_label_apply_nops(struct module *mod) | |||
216 | return 0; | 216 | return 0; |
217 | } | 217 | } |
218 | 218 | ||
219 | #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } | ||
220 | #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } | ||
221 | |||
222 | #endif /* HAVE_JUMP_LABEL */ | ||
223 | |||
224 | #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE | ||
225 | #define jump_label_enabled static_key_enabled | ||
226 | |||
227 | static inline void static_key_enable(struct static_key *key) | 219 | static inline void static_key_enable(struct static_key *key) |
228 | { | 220 | { |
229 | int count = static_key_count(key); | 221 | int count = static_key_count(key); |
@@ -244,6 +236,14 @@ static inline void static_key_disable(struct static_key *key) | |||
244 | static_key_slow_dec(key); | 236 | static_key_slow_dec(key); |
245 | } | 237 | } |
246 | 238 | ||
239 | #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } | ||
240 | #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } | ||
241 | |||
242 | #endif /* HAVE_JUMP_LABEL */ | ||
243 | |||
244 | #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE | ||
245 | #define jump_label_enabled static_key_enabled | ||
246 | |||
247 | /* -------------------------------------------------------------------------- */ | 247 | /* -------------------------------------------------------------------------- */ |
248 | 248 | ||
249 | /* | 249 | /* |
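The STATIC_KEY_INIT_TRUE / STATIC_KEY_INIT_FALSE change above swaps ATOMIC_INIT() for a raw brace initializer so jump_label.h no longer needs atomic.h in the HAVE_JUMP_LABEL case. Below is a toy, compilable illustration of why that substitution is layout-equivalent (and of the BUILD_BUG_ON() check the series adds to jump_label_init()); the struct definitions are simplified stand-ins, not the kernel's.

	#include <assert.h>

	typedef struct { int counter; } atomic_t;   /* shape of the kernel type */
	#define ATOMIC_INIT(i) { (i) }

	struct static_key { atomic_t enabled; void *entries; };

	/* raw initializer, as in the patched STATIC_KEY_INIT_TRUE */
	static struct static_key key_true  = { .enabled = { 1 }, .entries = (void *)0 };
	/* old-style initializer, still used in the !HAVE_JUMP_LABEL branch */
	static struct static_key key_false = { .enabled = ATOMIC_INIT(0), .entries = (void *)0 };

	int main(void)
	{
		/* both forms produce the same stored value, which is what
		 * BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0) verifies at build time */
		assert(key_true.enabled.counter == 1);
		assert(key_false.enabled.counter == 0);
		return 0;
	}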
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 017fced60242..5f81f8a187f2 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -6,7 +6,6 @@ | |||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | 7 | ||
8 | struct device; | 8 | struct device; |
9 | struct dma_attrs; | ||
10 | struct page; | 9 | struct page; |
11 | struct scatterlist; | 10 | struct scatterlist; |
12 | 11 | ||
@@ -68,10 +67,10 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, | |||
68 | extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, | 67 | extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, |
69 | unsigned long offset, size_t size, | 68 | unsigned long offset, size_t size, |
70 | enum dma_data_direction dir, | 69 | enum dma_data_direction dir, |
71 | struct dma_attrs *attrs); | 70 | unsigned long attrs); |
72 | extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, | 71 | extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, |
73 | size_t size, enum dma_data_direction dir, | 72 | size_t size, enum dma_data_direction dir, |
74 | struct dma_attrs *attrs); | 73 | unsigned long attrs); |
75 | 74 | ||
76 | extern int | 75 | extern int |
77 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, | 76 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, |
@@ -83,12 +82,13 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, | |||
83 | 82 | ||
84 | extern int | 83 | extern int |
85 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | 84 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, |
86 | enum dma_data_direction dir, struct dma_attrs *attrs); | 85 | enum dma_data_direction dir, |
86 | unsigned long attrs); | ||
87 | 87 | ||
88 | extern void | 88 | extern void |
89 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 89 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
90 | int nelems, enum dma_data_direction dir, | 90 | int nelems, enum dma_data_direction dir, |
91 | struct dma_attrs *attrs); | 91 | unsigned long attrs); |
92 | 92 | ||
93 | extern void | 93 | extern void |
94 | swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | 94 | swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, |
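Throughout this series the struct dma_attrs * parameter becomes a plain unsigned long bitmask, so call sites OR DMA_ATTR_* flags together and pass 0 for "no attributes" (as the swiotlb_map_sg()/swiotlb_unmap_sg() hunks in lib/swiotlb.c further down show). The sketch below illustrates that convention in userspace; the flag values and the mock_map_page() helper are illustrative stand-ins, not the kernel API.

	#include <stdio.h>

	/* illustrative bit assignments in the spirit of DMA_ATTR_* */
	#define DMA_ATTR_WRITE_COMBINE     (1UL << 2)
	#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
	#define DMA_ATTR_SKIP_CPU_SYNC     (1UL << 5)

	static int mock_map_page(void *page, unsigned long attrs)
	{
		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			printf("skipping CPU cache sync for %p\n", page);
		if (attrs & DMA_ATTR_WRITE_COMBINE)
			printf("mapping %p write-combined\n", page);
		if (attrs == 0)
			printf("mapping %p with default attributes\n", page);
		return 0;
	}

	int main(void)
	{
		char buf[64];

		/* callers now just OR flags instead of building a struct dma_attrs */
		mock_map_page(buf, DMA_ATTR_WRITE_COMBINE | DMA_ATTR_SKIP_CPU_SYNC);
		mock_map_page(buf, 0);          /* 0 replaces the old NULL attrs */
		return 0;
	}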
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index 760399a470bd..2bb5deb0012e 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h | |||
@@ -173,14 +173,14 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) | |||
173 | mutex_release(&ctx->dep_map, 0, _THIS_IP_); | 173 | mutex_release(&ctx->dep_map, 0, _THIS_IP_); |
174 | 174 | ||
175 | DEBUG_LOCKS_WARN_ON(ctx->acquired); | 175 | DEBUG_LOCKS_WARN_ON(ctx->acquired); |
176 | if (!config_enabled(CONFIG_PROVE_LOCKING)) | 176 | if (!IS_ENABLED(CONFIG_PROVE_LOCKING)) |
177 | /* | 177 | /* |
178 | * lockdep will normally handle this, | 178 | * lockdep will normally handle this, |
179 | * but fail without anyway | 179 | * but fail without anyway |
180 | */ | 180 | */ |
181 | ctx->done_acquire = 1; | 181 | ctx->done_acquire = 1; |
182 | 182 | ||
183 | if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC)) | 183 | if (!IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC)) |
184 | /* ensure ww_acquire_fini will still fail if called twice */ | 184 | /* ensure ww_acquire_fini will still fail if called twice */ |
185 | ctx->acquired = ~0U; | 185 | ctx->acquired = ~0U; |
186 | #endif | 186 | #endif |
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 946340ce7701..a4a9a55a0c42 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h | |||
@@ -98,7 +98,7 @@ struct vb2_threadio_data; | |||
98 | * #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, unmap_dmabuf. | 98 | * #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, unmap_dmabuf. |
99 | */ | 99 | */ |
100 | struct vb2_mem_ops { | 100 | struct vb2_mem_ops { |
101 | void *(*alloc)(struct device *dev, const struct dma_attrs *attrs, | 101 | void *(*alloc)(struct device *dev, unsigned long attrs, |
102 | unsigned long size, enum dma_data_direction dma_dir, | 102 | unsigned long size, enum dma_data_direction dma_dir, |
103 | gfp_t gfp_flags); | 103 | gfp_t gfp_flags); |
104 | void (*put)(void *buf_priv); | 104 | void (*put)(void *buf_priv); |
@@ -408,7 +408,7 @@ struct vb2_buf_ops { | |||
408 | * @io_modes: supported io methods (see vb2_io_modes enum) | 408 | * @io_modes: supported io methods (see vb2_io_modes enum) |
409 | * @dev: device to use for the default allocation context if the driver | 409 | * @dev: device to use for the default allocation context if the driver |
410 | * doesn't fill in the @alloc_devs array. | 410 | * doesn't fill in the @alloc_devs array. |
411 | * @dma_attrs: DMA attributes to use for the DMA. May be NULL. | 411 | * @dma_attrs: DMA attributes to use for the DMA. |
412 | * @fileio_read_once: report EOF after reading the first buffer | 412 | * @fileio_read_once: report EOF after reading the first buffer |
413 | * @fileio_write_immediately: queue buffer after each write() call | 413 | * @fileio_write_immediately: queue buffer after each write() call |
414 | * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver | 414 | * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver |
@@ -476,7 +476,7 @@ struct vb2_queue { | |||
476 | unsigned int type; | 476 | unsigned int type; |
477 | unsigned int io_modes; | 477 | unsigned int io_modes; |
478 | struct device *dev; | 478 | struct device *dev; |
479 | const struct dma_attrs *dma_attrs; | 479 | unsigned long dma_attrs; |
480 | unsigned fileio_read_once:1; | 480 | unsigned fileio_read_once:1; |
481 | unsigned fileio_write_immediately:1; | 481 | unsigned fileio_write_immediately:1; |
482 | unsigned allow_zero_bytesused:1; | 482 | unsigned allow_zero_bytesused:1; |
diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h index df2aabee3401..5604818d137e 100644 --- a/include/media/videobuf2-dma-contig.h +++ b/include/media/videobuf2-dma-contig.h | |||
@@ -16,8 +16,6 @@ | |||
16 | #include <media/videobuf2-v4l2.h> | 16 | #include <media/videobuf2-v4l2.h> |
17 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
18 | 18 | ||
19 | struct dma_attrs; | ||
20 | |||
21 | static inline dma_addr_t | 19 | static inline dma_addr_t |
22 | vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no) | 20 | vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no) |
23 | { | 21 | { |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 7e440d41487a..a8137dcf5a00 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -2819,19 +2819,19 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, | |||
2819 | static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, | 2819 | static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, |
2820 | void *cpu_addr, size_t size, | 2820 | void *cpu_addr, size_t size, |
2821 | enum dma_data_direction direction, | 2821 | enum dma_data_direction direction, |
2822 | struct dma_attrs *attrs) | 2822 | unsigned long dma_attrs) |
2823 | { | 2823 | { |
2824 | return dma_map_single_attrs(dev->dma_device, cpu_addr, size, | 2824 | return dma_map_single_attrs(dev->dma_device, cpu_addr, size, |
2825 | direction, attrs); | 2825 | direction, dma_attrs); |
2826 | } | 2826 | } |
2827 | 2827 | ||
2828 | static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, | 2828 | static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, |
2829 | u64 addr, size_t size, | 2829 | u64 addr, size_t size, |
2830 | enum dma_data_direction direction, | 2830 | enum dma_data_direction direction, |
2831 | struct dma_attrs *attrs) | 2831 | unsigned long dma_attrs) |
2832 | { | 2832 | { |
2833 | return dma_unmap_single_attrs(dev->dma_device, addr, size, | 2833 | return dma_unmap_single_attrs(dev->dma_device, addr, size, |
2834 | direction, attrs); | 2834 | direction, dma_attrs); |
2835 | } | 2835 | } |
2836 | 2836 | ||
2837 | /** | 2837 | /** |
@@ -2906,17 +2906,18 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, | |||
2906 | static inline int ib_dma_map_sg_attrs(struct ib_device *dev, | 2906 | static inline int ib_dma_map_sg_attrs(struct ib_device *dev, |
2907 | struct scatterlist *sg, int nents, | 2907 | struct scatterlist *sg, int nents, |
2908 | enum dma_data_direction direction, | 2908 | enum dma_data_direction direction, |
2909 | struct dma_attrs *attrs) | 2909 | unsigned long dma_attrs) |
2910 | { | 2910 | { |
2911 | return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); | 2911 | return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, |
2912 | dma_attrs); | ||
2912 | } | 2913 | } |
2913 | 2914 | ||
2914 | static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, | 2915 | static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, |
2915 | struct scatterlist *sg, int nents, | 2916 | struct scatterlist *sg, int nents, |
2916 | enum dma_data_direction direction, | 2917 | enum dma_data_direction direction, |
2917 | struct dma_attrs *attrs) | 2918 | unsigned long dma_attrs) |
2918 | { | 2919 | { |
2919 | dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); | 2920 | dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); |
2920 | } | 2921 | } |
2921 | /** | 2922 | /** |
2922 | * ib_sg_dma_address - Return the DMA address from a scatter/gather entry | 2923 | * ib_sg_dma_address - Return the DMA address from a scatter/gather entry |
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index 8b2eb93ae8ba..7c35e279d1e3 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h | |||
@@ -9,30 +9,30 @@ extern int xen_swiotlb_init(int verbose, bool early); | |||
9 | extern void | 9 | extern void |
10 | *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 10 | *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
11 | dma_addr_t *dma_handle, gfp_t flags, | 11 | dma_addr_t *dma_handle, gfp_t flags, |
12 | struct dma_attrs *attrs); | 12 | unsigned long attrs); |
13 | 13 | ||
14 | extern void | 14 | extern void |
15 | xen_swiotlb_free_coherent(struct device *hwdev, size_t size, | 15 | xen_swiotlb_free_coherent(struct device *hwdev, size_t size, |
16 | void *vaddr, dma_addr_t dma_handle, | 16 | void *vaddr, dma_addr_t dma_handle, |
17 | struct dma_attrs *attrs); | 17 | unsigned long attrs); |
18 | 18 | ||
19 | extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, | 19 | extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, |
20 | unsigned long offset, size_t size, | 20 | unsigned long offset, size_t size, |
21 | enum dma_data_direction dir, | 21 | enum dma_data_direction dir, |
22 | struct dma_attrs *attrs); | 22 | unsigned long attrs); |
23 | 23 | ||
24 | extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, | 24 | extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, |
25 | size_t size, enum dma_data_direction dir, | 25 | size_t size, enum dma_data_direction dir, |
26 | struct dma_attrs *attrs); | 26 | unsigned long attrs); |
27 | extern int | 27 | extern int |
28 | xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 28 | xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
29 | int nelems, enum dma_data_direction dir, | 29 | int nelems, enum dma_data_direction dir, |
30 | struct dma_attrs *attrs); | 30 | unsigned long attrs); |
31 | 31 | ||
32 | extern void | 32 | extern void |
33 | xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 33 | xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
34 | int nelems, enum dma_data_direction dir, | 34 | int nelems, enum dma_data_direction dir, |
35 | struct dma_attrs *attrs); | 35 | unsigned long attrs); |
36 | 36 | ||
37 | extern void | 37 | extern void |
38 | xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | 38 | xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, |
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 0dbea887d625..f19aa02a8f48 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/static_key.h> | 15 | #include <linux/static_key.h> |
16 | #include <linux/jump_label_ratelimit.h> | 16 | #include <linux/jump_label_ratelimit.h> |
17 | #include <linux/bug.h> | ||
17 | 18 | ||
18 | #ifdef HAVE_JUMP_LABEL | 19 | #ifdef HAVE_JUMP_LABEL |
19 | 20 | ||
@@ -56,6 +57,49 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) | |||
56 | 57 | ||
57 | static void jump_label_update(struct static_key *key); | 58 | static void jump_label_update(struct static_key *key); |
58 | 59 | ||
60 | /* | ||
61 | * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h. | ||
62 | * The use of 'atomic_read()' requires atomic.h and its problematic for some | ||
63 | * kernel headers such as kernel.h and others. Since static_key_count() is not | ||
64 | * used in the branch statements as it is for the !HAVE_JUMP_LABEL case its ok | ||
65 | * to have it be a function here. Similarly, for 'static_key_enable()' and | ||
66 | * 'static_key_disable()', which require bug.h. This should allow jump_label.h | ||
67 | * to be included from most/all places for HAVE_JUMP_LABEL. | ||
68 | */ | ||
69 | int static_key_count(struct static_key *key) | ||
70 | { | ||
71 | /* | ||
72 | * -1 means the first static_key_slow_inc() is in progress. | ||
73 | * static_key_enabled() must return true, so return 1 here. | ||
74 | */ | ||
75 | int n = atomic_read(&key->enabled); | ||
76 | |||
77 | return n >= 0 ? n : 1; | ||
78 | } | ||
79 | EXPORT_SYMBOL_GPL(static_key_count); | ||
80 | |||
81 | void static_key_enable(struct static_key *key) | ||
82 | { | ||
83 | int count = static_key_count(key); | ||
84 | |||
85 | WARN_ON_ONCE(count < 0 || count > 1); | ||
86 | |||
87 | if (!count) | ||
88 | static_key_slow_inc(key); | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(static_key_enable); | ||
91 | |||
92 | void static_key_disable(struct static_key *key) | ||
93 | { | ||
94 | int count = static_key_count(key); | ||
95 | |||
96 | WARN_ON_ONCE(count < 0 || count > 1); | ||
97 | |||
98 | if (count) | ||
99 | static_key_slow_dec(key); | ||
100 | } | ||
101 | EXPORT_SYMBOL_GPL(static_key_disable); | ||
102 | |||
59 | void static_key_slow_inc(struct static_key *key) | 103 | void static_key_slow_inc(struct static_key *key) |
60 | { | 104 | { |
61 | int v, v1; | 105 | int v, v1; |
@@ -235,6 +279,15 @@ void __init jump_label_init(void) | |||
235 | struct static_key *key = NULL; | 279 | struct static_key *key = NULL; |
236 | struct jump_entry *iter; | 280 | struct jump_entry *iter; |
237 | 281 | ||
282 | /* | ||
283 | * Since we are initializing the static_key.enabled field with | ||
284 | * with the 'raw' int values (to avoid pulling in atomic.h) in | ||
285 | * jump_label.h, let's make sure that is safe. There are only two | ||
286 | * cases to check since we initialize to 0 or 1. | ||
287 | */ | ||
288 | BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0); | ||
289 | BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1); | ||
290 | |||
238 | jump_label_lock(); | 291 | jump_label_lock(); |
239 | jump_label_sort_entries(iter_start, iter_stop); | 292 | jump_label_sort_entries(iter_start, iter_stop); |
240 | 293 | ||
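static_key_count(), static_key_enable() and static_key_disable() are moved out of line here so jump_label.h can drop its atomic.h and bug.h includes; their semantics are unchanged: the key is flipped only on an off-to-on or on-to-off transition. The small userspace mock below reproduces that idempotent behaviour, with a plain int standing in for the atomic count and for the slow_inc/slow_dec paths.

	#include <assert.h>

	struct mock_key { int enabled; };

	static void mock_key_enable(struct mock_key *k)
	{
		if (k->enabled == 0)
			k->enabled++;        /* stands in for static_key_slow_inc() */
	}

	static void mock_key_disable(struct mock_key *k)
	{
		if (k->enabled)
			k->enabled--;        /* stands in for static_key_slow_dec() */
	}

	int main(void)
	{
		struct mock_key k = { 0 };

		mock_key_enable(&k);
		mock_key_enable(&k);         /* second enable is a no-op */
		assert(k.enabled == 1);
		mock_key_disable(&k);
		mock_key_disable(&k);        /* second disable is a no-op */
		assert(k.enabled == 0);
		return 0;
	}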
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index d49bfa1e53e6..1d3b7665d0be 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -585,8 +585,8 @@ static int ptrace_setoptions(struct task_struct *child, unsigned long data) | |||
585 | return -EINVAL; | 585 | return -EINVAL; |
586 | 586 | ||
587 | if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) { | 587 | if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) { |
588 | if (!config_enabled(CONFIG_CHECKPOINT_RESTORE) || | 588 | if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) || |
589 | !config_enabled(CONFIG_SECCOMP)) | 589 | !IS_ENABLED(CONFIG_SECCOMP)) |
590 | return -EINVAL; | 590 | return -EINVAL; |
591 | 591 | ||
592 | if (!capable(CAP_SYS_ADMIN)) | 592 | if (!capable(CAP_SYS_ADMIN)) |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 54d15eb2b701..ef6c6c3f9d8a 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
@@ -347,7 +347,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog) | |||
347 | { | 347 | { |
348 | struct seccomp_filter *sfilter; | 348 | struct seccomp_filter *sfilter; |
349 | int ret; | 349 | int ret; |
350 | const bool save_orig = config_enabled(CONFIG_CHECKPOINT_RESTORE); | 350 | const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE); |
351 | 351 | ||
352 | if (fprog->len == 0 || fprog->len > BPF_MAXINSNS) | 352 | if (fprog->len == 0 || fprog->len > BPF_MAXINSNS) |
353 | return ERR_PTR(-EINVAL); | 353 | return ERR_PTR(-EINVAL); |
@@ -542,7 +542,7 @@ void secure_computing_strict(int this_syscall) | |||
542 | { | 542 | { |
543 | int mode = current->seccomp.mode; | 543 | int mode = current->seccomp.mode; |
544 | 544 | ||
545 | if (config_enabled(CONFIG_CHECKPOINT_RESTORE) && | 545 | if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) && |
546 | unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) | 546 | unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) |
547 | return; | 547 | return; |
548 | 548 | ||
@@ -655,7 +655,7 @@ int __secure_computing(const struct seccomp_data *sd) | |||
655 | int mode = current->seccomp.mode; | 655 | int mode = current->seccomp.mode; |
656 | int this_syscall; | 656 | int this_syscall; |
657 | 657 | ||
658 | if (config_enabled(CONFIG_CHECKPOINT_RESTORE) && | 658 | if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) && |
659 | unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) | 659 | unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) |
660 | return 0; | 660 | return 0; |
661 | 661 | ||
diff --git a/lib/dma-noop.c b/lib/dma-noop.c index 72145646857e..3d766e78fbe2 100644 --- a/lib/dma-noop.c +++ b/lib/dma-noop.c | |||
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | static void *dma_noop_alloc(struct device *dev, size_t size, | 11 | static void *dma_noop_alloc(struct device *dev, size_t size, |
12 | dma_addr_t *dma_handle, gfp_t gfp, | 12 | dma_addr_t *dma_handle, gfp_t gfp, |
13 | struct dma_attrs *attrs) | 13 | unsigned long attrs) |
14 | { | 14 | { |
15 | void *ret; | 15 | void *ret; |
16 | 16 | ||
@@ -22,7 +22,7 @@ static void *dma_noop_alloc(struct device *dev, size_t size, | |||
22 | 22 | ||
23 | static void dma_noop_free(struct device *dev, size_t size, | 23 | static void dma_noop_free(struct device *dev, size_t size, |
24 | void *cpu_addr, dma_addr_t dma_addr, | 24 | void *cpu_addr, dma_addr_t dma_addr, |
25 | struct dma_attrs *attrs) | 25 | unsigned long attrs) |
26 | { | 26 | { |
27 | free_pages((unsigned long)cpu_addr, get_order(size)); | 27 | free_pages((unsigned long)cpu_addr, get_order(size)); |
28 | } | 28 | } |
@@ -30,13 +30,14 @@ static void dma_noop_free(struct device *dev, size_t size, | |||
30 | static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page, | 30 | static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page, |
31 | unsigned long offset, size_t size, | 31 | unsigned long offset, size_t size, |
32 | enum dma_data_direction dir, | 32 | enum dma_data_direction dir, |
33 | struct dma_attrs *attrs) | 33 | unsigned long attrs) |
34 | { | 34 | { |
35 | return page_to_phys(page) + offset; | 35 | return page_to_phys(page) + offset; |
36 | } | 36 | } |
37 | 37 | ||
38 | static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents, | 38 | static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents, |
39 | enum dma_data_direction dir, struct dma_attrs *attrs) | 39 | enum dma_data_direction dir, |
40 | unsigned long attrs) | ||
40 | { | 41 | { |
41 | int i; | 42 | int i; |
42 | struct scatterlist *sg; | 43 | struct scatterlist *sg; |
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index fe42b6ec3f0c..da796e2dc4f5 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
@@ -188,6 +188,13 @@ static int ddebug_change(const struct ddebug_query *query, | |||
188 | newflags = (dp->flags & mask) | flags; | 188 | newflags = (dp->flags & mask) | flags; |
189 | if (newflags == dp->flags) | 189 | if (newflags == dp->flags) |
190 | continue; | 190 | continue; |
191 | #ifdef HAVE_JUMP_LABEL | ||
192 | if (dp->flags & _DPRINTK_FLAGS_PRINT) { | ||
193 | if (!(flags & _DPRINTK_FLAGS_PRINT)) | ||
194 | static_branch_disable(&dp->key.dd_key_true); | ||
195 | } else if (flags & _DPRINTK_FLAGS_PRINT) | ||
196 | static_branch_enable(&dp->key.dd_key_true); | ||
197 | #endif | ||
191 | dp->flags = newflags; | 198 | dp->flags = newflags; |
192 | vpr_info("changed %s:%d [%s]%s =%s\n", | 199 | vpr_info("changed %s:%d [%s]%s =%s\n", |
193 | trim_prefix(dp->filename), dp->lineno, | 200 | trim_prefix(dp->filename), dp->lineno, |
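The ddebug_change() hunk above keeps the per-callsite jump label in sync with the _DPRINTK_FLAGS_PRINT bit: the static branch is enabled when printing turns on, disabled when it turns off, and left alone otherwise. The userspace mock below reproduces just that transition logic; the bool branch_on stands in for static_branch_enable()/static_branch_disable(), and struct mock_ddebug is a made-up stand-in for struct _ddebug.

	#include <assert.h>
	#include <stdbool.h>

	#define _DPRINTK_FLAGS_PRINT 0x1

	struct mock_ddebug { unsigned int flags; bool branch_on; };

	static void mock_change(struct mock_ddebug *dp, unsigned int newflags)
	{
		if (dp->flags & _DPRINTK_FLAGS_PRINT) {
			if (!(newflags & _DPRINTK_FLAGS_PRINT))
				dp->branch_on = false;   /* static_branch_disable() */
		} else if (newflags & _DPRINTK_FLAGS_PRINT) {
			dp->branch_on = true;            /* static_branch_enable() */
		}
		dp->flags = newflags;
	}

	int main(void)
	{
		struct mock_ddebug dp = { 0, false };

		mock_change(&dp, _DPRINTK_FLAGS_PRINT);   /* off -> on: enable branch */
		assert(dp.branch_on);
		mock_change(&dp, _DPRINTK_FLAGS_PRINT);   /* on -> on: no toggle */
		assert(dp.branch_on);
		mock_change(&dp, 0);                      /* on -> off: disable branch */
		assert(!dp.branch_on);
		return 0;
	}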
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 76f29ecba8f4..22e13a0e19d7 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -738,7 +738,7 @@ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, | |||
738 | dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, | 738 | dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, |
739 | unsigned long offset, size_t size, | 739 | unsigned long offset, size_t size, |
740 | enum dma_data_direction dir, | 740 | enum dma_data_direction dir, |
741 | struct dma_attrs *attrs) | 741 | unsigned long attrs) |
742 | { | 742 | { |
743 | phys_addr_t map, phys = page_to_phys(page) + offset; | 743 | phys_addr_t map, phys = page_to_phys(page) + offset; |
744 | dma_addr_t dev_addr = phys_to_dma(dev, phys); | 744 | dma_addr_t dev_addr = phys_to_dma(dev, phys); |
@@ -807,7 +807,7 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, | |||
807 | 807 | ||
808 | void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, | 808 | void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, |
809 | size_t size, enum dma_data_direction dir, | 809 | size_t size, enum dma_data_direction dir, |
810 | struct dma_attrs *attrs) | 810 | unsigned long attrs) |
811 | { | 811 | { |
812 | unmap_single(hwdev, dev_addr, size, dir); | 812 | unmap_single(hwdev, dev_addr, size, dir); |
813 | } | 813 | } |
@@ -877,7 +877,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_device); | |||
877 | */ | 877 | */ |
878 | int | 878 | int |
879 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | 879 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, |
880 | enum dma_data_direction dir, struct dma_attrs *attrs) | 880 | enum dma_data_direction dir, unsigned long attrs) |
881 | { | 881 | { |
882 | struct scatterlist *sg; | 882 | struct scatterlist *sg; |
883 | int i; | 883 | int i; |
@@ -914,7 +914,7 @@ int | |||
914 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 914 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, |
915 | enum dma_data_direction dir) | 915 | enum dma_data_direction dir) |
916 | { | 916 | { |
917 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); | 917 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0); |
918 | } | 918 | } |
919 | EXPORT_SYMBOL(swiotlb_map_sg); | 919 | EXPORT_SYMBOL(swiotlb_map_sg); |
920 | 920 | ||
@@ -924,7 +924,8 @@ EXPORT_SYMBOL(swiotlb_map_sg); | |||
924 | */ | 924 | */ |
925 | void | 925 | void |
926 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 926 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
927 | int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) | 927 | int nelems, enum dma_data_direction dir, |
928 | unsigned long attrs) | ||
928 | { | 929 | { |
929 | struct scatterlist *sg; | 930 | struct scatterlist *sg; |
930 | int i; | 931 | int i; |
@@ -941,7 +942,7 @@ void | |||
941 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 942 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, |
942 | enum dma_data_direction dir) | 943 | enum dma_data_direction dir) |
943 | { | 944 | { |
944 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); | 945 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0); |
945 | } | 946 | } |
946 | EXPORT_SYMBOL(swiotlb_unmap_sg); | 947 | EXPORT_SYMBOL(swiotlb_unmap_sg); |
947 | 948 | ||
diff --git a/net/wireless/chan.c b/net/wireless/chan.c index da49c0b1fd32..b0e11b6dc994 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c | |||
@@ -715,7 +715,7 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy, | |||
715 | 715 | ||
716 | ASSERT_RTNL(); | 716 | ASSERT_RTNL(); |
717 | 717 | ||
718 | if (!config_enabled(CONFIG_CFG80211_REG_RELAX_NO_IR) || | 718 | if (!IS_ENABLED(CONFIG_CFG80211_REG_RELAX_NO_IR) || |
719 | !(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR)) | 719 | !(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR)) |
720 | return false; | 720 | return false; |
721 | 721 | ||
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c index c3108bb15789..e3c0a40909f7 100644 --- a/samples/kprobes/jprobe_example.c +++ b/samples/kprobes/jprobe_example.c | |||
@@ -48,10 +48,10 @@ static int __init jprobe_init(void) | |||
48 | 48 | ||
49 | ret = register_jprobe(&my_jprobe); | 49 | ret = register_jprobe(&my_jprobe); |
50 | if (ret < 0) { | 50 | if (ret < 0) { |
51 | printk(KERN_INFO "register_jprobe failed, returned %d\n", ret); | 51 | pr_err("register_jprobe failed, returned %d\n", ret); |
52 | return -1; | 52 | return -1; |
53 | } | 53 | } |
54 | printk(KERN_INFO "Planted jprobe at %p, handler addr %p\n", | 54 | pr_info("Planted jprobe at %p, handler addr %p\n", |
55 | my_jprobe.kp.addr, my_jprobe.entry); | 55 | my_jprobe.kp.addr, my_jprobe.entry); |
56 | return 0; | 56 | return 0; |
57 | } | 57 | } |
@@ -59,7 +59,7 @@ static int __init jprobe_init(void) | |||
59 | static void __exit jprobe_exit(void) | 59 | static void __exit jprobe_exit(void) |
60 | { | 60 | { |
61 | unregister_jprobe(&my_jprobe); | 61 | unregister_jprobe(&my_jprobe); |
62 | printk(KERN_INFO "jprobe at %p unregistered\n", my_jprobe.kp.addr); | 62 | pr_info("jprobe at %p unregistered\n", my_jprobe.kp.addr); |
63 | } | 63 | } |
64 | 64 | ||
65 | module_init(jprobe_init) | 65 | module_init(jprobe_init) |
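The samples/ conversions in this and the next two files replace printk(KERN_INFO ...) and printk(KERN_INFO ...) used for errors with pr_info()/pr_err(), which in the kernel expand to printk() with the matching KERN_* level plus any per-file pr_fmt() prefix. A rough userspace approximation follows; printf stands in for printk, and the pr_fmt() prefix shown is illustrative (the sample modules themselves do not define one).

	#include <stdio.h>

	/* per-file prefix, illustrative only */
	#define pr_fmt(fmt) "kprobe_example: " fmt
	/* simplified stand-ins for the kernel's pr_info()/pr_err() */
	#define pr_info(fmt, ...) printf("<info> " pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_err(fmt, ...)  printf("<err>  " pr_fmt(fmt), ##__VA_ARGS__)

	int main(void)
	{
		int ret = -22;

		pr_err("register_kprobe failed, returned %d\n", ret);
		pr_info("Planted kprobe at %p\n", (void *)&ret);
		return 0;
	}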
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index f3b61b4ee09c..88b3e2d227ae 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c | |||
@@ -27,23 +27,19 @@ static struct kprobe kp = { | |||
27 | static int handler_pre(struct kprobe *p, struct pt_regs *regs) | 27 | static int handler_pre(struct kprobe *p, struct pt_regs *regs) |
28 | { | 28 | { |
29 | #ifdef CONFIG_X86 | 29 | #ifdef CONFIG_X86 |
30 | printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, ip = %lx," | 30 | pr_info("<%s> pre_handler: p->addr = 0x%p, ip = %lx, flags = 0x%lx\n", |
31 | " flags = 0x%lx\n", | ||
32 | p->symbol_name, p->addr, regs->ip, regs->flags); | 31 | p->symbol_name, p->addr, regs->ip, regs->flags); |
33 | #endif | 32 | #endif |
34 | #ifdef CONFIG_PPC | 33 | #ifdef CONFIG_PPC |
35 | printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, nip = 0x%lx," | 34 | pr_info("<%s> pre_handler: p->addr = 0x%p, nip = 0x%lx, msr = 0x%lx\n", |
36 | " msr = 0x%lx\n", | ||
37 | p->symbol_name, p->addr, regs->nip, regs->msr); | 35 | p->symbol_name, p->addr, regs->nip, regs->msr); |
38 | #endif | 36 | #endif |
39 | #ifdef CONFIG_MIPS | 37 | #ifdef CONFIG_MIPS |
40 | printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, epc = 0x%lx," | 38 | pr_info("<%s> pre_handler: p->addr = 0x%p, epc = 0x%lx, status = 0x%lx\n", |
41 | " status = 0x%lx\n", | ||
42 | p->symbol_name, p->addr, regs->cp0_epc, regs->cp0_status); | 39 | p->symbol_name, p->addr, regs->cp0_epc, regs->cp0_status); |
43 | #endif | 40 | #endif |
44 | #ifdef CONFIG_TILEGX | 41 | #ifdef CONFIG_TILEGX |
45 | printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx," | 42 | pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx, ex1 = 0x%lx\n", |
46 | " ex1 = 0x%lx\n", | ||
47 | p->symbol_name, p->addr, regs->pc, regs->ex1); | 43 | p->symbol_name, p->addr, regs->pc, regs->ex1); |
48 | #endif | 44 | #endif |
49 | #ifdef CONFIG_ARM64 | 45 | #ifdef CONFIG_ARM64 |
@@ -61,19 +57,19 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs, | |||
61 | unsigned long flags) | 57 | unsigned long flags) |
62 | { | 58 | { |
63 | #ifdef CONFIG_X86 | 59 | #ifdef CONFIG_X86 |
64 | printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, flags = 0x%lx\n", | 60 | pr_info("<%s> post_handler: p->addr = 0x%p, flags = 0x%lx\n", |
65 | p->symbol_name, p->addr, regs->flags); | 61 | p->symbol_name, p->addr, regs->flags); |
66 | #endif | 62 | #endif |
67 | #ifdef CONFIG_PPC | 63 | #ifdef CONFIG_PPC |
68 | printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, msr = 0x%lx\n", | 64 | pr_info("<%s> post_handler: p->addr = 0x%p, msr = 0x%lx\n", |
69 | p->symbol_name, p->addr, regs->msr); | 65 | p->symbol_name, p->addr, regs->msr); |
70 | #endif | 66 | #endif |
71 | #ifdef CONFIG_MIPS | 67 | #ifdef CONFIG_MIPS |
72 | printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n", | 68 | pr_info("<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n", |
73 | p->symbol_name, p->addr, regs->cp0_status); | 69 | p->symbol_name, p->addr, regs->cp0_status); |
74 | #endif | 70 | #endif |
75 | #ifdef CONFIG_TILEGX | 71 | #ifdef CONFIG_TILEGX |
76 | printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n", | 72 | pr_info("<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n", |
77 | p->symbol_name, p->addr, regs->ex1); | 73 | p->symbol_name, p->addr, regs->ex1); |
78 | #endif | 74 | #endif |
79 | #ifdef CONFIG_ARM64 | 75 | #ifdef CONFIG_ARM64 |
@@ -89,8 +85,7 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs, | |||
89 | */ | 85 | */ |
90 | static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr) | 86 | static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr) |
91 | { | 87 | { |
92 | printk(KERN_INFO "fault_handler: p->addr = 0x%p, trap #%dn", | 88 | pr_info("fault_handler: p->addr = 0x%p, trap #%dn", p->addr, trapnr); |
93 | p->addr, trapnr); | ||
94 | /* Return 0 because we don't handle the fault. */ | 89 | /* Return 0 because we don't handle the fault. */ |
95 | return 0; | 90 | return 0; |
96 | } | 91 | } |
@@ -104,17 +99,17 @@ static int __init kprobe_init(void) | |||
104 | 99 | ||
105 | ret = register_kprobe(&kp); | 100 | ret = register_kprobe(&kp); |
106 | if (ret < 0) { | 101 | if (ret < 0) { |
107 | printk(KERN_INFO "register_kprobe failed, returned %d\n", ret); | 102 | pr_err("register_kprobe failed, returned %d\n", ret); |
108 | return ret; | 103 | return ret; |
109 | } | 104 | } |
110 | printk(KERN_INFO "Planted kprobe at %p\n", kp.addr); | 105 | pr_info("Planted kprobe at %p\n", kp.addr); |
111 | return 0; | 106 | return 0; |
112 | } | 107 | } |
113 | 108 | ||
114 | static void __exit kprobe_exit(void) | 109 | static void __exit kprobe_exit(void) |
115 | { | 110 | { |
116 | unregister_kprobe(&kp); | 111 | unregister_kprobe(&kp); |
117 | printk(KERN_INFO "kprobe at %p unregistered\n", kp.addr); | 112 | pr_info("kprobe at %p unregistered\n", kp.addr); |
118 | } | 113 | } |
119 | 114 | ||
120 | module_init(kprobe_init) | 115 | module_init(kprobe_init) |
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index ebb1d1aed547..7f9060f435cd 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c | |||
@@ -55,14 +55,14 @@ static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs) | |||
55 | */ | 55 | */ |
56 | static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs) | 56 | static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs) |
57 | { | 57 | { |
58 | int retval = regs_return_value(regs); | 58 | unsigned long retval = regs_return_value(regs); |
59 | struct my_data *data = (struct my_data *)ri->data; | 59 | struct my_data *data = (struct my_data *)ri->data; |
60 | s64 delta; | 60 | s64 delta; |
61 | ktime_t now; | 61 | ktime_t now; |
62 | 62 | ||
63 | now = ktime_get(); | 63 | now = ktime_get(); |
64 | delta = ktime_to_ns(ktime_sub(now, data->entry_stamp)); | 64 | delta = ktime_to_ns(ktime_sub(now, data->entry_stamp)); |
65 | printk(KERN_INFO "%s returned %d and took %lld ns to execute\n", | 65 | pr_info("%s returned %lu and took %lld ns to execute\n", |
66 | func_name, retval, (long long)delta); | 66 | func_name, retval, (long long)delta); |
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
@@ -82,11 +82,10 @@ static int __init kretprobe_init(void) | |||
82 | my_kretprobe.kp.symbol_name = func_name; | 82 | my_kretprobe.kp.symbol_name = func_name; |
83 | ret = register_kretprobe(&my_kretprobe); | 83 | ret = register_kretprobe(&my_kretprobe); |
84 | if (ret < 0) { | 84 | if (ret < 0) { |
85 | printk(KERN_INFO "register_kretprobe failed, returned %d\n", | 85 | pr_err("register_kretprobe failed, returned %d\n", ret); |
86 | ret); | ||
87 | return -1; | 86 | return -1; |
88 | } | 87 | } |
89 | printk(KERN_INFO "Planted return probe at %s: %p\n", | 88 | pr_info("Planted return probe at %s: %p\n", |
90 | my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr); | 89 | my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr); |
91 | return 0; | 90 | return 0; |
92 | } | 91 | } |
@@ -94,11 +93,10 @@ static int __init kretprobe_init(void) | |||
94 | static void __exit kretprobe_exit(void) | 93 | static void __exit kretprobe_exit(void) |
95 | { | 94 | { |
96 | unregister_kretprobe(&my_kretprobe); | 95 | unregister_kretprobe(&my_kretprobe); |
97 | printk(KERN_INFO "kretprobe at %p unregistered\n", | 96 | pr_info("kretprobe at %p unregistered\n", my_kretprobe.kp.addr); |
98 | my_kretprobe.kp.addr); | ||
99 | 97 | ||
100 | /* nmissed > 0 suggests that maxactive was set too low. */ | 98 | /* nmissed > 0 suggests that maxactive was set too low. */ |
101 | printk(KERN_INFO "Missed probing %d instances of %s\n", | 99 | pr_info("Missed probing %d instances of %s\n", |
102 | my_kretprobe.nmissed, my_kretprobe.kp.symbol_name); | 100 | my_kretprobe.nmissed, my_kretprobe.kp.symbol_name); |
103 | } | 101 | } |
104 | 102 | ||