diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-12 16:30:06 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-12 16:30:06 -0400 |
commit | 52269718dc2cf2585d7a2828f31d46ef46e68000 (patch) | |
tree | 9815c6cbaa8c2e3247b6356725c150831dfd4d4e | |
parent | ae71948f398eb2572148006bf34f0c6d934206cb (diff) | |
parent | d35b0996fef3bfe76665e87bbff7d95c6807350a (diff) |
Merge tag 'dma-mapping-4.14' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- removal of the old dma_alloc_noncoherent interface
- remove unused flags to dma_declare_coherent_memory
- restrict OF DMA configuration to specific physical busses
- use the iommu mailing list for dma-mapping questions and patches
* tag 'dma-mapping-4.14' of git://git.infradead.org/users/hch/dma-mapping:
dma-coherent: fix dma_declare_coherent_memory() logic error
ARM: imx: mx31moboard: Remove unused 'dma' variable
dma-coherent: remove an unused variable
MAINTAINERS: use the iommu list for the dma-mapping subsystem
dma-coherent: remove the DMA_MEMORY_MAP and DMA_MEMORY_IO flags
dma-coherent: remove the DMA_MEMORY_INCLUDES_CHILDREN flag
of: restrict DMA configuration
dma-mapping: remove dma_alloc_noncoherent and dma_free_noncoherent
i825xx: switch to dma_alloc_attrs
au1000_eth: switch to dma_alloc_attrs
sgiseeq: switch to dma_alloc_attrs
dma-mapping: reduce dma_mapping_error inline bloat
-rw-r--r-- | Documentation/DMA-API.txt | 55 | ||||
-rw-r--r-- | MAINTAINERS | 2 | ||||
-rw-r--r-- | arch/arm/mach-imx/mach-imx27_visstrim_m10.c | 44 | ||||
-rw-r--r-- | arch/arm/mach-imx/mach-mx31moboard.c | 12 | ||||
-rw-r--r-- | arch/metag/include/asm/dma-mapping.h | 2 | ||||
-rw-r--r-- | arch/nios2/include/asm/dma-mapping.h | 2 | ||||
-rw-r--r-- | arch/sh/drivers/pci/fixups-dreamcast.c | 3 | ||||
-rw-r--r-- | arch/tile/include/asm/dma-mapping.h | 4 | ||||
-rw-r--r-- | drivers/base/dma-coherent.c | 85 | ||||
-rw-r--r-- | drivers/base/dma-mapping.c | 7 | ||||
-rw-r--r-- | drivers/char/virtio_console.c | 3 | ||||
-rw-r--r-- | drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c | 5 | ||||
-rw-r--r-- | drivers/net/ethernet/amd/au1000_eth.c | 18 | ||||
-rw-r--r-- | drivers/net/ethernet/i825xx/lasi_82596.c | 6 | ||||
-rw-r--r-- | drivers/net/ethernet/i825xx/lib82596.c | 9 | ||||
-rw-r--r-- | drivers/net/ethernet/i825xx/sni_82596.c | 6 | ||||
-rw-r--r-- | drivers/net/ethernet/seeq/sgiseeq.c | 8 | ||||
-rw-r--r-- | drivers/of/device.c | 48 | ||||
-rw-r--r-- | drivers/scsi/NCR_Q720.c | 3 | ||||
-rw-r--r-- | drivers/usb/host/ohci-sm501.c | 7 | ||||
-rw-r--r-- | drivers/usb/host/ohci-tmio.c | 9 | ||||
-rw-r--r-- | include/linux/dma-mapping.h | 28 |
22 files changed, 156 insertions, 210 deletions
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 45b29326d719..ac66ae2509a9 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt | |||
@@ -515,14 +515,15 @@ API at all. | |||
515 | :: | 515 | :: |
516 | 516 | ||
517 | void * | 517 | void * |
518 | dma_alloc_noncoherent(struct device *dev, size_t size, | 518 | dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, |
519 | dma_addr_t *dma_handle, gfp_t flag) | 519 | gfp_t flag, unsigned long attrs) |
520 | 520 | ||
521 | Identical to dma_alloc_coherent() except that the platform will | 521 | Identical to dma_alloc_coherent() except that when the |
522 | choose to return either consistent or non-consistent memory as it sees | 522 | DMA_ATTR_NON_CONSISTENT flags is passed in the attrs argument, the |
523 | fit. By using this API, you are guaranteeing to the platform that you | 523 | platform will choose to return either consistent or non-consistent memory |
524 | have all the correct and necessary sync points for this memory in the | 524 | as it sees fit. By using this API, you are guaranteeing to the platform |
525 | driver should it choose to return non-consistent memory. | 525 | that you have all the correct and necessary sync points for this memory |
526 | in the driver should it choose to return non-consistent memory. | ||
526 | 527 | ||
527 | Note: where the platform can return consistent memory, it will | 528 | Note: where the platform can return consistent memory, it will |
528 | guarantee that the sync points become nops. | 529 | guarantee that the sync points become nops. |
@@ -535,12 +536,13 @@ that simply cannot make consistent memory. | |||
535 | :: | 536 | :: |
536 | 537 | ||
537 | void | 538 | void |
538 | dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, | 539 | dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, |
539 | dma_addr_t dma_handle) | 540 | dma_addr_t dma_handle, unsigned long attrs) |
540 | 541 | ||
541 | Free memory allocated by the nonconsistent API. All parameters must | 542 | Free memory allocated by dma_alloc_attrs(). All common |
542 | be identical to those passed in (and returned by | 543 | parameters must be identical to those otherwise passed to dma_free_coherent(), |
543 | dma_alloc_noncoherent()). | 544 | and the attrs argument must be identical to the attrs passed to |
545 | dma_alloc_attrs(). | ||
544 | 546 | ||
545 | :: | 547 | :: |
546 | 548 | ||
@@ -564,8 +566,8 @@ memory or doing partial flushes. | |||
564 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 566 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
565 | enum dma_data_direction direction) | 567 | enum dma_data_direction direction) |
566 | 568 | ||
567 | Do a partial sync of memory that was allocated by | 569 | Do a partial sync of memory that was allocated by dma_alloc_attrs() with |
568 | dma_alloc_noncoherent(), starting at virtual address vaddr and | 570 | the DMA_ATTR_NON_CONSISTENT flag starting at virtual address vaddr and |
569 | continuing on for size. Again, you *must* observe the cache line | 571 | continuing on for size. Again, you *must* observe the cache line |
570 | boundaries when doing this. | 572 | boundaries when doing this. |
571 | 573 | ||
@@ -590,34 +592,11 @@ size is the size of the area (must be multiples of PAGE_SIZE). | |||
590 | 592 | ||
591 | flags can be ORed together and are: | 593 | flags can be ORed together and are: |
592 | 594 | ||
593 | - DMA_MEMORY_MAP - request that the memory returned from | ||
594 | dma_alloc_coherent() be directly writable. | ||
595 | |||
596 | - DMA_MEMORY_IO - request that the memory returned from | ||
597 | dma_alloc_coherent() be addressable using read()/write()/memcpy_toio() etc. | ||
598 | |||
599 | One or both of these flags must be present. | ||
600 | |||
601 | - DMA_MEMORY_INCLUDES_CHILDREN - make the declared memory be allocated by | ||
602 | dma_alloc_coherent of any child devices of this one (for memory residing | ||
603 | on a bridge). | ||
604 | |||
605 | - DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions. | 595 | - DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions. |
606 | Do not allow dma_alloc_coherent() to fall back to system memory when | 596 | Do not allow dma_alloc_coherent() to fall back to system memory when |
607 | it's out of memory in the declared region. | 597 | it's out of memory in the declared region. |
608 | 598 | ||
609 | The return value will be either DMA_MEMORY_MAP or DMA_MEMORY_IO and | 599 | As a simplification for the platforms, only *one* such region of |
610 | must correspond to a passed in flag (i.e. no returning DMA_MEMORY_IO | ||
611 | if only DMA_MEMORY_MAP were passed in) for success or zero for | ||
612 | failure. | ||
613 | |||
614 | Note, for DMA_MEMORY_IO returns, all subsequent memory returned by | ||
615 | dma_alloc_coherent() may no longer be accessed directly, but instead | ||
616 | must be accessed using the correct bus functions. If your driver | ||
617 | isn't prepared to handle this contingency, it should not specify | ||
618 | DMA_MEMORY_IO in the input flags. | ||
619 | |||
620 | As a simplification for the platforms, only **one** such region of | ||
621 | memory may be declared per device. | 600 | memory may be declared per device. |
622 | 601 | ||
623 | For reasons of efficiency, most platforms choose to track the declared | 602 | For reasons of efficiency, most platforms choose to track the declared |
diff --git a/MAINTAINERS b/MAINTAINERS index e57a4eaec077..f46a3225e398 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -4219,7 +4219,7 @@ DMA MAPPING HELPERS | |||
4219 | M: Christoph Hellwig <hch@lst.de> | 4219 | M: Christoph Hellwig <hch@lst.de> |
4220 | M: Marek Szyprowski <m.szyprowski@samsung.com> | 4220 | M: Marek Szyprowski <m.szyprowski@samsung.com> |
4221 | R: Robin Murphy <robin.murphy@arm.com> | 4221 | R: Robin Murphy <robin.murphy@arm.com> |
4222 | L: linux-kernel@vger.kernel.org | 4222 | L: iommu@lists.linux-foundation.org |
4223 | T: git git://git.infradead.org/users/hch/dma-mapping.git | 4223 | T: git git://git.infradead.org/users/hch/dma-mapping.git |
4224 | W: http://git.infradead.org/users/hch/dma-mapping.git | 4224 | W: http://git.infradead.org/users/hch/dma-mapping.git |
4225 | S: Supported | 4225 | S: Supported |
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c index dd75a4756761..5169dfba9718 100644 --- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c +++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c | |||
@@ -245,7 +245,6 @@ static phys_addr_t mx2_camera_base __initdata; | |||
245 | static void __init visstrim_analog_camera_init(void) | 245 | static void __init visstrim_analog_camera_init(void) |
246 | { | 246 | { |
247 | struct platform_device *pdev; | 247 | struct platform_device *pdev; |
248 | int dma; | ||
249 | 248 | ||
250 | gpio_set_value(TVP5150_PWDN, 1); | 249 | gpio_set_value(TVP5150_PWDN, 1); |
251 | ndelay(1); | 250 | ndelay(1); |
@@ -258,12 +257,9 @@ static void __init visstrim_analog_camera_init(void) | |||
258 | if (IS_ERR(pdev)) | 257 | if (IS_ERR(pdev)) |
259 | return; | 258 | return; |
260 | 259 | ||
261 | dma = dma_declare_coherent_memory(&pdev->dev, | 260 | dma_declare_coherent_memory(&pdev->dev, mx2_camera_base, |
262 | mx2_camera_base, mx2_camera_base, | 261 | mx2_camera_base, MX2_CAMERA_BUF_SIZE, |
263 | MX2_CAMERA_BUF_SIZE, | 262 | DMA_MEMORY_EXCLUSIVE); |
264 | DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); | ||
265 | if (!(dma & DMA_MEMORY_MAP)) | ||
266 | return; | ||
267 | } | 263 | } |
268 | 264 | ||
269 | static void __init visstrim_reserve(void) | 265 | static void __init visstrim_reserve(void) |
@@ -444,16 +440,13 @@ static const struct imx_ssi_platform_data visstrim_m10_ssi_pdata __initconst = { | |||
444 | static void __init visstrim_coda_init(void) | 440 | static void __init visstrim_coda_init(void) |
445 | { | 441 | { |
446 | struct platform_device *pdev; | 442 | struct platform_device *pdev; |
447 | int dma; | ||
448 | 443 | ||
449 | pdev = imx27_add_coda(); | 444 | pdev = imx27_add_coda(); |
450 | dma = dma_declare_coherent_memory(&pdev->dev, | 445 | dma_declare_coherent_memory(&pdev->dev, |
451 | mx2_camera_base + MX2_CAMERA_BUF_SIZE, | 446 | mx2_camera_base + MX2_CAMERA_BUF_SIZE, |
452 | mx2_camera_base + MX2_CAMERA_BUF_SIZE, | 447 | mx2_camera_base + MX2_CAMERA_BUF_SIZE, |
453 | MX2_CAMERA_BUF_SIZE, | 448 | MX2_CAMERA_BUF_SIZE, |
454 | DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); | 449 | DMA_MEMORY_EXCLUSIVE); |
455 | if (!(dma & DMA_MEMORY_MAP)) | ||
456 | return; | ||
457 | } | 450 | } |
458 | 451 | ||
459 | /* DMA deinterlace */ | 452 | /* DMA deinterlace */ |
@@ -466,24 +459,21 @@ static void __init visstrim_deinterlace_init(void) | |||
466 | { | 459 | { |
467 | int ret = -ENOMEM; | 460 | int ret = -ENOMEM; |
468 | struct platform_device *pdev = &visstrim_deinterlace; | 461 | struct platform_device *pdev = &visstrim_deinterlace; |
469 | int dma; | ||
470 | 462 | ||
471 | ret = platform_device_register(pdev); | 463 | ret = platform_device_register(pdev); |
472 | 464 | ||
473 | dma = dma_declare_coherent_memory(&pdev->dev, | 465 | dma_declare_coherent_memory(&pdev->dev, |
474 | mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, | 466 | mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, |
475 | mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, | 467 | mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, |
476 | MX2_CAMERA_BUF_SIZE, | 468 | MX2_CAMERA_BUF_SIZE, |
477 | DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); | 469 | DMA_MEMORY_EXCLUSIVE); |
478 | if (!(dma & DMA_MEMORY_MAP)) | ||
479 | return; | ||
480 | } | 470 | } |
481 | 471 | ||
482 | /* Emma-PrP for format conversion */ | 472 | /* Emma-PrP for format conversion */ |
483 | static void __init visstrim_emmaprp_init(void) | 473 | static void __init visstrim_emmaprp_init(void) |
484 | { | 474 | { |
485 | struct platform_device *pdev; | 475 | struct platform_device *pdev; |
486 | int dma; | 476 | int ret; |
487 | 477 | ||
488 | pdev = imx27_add_mx2_emmaprp(); | 478 | pdev = imx27_add_mx2_emmaprp(); |
489 | if (IS_ERR(pdev)) | 479 | if (IS_ERR(pdev)) |
@@ -493,11 +483,11 @@ static void __init visstrim_emmaprp_init(void) | |||
493 | * Use the same memory area as the analog camera since both | 483 | * Use the same memory area as the analog camera since both |
494 | * devices are, by nature, exclusive. | 484 | * devices are, by nature, exclusive. |
495 | */ | 485 | */ |
496 | dma = dma_declare_coherent_memory(&pdev->dev, | 486 | ret = dma_declare_coherent_memory(&pdev->dev, |
497 | mx2_camera_base, mx2_camera_base, | 487 | mx2_camera_base, mx2_camera_base, |
498 | MX2_CAMERA_BUF_SIZE, | 488 | MX2_CAMERA_BUF_SIZE, |
499 | DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); | 489 | DMA_MEMORY_EXCLUSIVE); |
500 | if (!(dma & DMA_MEMORY_MAP)) | 490 | if (ret) |
501 | pr_err("Failed to declare memory for emmaprp\n"); | 491 | pr_err("Failed to declare memory for emmaprp\n"); |
502 | } | 492 | } |
503 | 493 | ||
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index bde9a9af6714..7716f83aecdd 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c | |||
@@ -475,7 +475,7 @@ static phys_addr_t mx3_camera_base __initdata; | |||
475 | 475 | ||
476 | static int __init mx31moboard_init_cam(void) | 476 | static int __init mx31moboard_init_cam(void) |
477 | { | 477 | { |
478 | int dma, ret = -ENOMEM; | 478 | int ret; |
479 | struct platform_device *pdev; | 479 | struct platform_device *pdev; |
480 | 480 | ||
481 | imx31_add_ipu_core(); | 481 | imx31_add_ipu_core(); |
@@ -484,11 +484,11 @@ static int __init mx31moboard_init_cam(void) | |||
484 | if (IS_ERR(pdev)) | 484 | if (IS_ERR(pdev)) |
485 | return PTR_ERR(pdev); | 485 | return PTR_ERR(pdev); |
486 | 486 | ||
487 | dma = dma_declare_coherent_memory(&pdev->dev, | 487 | ret = dma_declare_coherent_memory(&pdev->dev, |
488 | mx3_camera_base, mx3_camera_base, | 488 | mx3_camera_base, mx3_camera_base, |
489 | MX3_CAMERA_BUF_SIZE, | 489 | MX3_CAMERA_BUF_SIZE, |
490 | DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); | 490 | DMA_MEMORY_EXCLUSIVE); |
491 | if (!(dma & DMA_MEMORY_MAP)) | 491 | if (ret) |
492 | goto err; | 492 | goto err; |
493 | 493 | ||
494 | ret = platform_device_add(pdev); | 494 | ret = platform_device_add(pdev); |
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h index fad3dc3cb210..ea573be2b6d0 100644 --- a/arch/metag/include/asm/dma-mapping.h +++ b/arch/metag/include/asm/dma-mapping.h | |||
@@ -9,7 +9,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | |||
9 | } | 9 | } |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to | 12 | * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to |
13 | * do any flushing here. | 13 | * do any flushing here. |
14 | */ | 14 | */ |
15 | static inline void | 15 | static inline void |
diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h index 7b3c6f280293..f8dc62222741 100644 --- a/arch/nios2/include/asm/dma-mapping.h +++ b/arch/nios2/include/asm/dma-mapping.h | |||
@@ -18,7 +18,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | |||
18 | } | 18 | } |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to | 21 | * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to |
22 | * do any flushing here. | 22 | * do any flushing here. |
23 | */ | 23 | */ |
24 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 24 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c index 9d597f7ab8dd..48aaefd8f5d6 100644 --- a/arch/sh/drivers/pci/fixups-dreamcast.c +++ b/arch/sh/drivers/pci/fixups-dreamcast.c | |||
@@ -63,11 +63,10 @@ static void gapspci_fixup_resources(struct pci_dev *dev) | |||
63 | res.end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1; | 63 | res.end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1; |
64 | res.flags = IORESOURCE_MEM; | 64 | res.flags = IORESOURCE_MEM; |
65 | pcibios_resource_to_bus(dev->bus, ®ion, &res); | 65 | pcibios_resource_to_bus(dev->bus, ®ion, &res); |
66 | BUG_ON(!dma_declare_coherent_memory(&dev->dev, | 66 | BUG_ON(dma_declare_coherent_memory(&dev->dev, |
67 | res.start, | 67 | res.start, |
68 | region.start, | 68 | region.start, |
69 | resource_size(&res), | 69 | resource_size(&res), |
70 | DMA_MEMORY_MAP | | ||
71 | DMA_MEMORY_EXCLUSIVE)); | 70 | DMA_MEMORY_EXCLUSIVE)); |
72 | break; | 71 | break; |
73 | default: | 72 | default: |
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index bbc71a29b2c6..7061dc8af43a 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h | |||
@@ -68,8 +68,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | |||
68 | int dma_set_mask(struct device *dev, u64 mask); | 68 | int dma_set_mask(struct device *dev, u64 mask); |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * dma_alloc_noncoherent() is #defined to return coherent memory, | 71 | * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to |
72 | * so there's no need to do any flushing here. | 72 | * do any flushing here. |
73 | */ | 73 | */ |
74 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 74 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
75 | enum dma_data_direction direction) | 75 | enum dma_data_direction direction) |
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 1c152aed6b82..a39b2166b145 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c | |||
@@ -37,7 +37,7 @@ static inline dma_addr_t dma_get_device_base(struct device *dev, | |||
37 | return mem->device_base; | 37 | return mem->device_base; |
38 | } | 38 | } |
39 | 39 | ||
40 | static bool dma_init_coherent_memory( | 40 | static int dma_init_coherent_memory( |
41 | phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, | 41 | phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, |
42 | struct dma_coherent_mem **mem) | 42 | struct dma_coherent_mem **mem) |
43 | { | 43 | { |
@@ -45,25 +45,28 @@ static bool dma_init_coherent_memory( | |||
45 | void __iomem *mem_base = NULL; | 45 | void __iomem *mem_base = NULL; |
46 | int pages = size >> PAGE_SHIFT; | 46 | int pages = size >> PAGE_SHIFT; |
47 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); | 47 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); |
48 | int ret; | ||
48 | 49 | ||
49 | if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) | 50 | if (!size) { |
50 | goto out; | 51 | ret = -EINVAL; |
51 | if (!size) | ||
52 | goto out; | 52 | goto out; |
53 | } | ||
53 | 54 | ||
54 | if (flags & DMA_MEMORY_MAP) | 55 | mem_base = memremap(phys_addr, size, MEMREMAP_WC); |
55 | mem_base = memremap(phys_addr, size, MEMREMAP_WC); | 56 | if (!mem_base) { |
56 | else | 57 | ret = -EINVAL; |
57 | mem_base = ioremap(phys_addr, size); | ||
58 | if (!mem_base) | ||
59 | goto out; | 58 | goto out; |
60 | 59 | } | |
61 | dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); | 60 | dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); |
62 | if (!dma_mem) | 61 | if (!dma_mem) { |
62 | ret = -ENOMEM; | ||
63 | goto out; | 63 | goto out; |
64 | } | ||
64 | dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | 65 | dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); |
65 | if (!dma_mem->bitmap) | 66 | if (!dma_mem->bitmap) { |
67 | ret = -ENOMEM; | ||
66 | goto out; | 68 | goto out; |
69 | } | ||
67 | 70 | ||
68 | dma_mem->virt_base = mem_base; | 71 | dma_mem->virt_base = mem_base; |
69 | dma_mem->device_base = device_addr; | 72 | dma_mem->device_base = device_addr; |
@@ -73,17 +76,13 @@ static bool dma_init_coherent_memory( | |||
73 | spin_lock_init(&dma_mem->spinlock); | 76 | spin_lock_init(&dma_mem->spinlock); |
74 | 77 | ||
75 | *mem = dma_mem; | 78 | *mem = dma_mem; |
76 | return true; | 79 | return 0; |
77 | 80 | ||
78 | out: | 81 | out: |
79 | kfree(dma_mem); | 82 | kfree(dma_mem); |
80 | if (mem_base) { | 83 | if (mem_base) |
81 | if (flags & DMA_MEMORY_MAP) | 84 | memunmap(mem_base); |
82 | memunmap(mem_base); | 85 | return ret; |
83 | else | ||
84 | iounmap(mem_base); | ||
85 | } | ||
86 | return false; | ||
87 | } | 86 | } |
88 | 87 | ||
89 | static void dma_release_coherent_memory(struct dma_coherent_mem *mem) | 88 | static void dma_release_coherent_memory(struct dma_coherent_mem *mem) |
@@ -91,10 +90,7 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem) | |||
91 | if (!mem) | 90 | if (!mem) |
92 | return; | 91 | return; |
93 | 92 | ||
94 | if (mem->flags & DMA_MEMORY_MAP) | 93 | memunmap(mem->virt_base); |
95 | memunmap(mem->virt_base); | ||
96 | else | ||
97 | iounmap(mem->virt_base); | ||
98 | kfree(mem->bitmap); | 94 | kfree(mem->bitmap); |
99 | kfree(mem); | 95 | kfree(mem); |
100 | } | 96 | } |
@@ -109,8 +105,6 @@ static int dma_assign_coherent_memory(struct device *dev, | |||
109 | return -EBUSY; | 105 | return -EBUSY; |
110 | 106 | ||
111 | dev->dma_mem = mem; | 107 | dev->dma_mem = mem; |
112 | /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ | ||
113 | |||
114 | return 0; | 108 | return 0; |
115 | } | 109 | } |
116 | 110 | ||
@@ -118,16 +112,16 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | |||
118 | dma_addr_t device_addr, size_t size, int flags) | 112 | dma_addr_t device_addr, size_t size, int flags) |
119 | { | 113 | { |
120 | struct dma_coherent_mem *mem; | 114 | struct dma_coherent_mem *mem; |
115 | int ret; | ||
121 | 116 | ||
122 | if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags, | 117 | ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem); |
123 | &mem)) | 118 | if (ret) |
124 | return 0; | 119 | return ret; |
125 | |||
126 | if (dma_assign_coherent_memory(dev, mem) == 0) | ||
127 | return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO; | ||
128 | 120 | ||
129 | dma_release_coherent_memory(mem); | 121 | ret = dma_assign_coherent_memory(dev, mem); |
130 | return 0; | 122 | if (ret) |
123 | dma_release_coherent_memory(mem); | ||
124 | return ret; | ||
131 | } | 125 | } |
132 | EXPORT_SYMBOL(dma_declare_coherent_memory); | 126 | EXPORT_SYMBOL(dma_declare_coherent_memory); |
133 | 127 | ||
@@ -171,7 +165,6 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, | |||
171 | int order = get_order(size); | 165 | int order = get_order(size); |
172 | unsigned long flags; | 166 | unsigned long flags; |
173 | int pageno; | 167 | int pageno; |
174 | int dma_memory_map; | ||
175 | void *ret; | 168 | void *ret; |
176 | 169 | ||
177 | spin_lock_irqsave(&mem->spinlock, flags); | 170 | spin_lock_irqsave(&mem->spinlock, flags); |
@@ -188,15 +181,9 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, | |||
188 | */ | 181 | */ |
189 | *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); | 182 | *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); |
190 | ret = mem->virt_base + (pageno << PAGE_SHIFT); | 183 | ret = mem->virt_base + (pageno << PAGE_SHIFT); |
191 | dma_memory_map = (mem->flags & DMA_MEMORY_MAP); | ||
192 | spin_unlock_irqrestore(&mem->spinlock, flags); | 184 | spin_unlock_irqrestore(&mem->spinlock, flags); |
193 | if (dma_memory_map) | 185 | memset(ret, 0, size); |
194 | memset(ret, 0, size); | ||
195 | else | ||
196 | memset_io(ret, 0, size); | ||
197 | |||
198 | return ret; | 186 | return ret; |
199 | |||
200 | err: | 187 | err: |
201 | spin_unlock_irqrestore(&mem->spinlock, flags); | 188 | spin_unlock_irqrestore(&mem->spinlock, flags); |
202 | return NULL; | 189 | return NULL; |
@@ -359,14 +346,18 @@ static struct reserved_mem *dma_reserved_default_memory __initdata; | |||
359 | static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) | 346 | static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) |
360 | { | 347 | { |
361 | struct dma_coherent_mem *mem = rmem->priv; | 348 | struct dma_coherent_mem *mem = rmem->priv; |
349 | int ret; | ||
362 | 350 | ||
363 | if (!mem && | 351 | if (!mem) |
364 | !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size, | 352 | return -ENODEV; |
365 | DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE, | 353 | |
366 | &mem)) { | 354 | ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size, |
355 | DMA_MEMORY_EXCLUSIVE, &mem); | ||
356 | |||
357 | if (ret) { | ||
367 | pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", | 358 | pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", |
368 | &rmem->base, (unsigned long)rmem->size / SZ_1M); | 359 | &rmem->base, (unsigned long)rmem->size / SZ_1M); |
369 | return -ENODEV; | 360 | return ret; |
370 | } | 361 | } |
371 | mem->use_dev_dma_pfn_offset = true; | 362 | mem->use_dev_dma_pfn_offset = true; |
372 | rmem->priv = mem; | 363 | rmem->priv = mem; |
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index b555ff9dd8fc..e584eddef0a7 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c | |||
@@ -176,13 +176,10 @@ int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | |||
176 | 176 | ||
177 | rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, | 177 | rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, |
178 | flags); | 178 | flags); |
179 | if (rc) { | 179 | if (!rc) |
180 | devres_add(dev, res); | 180 | devres_add(dev, res); |
181 | rc = 0; | 181 | else |
182 | } else { | ||
183 | devres_free(res); | 182 | devres_free(res); |
184 | rc = -ENOMEM; | ||
185 | } | ||
186 | 183 | ||
187 | return rc; | 184 | return rc; |
188 | } | 185 | } |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 23f33f95d4a6..d1aed2513bd9 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -451,9 +451,6 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, | |||
451 | * device is created by remoteproc, the DMA memory is | 451 | * device is created by remoteproc, the DMA memory is |
452 | * associated with the grandparent device: | 452 | * associated with the grandparent device: |
453 | * vdev => rproc => platform-dev. | 453 | * vdev => rproc => platform-dev. |
454 | * The code here would have been less quirky if | ||
455 | * DMA_MEMORY_INCLUDES_CHILDREN had been supported | ||
456 | * in dma-coherent.c | ||
457 | */ | 454 | */ |
458 | if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) | 455 | if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) |
459 | goto free_buf; | 456 | goto free_buf; |
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 96dc01750bc0..36762ec954e7 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c | |||
@@ -1708,11 +1708,10 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev) | |||
1708 | err = dma_declare_coherent_memory(&pdev->dev, res->start, | 1708 | err = dma_declare_coherent_memory(&pdev->dev, res->start, |
1709 | res->start, | 1709 | res->start, |
1710 | resource_size(res), | 1710 | resource_size(res), |
1711 | DMA_MEMORY_MAP | | ||
1712 | DMA_MEMORY_EXCLUSIVE); | 1711 | DMA_MEMORY_EXCLUSIVE); |
1713 | if (!err) { | 1712 | if (err) { |
1714 | dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); | 1713 | dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); |
1715 | return -ENXIO; | 1714 | return err; |
1716 | } | 1715 | } |
1717 | 1716 | ||
1718 | pcdev->video_limit = resource_size(res); | 1717 | pcdev->video_limit = resource_size(res); |
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index a3c90fe5de00..73ca8879ada7 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c | |||
@@ -1180,9 +1180,10 @@ static int au1000_probe(struct platform_device *pdev) | |||
1180 | /* Allocate the data buffers | 1180 | /* Allocate the data buffers |
1181 | * Snooping works fine with eth on all au1xxx | 1181 | * Snooping works fine with eth on all au1xxx |
1182 | */ | 1182 | */ |
1183 | aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * | 1183 | aup->vaddr = (u32)dma_alloc_attrs(NULL, MAX_BUF_SIZE * |
1184 | (NUM_TX_BUFFS + NUM_RX_BUFFS), | 1184 | (NUM_TX_BUFFS + NUM_RX_BUFFS), |
1185 | &aup->dma_addr, 0); | 1185 | &aup->dma_addr, 0, |
1186 | DMA_ATTR_NON_CONSISTENT); | ||
1186 | if (!aup->vaddr) { | 1187 | if (!aup->vaddr) { |
1187 | dev_err(&pdev->dev, "failed to allocate data buffers\n"); | 1188 | dev_err(&pdev->dev, "failed to allocate data buffers\n"); |
1188 | err = -ENOMEM; | 1189 | err = -ENOMEM; |
@@ -1361,8 +1362,9 @@ err_remap3: | |||
1361 | err_remap2: | 1362 | err_remap2: |
1362 | iounmap(aup->mac); | 1363 | iounmap(aup->mac); |
1363 | err_remap1: | 1364 | err_remap1: |
1364 | dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), | 1365 | dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), |
1365 | (void *)aup->vaddr, aup->dma_addr); | 1366 | (void *)aup->vaddr, aup->dma_addr, |
1367 | DMA_ATTR_NON_CONSISTENT); | ||
1366 | err_vaddr: | 1368 | err_vaddr: |
1367 | free_netdev(dev); | 1369 | free_netdev(dev); |
1368 | err_alloc: | 1370 | err_alloc: |
@@ -1394,9 +1396,9 @@ static int au1000_remove(struct platform_device *pdev) | |||
1394 | if (aup->tx_db_inuse[i]) | 1396 | if (aup->tx_db_inuse[i]) |
1395 | au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); | 1397 | au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); |
1396 | 1398 | ||
1397 | dma_free_noncoherent(NULL, MAX_BUF_SIZE * | 1399 | dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), |
1398 | (NUM_TX_BUFFS + NUM_RX_BUFFS), | 1400 | (void *)aup->vaddr, aup->dma_addr, |
1399 | (void *)aup->vaddr, aup->dma_addr); | 1401 | DMA_ATTR_NON_CONSISTENT); |
1400 | 1402 | ||
1401 | iounmap(aup->macdma); | 1403 | iounmap(aup->macdma); |
1402 | iounmap(aup->mac); | 1404 | iounmap(aup->mac); |
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c index aa22e108f09b..b69c622ba8b2 100644 --- a/drivers/net/ethernet/i825xx/lasi_82596.c +++ b/drivers/net/ethernet/i825xx/lasi_82596.c | |||
@@ -96,8 +96,6 @@ | |||
96 | 96 | ||
97 | #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */ | 97 | #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */ |
98 | 98 | ||
99 | #define DMA_ALLOC dma_alloc_noncoherent | ||
100 | #define DMA_FREE dma_free_noncoherent | ||
101 | #define DMA_WBACK(ndev, addr, len) \ | 99 | #define DMA_WBACK(ndev, addr, len) \ |
102 | do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0) | 100 | do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0) |
103 | 101 | ||
@@ -200,8 +198,8 @@ static int __exit lan_remove_chip(struct parisc_device *pdev) | |||
200 | struct i596_private *lp = netdev_priv(dev); | 198 | struct i596_private *lp = netdev_priv(dev); |
201 | 199 | ||
202 | unregister_netdev (dev); | 200 | unregister_netdev (dev); |
203 | DMA_FREE(&pdev->dev, sizeof(struct i596_private), | 201 | dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma, |
204 | (void *)lp->dma, lp->dma_addr); | 202 | lp->dma_addr, DMA_ATTR_NON_CONSISTENT); |
205 | free_netdev (dev); | 203 | free_netdev (dev); |
206 | return 0; | 204 | return 0; |
207 | } | 205 | } |
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index 8449c58f01fd..f00a1dc2128c 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c | |||
@@ -1063,8 +1063,9 @@ static int i82596_probe(struct net_device *dev) | |||
1063 | if (!dev->base_addr || !dev->irq) | 1063 | if (!dev->base_addr || !dev->irq) |
1064 | return -ENODEV; | 1064 | return -ENODEV; |
1065 | 1065 | ||
1066 | dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent, | 1066 | dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma), |
1067 | sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL); | 1067 | &lp->dma_addr, GFP_KERNEL, |
1068 | DMA_ATTR_NON_CONSISTENT); | ||
1068 | if (!dma) { | 1069 | if (!dma) { |
1069 | printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__); | 1070 | printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__); |
1070 | return -ENOMEM; | 1071 | return -ENOMEM; |
@@ -1085,8 +1086,8 @@ static int i82596_probe(struct net_device *dev) | |||
1085 | 1086 | ||
1086 | i = register_netdev(dev); | 1087 | i = register_netdev(dev); |
1087 | if (i) { | 1088 | if (i) { |
1088 | DMA_FREE(dev->dev.parent, sizeof(struct i596_dma), | 1089 | dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma), |
1089 | (void *)dma, lp->dma_addr); | 1090 | dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT); |
1090 | return i; | 1091 | return i; |
1091 | } | 1092 | } |
1092 | 1093 | ||
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 2af7f77345fb..b2c04a789744 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c | |||
@@ -23,8 +23,6 @@ | |||
23 | 23 | ||
24 | static const char sni_82596_string[] = "snirm_82596"; | 24 | static const char sni_82596_string[] = "snirm_82596"; |
25 | 25 | ||
26 | #define DMA_ALLOC dma_alloc_coherent | ||
27 | #define DMA_FREE dma_free_coherent | ||
28 | #define DMA_WBACK(priv, addr, len) do { } while (0) | 26 | #define DMA_WBACK(priv, addr, len) do { } while (0) |
29 | #define DMA_INV(priv, addr, len) do { } while (0) | 27 | #define DMA_INV(priv, addr, len) do { } while (0) |
30 | #define DMA_WBACK_INV(priv, addr, len) do { } while (0) | 28 | #define DMA_WBACK_INV(priv, addr, len) do { } while (0) |
@@ -152,8 +150,8 @@ static int sni_82596_driver_remove(struct platform_device *pdev) | |||
152 | struct i596_private *lp = netdev_priv(dev); | 150 | struct i596_private *lp = netdev_priv(dev); |
153 | 151 | ||
154 | unregister_netdev(dev); | 152 | unregister_netdev(dev); |
155 | DMA_FREE(dev->dev.parent, sizeof(struct i596_private), | 153 | dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma, |
156 | lp->dma, lp->dma_addr); | 154 | lp->dma_addr, DMA_ATTR_NON_CONSISTENT); |
157 | iounmap(lp->ca); | 155 | iounmap(lp->ca); |
158 | iounmap(lp->mpu_port); | 156 | iounmap(lp->mpu_port); |
159 | free_netdev (dev); | 157 | free_netdev (dev); |
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 70347720fdf9..573691bc3b71 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c | |||
@@ -737,8 +737,8 @@ static int sgiseeq_probe(struct platform_device *pdev) | |||
737 | sp = netdev_priv(dev); | 737 | sp = netdev_priv(dev); |
738 | 738 | ||
739 | /* Make private data page aligned */ | 739 | /* Make private data page aligned */ |
740 | sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings), | 740 | sr = dma_alloc_attrs(&pdev->dev, sizeof(*sp->srings), &sp->srings_dma, |
741 | &sp->srings_dma, GFP_KERNEL); | 741 | GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); |
742 | if (!sr) { | 742 | if (!sr) { |
743 | printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n"); | 743 | printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n"); |
744 | err = -ENOMEM; | 744 | err = -ENOMEM; |
@@ -813,8 +813,8 @@ static int sgiseeq_remove(struct platform_device *pdev) | |||
813 | struct sgiseeq_private *sp = netdev_priv(dev); | 813 | struct sgiseeq_private *sp = netdev_priv(dev); |
814 | 814 | ||
815 | unregister_netdev(dev); | 815 | unregister_netdev(dev); |
816 | dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, | 816 | dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings, |
817 | sp->srings_dma); | 817 | sp->srings_dma, DMA_ATTR_NON_CONSISTENT); |
818 | free_netdev(dev); | 818 | free_netdev(dev); |
819 | 819 | ||
820 | return 0; | 820 | return 0; |
diff --git a/drivers/of/device.c b/drivers/of/device.c index 17b66e9715d2..64b710265d39 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c | |||
@@ -9,6 +9,9 @@ | |||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/mod_devicetable.h> | 10 | #include <linux/mod_devicetable.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/pci.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/amba/bus.h> | ||
12 | 15 | ||
13 | #include <asm/errno.h> | 16 | #include <asm/errno.h> |
14 | #include "of_private.h" | 17 | #include "of_private.h" |
@@ -84,31 +87,28 @@ int of_device_add(struct platform_device *ofdev) | |||
84 | */ | 87 | */ |
85 | int of_dma_configure(struct device *dev, struct device_node *np) | 88 | int of_dma_configure(struct device *dev, struct device_node *np) |
86 | { | 89 | { |
87 | u64 dma_addr, paddr, size; | 90 | u64 dma_addr, paddr, size = 0; |
88 | int ret; | 91 | int ret; |
89 | bool coherent; | 92 | bool coherent; |
90 | unsigned long offset; | 93 | unsigned long offset; |
91 | const struct iommu_ops *iommu; | 94 | const struct iommu_ops *iommu; |
92 | u64 mask; | 95 | u64 mask; |
93 | 96 | ||
94 | /* | ||
95 | * Set default coherent_dma_mask to 32 bit. Drivers are expected to | ||
96 | * setup the correct supported mask. | ||
97 | */ | ||
98 | if (!dev->coherent_dma_mask) | ||
99 | dev->coherent_dma_mask = DMA_BIT_MASK(32); | ||
100 | |||
101 | /* | ||
102 | * Set it to coherent_dma_mask by default if the architecture | ||
103 | * code has not set it. | ||
104 | */ | ||
105 | if (!dev->dma_mask) | ||
106 | dev->dma_mask = &dev->coherent_dma_mask; | ||
107 | |||
108 | ret = of_dma_get_range(np, &dma_addr, &paddr, &size); | 97 | ret = of_dma_get_range(np, &dma_addr, &paddr, &size); |
109 | if (ret < 0) { | 98 | if (ret < 0) { |
99 | /* | ||
100 | * For legacy reasons, we have to assume some devices need | ||
101 | * DMA configuration regardless of whether "dma-ranges" is | ||
102 | * correctly specified or not. | ||
103 | */ | ||
104 | if (!dev_is_pci(dev) && | ||
105 | #ifdef CONFIG_ARM_AMBA | ||
106 | dev->bus != &amba_bustype && | ||
107 | #endif | ||
108 | dev->bus != &platform_bus_type) | ||
109 | return ret == -ENODEV ? 0 : ret; | ||
110 | |||
110 | dma_addr = offset = 0; | 111 | dma_addr = offset = 0; |
111 | size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); | ||
112 | } else { | 112 | } else { |
113 | offset = PFN_DOWN(paddr - dma_addr); | 113 | offset = PFN_DOWN(paddr - dma_addr); |
114 | 114 | ||
@@ -129,6 +129,22 @@ int of_dma_configure(struct device *dev, struct device_node *np) | |||
129 | dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); | 129 | dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); |
130 | } | 130 | } |
131 | 131 | ||
132 | /* | ||
133 | * Set default coherent_dma_mask to 32 bit. Drivers are expected to | ||
134 | * setup the correct supported mask. | ||
135 | */ | ||
136 | if (!dev->coherent_dma_mask) | ||
137 | dev->coherent_dma_mask = DMA_BIT_MASK(32); | ||
138 | /* | ||
139 | * Set it to coherent_dma_mask by default if the architecture | ||
140 | * code has not set it. | ||
141 | */ | ||
142 | if (!dev->dma_mask) | ||
143 | dev->dma_mask = &dev->coherent_dma_mask; | ||
144 | |||
145 | if (!size) | ||
146 | size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); | ||
147 | |||
132 | dev->dma_pfn_offset = offset; | 148 | dev->dma_pfn_offset = offset; |
133 | 149 | ||
134 | /* | 150 | /* |
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c index 05835bf1bf9c..54e7d26908ee 100644 --- a/drivers/scsi/NCR_Q720.c +++ b/drivers/scsi/NCR_Q720.c | |||
@@ -217,8 +217,7 @@ NCR_Q720_probe(struct device *dev) | |||
217 | } | 217 | } |
218 | 218 | ||
219 | if (dma_declare_coherent_memory(dev, base_addr, base_addr, | 219 | if (dma_declare_coherent_memory(dev, base_addr, base_addr, |
220 | mem_size, DMA_MEMORY_MAP) | 220 | mem_size, 0)) { |
221 | != DMA_MEMORY_MAP) { | ||
222 | printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n"); | 221 | printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n"); |
223 | goto out_release_region; | 222 | goto out_release_region; |
224 | } | 223 | } |
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index a8b8d8b8d9f3..d4e0f7cd96fa 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c | |||
@@ -123,13 +123,12 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev) | |||
123 | * regular memory. The HCD_LOCAL_MEM flag does just that. | 123 | * regular memory. The HCD_LOCAL_MEM flag does just that. |
124 | */ | 124 | */ |
125 | 125 | ||
126 | if (!dma_declare_coherent_memory(dev, mem->start, | 126 | retval = dma_declare_coherent_memory(dev, mem->start, |
127 | mem->start - mem->parent->start, | 127 | mem->start - mem->parent->start, |
128 | resource_size(mem), | 128 | resource_size(mem), |
129 | DMA_MEMORY_MAP | | 129 | DMA_MEMORY_EXCLUSIVE); |
130 | DMA_MEMORY_EXCLUSIVE)) { | 130 | if (retval) { |
131 | dev_err(dev, "cannot declare coherent memory\n"); | 131 | dev_err(dev, "cannot declare coherent memory\n"); |
132 | retval = -ENXIO; | ||
133 | goto err1; | 132 | goto err1; |
134 | } | 133 | } |
135 | 134 | ||
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index cfcfadfc94fc..16d081a093bb 100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c | |||
@@ -227,13 +227,10 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev) | |||
227 | goto err_ioremap_regs; | 227 | goto err_ioremap_regs; |
228 | } | 228 | } |
229 | 229 | ||
230 | if (!dma_declare_coherent_memory(&dev->dev, sram->start, | 230 | ret = dma_declare_coherent_memory(&dev->dev, sram->start, sram->start, |
231 | sram->start, | 231 | resource_size(sram), DMA_MEMORY_EXCLUSIVE); |
232 | resource_size(sram), | 232 | if (ret) |
233 | DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)) { | ||
234 | ret = -EBUSY; | ||
235 | goto err_dma_declare; | 233 | goto err_dma_declare; |
236 | } | ||
237 | 234 | ||
238 | if (cell->enable) { | 235 | if (cell->enable) { |
239 | ret = cell->enable(dev); | 236 | ret = cell->enable(dev); |
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2189c79cde5d..29ce9815da87 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -550,26 +550,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size, | |||
550 | return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); | 550 | return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); |
551 | } | 551 | } |
552 | 552 | ||
553 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, | ||
554 | dma_addr_t *dma_handle, gfp_t gfp) | ||
555 | { | ||
556 | return dma_alloc_attrs(dev, size, dma_handle, gfp, | ||
557 | DMA_ATTR_NON_CONSISTENT); | ||
558 | } | ||
559 | |||
560 | static inline void dma_free_noncoherent(struct device *dev, size_t size, | ||
561 | void *cpu_addr, dma_addr_t dma_handle) | ||
562 | { | ||
563 | dma_free_attrs(dev, size, cpu_addr, dma_handle, | ||
564 | DMA_ATTR_NON_CONSISTENT); | ||
565 | } | ||
566 | |||
567 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 553 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
568 | { | 554 | { |
569 | debug_dma_mapping_error(dev, dma_addr); | 555 | const struct dma_map_ops *ops = get_dma_ops(dev); |
570 | 556 | ||
571 | if (get_dma_ops(dev)->mapping_error) | 557 | debug_dma_mapping_error(dev, dma_addr); |
572 | return get_dma_ops(dev)->mapping_error(dev, dma_addr); | 558 | if (ops->mapping_error) |
559 | return ops->mapping_error(dev, dma_addr); | ||
573 | return 0; | 560 | return 0; |
574 | } | 561 | } |
575 | 562 | ||
@@ -720,10 +707,7 @@ static inline int dma_get_cache_alignment(void) | |||
720 | #endif | 707 | #endif |
721 | 708 | ||
722 | /* flags for the coherent memory api */ | 709 | /* flags for the coherent memory api */ |
723 | #define DMA_MEMORY_MAP 0x01 | 710 | #define DMA_MEMORY_EXCLUSIVE 0x01 |
724 | #define DMA_MEMORY_IO 0x02 | ||
725 | #define DMA_MEMORY_INCLUDES_CHILDREN 0x04 | ||
726 | #define DMA_MEMORY_EXCLUSIVE 0x08 | ||
727 | 711 | ||
728 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT | 712 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT |
729 | int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | 713 | int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, |
@@ -736,7 +720,7 @@ static inline int | |||
736 | dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | 720 | dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, |
737 | dma_addr_t device_addr, size_t size, int flags) | 721 | dma_addr_t device_addr, size_t size, int flags) |
738 | { | 722 | { |
739 | return 0; | 723 | return -ENOSYS; |
740 | } | 724 | } |
741 | 725 | ||
742 | static inline void | 726 | static inline void |