author	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 18:13:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 18:13:55 -0400
commit	9e3a25dc992dd9f3170fb643bdd95da5ca9c5576 (patch)
tree	f636ae59fa83c83e837a6668b2693175a6e39f3a /arch/parisc
parent	9787aed57dd33ba5c15a713c2c50e78baeb5052d (diff)
parent	15ffe5e1acf5fe1512e98b20702e46ce9f25e2f7 (diff)
Merge tag 'dma-mapping-5.3' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - move the USB special case that bounced DMA through a device BAR into
   the USB code instead of handling it in the common DMA code (Laurentiu
   Tudor and Fredrik Noring)

 - don't dip into the global CMA pool for single page allocations
   (Nicolin Chen)

 - fix a crash when allocating memory for the atomic pool failed during
   boot (Florian Fainelli)

 - move support for MIPS-style uncached segments to the common code and
   use that for MIPS and nios2 (me)

 - make support for DMA_ATTR_NON_CONSISTENT and
   DMA_ATTR_NO_KERNEL_MAPPING generic (me)

 - convert nds32 to the generic remapping allocator (me)

* tag 'dma-mapping-5.3' of git://git.infradead.org/users/hch/dma-mapping: (29 commits)
  dma-mapping: mark dma_alloc_need_uncached as __always_inline
  MIPS: only select ARCH_HAS_UNCACHED_SEGMENT for non-coherent platforms
  usb: host: Fix excessive alignment restriction for local memory allocations
  lib/genalloc.c: Add algorithm, align and zeroed family of DMA allocators
  nios2: use the generic uncached segment support in dma-direct
  nds32: use the generic remapping allocator for coherent DMA allocations
  arc: use the generic remapping allocator for coherent DMA allocations
  dma-direct: handle DMA_ATTR_NO_KERNEL_MAPPING in common code
  dma-direct: handle DMA_ATTR_NON_CONSISTENT in common code
  dma-mapping: add a dma_alloc_need_uncached helper
  openrisc: remove the partial DMA_ATTR_NON_CONSISTENT support
  arc: remove the partial DMA_ATTR_NON_CONSISTENT support
  arm-nommu: remove the partial DMA_ATTR_NON_CONSISTENT support
  ARM: dma-mapping: allow larger DMA mask than supported
  dma-mapping: truncate dma masks to what dma_addr_t can hold
  iommu/dma: Apply dma_{alloc,free}_contiguous functions
  dma-remap: Avoid de-referencing NULL atomic_pool
  MIPS: use the generic uncached segment support in dma-direct
  dma-direct: provide generic support for uncached kernel segments
  au1100fb: fix DMA API abuse
  ...
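The DMA_ATTR_NON_CONSISTENT work above changes what drivers see: the
attribute requests device-reachable memory that need not be cache
coherent, leaving cache maintenance to the caller. As a hedged sketch
(not code from this series; the helper name and the caller-supplied
"dev" and "size" are assumptions), a 5.3-era driver would go through
the generic API roughly like this:

	#include <linux/dma-mapping.h>

	static void *alloc_noncoherent(struct device *dev, size_t size,
				       dma_addr_t *handle)
	{
		void *vaddr;

		/* may hand back cacheable pages; we must sync by hand */
		vaddr = dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
					DMA_ATTR_NON_CONSISTENT);
		if (!vaddr)
			return NULL;

		/* ... fill the buffer, then push it out to the device */
		dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE);
		return vaddr;
	}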
Diffstat (limited to 'arch/parisc')
-rw-r--r--	arch/parisc/kernel/pci-dma.c	48
1 file changed, 13 insertions(+), 35 deletions(-)
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 239162355b58..ca35d9a76e50 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -394,17 +394,20 @@ pcxl_dma_init(void)
 
 __initcall(pcxl_dma_init);
 
-static void *pcxl_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	unsigned long vaddr;
 	unsigned long paddr;
 	int order;
 
+	if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
+		return NULL;
+
 	order = get_order(size);
 	size = 1 << (order + PAGE_SHIFT);
 	vaddr = pcxl_alloc_range(size);
-	paddr = __get_free_pages(flag | __GFP_ZERO, order);
+	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
 	flush_kernel_dcache_range(paddr, size);
 	paddr = __pa(paddr);
 	map_uncached_pages(vaddr, size, paddr);
@@ -421,44 +424,19 @@ static void *pcxl_dma_alloc(struct device *dev, size_t size,
 	return (void *)vaddr;
 }
 
-static void *pcx_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
-	void *addr;
-
-	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
-		return NULL;
-
-	addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
-	if (addr)
-		*dma_handle = (dma_addr_t)virt_to_phys(addr);
-
-	return addr;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
-		return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
-	else
-		return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
-}
-
 void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	int order = get_order(size);
 
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
-		size = 1 << (order + PAGE_SHIFT);
-		unmap_uncached_pages((unsigned long)vaddr, size);
-		pcxl_free_range((unsigned long)vaddr, size);
+	WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
+		     boot_cpu_data.cpu_type != pcxl);
 
-		vaddr = __va(dma_handle);
-	}
-	free_pages((unsigned long)vaddr, get_order(size));
+	size = 1 << (order + PAGE_SHIFT);
+	unmap_uncached_pages((unsigned long)vaddr, size);
+	pcxl_free_range((unsigned long)vaddr, size);
+
+	free_pages((unsigned long)__va(dma_handle), order);
 }
 
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
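The diff folds pcxl_dma_alloc() and pcx_dma_alloc() into a single
arch_dma_alloc(): on PCXL/PCXL2 it builds an uncached mapping, while on
other PCX CPUs it now returns NULL so the common dma-direct code can
serve DMA_ATTR_NON_CONSISTENT requests from ordinary pages (see
"dma-direct: handle DMA_ATTR_NON_CONSISTENT in common code" in the list
above). A hypothetical sketch of that split, reusing the logic of the
deleted pcx_dma_alloc() rather than the literal kernel/dma/direct.c
control flow:

	/* illustrative fallback wrapper, not the actual common code */
	static void *dma_alloc_sketch(struct device *dev, size_t size,
				      dma_addr_t *handle, gfp_t gfp,
				      unsigned long attrs)
	{
		/* PCXL/PCXL2: uncached mapping; other CPUs: NULL */
		void *vaddr = arch_dma_alloc(dev, size, handle, gfp, attrs);

		if (!vaddr && (attrs & DMA_ATTR_NON_CONSISTENT)) {
			/* plain cacheable pages, as pcx_dma_alloc() did */
			vaddr = (void *)__get_free_pages(gfp | __GFP_ZERO,
							 get_order(size));
			if (vaddr)
				*handle = (dma_addr_t)virt_to_phys(vaddr);
		}
		return vaddr;
	}

This also explains the arch_dma_free() subtlety above: vaddr is the
uncached alias, while dma_handle holds the physical address of the
backing pages, so the pages are freed through __va(dma_handle).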