about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Christoph Hellwig <hch@lst.de>  2018-06-19 03:04:55 -0400
committer: Helge Deller <deller@gmx.de>  2018-08-13 03:30:32 -0400
commit c1f59375b3782f478ac2c488889abdc00dd8e25f (patch)
tree 618197af450d2db9600dfcfbc25dc6c2772462f4
parent 7f1501053811414ddeff63db8f5d41bdbe38068f (diff)
parisc: use generic dma_noncoherent_ops
Switch to the generic noncoherent direct mapping implementation. Fix sync_single_for_cpu to skip the cache flush unless the transfer is to the device, to match the more-tested unmap_single path, which should have the same cache coherency implications.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Helge Deller <deller@gmx.de>
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/include/asm/dma-mapping.h4
-rw-r--r--arch/parisc/kernel/pci-dma.c145
-rw-r--r--arch/parisc/kernel/setup.c2
4 files changed, 16 insertions, 139 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index e7705dde953f..1fcdf1565b23 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -187,6 +187,10 @@ config PA20
187config PA11 187config PA11
188 def_bool y 188 def_bool y
189 depends on PA7000 || PA7100LC || PA7200 || PA7300LC 189 depends on PA7000 || PA7100LC || PA7200 || PA7300LC
190 select ARCH_HAS_SYNC_DMA_FOR_CPU
191 select ARCH_HAS_SYNC_DMA_FOR_DEVICE
192 select DMA_NONCOHERENT_OPS
193 select DMA_NONCOHERENT_CACHE_SYNC
190 194
191config PREFETCH 195config PREFETCH
192 def_bool y 196 def_bool y
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index eeec8dd18e74..44a9f97194aa 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -21,10 +21,6 @@
21** flush/purge and allocate "regular" cacheable pages for everything. 21** flush/purge and allocate "regular" cacheable pages for everything.
22*/ 22*/
23 23
24#ifdef CONFIG_PA11
25extern const struct dma_map_ops pa11_dma_ops;
26#endif
27
28extern const struct dma_map_ops *hppa_dma_ops; 24extern const struct dma_map_ops *hppa_dma_ops;
29 25
30static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 26static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 1f85ca2c0c9e..04c48f1ef3fb 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -21,13 +21,12 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/gfp.h> 22#include <linux/gfp.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/pci.h>
25#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
26#include <linux/seq_file.h> 25#include <linux/seq_file.h>
27#include <linux/string.h> 26#include <linux/string.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/scatterlist.h> 28#include <linux/dma-direct.h>
30#include <linux/export.h> 29#include <linux/dma-noncoherent.h>
31 30
32#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
33#include <asm/dma.h> /* for DMA_CHUNK_SIZE */ 32#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
@@ -437,7 +436,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size,
437 return addr; 436 return addr;
438} 437}
439 438
440static void *pa11_dma_alloc(struct device *dev, size_t size, 439void *arch_dma_alloc(struct device *dev, size_t size,
441 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 440 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
442{ 441{
443 442
@@ -447,7 +446,7 @@ static void *pa11_dma_alloc(struct device *dev, size_t size,
447 return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs); 446 return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
448} 447}
449 448
450static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, 449void arch_dma_free(struct device *dev, size_t size, void *vaddr,
451 dma_addr_t dma_handle, unsigned long attrs) 450 dma_addr_t dma_handle, unsigned long attrs)
452{ 451{
453 int order = get_order(size); 452 int order = get_order(size);
@@ -462,142 +461,20 @@ static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
462 free_pages((unsigned long)vaddr, get_order(size)); 461 free_pages((unsigned long)vaddr, get_order(size));
463} 462}
464 463
465static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, 464void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
466 unsigned long offset, size_t size, 465 size_t size, enum dma_data_direction dir)
467 enum dma_data_direction direction, unsigned long attrs)
468{ 466{
469 void *addr = page_address(page) + offset; 467 flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
470 BUG_ON(direction == DMA_NONE);
471
472 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
473 flush_kernel_dcache_range((unsigned long) addr, size);
474
475 return virt_to_phys(addr);
476}
477
478static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
479 size_t size, enum dma_data_direction direction,
480 unsigned long attrs)
481{
482 BUG_ON(direction == DMA_NONE);
483
484 if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
485 return;
486
487 if (direction == DMA_TO_DEVICE)
488 return;
489
490 /*
491 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
492 * simple map/unmap case. However, it IS necessary if if
493 * pci_dma_sync_single_* has been called and the buffer reused.
494 */
495
496 flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
497} 468}
498 469
499static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, 470void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
500 int nents, enum dma_data_direction direction, 471 size_t size, enum dma_data_direction dir)
501 unsigned long attrs)
502{ 472{
503 int i; 473 flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
504 struct scatterlist *sg;
505
506 BUG_ON(direction == DMA_NONE);
507
508 for_each_sg(sglist, sg, nents, i) {
509 unsigned long vaddr = (unsigned long)sg_virt(sg);
510
511 sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
512 sg_dma_len(sg) = sg->length;
513
514 if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
515 continue;
516
517 flush_kernel_dcache_range(vaddr, sg->length);
518 }
519 return nents;
520} 474}
521 475
522static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, 476void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
523 int nents, enum dma_data_direction direction,
524 unsigned long attrs)
525{
526 int i;
527 struct scatterlist *sg;
528
529 BUG_ON(direction == DMA_NONE);
530
531 if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
532 return;
533
534 if (direction == DMA_TO_DEVICE)
535 return;
536
537 /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
538
539 for_each_sg(sglist, sg, nents, i)
540 flush_kernel_dcache_range(sg_virt(sg), sg->length);
541}
542
543static void pa11_dma_sync_single_for_cpu(struct device *dev,
544 dma_addr_t dma_handle, size_t size,
545 enum dma_data_direction direction)
546{
547 BUG_ON(direction == DMA_NONE);
548
549 flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
550 size);
551}
552
553static void pa11_dma_sync_single_for_device(struct device *dev,
554 dma_addr_t dma_handle, size_t size,
555 enum dma_data_direction direction)
556{
557 BUG_ON(direction == DMA_NONE);
558
559 flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
560 size);
561}
562
563static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
564{
565 int i;
566 struct scatterlist *sg;
567
568 /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
569
570 for_each_sg(sglist, sg, nents, i)
571 flush_kernel_dcache_range(sg_virt(sg), sg->length);
572}
573
574static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
575{
576 int i;
577 struct scatterlist *sg;
578
579 /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
580
581 for_each_sg(sglist, sg, nents, i)
582 flush_kernel_dcache_range(sg_virt(sg), sg->length);
583}
584
585static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
586 enum dma_data_direction direction) 477 enum dma_data_direction direction)
587{ 478{
588 flush_kernel_dcache_range((unsigned long)vaddr, size); 479 flush_kernel_dcache_range((unsigned long)vaddr, size);
589} 480}
590
591const struct dma_map_ops pa11_dma_ops = {
592 .alloc = pa11_dma_alloc,
593 .free = pa11_dma_free,
594 .map_page = pa11_dma_map_page,
595 .unmap_page = pa11_dma_unmap_page,
596 .map_sg = pa11_dma_map_sg,
597 .unmap_sg = pa11_dma_unmap_sg,
598 .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
599 .sync_single_for_device = pa11_dma_sync_single_for_device,
600 .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
601 .sync_sg_for_device = pa11_dma_sync_sg_for_device,
602 .cache_sync = pa11_dma_cache_sync,
603};
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 5c8450a22255..4e87c35c22b7 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -102,7 +102,7 @@ void __init dma_ops_init(void)
102 case pcxl: /* falls through */ 102 case pcxl: /* falls through */
103 case pcxs: 103 case pcxs:
104 case pcxt: 104 case pcxt:
105 hppa_dma_ops = &pa11_dma_ops; 105 hppa_dma_ops = &dma_noncoherent_ops;
106 break; 106 break;
107 default: 107 default:
108 break; 108 break;