 arch/parisc/Kconfig                   |   2
 arch/parisc/include/asm/dma-mapping.h | 189
 arch/parisc/kernel/drivers.c          |   2
 arch/parisc/kernel/pci-dma.c          |  92
 drivers/parisc/ccio-dma.c             |  57
 drivers/parisc/sba_iommu.c            |  52
 6 files changed, 124 insertions(+), 270 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 7c34cafdf301..1489351134fa 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -29,6 +29,8 @@ config PARISC
 	select TTY # Needed for pdc_cons.c
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_ARCH_AUDITSYSCALL
+	select ARCH_NO_COHERENT_DMA_MMAP
+	select HAVE_DMA_ATTRS
 
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index d8d60a57183f..4de518647612 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -1,30 +1,11 @@
 #ifndef _PARISC_DMA_MAPPING_H
 #define _PARISC_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
 #include <asm/cacheflush.h>
 
-/* See Documentation/DMA-API-HOWTO.txt */
-struct hppa_dma_ops {
-	int (*dma_supported)(struct device *dev, u64 mask);
-	void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
-	void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
-	void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
-	dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
-	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
-	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
-	void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
-	void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
-	void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
-	void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
-};
-
 /*
-** We could live without the hppa_dma_ops indirection if we didn't want
-** to support 4 different coherent dma models with one binary (they will
-** someday be loadable modules):
+** We need to support 4 different coherent dma models with one binary:
+**
 ** I/O MMU	consistent method	dma_sync behavior
 ** =============	======================	=======================
 ** a) PA-7x00LC	uncachable host memory	flush/purge
@@ -40,158 +21,22 @@ struct hppa_dma_ops
 */
 
 #ifdef CONFIG_PA11
-extern struct hppa_dma_ops pcxl_dma_ops;
-extern struct hppa_dma_ops pcx_dma_ops;
+extern struct dma_map_ops pcxl_dma_ops;
+extern struct dma_map_ops pcx_dma_ops;
 #endif
 
-extern struct hppa_dma_ops *hppa_dma_ops;
-
-#define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f)
-#define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f)
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t flag)
-{
-	return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
-}
-
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t flag)
-{
-	return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size,
-		void *vaddr, dma_addr_t dma_handle)
-{
-	hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
-}
-
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size,
-		void *vaddr, dma_addr_t dma_handle)
-{
-	hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction direction)
-{
-	return hppa_dma_ops->map_single(dev, ptr, size, direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction direction)
-{
-	hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction)
-{
-	return hppa_dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-		enum dma_data_direction direction)
-{
-	hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-		size_t size, enum dma_data_direction direction)
-{
-	return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-		enum dma_data_direction direction)
-{
-	dma_unmap_single(dev, dma_address, size, direction);
-}
-
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	if(hppa_dma_ops->dma_sync_single_for_cpu)
-		hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	if(hppa_dma_ops->dma_sync_single_for_device)
-		hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	if(hppa_dma_ops->dma_sync_single_for_cpu)
-		hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction);
-}
+extern struct dma_map_ops *hppa_dma_ops;
 
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	if(hppa_dma_ops->dma_sync_single_for_device)
-		hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	if(hppa_dma_ops->dma_sync_sg_for_cpu)
-		hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	if(hppa_dma_ops->dma_sync_sg_for_device)
-		hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	return hppa_dma_ops->dma_supported(dev, mask);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	if(!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
+	return hppa_dma_ops;
 }
 
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
-	if(hppa_dma_ops->dma_sync_single_for_cpu)
+	if (hppa_dma_ops->sync_single_for_cpu)
 		flush_kernel_dcache_range((unsigned long)vaddr, size);
 }
 
@@ -238,22 +83,6 @@ struct parisc_device;
 void * sba_get_iommu(struct parisc_device *dev);
 #endif
 
-/* At the moment, we panic on error for IOMMU resource exaustion */
-#define dma_mapping_error(dev, x)	0
-
-/* This API cannot be supported on PA-RISC */
-static inline int dma_mmap_coherent(struct device *dev,
-		struct vm_area_struct *vma, void *cpu_addr,
-		dma_addr_t dma_addr, size_t size)
-{
-	return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-		void *cpu_addr, dma_addr_t dma_addr,
-		size_t size)
-{
-	return -EINVAL;
-}
+#include <asm-generic/dma-mapping-common.h>
 
 #endif
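
The <asm-generic/dma-mapping-common.h> include above is what replaces the deleted hand-rolled wrappers: it defines dma_map_single(), dma_unmap_single(), the sync helpers, dma_supported() and dma_set_mask() once, on top of whatever get_dma_ops() returns. A simplified sketch of that dispatch (the in-tree header of this era also runs kmemcheck and CONFIG_DMA_API_DEBUG hooks, omitted here):

/* Simplified sketch of the generic wrapper this patch switches to. */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* map_page is the one mapping primitive; map_single is built on it */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, attrs);
}

This is why the per-driver map_single routines below grow thin map_page wrappers instead of keeping two entry points.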
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index dba508fe1683..f8150669b8c6 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -40,7 +40,7 @@
 #include <asm/parisc-device.h>
 
 /* See comments in include/asm-parisc/pci.h */
-struct hppa_dma_ops *hppa_dma_ops __read_mostly;
+struct dma_map_ops *hppa_dma_ops __read_mostly;
 EXPORT_SYMBOL(hppa_dma_ops);
 
 static struct device root = {
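
hppa_dma_ops is the single global that get_dma_ops() returns; it is installed at boot by whichever IOMMU (or PA11 fallback) probes. A sketch of that hookup, with a hypothetical probe function name (drivers/parisc/ccio-dma.c and sba_iommu.c contain the real equivalents, and arch code picks pcxl_dma_ops/pcx_dma_ops on PA11):

/* Sketch only: example_iommu_probe is a made-up name for illustration. */
static int __init example_iommu_probe(struct parisc_device *dev)
{
	hppa_dma_ops = &ccio_ops;	/* every later get_dma_ops() sees this */
	return 0;
}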
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index b9402c9b3454..a27e4928bf73 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -413,7 +413,8 @@ pcxl_dma_init(void)
 
 __initcall(pcxl_dma_init);
 
-static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
+static void *pa11_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
 {
 	unsigned long vaddr;
 	unsigned long paddr;
@@ -439,7 +440,8 @@ static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_ad
 	return (void *)vaddr;
 }
 
-static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order;
 
@@ -450,15 +452,20 @@ static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vad
 	free_pages((unsigned long)__va(dma_handle), order);
 }
 
-static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
+static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction direction, struct dma_attrs *attrs)
 {
+	void *addr = page_address(page) + offset;
 	BUG_ON(direction == DMA_NONE);
 
 	flush_kernel_dcache_range((unsigned long) addr, size);
 	return virt_to_phys(addr);
 }
 
-static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
 {
 	BUG_ON(direction == DMA_NONE);
 
@@ -475,7 +482,9 @@ static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, siz
 	return;
 }
 
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+		int nents, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
 {
 	int i;
 	struct scatterlist *sg;
@@ -492,7 +501,9 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n
 	return nents;
 }
 
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+		int nents, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
 {
 	int i;
 	struct scatterlist *sg;
@@ -509,18 +520,24 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
 	return;
 }
 
-static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+static void pa11_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 
-	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+			size);
 }
 
-static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+static void pa11_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 
-	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+			size);
 }
 
 static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
@@ -545,32 +562,28 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
 		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
 
-struct hppa_dma_ops pcxl_dma_ops = {
+struct dma_map_ops pcxl_dma_ops = {
 	.dma_supported =	pa11_dma_supported,
-	.alloc_consistent =	pa11_dma_alloc_consistent,
-	.alloc_noncoherent =	pa11_dma_alloc_consistent,
-	.free_consistent =	pa11_dma_free_consistent,
-	.map_single =		pa11_dma_map_single,
-	.unmap_single =		pa11_dma_unmap_single,
+	.alloc =		pa11_dma_alloc,
+	.free =			pa11_dma_free,
+	.map_page =		pa11_dma_map_page,
+	.unmap_page =		pa11_dma_unmap_page,
 	.map_sg =		pa11_dma_map_sg,
 	.unmap_sg =		pa11_dma_unmap_sg,
-	.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
-	.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
-	.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
-	.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
+	.sync_single_for_device = pa11_dma_sync_single_for_device,
+	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
+	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
 };
 
-static void *fail_alloc_consistent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag)
-{
-	return NULL;
-}
-
-static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag)
+static void *pcx_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
 {
 	void *addr;
 
+	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+		return NULL;
+
 	addr = (void *)__get_free_pages(flag, get_order(size));
 	if (addr)
 		*dma_handle = (dma_addr_t)virt_to_phys(addr);
@@ -578,24 +591,23 @@ static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
 	return addr;
 }
 
-static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
-					void *vaddr, dma_addr_t iova)
+static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t iova, struct dma_attrs *attrs)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
 	return;
 }
 
-struct hppa_dma_ops pcx_dma_ops = {
+struct dma_map_ops pcx_dma_ops = {
 	.dma_supported =	pa11_dma_supported,
-	.alloc_consistent =	fail_alloc_consistent,
-	.alloc_noncoherent =	pa11_dma_alloc_noncoherent,
-	.free_consistent =	pa11_dma_free_noncoherent,
-	.map_single =		pa11_dma_map_single,
-	.unmap_single =		pa11_dma_unmap_single,
+	.alloc =		pcx_dma_alloc,
+	.free =			pcx_dma_free,
+	.map_page =		pa11_dma_map_page,
+	.unmap_page =		pa11_dma_unmap_page,
 	.map_sg =		pa11_dma_map_sg,
 	.unmap_sg =		pa11_dma_unmap_sg,
-	.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
-	.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
-	.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
-	.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
+	.sync_single_for_device = pa11_dma_sync_single_for_device,
+	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
+	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
 };
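
pcx_dma_alloc() above only succeeds when DMA_ATTR_NON_CONSISTENT is passed, because PCX cannot map pages uncached for coherent DMA; with HAVE_DMA_ATTRS selected, dma_alloc_noncoherent() is the generic helper that sets exactly that attribute. Roughly how a caller reaches it, sketched from the 4.5-era asm-generic header:

/* Sketch of the generic dma_alloc_noncoherent(): it sets
 * DMA_ATTR_NON_CONSISTENT and calls ->alloc, which is the only request
 * pcx_dma_alloc() will satisfy (coherent requests return NULL). */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}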
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 8e11fb2831cd..e24b05996a1b 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -786,18 +786,27 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
 	return CCIO_IOVA(iovp, offset);
 }
 
+
+static dma_addr_t
+ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
+		size_t size, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
+{
+	return ccio_map_single(dev, page_address(page) + offset, size,
+			direction);
+}
+
+
 /**
- * ccio_unmap_single - Unmap an address range from the IOMMU.
+ * ccio_unmap_page - Unmap an address range from the IOMMU.
  * @dev: The PCI device.
  * @addr: The start address of the DMA region.
  * @size: The length of the DMA region.
  * @direction: The direction of the DMA transaction (to/from device).
- *
- * This function implements the pci_unmap_single function.
  */
 static void 
-ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
-		  enum dma_data_direction direction)
+ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+		enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	unsigned long flags;
@@ -826,7 +835,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
 }
 
 /**
- * ccio_alloc_consistent - Allocate a consistent DMA mapping.
+ * ccio_alloc - Allocate a consistent DMA mapping.
  * @dev: The PCI device.
  * @size: The length of the DMA region.
  * @dma_handle: The DMA address handed back to the device (not the cpu).
@@ -834,7 +843,8 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
  * This function implements the pci_alloc_consistent function.
  */
 static void *
-ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
+ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
+		struct dma_attrs *attrs)
 {
 	void *ret;
 #if 0
@@ -858,7 +868,7 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g
 }
 
 /**
- * ccio_free_consistent - Free a consistent DMA mapping.
+ * ccio_free - Free a consistent DMA mapping.
  * @dev: The PCI device.
  * @size: The length of the DMA region.
  * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
@@ -867,10 +877,10 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g
  * This function implements the pci_free_consistent function.
  */
 static void 
-ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
-			dma_addr_t dma_handle)
+ccio_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-	ccio_unmap_single(dev, dma_handle, size, 0);
+	ccio_unmap_page(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long)cpu_addr, get_order(size));
 }
 
@@ -897,7 +907,7 @@ ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
  */
 static int
 ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	    enum dma_data_direction direction)
+	    enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -974,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
  */
 static void 
 ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	      enum dma_data_direction direction)
+	      enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 
@@ -993,27 +1003,22 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 #ifdef CCIO_COLLECT_STATS
 		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
 #endif
-		ccio_unmap_single(dev, sg_dma_address(sglist),
-				  sg_dma_len(sglist), direction);
+		ccio_unmap_page(dev, sg_dma_address(sglist),
+				sg_dma_len(sglist), direction, NULL);
 		++sglist;
 	}
 
 	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
 }
 
-static struct hppa_dma_ops ccio_ops = {
+static struct dma_map_ops ccio_ops = {
 	.dma_supported =	ccio_dma_supported,
-	.alloc_consistent =	ccio_alloc_consistent,
-	.alloc_noncoherent =	ccio_alloc_consistent,
-	.free_consistent =	ccio_free_consistent,
-	.map_single =		ccio_map_single,
-	.unmap_single =		ccio_unmap_single,
+	.alloc =		ccio_alloc,
+	.free =			ccio_free,
+	.map_page =		ccio_map_page,
+	.unmap_page =		ccio_unmap_page,
 	.map_sg =		ccio_map_sg,
 	.unmap_sg =		ccio_unmap_sg,
-	.dma_sync_single_for_cpu =	NULL,	/* NOP for U2/Uturn */
-	.dma_sync_single_for_device =	NULL,	/* NOP for U2/Uturn */
-	.dma_sync_sg_for_cpu =		NULL,	/* ditto */
-	.dma_sync_sg_for_device =	NULL,	/* ditto */
 };
 
 #ifdef CONFIG_PROC_FS
@@ -1062,7 +1067,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
 			ioc->msingle_calls, ioc->msingle_pages,
 			(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
 
-	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
+	/* KLUGE - unmap_sg calls unmap_page for each mapped page */
 	min = ioc->usingle_calls - ioc->usg_calls;
 	max = ioc->usingle_pages - ioc->usg_pages;
 	seq_printf(m, "pci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
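
Dropping the four explicit NULL sync callbacks from ccio_ops is safe because the generic sync wrappers skip a missing callback, which preserves the old "NOP for U2/Uturn" behavior; the same holds for sba_ops below. A simplified sketch of the common-header wrapper (debug hooks omitted):

/* Simplified sketch of the generic sync wrapper: a NULL callback,
 * as on the coherent U2/Uturn and SBA paths, is simply a no-op. */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
}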
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 225049b492e5..42ec4600b7e4 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -780,8 +780,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
 }
 
 
+static dma_addr_t
+sba_map_page(struct device *dev, struct page *page, unsigned long offset,
+		size_t size, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
+{
+	return sba_map_single(dev, page_address(page) + offset, size,
+			direction);
+}
+
+
 /**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_page - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova:  IOVA of driver buffer previously mapped.
  * @size:  number of bytes mapped in driver buffer.
@@ -790,8 +800,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void
-sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
-		 enum dma_data_direction direction)
+sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+		enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -858,15 +868,15 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
 
 
 /**
- * sba_alloc_consistent - allocate/map shared mem for DMA
+ * sba_alloc - allocate/map shared mem for DMA
  * @hwdev: instance of PCI owned by the driver that's asking.
  * @size:  number of bytes mapped in driver buffer.
  * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
-static void *sba_alloc_consistent(struct device *hwdev, size_t size,
-					dma_addr_t *dma_handle, gfp_t gfp)
+static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -888,7 +898,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
 
 
 /**
- * sba_free_consistent - free/unmap shared mem for DMA
+ * sba_free - free/unmap shared mem for DMA
  * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
@@ -897,10 +907,10 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
 * See Documentation/DMA-API-HOWTO.txt
 */
 static void
-sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
-		    dma_addr_t dma_handle)
+sba_free(struct device *hwdev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-	sba_unmap_single(hwdev, dma_handle, size, 0);
+	sba_unmap_page(hwdev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -933,7 +943,7 @@ int dump_run_sg = 0;
 */
 static int
 sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	   enum dma_data_direction direction)
+	   enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1016,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 */
 static void 
 sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	     enum dma_data_direction direction)
+	     enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #ifdef ASSERT_PDIR_SANITY
@@ -1040,7 +1050,8 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 	while (sg_dma_len(sglist) && nents--) {
 
-		sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
+		sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
+				direction, NULL);
 #ifdef SBA_COLLECT_STATS
 		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
 		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
@@ -1058,19 +1069,14 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 }
 
-static struct hppa_dma_ops sba_ops = {
+static struct dma_map_ops sba_ops = {
 	.dma_supported =	sba_dma_supported,
-	.alloc_consistent =	sba_alloc_consistent,
-	.alloc_noncoherent =	sba_alloc_consistent,
-	.free_consistent =	sba_free_consistent,
-	.map_single =		sba_map_single,
-	.unmap_single =		sba_unmap_single,
+	.alloc =		sba_alloc,
+	.free =			sba_free,
+	.map_page =		sba_map_page,
+	.unmap_page =		sba_unmap_page,
 	.map_sg =		sba_map_sg,
 	.unmap_sg =		sba_unmap_sg,
-	.dma_sync_single_for_cpu =	NULL,
-	.dma_sync_single_for_device =	NULL,
-	.dma_sync_sg_for_cpu =		NULL,
-	.dma_sync_sg_for_device =	NULL,
 };
 
 