author     Ralf Baechle <ralf@linux-mips.org>   2006-11-15 21:56:12 -0500
committer  Ralf Baechle <ralf@linux-mips.org>   2007-02-13 17:40:50 -0500
commit     9a88cbb5227970757881b1a65be01dea61fe2584 (patch)
tree       2a1e6934ae253e75141efd383ae9df7042452d7c /arch/mips
parent     f65e4fa8e0c6022ad58dc88d1b11b12589ed7f9f (diff)
[MIPS] Unify dma-{coherent,noncoherent,ip27,ip32}
Platforms will now have to supply a function plat_device_is_coherent which
returns whether a particular device participates in the coherence domain.
For most platforms this function will be a constant, returning either 0 for
all devices or 1 for all devices.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
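The per-platform hooks named in the diff below (plat_device_is_coherent,
plat_map_dma_mem, plat_map_dma_mem_page, plat_dma_addr_to_phys and
plat_unmap_dma_mem) are pulled in through a per-platform <dma-coherence.h>.
Here is a minimal sketch of what a fully coherent platform might supply:
the hook names and signatures follow their uses in dma-default.c below,
while the header-guard name and the identity device/physical address
mapping are illustrative assumptions, not part of this patch.

#ifndef __ASM_MACH_MYBOARD_DMA_COHERENCE_H
#define __ASM_MACH_MYBOARD_DMA_COHERENCE_H

struct device;
struct page;

static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
        size_t size)
{
        return virt_to_phys(addr);      /* devices see physical addresses */
}

static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
        struct page *page)
{
        return page_to_phys(page);
}

static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
{
        return dma_addr;                /* identity mapping assumed */
}

static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
{
        /* nothing to tear down for a static mapping */
}

static inline int plat_device_is_coherent(struct device *dev)
{
        return 1;       /* hardware keeps caches coherent for all devices */
}

#endif /* __ASM_MACH_MYBOARD_DMA_COHERENCE_H */

A noncoherent platform would instead return 0 from plat_device_is_coherent()
and let the generic dma-default.c code do the cache writeback and
invalidation around each mapping.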
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                                                        |    5
-rw-r--r--  arch/mips/mm/Makefile                                                    |   14
-rw-r--r--  arch/mips/mm/dma-coherent.c                                              |  254
-rw-r--r--  arch/mips/mm/dma-default.c (renamed from arch/mips/mm/dma-noncoherent.c) |  209
-rw-r--r--  arch/mips/mm/dma-ip27.c                                                  |  257
-rw-r--r--  arch/mips/mm/dma-ip32.c                                                  |  383
-rw-r--r--  arch/mips/pci/Makefile                                                   |    2
-rw-r--r--  arch/mips/pci/pci-dac.c                                                  |   79
8 files changed, 183 insertions(+), 1020 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5fe195a41a80..a92ce6bd7cf1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -598,8 +598,6 @@ config SGI_IP32
        select ARC
        select ARC32
        select BOOT_ELF32
-       select OWN_DMA
-       select DMA_IP32
        select DMA_NONCOHERENT
        select HW_HAS_PCI
        select R5000_CPU_SCACHE
@@ -883,9 +881,6 @@ config DMA_NONCOHERENT
 config DMA_NEED_PCI_MAP_STATE
        bool
 
-config OWN_DMA
-       bool
-
 config EARLY_PRINTK
        bool
 
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 19e41fd186c4..de5727385bc6 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -2,8 +2,8 @@
 # Makefile for the Linux/MIPS-specific parts of the memory manager.
 #
 
-obj-y                          += cache.o extable.o fault.o init.o pgtable.o \
-                                  tlbex.o tlbex-fault.o
+obj-y                          += cache.o dma-default.o extable.o fault.o \
+                                  init.o pgtable.o tlbex.o tlbex-fault.o
 
 obj-$(CONFIG_32BIT)            += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
@@ -32,14 +32,4 @@ obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE)        += sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)  += sc-mips.o
 
-#
-# Choose one DMA coherency model
-#
-ifndef CONFIG_OWN_DMA
-obj-$(CONFIG_DMA_COHERENT)     += dma-coherent.o
-obj-$(CONFIG_DMA_NONCOHERENT)  += dma-noncoherent.o
-endif
-obj-$(CONFIG_DMA_IP27)         += dma-ip27.o
-obj-$(CONFIG_DMA_IP32)         += dma-ip32.o
-
 EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
deleted file mode 100644
index 5697c6e250a3..000000000000
--- a/arch/mips/mm/dma-coherent.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
- * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
- */
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, gfp_t gfp)
-{
-       void *ret;
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
-       ret = (void *) __get_free_pages(gfp, get_order(size));
-
-       if (ret != NULL) {
-               memset(ret, 0, size);
-               *dma_handle = virt_to_phys(ret);
-       }
-
-       return ret;
-}
-
-EXPORT_SYMBOL(dma_alloc_noncoherent);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, gfp_t gfp)
-       __attribute__((alias("dma_alloc_noncoherent")));
-
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-       dma_addr_t dma_handle)
-{
-       unsigned long addr = (unsigned long) vaddr;
-
-       free_pages(addr, get_order(size));
-}
-
-EXPORT_SYMBOL(dma_free_noncoherent);
-
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-       dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));
-
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       return __pa(ptr);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-       enum dma_data_direction direction)
-{
-       int i;
-
-       BUG_ON(direction == DMA_NONE);
-
-       for (i = 0; i < nents; i++, sg++) {
-               sg->dma_address = (dma_addr_t)page_to_phys(sg->page) + sg->offset;
-       }
-
-       return nents;
-}
-
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-       unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       return page_to_phys(page) + offset;
-}
-
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_unmap_page);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-       size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-       size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-       unsigned long offset, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-       unsigned long offset, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-int dma_mapping_error(dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < 0x00ffffff)
-               return 0;
-
-       return 1;
-}
-
-EXPORT_SYMBOL(dma_supported);
-
-int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
-{
-       return 1;
-}
-
-EXPORT_SYMBOL(dma_is_consistent);
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_cache_sync);
-
-/* The DAC routines are a PCIism.. */
-
-#ifdef CONFIG_PCI
-
-#include <linux/pci.h>
-
-dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
-       struct page *page, unsigned long offset, int direction)
-{
-       return (dma64_addr_t)page_to_phys(page) + offset;
-}
-
-EXPORT_SYMBOL(pci_dac_page_to_dma);
-
-struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
-       dma64_addr_t dma_addr)
-{
-       return mem_map + (dma_addr >> PAGE_SHIFT);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_page);
-
-unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
-       dma64_addr_t dma_addr)
-{
-       return dma_addr & ~PAGE_MASK;
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_offset);
-
-void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
-       dma64_addr_t dma_addr, size_t len, int direction)
-{
-       BUG_ON(direction == PCI_DMA_NONE);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
-
-void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
-       dma64_addr_t dma_addr, size_t len, int direction)
-{
-       BUG_ON(direction == PCI_DMA_NONE);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
-
-#endif /* CONFIG_PCI */
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-default.c
rename from arch/mips/mm/dma-noncoherent.c
rename to arch/mips/mm/dma-default.c
index 8cecef0957c3..4a32e939698f 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-default.c
@@ -4,28 +4,39 @@
  * for more details.
  *
  * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  */
+
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/string.h>
-#include <linux/dma-mapping.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
 
+#include <dma-coherence.h>
+
 /*
  * Warning on the terminology - Linux calls an uncached area coherent;
  * MIPS terminology calls memory areas with hardware maintained coherency
  * coherent.
  */
 
+static inline int cpu_is_noncoherent_r10000(struct device *dev)
+{
+       return !plat_device_is_coherent(dev) &&
+              (current_cpu_data.cputype == CPU_R10000 ||
+               current_cpu_data.cputype == CPU_R12000);
+}
+
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
 {
        void *ret;
+
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
@@ -35,7 +46,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 
        if (ret != NULL) {
                memset(ret, 0, size);
-               *dma_handle = virt_to_phys(ret);
+               *dma_handle = plat_map_dma_mem(dev, ret, size);
        }
 
        return ret;
@@ -48,10 +59,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 {
        void *ret;
 
-       ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
+       /* ignore region specifiers */
+       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+               gfp |= GFP_DMA;
+       ret = (void *) __get_free_pages(gfp, get_order(size));
+
        if (ret) {
-               dma_cache_wback_inv((unsigned long) ret, size);
-               ret = UNCAC_ADDR(ret);
+               memset(ret, 0, size);
+               *dma_handle = plat_map_dma_mem(dev, ret, size);
+
+               if (!plat_device_is_coherent(dev)) {
+                       dma_cache_wback_inv((unsigned long) ret, size);
+                       ret = UNCAC_ADDR(ret);
+               }
        }
 
        return ret;
@@ -72,7 +94,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 {
        unsigned long addr = (unsigned long) vaddr;
 
-       addr = CAC_ADDR(addr);
+       if (!plat_device_is_coherent(dev))
+               addr = CAC_ADDR(addr);
+
        free_pages(addr, get_order(size));
 }
 
@@ -104,9 +128,10 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 {
        unsigned long addr = (unsigned long) ptr;
 
-       __dma_sync(addr, size, direction);
+       if (!plat_device_is_coherent(dev))
+               __dma_sync(addr, size, direction);
 
-       return virt_to_phys(ptr);
+       return plat_map_dma_mem(dev, ptr, size);
 }
 
 EXPORT_SYMBOL(dma_map_single);
@@ -114,10 +139,11 @@ EXPORT_SYMBOL(dma_map_single);
 void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
 {
-       unsigned long addr;
-       addr = dma_addr + PAGE_OFFSET;
+       if (cpu_is_noncoherent_r10000(dev))
+               __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size,
+                       direction);
 
-       //__dma_sync(addr, size, direction);
+       plat_unmap_dma_mem(dma_addr);
 }
 
 EXPORT_SYMBOL(dma_unmap_single);
@@ -133,11 +159,10 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                unsigned long addr;
 
                addr = (unsigned long) page_address(sg->page);
-               if (addr) {
+               if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr + sg->offset, sg->length, direction);
-                       sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
-                                         + sg->offset;
-               }
+               sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
+                                 sg->offset;
        }
 
        return nents;
@@ -148,14 +173,16 @@ EXPORT_SYMBOL(dma_map_sg);
 dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
 {
-       unsigned long addr;
-
        BUG_ON(direction == DMA_NONE);
 
-       addr = (unsigned long) page_address(page) + offset;
-       dma_cache_wback_inv(addr, size);
+       if (!plat_device_is_coherent(dev)) {
+               unsigned long addr;
+
+               addr = (unsigned long) page_address(page) + offset;
+               dma_cache_wback_inv(addr, size);
+       }
 
-       return page_to_phys(page) + offset;
+       return plat_map_dma_mem_page(dev, page) + offset;
 }
 
 EXPORT_SYMBOL(dma_map_page);
@@ -165,12 +192,14 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 {
        BUG_ON(direction == DMA_NONE);
 
-       if (direction != DMA_TO_DEVICE) {
+       if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;
 
-               addr = dma_address + PAGE_OFFSET;
+               addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }
+
+       plat_unmap_dma_mem(dma_address);
 }
 
 EXPORT_SYMBOL(dma_unmap_page);
@@ -183,13 +212,15 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 
        BUG_ON(direction == DMA_NONE);
 
-       if (direction == DMA_TO_DEVICE)
-               return;
-
        for (i = 0; i < nhwentries; i++, sg++) {
-               addr = (unsigned long) page_address(sg->page);
-               if (addr)
-                       __dma_sync(addr + sg->offset, sg->length, direction);
+               if (!plat_device_is_coherent(dev) &&
+                   direction != DMA_TO_DEVICE) {
+                       addr = (unsigned long) page_address(sg->page);
+                       if (addr)
+                               __dma_sync(addr + sg->offset, sg->length,
+                                       direction);
+               }
+               plat_unmap_dma_mem(sg->dma_address);
        }
 }
 
@@ -198,12 +229,14 @@ EXPORT_SYMBOL(dma_unmap_sg);
 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
 {
-       unsigned long addr;
-
        BUG_ON(direction == DMA_NONE);
 
-       addr = dma_handle + PAGE_OFFSET;
-       __dma_sync(addr, size, direction);
+       if (cpu_is_noncoherent_r10000(dev)) {
+               unsigned long addr;
+
+               addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+               __dma_sync(addr, size, direction);
+       }
 }
 
 EXPORT_SYMBOL(dma_sync_single_for_cpu);
@@ -211,12 +244,14 @@ EXPORT_SYMBOL(dma_sync_single_for_cpu);
 void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
 {
-       unsigned long addr;
-
       BUG_ON(direction == DMA_NONE);
 
-       addr = dma_handle + PAGE_OFFSET;
-       __dma_sync(addr, size, direction);
+       if (cpu_is_noncoherent_r10000(dev)) {
+               unsigned long addr;
+
+               addr = plat_dma_addr_to_phys(dma_handle);
+               __dma_sync(addr, size, direction);
+       }
 }
 
 EXPORT_SYMBOL(dma_sync_single_for_device);
@@ -224,12 +259,14 @@ EXPORT_SYMBOL(dma_sync_single_for_device);
 void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
 {
-       unsigned long addr;
-
        BUG_ON(direction == DMA_NONE);
 
-       addr = dma_handle + offset + PAGE_OFFSET;
-       __dma_sync(addr, size, direction);
+       if (cpu_is_noncoherent_r10000(dev)) {
+               unsigned long addr;
+
+               addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+               __dma_sync(addr + offset, size, direction);
+       }
 }
 
 EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
@@ -237,12 +274,14 @@ EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
 void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
 {
-       unsigned long addr;
-
        BUG_ON(direction == DMA_NONE);
 
-       addr = dma_handle + offset + PAGE_OFFSET;
-       __dma_sync(addr, size, direction);
+       if (cpu_is_noncoherent_r10000(dev)) {
+               unsigned long addr;
+
+               addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+               __dma_sync(addr + offset, size, direction);
+       }
 }
 
 EXPORT_SYMBOL(dma_sync_single_range_for_device);
@@ -255,9 +294,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        BUG_ON(direction == DMA_NONE);
 
        /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++)
-               __dma_sync((unsigned long)page_address(sg->page),
-                       sg->length, direction);
+       for (i = 0; i < nelems; i++, sg++) {
+               if (!plat_device_is_coherent(dev))
+                       __dma_sync((unsigned long)page_address(sg->page),
+                               sg->length, direction);
+               plat_unmap_dma_mem(sg->dma_address);
+       }
 }
 
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -270,9 +312,12 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        BUG_ON(direction == DMA_NONE);
 
        /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++)
-               __dma_sync((unsigned long)page_address(sg->page),
-                       sg->length, direction);
+       for (i = 0; i < nelems; i++, sg++) {
+               if (!plat_device_is_coherent(dev))
+                       __dma_sync((unsigned long)page_address(sg->page),
+                               sg->length, direction);
+               plat_unmap_dma_mem(sg->dma_address);
+       }
 }
 
 EXPORT_SYMBOL(dma_sync_sg_for_device);
@@ -301,70 +346,18 @@ EXPORT_SYMBOL(dma_supported);
 
 int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
-       return 1;
+       return plat_device_is_coherent(dev);
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
 {
-       if (direction == DMA_NONE)
-               return;
+       BUG_ON(direction == DMA_NONE);
 
-       dma_cache_wback_inv((unsigned long)vaddr, size);
+       if (!plat_device_is_coherent(dev))
+               dma_cache_wback_inv((unsigned long)vaddr, size);
 }
 
 EXPORT_SYMBOL(dma_cache_sync);
-
-/* The DAC routines are a PCIism.. */
-
-#ifdef CONFIG_PCI
-
-#include <linux/pci.h>
-
-dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
-       struct page *page, unsigned long offset, int direction)
-{
-       return (dma64_addr_t)page_to_phys(page) + offset;
-}
-
-EXPORT_SYMBOL(pci_dac_page_to_dma);
-
-struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
-       dma64_addr_t dma_addr)
-{
-       return mem_map + (dma_addr >> PAGE_SHIFT);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_page);
-
-unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
-       dma64_addr_t dma_addr)
-{
-       return dma_addr & ~PAGE_MASK;
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_offset);
-
-void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
-       dma64_addr_t dma_addr, size_t len, int direction)
-{
-       BUG_ON(direction == PCI_DMA_NONE);
-
-       dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
-
-void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
-       dma64_addr_t dma_addr, size_t len, int direction)
-{
-       BUG_ON(direction == PCI_DMA_NONE);
-
-       dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
-
-#endif /* CONFIG_PCI */
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c
deleted file mode 100644
index f088344db465..000000000000
--- a/arch/mips/mm/dma-ip27.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
- * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
- */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-
-#include <asm/cache.h>
-#include <asm/pci/bridge.h>
-
-#define pdev_to_baddr(pdev, addr) \
-       (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
-#define dev_to_baddr(dev, addr) \
-       pdev_to_baddr(to_pci_dev(dev), (addr))
-
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, gfp_t gfp)
-{
-       void *ret;
-
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
-       ret = (void *) __get_free_pages(gfp, get_order(size));
-
-       if (ret != NULL) {
-               memset(ret, 0, size);
-               *dma_handle = dev_to_baddr(dev, virt_to_phys(ret));
-       }
-
-       return ret;
-}
-
-EXPORT_SYMBOL(dma_alloc_noncoherent);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, gfp_t gfp)
-       __attribute__((alias("dma_alloc_noncoherent")));
-
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-       dma_addr_t dma_handle)
-{
-       unsigned long addr = (unsigned long) vaddr;
-
-       free_pages(addr, get_order(size));
-}
-
-EXPORT_SYMBOL(dma_free_noncoherent);
-
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-       dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));
-
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       return dev_to_baddr(dev, __pa(ptr));
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-       enum dma_data_direction direction)
-{
-       int i;
-
-       BUG_ON(direction == DMA_NONE);
-
-       for (i = 0; i < nents; i++, sg++) {
-               sg->dma_address = (dma_addr_t) dev_to_baddr(dev,
-                       page_to_phys(sg->page) + sg->offset);
-       }
-
-       return nents;
-}
-
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-       unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       return dev_to_baddr(dev, page_to_phys(page) + offset);
-}
-
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_unmap_page);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-       unsigned long offset, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-       unsigned long offset, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-int dma_mapping_error(dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < 0x00ffffff)
-               return 0;
-
-       return 1;
-}
-
-EXPORT_SYMBOL(dma_supported);
-
-int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
-{
-       return 1;
-}
-
-EXPORT_SYMBOL(dma_is_consistent);
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_cache_sync);
-
-dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
-       struct page *page, unsigned long offset, int direction)
-{
-       dma64_addr_t addr = page_to_phys(page) + offset;
-
-       return (dma64_addr_t) pdev_to_baddr(pdev, addr);
-}
-
-EXPORT_SYMBOL(pci_dac_page_to_dma);
-
-struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
-       dma64_addr_t dma_addr)
-{
-       struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
-
-       return pfn_to_page((dma_addr - bc->baddr) >> PAGE_SHIFT);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_page);
-
-unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
-       dma64_addr_t dma_addr)
-{
-       return dma_addr & ~PAGE_MASK;
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_offset);
-
-void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
-       dma64_addr_t dma_addr, size_t len, int direction)
-{
-       BUG_ON(direction == PCI_DMA_NONE);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
-
-void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
-       dma64_addr_t dma_addr, size_t len, int direction)
-{
-       BUG_ON(direction == PCI_DMA_NONE);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
deleted file mode 100644
index b42b6f7456e6..000000000000
--- a/arch/mips/mm/dma-ip32.c
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
- * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
- * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
- * IP32 changes by Ilya.
- */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/ip32/crime.h>
-
-/*
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- */
-
-/*
- * Few notes.
- * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
- * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for native-endian)
- * 3. All other devices see memory as one big chunk at 0x40000000
- * 4. Non-PCI devices will pass NULL as struct device*
- * Thus we translate differently, depending on device.
- */
-
-#define RAM_OFFSET_MASK 0x3fffffff
-
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, gfp_t gfp)
-{
-       void *ret;
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
-       ret = (void *) __get_free_pages(gfp, get_order(size));
-
-       if (ret != NULL) {
-               unsigned long addr = virt_to_phys(ret)&RAM_OFFSET_MASK;
-               memset(ret, 0, size);
-               if(dev==NULL)
-                       addr+= CRIME_HI_MEM_BASE;
-               *dma_handle = addr;
-       }
-
-       return ret;
-}
-
-EXPORT_SYMBOL(dma_alloc_noncoherent);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-       dma_addr_t * dma_handle, gfp_t gfp)
-{
-       void *ret;
-
-       ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
-       if (ret) {
-               dma_cache_wback_inv((unsigned long) ret, size);
-               ret = UNCAC_ADDR(ret);
-       }
-
-       return ret;
-}
-
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-       dma_addr_t dma_handle)
-{
-       free_pages((unsigned long) vaddr, get_order(size));
-}
-
-EXPORT_SYMBOL(dma_free_noncoherent);
-
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-       dma_addr_t dma_handle)
-{
-       unsigned long addr = (unsigned long) vaddr;
-
-       addr = CAC_ADDR(addr);
-       free_pages(addr, get_order(size));
-}
-
-EXPORT_SYMBOL(dma_free_coherent);
-
-static inline void __dma_sync(unsigned long addr, size_t size,
-       enum dma_data_direction direction)
-{
-       switch (direction) {
-       case DMA_TO_DEVICE:
-               dma_cache_wback(addr, size);
-               break;
-
-       case DMA_FROM_DEVICE:
-               dma_cache_inv(addr, size);
-               break;
-
-       case DMA_BIDIRECTIONAL:
-               dma_cache_wback_inv(addr, size);
-               break;
-
-       default:
-               BUG();
-       }
-}
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-       enum dma_data_direction direction)
-{
-       unsigned long addr = (unsigned long) ptr;
-
-       switch (direction) {
-       case DMA_TO_DEVICE:
-               dma_cache_wback(addr, size);
-               break;
-
-       case DMA_FROM_DEVICE:
-               dma_cache_inv(addr, size);
-               break;
-
-       case DMA_BIDIRECTIONAL:
-               dma_cache_wback_inv(addr, size);
-               break;
-
-       default:
-               BUG();
-       }
-
-       addr = virt_to_phys(ptr)&RAM_OFFSET_MASK;
-       if(dev == NULL)
-               addr+=CRIME_HI_MEM_BASE;
-       return (dma_addr_t)addr;
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-       enum dma_data_direction direction)
-{
-       switch (direction) {
-       case DMA_TO_DEVICE:
-               break;
-
-       case DMA_FROM_DEVICE:
-               break;
-
-       case DMA_BIDIRECTIONAL:
-               break;
-
-       default:
-               BUG();
-       }
-}
-
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-       enum dma_data_direction direction)
-{
-       int i;
-
-       BUG_ON(direction == DMA_NONE);
-
-       for (i = 0; i < nents; i++, sg++) {
-               unsigned long addr;
-
-               addr = (unsigned long) page_address(sg->page)+sg->offset;
-               if (addr)
-                       __dma_sync(addr, sg->length, direction);
-               addr = __pa(addr)&RAM_OFFSET_MASK;
-               if(dev == NULL)
-                       addr += CRIME_HI_MEM_BASE;
-               sg->dma_address = (dma_addr_t)addr;
-       }
-
-       return nents;
-}
-
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-       unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-       unsigned long addr;
-
-       BUG_ON(direction == DMA_NONE);
-
-       addr = (unsigned long) page_address(page) + offset;
-       dma_cache_wback_inv(addr, size);
-       addr = __pa(addr)&RAM_OFFSET_MASK;
-       if(dev == NULL)
-               addr += CRIME_HI_MEM_BASE;
-
-       return (dma_addr_t)addr;
-}
-
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-       enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       if (direction != DMA_TO_DEVICE) {
-               unsigned long addr;
-
-               dma_address&=RAM_OFFSET_MASK;
-               addr = dma_address + PAGE_OFFSET;
-               if(dma_address>=256*1024*1024)
-                       addr+=CRIME_HI_MEM_BASE;
-               dma_cache_wback_inv(addr, size);
-       }
-}
-
-EXPORT_SYMBOL(dma_unmap_page);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-       enum dma_data_direction direction)
-{
-       unsigned long addr;
-       int i;
-
-       BUG_ON(direction == DMA_NONE);
-
-       if (direction == DMA_TO_DEVICE)
-               return;
-
-       for (i = 0; i < nhwentries; i++, sg++) {
-               addr = (unsigned long) page_address(sg->page);
-               if (!addr)
-                       continue;
-               dma_cache_wback_inv(addr + sg->offset, sg->length);
-       }
-}
-
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-       size_t size, enum dma_data_direction direction)
-{
-       unsigned long addr;
-
-       BUG_ON(direction == DMA_NONE);
-
-       dma_handle&=RAM_OFFSET_MASK;
-       addr = dma_handle + PAGE_OFFSET;
-       if(dma_handle>=256*1024*1024)
-               addr+=CRIME_HI_MEM_BASE;
-       __dma_sync(addr, size, direction);
-}
-
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-       size_t size, enum dma_data_direction direction)
-{
-       unsigned long addr;
-
-       BUG_ON(direction == DMA_NONE);
-
-       dma_handle&=RAM_OFFSET_MASK;
-       addr = dma_handle + PAGE_OFFSET;
-       if(dma_handle>=256*1024*1024)
-               addr+=CRIME_HI_MEM_BASE;
-       __dma_sync(addr, size, direction);
-}
-
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-       unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-       unsigned long addr;
-
-       BUG_ON(direction == DMA_NONE);
-
-       dma_handle&=RAM_OFFSET_MASK;
-       addr = dma_handle + offset + PAGE_OFFSET;
-       if(dma_handle>=256*1024*1024)
-               addr+=CRIME_HI_MEM_BASE;
-       __dma_sync(addr, size, direction);
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-       unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-       unsigned long addr;
-
-       BUG_ON(direction == DMA_NONE);
-
-       dma_handle&=RAM_OFFSET_MASK;
-       addr = dma_handle + offset + PAGE_OFFSET;
-       if(dma_handle>=256*1024*1024)
-               addr+=CRIME_HI_MEM_BASE;
-       __dma_sync(addr, size, direction);
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-       enum dma_data_direction direction)
-{
-       int i;
-
-       BUG_ON(direction == DMA_NONE);
-
-       /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++)
-               __dma_sync((unsigned long)page_address(sg->page),
-                       sg->length, direction);
-}
-
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-       enum dma_data_direction direction)
-{
-       int i;
-
-       BUG_ON(direction == DMA_NONE);
-
-       /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++)
-               __dma_sync((unsigned long)page_address(sg->page),
-                       sg->length, direction);
-}
-
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-int dma_mapping_error(dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < 0x00ffffff)
-               return 0;
-
-       return 1;
-}
-
-EXPORT_SYMBOL(dma_supported);
-
-int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
-{
-       return 1;
-}
-
-EXPORT_SYMBOL(dma_is_consistent);
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-       enum dma_data_direction direction)
-{
-       if (direction == DMA_NONE)
-               return;
-
-       dma_cache_wback_inv((unsigned long)vaddr, size);
-}
-
-EXPORT_SYMBOL(dma_cache_sync);
-
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 82b20c28bef8..bf85995ca042 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the PCI specific kernel interface routines under Linux.
 #
 
-obj-y                          += pci.o
+obj-y                          += pci.o pci-dac.o
 
 #
 # PCI bus host bridge specific code
diff --git a/arch/mips/pci/pci-dac.c b/arch/mips/pci/pci-dac.c
new file mode 100644
index 000000000000..0f0ea1b7d4dd
--- /dev/null
+++ b/arch/mips/pci/pci-dac.c
@@ -0,0 +1,79 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+
+#include <dma-coherence.h>
+
+#include <linux/pci.h>
+
+dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
+       struct page *page, unsigned long offset, int direction)
+{
+       struct device *dev = &pdev->dev;
+
+       BUG_ON(direction == DMA_NONE);
+
+       if (!plat_device_is_coherent(dev)) {
+               unsigned long addr;
+
+               addr = (unsigned long) page_address(page) + offset;
+               dma_cache_wback_inv(addr, PAGE_SIZE);
+       }
+
+       return plat_map_dma_mem_page(dev, page) + offset;
+}
+
+EXPORT_SYMBOL(pci_dac_page_to_dma);
+
+struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
+       dma64_addr_t dma_addr)
+{
+       return pfn_to_page(plat_dma_addr_to_phys(dma_addr) >> PAGE_SHIFT);
+}
+
+EXPORT_SYMBOL(pci_dac_dma_to_page);
+
+unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
+       dma64_addr_t dma_addr)
+{
+       return dma_addr & ~PAGE_MASK;
+}
+
+EXPORT_SYMBOL(pci_dac_dma_to_offset);
+
+void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
+       dma64_addr_t dma_addr, size_t len, int direction)
+{
+       BUG_ON(direction == PCI_DMA_NONE);
+
+       if (!plat_device_is_coherent(&pdev->dev))
+               dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
+}
+
+EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
+
+void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
+       dma64_addr_t dma_addr, size_t len, int direction)
+{
+       BUG_ON(direction == PCI_DMA_NONE);
+
+       if (!plat_device_is_coherent(&pdev->dev))
+               dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
+}
+
+EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
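
For contrast with the identity mapping sketched near the top, the
bridge-address translation that the deleted dma-ip27.c open-coded fits the
same hooks. A sketch under stated assumptions: the BRIDGE_CONTROLLER()/baddr
macros and the virt_to_phys()-plus-baddr translation are taken from the
removed file, but presenting this as IP27's actual <dma-coherence.h> is an
assumption, and the reverse translation (subtracting bc->baddr as the
removed pci_dac_dma_to_page() did) is omitted since it needs the bridge,
not just the bare DMA address.

#include <asm/pci/bridge.h>

#define pdev_to_baddr(pdev, addr) \
        (BRIDGE_CONTROLLER((pdev)->bus)->baddr + (addr))
#define dev_to_baddr(dev, addr) \
        pdev_to_baddr(to_pci_dev(dev), (addr))

static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
        size_t size)
{
        /* same translation the removed dma_map_single() performed by hand */
        return dev_to_baddr(dev, virt_to_phys(addr));
}

static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
        struct page *page)
{
        return dev_to_baddr(dev, page_to_phys(page));
}

static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
{
}

static inline int plat_device_is_coherent(struct device *dev)
{
        /*
         * The removed dma-ip27.c never touched the caches in its map or
         * sync routines, i.e. IP27 DMA is coherent in hardware.
         */
        return 1;
}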