path: root/arch/tile/kernel/pci-dma.c
author	Chris Metcalf <cmetcalf@tilera.com>	2010-05-28 23:09:12 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2010-06-04 17:11:18 -0400
commit	867e359b97c970a60626d5d76bbe2a8fadbf38fb (patch)
tree	c5ccbb7f5172e8555977119608ecb1eee3cc37e3 /arch/tile/kernel/pci-dma.c
parent	5360bd776f73d0a7da571d72a09a03f237e99900 (diff)
arch/tile: core support for Tilera 32-bit chips.
This change is the core kernel support for TILEPro and TILE64 chips. No driver support (except the console driver) is included yet.

This includes the relevant Linux headers in asm/; the low-level "Tile architecture" headers in arch/, which are shared with the hypervisor, etc., and are build-system agnostic; and the relevant hypervisor headers in hv/.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/tile/kernel/pci-dma.c')
-rw-r--r--	arch/tile/kernel/pci-dma.c	252
1 files changed, 252 insertions, 0 deletions
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
new file mode 100644
index 00000000000..1d456404f06
--- /dev/null
+++ b/arch/tile/kernel/pci-dma.c
@@ -0,0 +1,252 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory, which for us just
 * means uncached.
 */
void *dma_alloc_coherent(struct device *dev,
			 size_t size,
			 dma_addr_t *dma_handle,
			 gfp_t gfp)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	/* Set GFP_KERNEL to ensure we have memory with a kernel VA. */
	gfp |= GFP_KERNEL | __GFP_ZERO;

	/*
	 * By forcing NUMA node 0 for 32-bit masks we ensure that the
	 * high 32 bits of the resulting PA will be zero.  If the mask
	 * size is, e.g., 24, we may still not be able to guarantee a
	 * suitable memory address, in which case we will return NULL.
	 * But such devices are uncommon.
	 */
	if (dma_mask <= DMA_BIT_MASK(32))
		node = 0;

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);
	if (addr + size > dma_mask) {
		/* Free via the kernel VA; homecache_free_pages takes a VA,
		 * as in dma_free_coherent() below. */
		homecache_free_pages((unsigned long)page_address(pg), order);
		return NULL;
	}

	*dma_handle = addr;
	return page_address(pg);
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Free memory that was allocated with dma_alloc_coherent.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
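A minimal usage sketch of the coherent API above, assuming a hypothetical driver; the foo_* names and the ring size are illustrative, not from this commit:

/* Hypothetical driver-side pairing of dma_alloc_coherent/dma_free_coherent. */
struct foo_ring {
	void *descs;		/* kernel VA of the descriptor ring */
	dma_addr_t descs_dma;	/* bus address to program into the device */
	size_t len;
};

static int foo_ring_init(struct device *dev, struct foo_ring *ring)
{
	ring->len = 64 * 32;	/* 64 descriptors of 32 bytes: illustrative */
	ring->descs = dma_alloc_coherent(dev, ring->len,
					 &ring->descs_dma, GFP_KERNEL);
	if (!ring->descs)
		return -ENOMEM;
	/* The memory is uncached on tile, so no sync calls are needed. */
	return 0;
}

static void foo_ring_fini(struct device *dev, struct foo_ring *ring)
{
	dma_free_coherent(dev, ring->len, ring->descs, ring->descs_dma);
}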

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping; we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory.  We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */


/*
 * dma_map_single can be passed any memory address, and there appear
 * to be no alignment constraints.
 *
 * There is a chance that the start of the buffer will share a cache
 * line with some other data that has been touched in the meantime.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	struct page *page;
	dma_addr_t dma_addr;
	int thispage;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);

	dma_addr = __pa(ptr);

	/* We might have been handed a buffer that spans a page boundary. */
	while ((int)size > 0) {
		/* The amount to flush that's on this page */
		thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1));
		thispage = min((int)thispage, (int)size);
		/* Is this valid for any page we could be handed? */
		page = pfn_to_page(kaddr_to_pfn(ptr));
		homecache_flush_cache(page, 0);
		ptr += thispage;
		size -= thispage;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_single);
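A sketch of the streaming single-buffer pattern these two routines implement, again with hypothetical foo_* names; per the comment block above, the CPU must not touch the buffer between map and unmap:

/* Hypothetical transmit path using dma_map_single/dma_unmap_single. */
static void foo_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... hand "handle" to the device and wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}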

int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		struct page *page;
		sg->dma_address = sg_phys(sg);
		page = pfn_to_page(sg->dma_address >> PAGE_SHIFT);
		homecache_flush_cache(page, 0);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_sg);
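The scatter/gather variant follows the same pattern; a hedged sketch, assuming the hypothetical caller has already populated a scatterlist:

/* Hypothetical scatter/gather mapping of a pre-built scatterlist. */
static int foo_map_request(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	struct scatterlist *sg;
	int count, i;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -EIO;
	for_each_sg(sgl, sg, count, i) {
		/* Program sg->dma_address and sg_dma_len(sg) into the device. */
	}
	/* ... later, once the device is done: */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}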

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	homecache_flush_cache(page, 0);

	return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_page);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
	unsigned long start = PFN_DOWN(dma_handle);
	unsigned long end = PFN_DOWN(dma_handle + size - 1);
	unsigned long i;

	BUG_ON(!valid_dma_direction(direction));
	for (i = start; i <= end; ++i)
		homecache_flush_cache(pfn_to_page(i), 0);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
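For a long-lived streaming mapping that the CPU and the device take turns touching, the sync calls bracket each change of ownership. A minimal sketch with hypothetical names:

/* Hypothetical reuse of a persistent streaming receive buffer. */
static void foo_rx_poll(struct device *dev, dma_addr_t handle,
			void *buf, size_t len)
{
	/* Give the buffer back to the CPU before reading it. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... inspect buf ... */

	/* Hand it back to the device for the next receive. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}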

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			 int nelems, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sg[0].length == 0);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/*
 * Flush and invalidate cache for scatterlist.
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
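The same ownership-handoff pattern applies to a persistent scatterlist; a short sketch, again with hypothetical names:

/* Hypothetical per-iteration resync of a persistent scatterlist. */
static void foo_requeue(struct device *dev, struct scatterlist *sgl,
			int nelems)
{
	dma_sync_sg_for_cpu(dev, sgl, nelems, DMA_FROM_DEVICE);
	/* ... CPU consumes the received data ... */
	dma_sync_sg_for_device(dev, sgl, nelems, DMA_FROM_DEVICE);
}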

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
 * need to do any flushing here.
 */
void dma_cache_sync(void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);