Diffstat (limited to 'arch/csky/mm/dma-mapping.c')
-rw-r--r--	arch/csky/mm/dma-mapping.c	254
1 file changed, 254 insertions, 0 deletions
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
new file mode 100644
index 000000000000..85437b21e045
--- /dev/null
+++ b/arch/csky/mm/dma-mapping.c
@@ -0,0 +1,254 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/version.h>
#include <asm/cache.h>

static struct gen_pool *atomic_pool;
static size_t atomic_pool_size __initdata = SZ_256K;

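/* "coherent_pool=<size>" on the kernel command line resizes the atomic pool. */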
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

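/*
 * Build a small gen_pool of uncached memory at boot so that atomic
 * (non-blocking) coherent allocations can be served later without
 * allocating or remapping pages.
 */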
static int __init atomic_pool_init(void)
{
	struct page *page;
	size_t size = atomic_pool_size;
	void *ptr;
	int ret;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		BUG();

	page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size));
	if (!page)
		BUG();

	ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
					  pgprot_noncached(PAGE_KERNEL),
					  __builtin_return_address(0));
	if (!ptr)
		BUG();

	ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
				page_to_phys(page), atomic_pool_size, -1);
	if (ret)
		BUG();

	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		atomic_pool_size / 1024);

	pr_info("DMA: vaddr: 0x%lx phys: 0x%lx\n", (unsigned long)ptr,
		(unsigned long)page_to_phys(page));

	return 0;
}
postcore_initcall(atomic_pool_init);

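/*
 * Non-blocking coherent allocation: carve a chunk out of the pre-built
 * uncached pool instead of allocating and remapping pages.
 */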
static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
				   dma_addr_t *dma_handle)
{
	unsigned long addr;

	addr = gen_pool_alloc(atomic_pool, size);
	if (addr)
		*dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);

	return (void *)addr;
}

static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
				 dma_addr_t dma_handle, unsigned long attrs)
{
	gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
}

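/*
 * Zero a freshly allocated buffer through its cacheable mapping, then
 * write back and invalidate those lines so no dirty data can later be
 * evicted on top of what is written through the uncached mapping or by
 * the device.
 */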
static void __dma_clear_buffer(struct page *page, size_t size)
{
	if (PageHighMem(page)) {
		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		do {
			void *ptr = kmap_atomic(page);
			size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;

			memset(ptr, 0, _size);
			dma_wbinv_range((unsigned long)ptr,
					(unsigned long)ptr + _size);

			kunmap_atomic(ptr);

			page++;
			size -= PAGE_SIZE;
			count--;
		} while (count);
	} else {
		void *ptr = page_address(page);

		memset(ptr, 0, size);
		dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
	}
}

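/*
 * Blocking coherent allocation: take pages from CMA when it is enabled
 * (otherwise from the buddy allocator), clear and flush them, and return
 * an uncached remapping unless the caller asked for
 * DMA_ATTR_NO_KERNEL_MAPPING, in which case the struct page pointer is
 * returned as an opaque cookie.
 */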
static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t gfp,
				      unsigned long attrs)
{
	void *vaddr;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		pr_err("csky %s: DMA_ATTR_NON_CONSISTENT is not supported.\n",
		       __func__);
		return NULL;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 gfp);
	else
		page = alloc_pages(gfp, get_order(size));

	if (!page) {
		pr_err("csky %s no more free pages.\n", __func__);
		return NULL;
	}

	*dma_handle = page_to_phys(page);

	__dma_clear_buffer(page, size);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return page;

	vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
					    pgprot_noncached(PAGE_KERNEL),
					    __builtin_return_address(0));
	if (!vaddr)
		BUG();

	return vaddr;
}

static void csky_dma_free_nonatomic(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	struct page *page = phys_to_page(dma_handle);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if ((unsigned long)vaddr >= VMALLOC_START)
		dma_common_free_remap(vaddr, size, VM_USERMAP);

	if (IS_ENABLED(CONFIG_DMA_CMA))
		dma_release_from_contiguous(dev, page, count);
	else
		__free_pages(page, get_order(size));
}

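/*
 * Entry points used by the generic dma-noncoherent code behind
 * dma_alloc_coherent()/dma_free_coherent(): callers that may sleep take
 * the CMA/buddy path, atomic callers are served from the boot-time pool.
 */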
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		     gfp_t gfp, unsigned long attrs)
{
	if (gfpflags_allow_blocking(gfp))
		return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
						attrs);
	else
		return csky_dma_alloc_atomic(dev, size, dma_handle);
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	if (!addr_in_gen_pool(atomic_pool, (unsigned long)vaddr, size))
		csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
	else
		csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
}

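/*
 * Apply a cache routine to a physical range one page at a time, so that
 * highmem pages can be mapped with kmap_atomic() before the routine runs
 * on kernel virtual addresses.
 */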
static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned int offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		size_t len = left;

		/* Never cross a page boundary so highmem kmaps stay valid. */
		if (offset + len > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		if (PageHighMem(page)) {
			void *addr = kmap_atomic(page);

			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
			start = (unsigned long)page_address(page) + offset;
			fn(start, start + len);
		}

		offset = 0;
		page++;
		left -= len;
	} while (left);
}

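/*
 * Streaming DMA sync hooks: before the device reads (DMA_TO_DEVICE) dirty
 * lines are written back; when the device may write (DMA_FROM_DEVICE or
 * DMA_BIDIRECTIONAL) the lines are written back and invalidated so the
 * CPU rereads the data from memory.
 */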
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}
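
For context, a minimal sketch of how a driver would exercise these hooks; it is not part of the patch. The demo_dma_example() function and its demo_dev argument are made up for illustration and assume a device with a valid DMA mask. dma_alloc_coherent() called from a blocking context lands in csky_dma_alloc_nonatomic() above, while the streaming map reaches arch_sync_dma_for_device()/arch_sync_dma_for_cpu() through the generic dma-noncoherent code.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hypothetical helper, for illustration only. */
static int demo_dma_example(struct device *demo_dev)
{
	dma_addr_t coh_handle, map_handle;
	void *coh_buf, *raw_buf;
	int ret = 0;

	/* Blocking coherent allocation -> csky_dma_alloc_nonatomic(). */
	coh_buf = dma_alloc_coherent(demo_dev, PAGE_SIZE, &coh_handle,
				     GFP_KERNEL);
	if (!coh_buf)
		return -ENOMEM;

	raw_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!raw_buf) {
		ret = -ENOMEM;
		goto free_coherent;
	}

	/* Streaming map: write-back is done via arch_sync_dma_for_device(). */
	map_handle = dma_map_single(demo_dev, raw_buf, PAGE_SIZE,
				    DMA_TO_DEVICE);
	if (dma_mapping_error(demo_dev, map_handle)) {
		ret = -ENOMEM;
		goto free_raw;
	}

	/* ... point the device at coh_handle / map_handle here ... */

	dma_unmap_single(demo_dev, map_handle, PAGE_SIZE, DMA_TO_DEVICE);
free_raw:
	kfree(raw_buf);
free_coherent:
	dma_free_coherent(demo_dev, PAGE_SIZE, coh_buf, coh_handle);
	return ret;
}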