Diffstat (limited to 'arch/blackfin/kernel/bfin_dma_5xx.c')
-rw-r--r-- | arch/blackfin/kernel/bfin_dma_5xx.c | 936
1 file changed, 236 insertions, 700 deletions
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 339293d677cc..07e02c0d1c07 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -1,63 +1,27 @@
1 | /* | 1 | /* |
2 | * File: arch/blackfin/kernel/bfin_dma_5xx.c | 2 | * bfin_dma_5xx.c - Blackfin DMA implementation |
3 | * Based on: | ||
4 | * Author: | ||
5 | * | 3 | * |
6 | * Created: | 4 | * Copyright 2004-2008 Analog Devices Inc. |
7 | * Description: This file contains the simple DMA Implementation for Blackfin | 5 | * Licensed under the GPL-2 or later. |
8 | * | ||
9 | * Modified: | ||
10 | * Copyright 2004-2006 Analog Devices Inc. | ||
11 | * | ||
12 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, see the file COPYING, or write | ||
26 | * to the Free Software Foundation, Inc., | ||
27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
28 | */ | 6 | */ |
29 | 7 | ||
30 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
31 | #include <linux/module.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
34 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/module.h> | ||
35 | #include <linux/param.h> | 12 | #include <linux/param.h> |
13 | #include <linux/proc_fs.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/seq_file.h> | ||
16 | #include <linux/spinlock.h> | ||
36 | 17 | ||
37 | #include <asm/blackfin.h> | 18 | #include <asm/blackfin.h> |
38 | #include <asm/dma.h> | ||
39 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
20 | #include <asm/dma.h> | ||
21 | #include <asm/uaccess.h> | ||
40 | 22 | ||
41 | /* Remove unused code not exported by symbol or internally called */ | 23 | struct dma_channel dma_ch[MAX_DMA_CHANNELS]; |
42 | #define REMOVE_DEAD_CODE | 24 | EXPORT_SYMBOL(dma_ch); |
43 | |||
44 | /************************************************************************** | ||
45 | * Global Variables | ||
46 | ***************************************************************************/ | ||
47 | |||
48 | static struct dma_channel dma_ch[MAX_BLACKFIN_DMA_CHANNEL]; | ||
49 | |||
50 | /*------------------------------------------------------------------------------ | ||
51 | * Set the Buffer Clear bit in the Configuration register of specific DMA | ||
52 | * channel. This will stop the descriptor based DMA operation. | ||
53 | *-----------------------------------------------------------------------------*/ | ||
54 | static void clear_dma_buffer(unsigned int channel) | ||
55 | { | ||
56 | dma_ch[channel].regs->cfg |= RESTART; | ||
57 | SSYNC(); | ||
58 | dma_ch[channel].regs->cfg &= ~RESTART; | ||
59 | SSYNC(); | ||
60 | } | ||
61 | 25 | ||
62 | static int __init blackfin_dma_init(void) | 26 | static int __init blackfin_dma_init(void) |
63 | { | 27 | { |
@@ -65,32 +29,67 @@ static int __init blackfin_dma_init(void)
65 | 29 | ||
66 | printk(KERN_INFO "Blackfin DMA Controller\n"); | 30 | printk(KERN_INFO "Blackfin DMA Controller\n"); |
67 | 31 | ||
68 | for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) { | 32 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
69 | dma_ch[i].chan_status = DMA_CHANNEL_FREE; | 33 | dma_ch[i].chan_status = DMA_CHANNEL_FREE; |
70 | dma_ch[i].regs = dma_io_base_addr[i]; | 34 | dma_ch[i].regs = dma_io_base_addr[i]; |
71 | mutex_init(&(dma_ch[i].dmalock)); | 35 | mutex_init(&(dma_ch[i].dmalock)); |
72 | } | 36 | } |
73 | /* Mark MEMDMA Channel 0 as requested since we're using it internally */ | 37 | /* Mark MEMDMA Channel 0 as requested since we're using it internally */ |
74 | dma_ch[CH_MEM_STREAM0_DEST].chan_status = DMA_CHANNEL_REQUESTED; | 38 | request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy"); |
75 | dma_ch[CH_MEM_STREAM0_SRC].chan_status = DMA_CHANNEL_REQUESTED; | 39 | request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy"); |
76 | 40 | ||
77 | #if defined(CONFIG_DEB_DMA_URGENT) | 41 | #if defined(CONFIG_DEB_DMA_URGENT) |
78 | bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE() | 42 | bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE() |
79 | | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT); | 43 | | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT); |
80 | #endif | 44 | #endif |
45 | |||
81 | return 0; | 46 | return 0; |
82 | } | 47 | } |
83 | |||
84 | arch_initcall(blackfin_dma_init); | 48 | arch_initcall(blackfin_dma_init); |
85 | 49 | ||
86 | /*------------------------------------------------------------------------------ | 50 | #ifdef CONFIG_PROC_FS |
87 | * Request the specific DMA channel from the system. | 51 | static int proc_dma_show(struct seq_file *m, void *v) |
88 | *-----------------------------------------------------------------------------*/ | ||
89 | int request_dma(unsigned int channel, char *device_id) | ||
90 | { | 52 | { |
53 | int i; | ||
54 | |||
55 | for (i = 0; i < MAX_DMA_CHANNELS; ++i) | ||
56 | if (dma_ch[i].chan_status != DMA_CHANNEL_FREE) | ||
57 | seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id); | ||
58 | |||
59 | return 0; | ||
60 | } | ||
91 | 61 | ||
62 | static int proc_dma_open(struct inode *inode, struct file *file) | ||
63 | { | ||
64 | return single_open(file, proc_dma_show, NULL); | ||
65 | } | ||
66 | |||
67 | static const struct file_operations proc_dma_operations = { | ||
68 | .open = proc_dma_open, | ||
69 | .read = seq_read, | ||
70 | .llseek = seq_lseek, | ||
71 | .release = single_release, | ||
72 | }; | ||
73 | |||
74 | static int __init proc_dma_init(void) | ||
75 | { | ||
76 | return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL; | ||
77 | } | ||
78 | late_initcall(proc_dma_init); | ||
79 | #endif | ||
80 | |||
81 | /** | ||
82 | * request_dma - request a DMA channel | ||
83 | * | ||
84 | * Request the specific DMA channel from the system if it's available. | ||
85 | */ | ||
86 | int request_dma(unsigned int channel, const char *device_id) | ||
87 | { | ||
92 | pr_debug("request_dma() : BEGIN \n"); | 88 | pr_debug("request_dma() : BEGIN \n"); |
93 | 89 | ||
90 | if (device_id == NULL) | ||
91 | printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel); | ||
92 | |||
94 | #if defined(CONFIG_BF561) && ANOMALY_05000182 | 93 | #if defined(CONFIG_BF561) && ANOMALY_05000182 |
95 | if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) { | 94 | if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) { |
96 | if (get_cclk() > 500000000) { | 95 | if (get_cclk() > 500000000) { |
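Note: a minimal usage sketch of the channel API touched by this hunk, assuming the register helpers removed later in this diff (set_dma_start_addr() and friends) remain available to drivers via asm/dma.h; the channel, buffer, and device name are hypothetical placeholders, not part of this commit.

#include <asm/dma.h>

/* Hypothetical example: "channel", "buf", and "nr_words" are placeholders. */
static int example_start_rx(unsigned int channel, void *buf, unsigned short nr_words)
{
	/* request_dma() now returns 0 on success instead of the channel number */
	if (request_dma(channel, "example driver") != 0)
		return -EBUSY;

	/* register-based (non-descriptor) setup: 16-bit writes into buf */
	set_dma_start_addr(channel, (unsigned long)buf);
	set_dma_x_count(channel, nr_words);
	set_dma_x_modify(channel, 2);
	set_dma_config(channel, WNR | WDSIZE_16 | DI_EN);
	enable_dma(channel);

	return 0;
}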
@@ -129,60 +128,63 @@ int request_dma(unsigned int channel, char *device_id)
129 | #endif | 128 | #endif |
130 | 129 | ||
131 | dma_ch[channel].device_id = device_id; | 130 | dma_ch[channel].device_id = device_id; |
132 | dma_ch[channel].irq_callback = NULL; | 131 | dma_ch[channel].irq = 0; |
133 | 132 | ||
134 | /* This is to be enabled by putting a restriction - | 133 | /* This is to be enabled by putting a restriction - |
135 | * you have to request DMA, before doing any operations on | 134 | * you have to request DMA, before doing any operations on |
136 | * descriptor/channel | 135 | * descriptor/channel |
137 | */ | 136 | */ |
138 | pr_debug("request_dma() : END \n"); | 137 | pr_debug("request_dma() : END \n"); |
139 | return channel; | 138 | return 0; |
140 | } | 139 | } |
141 | EXPORT_SYMBOL(request_dma); | 140 | EXPORT_SYMBOL(request_dma); |
142 | 141 | ||
143 | int set_dma_callback(unsigned int channel, dma_interrupt_t callback, void *data) | 142 | int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data) |
144 | { | 143 | { |
145 | int ret_irq = 0; | ||
146 | |||
147 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | 144 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE |
148 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | 145 | && channel < MAX_DMA_CHANNELS)); |
149 | 146 | ||
150 | if (callback != NULL) { | 147 | if (callback != NULL) { |
151 | int ret_val; | 148 | int ret; |
152 | ret_irq = channel2irq(channel); | 149 | unsigned int irq = channel2irq(channel); |
153 | 150 | ||
154 | dma_ch[channel].data = data; | 151 | ret = request_irq(irq, callback, IRQF_DISABLED, |
152 | dma_ch[channel].device_id, data); | ||
153 | if (ret) | ||
154 | return ret; | ||
155 | 155 | ||
156 | ret_val = | 156 | dma_ch[channel].irq = irq; |
157 | request_irq(ret_irq, (void *)callback, IRQF_DISABLED, | 157 | dma_ch[channel].data = data; |
158 | dma_ch[channel].device_id, data); | ||
159 | if (ret_val) { | ||
160 | printk(KERN_NOTICE | ||
161 | "Request irq in DMA engine failed.\n"); | ||
162 | return -EPERM; | ||
163 | } | ||
164 | dma_ch[channel].irq_callback = callback; | ||
165 | } | 158 | } |
166 | return 0; | 159 | return 0; |
167 | } | 160 | } |
168 | EXPORT_SYMBOL(set_dma_callback); | 161 | EXPORT_SYMBOL(set_dma_callback); |
169 | 162 | ||
170 | void free_dma(unsigned int channel) | 163 | /** |
164 | * clear_dma_buffer - clear DMA fifos for specified channel | ||
165 | * | ||
166 | * Set the Buffer Clear bit in the Configuration register of specific DMA | ||
167 | * channel. This will stop the descriptor based DMA operation. | ||
168 | */ | ||
169 | static void clear_dma_buffer(unsigned int channel) | ||
171 | { | 170 | { |
172 | int ret_irq; | 171 | dma_ch[channel].regs->cfg |= RESTART; |
172 | SSYNC(); | ||
173 | dma_ch[channel].regs->cfg &= ~RESTART; | ||
174 | } | ||
173 | 175 | ||
176 | void free_dma(unsigned int channel) | ||
177 | { | ||
174 | pr_debug("freedma() : BEGIN \n"); | 178 | pr_debug("freedma() : BEGIN \n"); |
175 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | 179 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE |
176 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | 180 | && channel < MAX_DMA_CHANNELS)); |
177 | 181 | ||
178 | /* Halt the DMA */ | 182 | /* Halt the DMA */ |
179 | disable_dma(channel); | 183 | disable_dma(channel); |
180 | clear_dma_buffer(channel); | 184 | clear_dma_buffer(channel); |
181 | 185 | ||
182 | if (dma_ch[channel].irq_callback != NULL) { | 186 | if (dma_ch[channel].irq) |
183 | ret_irq = channel2irq(channel); | 187 | free_irq(dma_ch[channel].irq, dma_ch[channel].data); |
184 | free_irq(ret_irq, dma_ch[channel].data); | ||
185 | } | ||
186 | 188 | ||
187 | /* Clear the DMA Variable in the Channel */ | 189 | /* Clear the DMA Variable in the Channel */ |
188 | mutex_lock(&(dma_ch[channel].dmalock)); | 190 | mutex_lock(&(dma_ch[channel].dmalock)); |
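A sketch of the callback shape implied by the new set_dma_callback() prototype in this hunk, which takes an irq_handler_t rather than the old dma_interrupt_t; the handler name and the use of dev_id to carry the channel number are hypothetical.

#include <linux/interrupt.h>
#include <asm/dma.h>

/* Hypothetical handler: dev_id carries the channel number. */
static irqreturn_t example_dma_done(int irq, void *dev_id)
{
	unsigned int channel = (unsigned int)(unsigned long)dev_id;

	clear_dma_irqstat(channel);	/* ack DMA_DONE/DMA_ERR */
	return IRQ_HANDLED;
}

/* registered after request_dma() succeeds, e.g.:
 *   set_dma_callback(channel, example_dma_done, (void *)(unsigned long)channel);
 */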
@@ -193,294 +195,15 @@ void free_dma(unsigned int channel)
193 | } | 195 | } |
194 | EXPORT_SYMBOL(free_dma); | 196 | EXPORT_SYMBOL(free_dma); |
195 | 197 | ||
196 | void dma_enable_irq(unsigned int channel) | ||
197 | { | ||
198 | int ret_irq; | ||
199 | |||
200 | pr_debug("dma_enable_irq() : BEGIN \n"); | ||
201 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
202 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
203 | |||
204 | ret_irq = channel2irq(channel); | ||
205 | enable_irq(ret_irq); | ||
206 | } | ||
207 | EXPORT_SYMBOL(dma_enable_irq); | ||
208 | |||
209 | void dma_disable_irq(unsigned int channel) | ||
210 | { | ||
211 | int ret_irq; | ||
212 | |||
213 | pr_debug("dma_disable_irq() : BEGIN \n"); | ||
214 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
215 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
216 | |||
217 | ret_irq = channel2irq(channel); | ||
218 | disable_irq(ret_irq); | ||
219 | } | ||
220 | EXPORT_SYMBOL(dma_disable_irq); | ||
221 | |||
222 | int dma_channel_active(unsigned int channel) | ||
223 | { | ||
224 | if (dma_ch[channel].chan_status == DMA_CHANNEL_FREE) { | ||
225 | return 0; | ||
226 | } else { | ||
227 | return 1; | ||
228 | } | ||
229 | } | ||
230 | EXPORT_SYMBOL(dma_channel_active); | ||
231 | |||
232 | /*------------------------------------------------------------------------------ | ||
233 | * stop the specific DMA channel. | ||
234 | *-----------------------------------------------------------------------------*/ | ||
235 | void disable_dma(unsigned int channel) | ||
236 | { | ||
237 | pr_debug("stop_dma() : BEGIN \n"); | ||
238 | |||
239 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
240 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
241 | |||
242 | dma_ch[channel].regs->cfg &= ~DMAEN; /* Clean the enable bit */ | ||
243 | SSYNC(); | ||
244 | dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED; | ||
245 | /* Needs to be enabled Later */ | ||
246 | pr_debug("stop_dma() : END \n"); | ||
247 | return; | ||
248 | } | ||
249 | EXPORT_SYMBOL(disable_dma); | ||
250 | |||
251 | void enable_dma(unsigned int channel) | ||
252 | { | ||
253 | pr_debug("enable_dma() : BEGIN \n"); | ||
254 | |||
255 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
256 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
257 | |||
258 | dma_ch[channel].chan_status = DMA_CHANNEL_ENABLED; | ||
259 | dma_ch[channel].regs->curr_x_count = 0; | ||
260 | dma_ch[channel].regs->curr_y_count = 0; | ||
261 | |||
262 | dma_ch[channel].regs->cfg |= DMAEN; /* Set the enable bit */ | ||
263 | SSYNC(); | ||
264 | pr_debug("enable_dma() : END \n"); | ||
265 | return; | ||
266 | } | ||
267 | EXPORT_SYMBOL(enable_dma); | ||
268 | |||
269 | /*------------------------------------------------------------------------------ | ||
270 | * Set the Start Address register for the specific DMA channel | ||
271 | * This function can be used for register based DMA, | ||
272 | * to setup the start address | ||
273 | * addr: Starting address of the DMA Data to be transferred. | ||
274 | *-----------------------------------------------------------------------------*/ | ||
275 | void set_dma_start_addr(unsigned int channel, unsigned long addr) | ||
276 | { | ||
277 | pr_debug("set_dma_start_addr() : BEGIN \n"); | ||
278 | |||
279 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
280 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
281 | |||
282 | dma_ch[channel].regs->start_addr = addr; | ||
283 | SSYNC(); | ||
284 | pr_debug("set_dma_start_addr() : END\n"); | ||
285 | } | ||
286 | EXPORT_SYMBOL(set_dma_start_addr); | ||
287 | |||
288 | void set_dma_next_desc_addr(unsigned int channel, unsigned long addr) | ||
289 | { | ||
290 | pr_debug("set_dma_next_desc_addr() : BEGIN \n"); | ||
291 | |||
292 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
293 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
294 | |||
295 | dma_ch[channel].regs->next_desc_ptr = addr; | ||
296 | SSYNC(); | ||
297 | pr_debug("set_dma_next_desc_addr() : END\n"); | ||
298 | } | ||
299 | EXPORT_SYMBOL(set_dma_next_desc_addr); | ||
300 | |||
301 | void set_dma_curr_desc_addr(unsigned int channel, unsigned long addr) | ||
302 | { | ||
303 | pr_debug("set_dma_curr_desc_addr() : BEGIN \n"); | ||
304 | |||
305 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
306 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
307 | |||
308 | dma_ch[channel].regs->curr_desc_ptr = addr; | ||
309 | SSYNC(); | ||
310 | pr_debug("set_dma_curr_desc_addr() : END\n"); | ||
311 | } | ||
312 | EXPORT_SYMBOL(set_dma_curr_desc_addr); | ||
313 | |||
314 | void set_dma_x_count(unsigned int channel, unsigned short x_count) | ||
315 | { | ||
316 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
317 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
318 | |||
319 | dma_ch[channel].regs->x_count = x_count; | ||
320 | SSYNC(); | ||
321 | } | ||
322 | EXPORT_SYMBOL(set_dma_x_count); | ||
323 | |||
324 | void set_dma_y_count(unsigned int channel, unsigned short y_count) | ||
325 | { | ||
326 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
327 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
328 | |||
329 | dma_ch[channel].regs->y_count = y_count; | ||
330 | SSYNC(); | ||
331 | } | ||
332 | EXPORT_SYMBOL(set_dma_y_count); | ||
333 | |||
334 | void set_dma_x_modify(unsigned int channel, short x_modify) | ||
335 | { | ||
336 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
337 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
338 | |||
339 | dma_ch[channel].regs->x_modify = x_modify; | ||
340 | SSYNC(); | ||
341 | } | ||
342 | EXPORT_SYMBOL(set_dma_x_modify); | ||
343 | |||
344 | void set_dma_y_modify(unsigned int channel, short y_modify) | ||
345 | { | ||
346 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
347 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
348 | |||
349 | dma_ch[channel].regs->y_modify = y_modify; | ||
350 | SSYNC(); | ||
351 | } | ||
352 | EXPORT_SYMBOL(set_dma_y_modify); | ||
353 | |||
354 | void set_dma_config(unsigned int channel, unsigned short config) | ||
355 | { | ||
356 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
357 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
358 | |||
359 | dma_ch[channel].regs->cfg = config; | ||
360 | SSYNC(); | ||
361 | } | ||
362 | EXPORT_SYMBOL(set_dma_config); | ||
363 | |||
364 | unsigned short | ||
365 | set_bfin_dma_config(char direction, char flow_mode, | ||
366 | char intr_mode, char dma_mode, char width, char syncmode) | ||
367 | { | ||
368 | unsigned short config; | ||
369 | |||
370 | config = | ||
371 | ((direction << 1) | (width << 2) | (dma_mode << 4) | | ||
372 | (intr_mode << 6) | (flow_mode << 12) | (syncmode << 5)); | ||
373 | return config; | ||
374 | } | ||
375 | EXPORT_SYMBOL(set_bfin_dma_config); | ||
376 | |||
377 | void set_dma_sg(unsigned int channel, struct dmasg *sg, int nr_sg) | ||
378 | { | ||
379 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
380 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
381 | |||
382 | dma_ch[channel].regs->cfg |= ((nr_sg & 0x0F) << 8); | ||
383 | |||
384 | dma_ch[channel].regs->next_desc_ptr = (unsigned int)sg; | ||
385 | |||
386 | SSYNC(); | ||
387 | } | ||
388 | EXPORT_SYMBOL(set_dma_sg); | ||
389 | |||
390 | void set_dma_curr_addr(unsigned int channel, unsigned long addr) | ||
391 | { | ||
392 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
393 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
394 | |||
395 | dma_ch[channel].regs->curr_addr_ptr = addr; | ||
396 | SSYNC(); | ||
397 | } | ||
398 | EXPORT_SYMBOL(set_dma_curr_addr); | ||
399 | |||
400 | /*------------------------------------------------------------------------------ | ||
401 | * Get the DMA status of a specific DMA channel from the system. | ||
402 | *-----------------------------------------------------------------------------*/ | ||
403 | unsigned short get_dma_curr_irqstat(unsigned int channel) | ||
404 | { | ||
405 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
406 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
407 | |||
408 | return dma_ch[channel].regs->irq_status; | ||
409 | } | ||
410 | EXPORT_SYMBOL(get_dma_curr_irqstat); | ||
411 | |||
412 | /*------------------------------------------------------------------------------ | ||
413 | * Clear the DMA_DONE bit in DMA status. Stop the DMA completion interrupt. | ||
414 | *-----------------------------------------------------------------------------*/ | ||
415 | void clear_dma_irqstat(unsigned int channel) | ||
416 | { | ||
417 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
418 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
419 | dma_ch[channel].regs->irq_status |= 3; | ||
420 | } | ||
421 | EXPORT_SYMBOL(clear_dma_irqstat); | ||
422 | |||
423 | /*------------------------------------------------------------------------------ | ||
424 | * Get current DMA xcount of a specific DMA channel from the system. | ||
425 | *-----------------------------------------------------------------------------*/ | ||
426 | unsigned short get_dma_curr_xcount(unsigned int channel) | ||
427 | { | ||
428 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
429 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
430 | |||
431 | return dma_ch[channel].regs->curr_x_count; | ||
432 | } | ||
433 | EXPORT_SYMBOL(get_dma_curr_xcount); | ||
434 | |||
435 | /*------------------------------------------------------------------------------ | ||
436 | * Get current DMA ycount of a specific DMA channel from the system. | ||
437 | *-----------------------------------------------------------------------------*/ | ||
438 | unsigned short get_dma_curr_ycount(unsigned int channel) | ||
439 | { | ||
440 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
441 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
442 | |||
443 | return dma_ch[channel].regs->curr_y_count; | ||
444 | } | ||
445 | EXPORT_SYMBOL(get_dma_curr_ycount); | ||
446 | |||
447 | unsigned long get_dma_next_desc_ptr(unsigned int channel) | ||
448 | { | ||
449 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
450 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
451 | |||
452 | return dma_ch[channel].regs->next_desc_ptr; | ||
453 | } | ||
454 | EXPORT_SYMBOL(get_dma_next_desc_ptr); | ||
455 | |||
456 | unsigned long get_dma_curr_desc_ptr(unsigned int channel) | ||
457 | { | ||
458 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
459 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
460 | |||
461 | return dma_ch[channel].regs->curr_desc_ptr; | ||
462 | } | ||
463 | EXPORT_SYMBOL(get_dma_curr_desc_ptr); | ||
464 | |||
465 | unsigned long get_dma_curr_addr(unsigned int channel) | ||
466 | { | ||
467 | BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE | ||
468 | && channel < MAX_BLACKFIN_DMA_CHANNEL)); | ||
469 | |||
470 | return dma_ch[channel].regs->curr_addr_ptr; | ||
471 | } | ||
472 | EXPORT_SYMBOL(get_dma_curr_addr); | ||
473 | |||
474 | #ifdef CONFIG_PM | 198 | #ifdef CONFIG_PM |
199 | # ifndef MAX_DMA_SUSPEND_CHANNELS | ||
200 | # define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS | ||
201 | # endif | ||
475 | int blackfin_dma_suspend(void) | 202 | int blackfin_dma_suspend(void) |
476 | { | 203 | { |
477 | int i; | 204 | int i; |
478 | 205 | ||
479 | #ifdef CONFIG_BF561 /* IMDMA channels doesn't have a PERIPHERAL_MAP */ | 206 | for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) { |
480 | for (i = 0; i <= CH_MEM_STREAM3_SRC; i++) { | ||
481 | #else | ||
482 | for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) { | ||
483 | #endif | ||
484 | if (dma_ch[i].chan_status == DMA_CHANNEL_ENABLED) { | 207 | if (dma_ch[i].chan_status == DMA_CHANNEL_ENABLED) { |
485 | printk(KERN_ERR "DMA Channel %d failed to suspend\n", i); | 208 | printk(KERN_ERR "DMA Channel %d failed to suspend\n", i); |
486 | return -EBUSY; | 209 | return -EBUSY; |
@@ -495,388 +218,201 @@ int blackfin_dma_suspend(void)
495 | void blackfin_dma_resume(void) | 218 | void blackfin_dma_resume(void) |
496 | { | 219 | { |
497 | int i; | 220 | int i; |
498 | 221 | for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) | |
499 | #ifdef CONFIG_BF561 /* IMDMA channels doesn't have a PERIPHERAL_MAP */ | ||
500 | for (i = 0; i <= CH_MEM_STREAM3_SRC; i++) | ||
501 | #else | ||
502 | for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) | ||
503 | #endif | ||
504 | dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map; | 222 | dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map; |
505 | } | 223 | } |
506 | #endif | 224 | #endif |
507 | 225 | ||
508 | static void *__dma_memcpy(void *dest, const void *src, size_t size) | 226 | /** |
227 | * blackfin_dma_early_init - minimal DMA init | ||
228 | * | ||
229 | * Setup a few DMA registers so we can safely do DMA transfers early on in | ||
230 | * the kernel booting process. Really this just means using dma_memcpy(). | ||
231 | */ | ||
232 | void __init blackfin_dma_early_init(void) | ||
509 | { | 233 | { |
510 | int direction; /* 1 - address decrease, 0 - address increase */ | ||
511 | int flag_align; /* 1 - address aligned, 0 - address unaligned */ | ||
512 | int flag_2D; /* 1 - 2D DMA needed, 0 - 1D DMA needed */ | ||
513 | unsigned long flags; | ||
514 | |||
515 | if (size <= 0) | ||
516 | return NULL; | ||
517 | |||
518 | local_irq_save(flags); | ||
519 | |||
520 | if ((unsigned long)src < memory_end) | ||
521 | blackfin_dcache_flush_range((unsigned int)src, | ||
522 | (unsigned int)(src + size)); | ||
523 | |||
524 | if ((unsigned long)dest < memory_end) | ||
525 | blackfin_dcache_invalidate_range((unsigned int)dest, | ||
526 | (unsigned int)(dest + size)); | ||
527 | |||
528 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
529 | |||
530 | if ((unsigned long)src < (unsigned long)dest) | ||
531 | direction = 1; | ||
532 | else | ||
533 | direction = 0; | ||
534 | |||
535 | if ((((unsigned long)dest % 2) == 0) && (((unsigned long)src % 2) == 0) | ||
536 | && ((size % 2) == 0)) | ||
537 | flag_align = 1; | ||
538 | else | ||
539 | flag_align = 0; | ||
540 | |||
541 | if (size > 0x10000) /* size > 64K */ | ||
542 | flag_2D = 1; | ||
543 | else | ||
544 | flag_2D = 0; | ||
545 | |||
546 | /* Setup destination and source start address */ | ||
547 | if (direction) { | ||
548 | if (flag_align) { | ||
549 | bfin_write_MDMA_D0_START_ADDR(dest + size - 2); | ||
550 | bfin_write_MDMA_S0_START_ADDR(src + size - 2); | ||
551 | } else { | ||
552 | bfin_write_MDMA_D0_START_ADDR(dest + size - 1); | ||
553 | bfin_write_MDMA_S0_START_ADDR(src + size - 1); | ||
554 | } | ||
555 | } else { | ||
556 | bfin_write_MDMA_D0_START_ADDR(dest); | ||
557 | bfin_write_MDMA_S0_START_ADDR(src); | ||
558 | } | ||
559 | |||
560 | /* Setup destination and source xcount */ | ||
561 | if (flag_2D) { | ||
562 | if (flag_align) { | ||
563 | bfin_write_MDMA_D0_X_COUNT(1024 / 2); | ||
564 | bfin_write_MDMA_S0_X_COUNT(1024 / 2); | ||
565 | } else { | ||
566 | bfin_write_MDMA_D0_X_COUNT(1024); | ||
567 | bfin_write_MDMA_S0_X_COUNT(1024); | ||
568 | } | ||
569 | bfin_write_MDMA_D0_Y_COUNT(size >> 10); | ||
570 | bfin_write_MDMA_S0_Y_COUNT(size >> 10); | ||
571 | } else { | ||
572 | if (flag_align) { | ||
573 | bfin_write_MDMA_D0_X_COUNT(size / 2); | ||
574 | bfin_write_MDMA_S0_X_COUNT(size / 2); | ||
575 | } else { | ||
576 | bfin_write_MDMA_D0_X_COUNT(size); | ||
577 | bfin_write_MDMA_S0_X_COUNT(size); | ||
578 | } | ||
579 | } | ||
580 | |||
581 | /* Setup destination and source xmodify and ymodify */ | ||
582 | if (direction) { | ||
583 | if (flag_align) { | ||
584 | bfin_write_MDMA_D0_X_MODIFY(-2); | ||
585 | bfin_write_MDMA_S0_X_MODIFY(-2); | ||
586 | if (flag_2D) { | ||
587 | bfin_write_MDMA_D0_Y_MODIFY(-2); | ||
588 | bfin_write_MDMA_S0_Y_MODIFY(-2); | ||
589 | } | ||
590 | } else { | ||
591 | bfin_write_MDMA_D0_X_MODIFY(-1); | ||
592 | bfin_write_MDMA_S0_X_MODIFY(-1); | ||
593 | if (flag_2D) { | ||
594 | bfin_write_MDMA_D0_Y_MODIFY(-1); | ||
595 | bfin_write_MDMA_S0_Y_MODIFY(-1); | ||
596 | } | ||
597 | } | ||
598 | } else { | ||
599 | if (flag_align) { | ||
600 | bfin_write_MDMA_D0_X_MODIFY(2); | ||
601 | bfin_write_MDMA_S0_X_MODIFY(2); | ||
602 | if (flag_2D) { | ||
603 | bfin_write_MDMA_D0_Y_MODIFY(2); | ||
604 | bfin_write_MDMA_S0_Y_MODIFY(2); | ||
605 | } | ||
606 | } else { | ||
607 | bfin_write_MDMA_D0_X_MODIFY(1); | ||
608 | bfin_write_MDMA_S0_X_MODIFY(1); | ||
609 | if (flag_2D) { | ||
610 | bfin_write_MDMA_D0_Y_MODIFY(1); | ||
611 | bfin_write_MDMA_S0_Y_MODIFY(1); | ||
612 | } | ||
613 | } | ||
614 | } | ||
615 | |||
616 | /* Enable source DMA */ | ||
617 | if (flag_2D) { | ||
618 | if (flag_align) { | ||
619 | bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D | WDSIZE_16); | ||
620 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D | WDSIZE_16); | ||
621 | } else { | ||
622 | bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D); | ||
623 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D); | ||
624 | } | ||
625 | } else { | ||
626 | if (flag_align) { | ||
627 | bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16); | ||
628 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16); | ||
629 | } else { | ||
630 | bfin_write_MDMA_S0_CONFIG(DMAEN); | ||
631 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN); | ||
632 | } | ||
633 | } | ||
634 | |||
635 | SSYNC(); | ||
636 | |||
637 | while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) | ||
638 | ; | ||
639 | |||
640 | bfin_write_MDMA_D0_IRQ_STATUS(bfin_read_MDMA_D0_IRQ_STATUS() | | ||
641 | (DMA_DONE | DMA_ERR)); | ||
642 | |||
643 | bfin_write_MDMA_S0_CONFIG(0); | 234 | bfin_write_MDMA_S0_CONFIG(0); |
644 | bfin_write_MDMA_D0_CONFIG(0); | ||
645 | |||
646 | local_irq_restore(flags); | ||
647 | |||
648 | return dest; | ||
649 | } | 235 | } |
650 | 236 | ||
651 | void *dma_memcpy(void *dest, const void *src, size_t size) | 237 | /** |
652 | { | 238 | * __dma_memcpy - program the MDMA registers |
653 | size_t bulk; | 239 | * |
654 | size_t rest; | 240 | * Actually program MDMA0 and wait for the transfer to finish. Disable IRQs |
655 | void * addr; | 241 | * while programming registers so that everything is fully configured. Wait |
656 | 242 | * for DMA to finish with IRQs enabled. If interrupted, the initial DMA_DONE | |
657 | bulk = (size >> 16) << 16; | 243 | * check will make sure we don't clobber any existing transfer. |
658 | rest = size - bulk; | 244 | */ |
659 | if (bulk) | 245 | static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf) |
660 | __dma_memcpy(dest, src, bulk); | ||
661 | addr = __dma_memcpy(dest+bulk, src+bulk, rest); | ||
662 | return addr; | ||
663 | } | ||
664 | EXPORT_SYMBOL(dma_memcpy); | ||
665 | |||
666 | void *safe_dma_memcpy(void *dest, const void *src, size_t size) | ||
667 | { | ||
668 | void *addr; | ||
669 | addr = dma_memcpy(dest, src, size); | ||
670 | return addr; | ||
671 | } | ||
672 | EXPORT_SYMBOL(safe_dma_memcpy); | ||
673 | |||
674 | void dma_outsb(unsigned long addr, const void *buf, unsigned short len) | ||
675 | { | 246 | { |
247 | static DEFINE_SPINLOCK(mdma_lock); | ||
676 | unsigned long flags; | 248 | unsigned long flags; |
677 | 249 | ||
678 | local_irq_save(flags); | 250 | spin_lock_irqsave(&mdma_lock, flags); |
679 | 251 | ||
680 | blackfin_dcache_flush_range((unsigned int)buf, | 252 | if (bfin_read_MDMA_S0_CONFIG()) |
681 | (unsigned int)(buf) + len); | 253 | while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) |
254 | continue; | ||
255 | |||
256 | if (conf & DMA2D) { | ||
257 | /* For larger bit sizes, we've already divided down cnt so it | ||
258 | * is no longer a multiple of 64k. So we have to break down | ||
259 | * the limit here so it is a multiple of the incoming size. | ||
260 | * There is no limitation here in terms of total size other | ||
261 | * than the hardware though as the bits lost in the shift are | ||
262 | * made up by MODIFY (== we can hit the whole address space). | ||
263 | * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4 | ||
264 | */ | ||
265 | u32 shift = abs(dmod) >> 1; | ||
266 | size_t ycnt = cnt >> (16 - shift); | ||
267 | cnt = 1 << (16 - shift); | ||
268 | bfin_write_MDMA_D0_Y_COUNT(ycnt); | ||
269 | bfin_write_MDMA_S0_Y_COUNT(ycnt); | ||
270 | bfin_write_MDMA_D0_Y_MODIFY(dmod); | ||
271 | bfin_write_MDMA_S0_Y_MODIFY(smod); | ||
272 | } | ||
682 | 273 | ||
683 | bfin_write_MDMA_D0_START_ADDR(addr); | 274 | bfin_write_MDMA_D0_START_ADDR(daddr); |
684 | bfin_write_MDMA_D0_X_COUNT(len); | 275 | bfin_write_MDMA_D0_X_COUNT(cnt); |
685 | bfin_write_MDMA_D0_X_MODIFY(0); | 276 | bfin_write_MDMA_D0_X_MODIFY(dmod); |
686 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | 277 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); |
687 | 278 | ||
688 | bfin_write_MDMA_S0_START_ADDR(buf); | 279 | bfin_write_MDMA_S0_START_ADDR(saddr); |
689 | bfin_write_MDMA_S0_X_COUNT(len); | 280 | bfin_write_MDMA_S0_X_COUNT(cnt); |
690 | bfin_write_MDMA_S0_X_MODIFY(1); | 281 | bfin_write_MDMA_S0_X_MODIFY(smod); |
691 | bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR); | 282 | bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR); |
692 | 283 | ||
693 | bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8); | 284 | bfin_write_MDMA_S0_CONFIG(DMAEN | conf); |
694 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_8); | 285 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf); |
286 | |||
287 | spin_unlock_irqrestore(&mdma_lock, flags); | ||
695 | 288 | ||
696 | SSYNC(); | 289 | SSYNC(); |
697 | 290 | ||
698 | while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)); | 291 | while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) |
292 | if (bfin_read_MDMA_S0_CONFIG()) | ||
293 | continue; | ||
294 | else | ||
295 | return; | ||
699 | 296 | ||
700 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | 297 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); |
701 | 298 | ||
702 | bfin_write_MDMA_S0_CONFIG(0); | 299 | bfin_write_MDMA_S0_CONFIG(0); |
703 | bfin_write_MDMA_D0_CONFIG(0); | 300 | bfin_write_MDMA_D0_CONFIG(0); |
704 | local_irq_restore(flags); | ||
705 | |||
706 | } | 301 | } |
707 | EXPORT_SYMBOL(dma_outsb); | ||
708 | |||
709 | 302 | ||
710 | void dma_insb(unsigned long addr, void *buf, unsigned short len) | 303 | /** |
304 | * _dma_memcpy - translate C memcpy settings into MDMA settings | ||
305 | * | ||
306 | * Handle all the high level steps before we touch the MDMA registers. So | ||
307 | * handle direction, tweaking of sizes, and formatting of addresses. | ||
308 | */ | ||
309 | static void *_dma_memcpy(void *pdst, const void *psrc, size_t size) | ||
711 | { | 310 | { |
712 | unsigned long flags; | 311 | u32 conf, shift; |
713 | 312 | s16 mod; | |
714 | blackfin_dcache_invalidate_range((unsigned int)buf, | 313 | unsigned long dst = (unsigned long)pdst; |
715 | (unsigned int)(buf) + len); | 314 | unsigned long src = (unsigned long)psrc; |
716 | |||
717 | local_irq_save(flags); | ||
718 | bfin_write_MDMA_D0_START_ADDR(buf); | ||
719 | bfin_write_MDMA_D0_X_COUNT(len); | ||
720 | bfin_write_MDMA_D0_X_MODIFY(1); | ||
721 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
722 | |||
723 | bfin_write_MDMA_S0_START_ADDR(addr); | ||
724 | bfin_write_MDMA_S0_X_COUNT(len); | ||
725 | bfin_write_MDMA_S0_X_MODIFY(0); | ||
726 | bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
727 | 315 | ||
728 | bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8); | 316 | if (size == 0) |
729 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_8); | 317 | return NULL; |
730 | 318 | ||
731 | SSYNC(); | 319 | if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) { |
320 | conf = WDSIZE_32; | ||
321 | shift = 2; | ||
322 | } else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) { | ||
323 | conf = WDSIZE_16; | ||
324 | shift = 1; | ||
325 | } else { | ||
326 | conf = WDSIZE_8; | ||
327 | shift = 0; | ||
328 | } | ||
732 | 329 | ||
733 | while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)); | 330 | /* If the two memory regions have a chance of overlapping, make |
331 | * sure the memcpy still works as expected. Do this by having the | ||
332 | * copy run backwards instead. | ||
333 | */ | ||
334 | mod = 1 << shift; | ||
335 | if (src < dst) { | ||
336 | mod *= -1; | ||
337 | dst += size + mod; | ||
338 | src += size + mod; | ||
339 | } | ||
340 | size >>= shift; | ||
734 | 341 | ||
735 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | 342 | if (size > 0x10000) |
343 | conf |= DMA2D; | ||
736 | 344 | ||
737 | bfin_write_MDMA_S0_CONFIG(0); | 345 | __dma_memcpy(dst, mod, src, mod, size, conf); |
738 | bfin_write_MDMA_D0_CONFIG(0); | ||
739 | local_irq_restore(flags); | ||
740 | 346 | ||
347 | return pdst; | ||
741 | } | 348 | } |
742 | EXPORT_SYMBOL(dma_insb); | ||
743 | 349 | ||
744 | void dma_outsw(unsigned long addr, const void *buf, unsigned short len) | 350 | /** |
351 | * dma_memcpy - DMA memcpy under mutex lock | ||
352 | * | ||
353 | * Do not check arguments before starting the DMA memcpy. Break the transfer | ||
354 | * up into two pieces. The first transfer is in multiples of 64k and the | ||
355 | * second transfer is the piece smaller than 64k. | ||
356 | */ | ||
357 | void *dma_memcpy(void *pdst, const void *psrc, size_t size) | ||
745 | { | 358 | { |
746 | unsigned long flags; | 359 | unsigned long dst = (unsigned long)pdst; |
747 | 360 | unsigned long src = (unsigned long)psrc; | |
748 | local_irq_save(flags); | 361 | size_t bulk, rest; |
749 | 362 | ||
750 | blackfin_dcache_flush_range((unsigned int)buf, | 363 | if (bfin_addr_dcachable(src)) |
751 | (unsigned int)(buf) + len * sizeof(short)); | 364 | blackfin_dcache_flush_range(src, src + size); |
752 | 365 | ||
753 | bfin_write_MDMA_D0_START_ADDR(addr); | 366 | if (bfin_addr_dcachable(dst)) |
754 | bfin_write_MDMA_D0_X_COUNT(len); | 367 | blackfin_dcache_invalidate_range(dst, dst + size); |
755 | bfin_write_MDMA_D0_X_MODIFY(0); | ||
756 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
757 | |||
758 | bfin_write_MDMA_S0_START_ADDR(buf); | ||
759 | bfin_write_MDMA_S0_X_COUNT(len); | ||
760 | bfin_write_MDMA_S0_X_MODIFY(2); | ||
761 | bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
762 | |||
763 | bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16); | ||
764 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16); | ||
765 | |||
766 | SSYNC(); | ||
767 | |||
768 | while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)); | ||
769 | |||
770 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
771 | |||
772 | bfin_write_MDMA_S0_CONFIG(0); | ||
773 | bfin_write_MDMA_D0_CONFIG(0); | ||
774 | local_irq_restore(flags); | ||
775 | 368 | ||
369 | bulk = size & ~0xffff; | ||
370 | rest = size - bulk; | ||
371 | if (bulk) | ||
372 | _dma_memcpy(pdst, psrc, bulk); | ||
373 | _dma_memcpy(pdst + bulk, psrc + bulk, rest); | ||
374 | return pdst; | ||
776 | } | 375 | } |
777 | EXPORT_SYMBOL(dma_outsw); | 376 | EXPORT_SYMBOL(dma_memcpy); |
778 | 377 | ||
779 | void dma_insw(unsigned long addr, void *buf, unsigned short len) | 378 | /** |
379 | * safe_dma_memcpy - DMA memcpy w/argument checking | ||
380 | * | ||
381 | * Verify arguments are safe before heading to dma_memcpy(). | ||
382 | */ | ||
383 | void *safe_dma_memcpy(void *dst, const void *src, size_t size) | ||
780 | { | 384 | { |
781 | unsigned long flags; | 385 | if (!access_ok(VERIFY_WRITE, dst, size)) |
782 | 386 | return NULL; | |
783 | blackfin_dcache_invalidate_range((unsigned int)buf, | 387 | if (!access_ok(VERIFY_READ, src, size)) |
784 | (unsigned int)(buf) + len * sizeof(short)); | 388 | return NULL; |
785 | 389 | return dma_memcpy(dst, src, size); | |
786 | local_irq_save(flags); | ||
787 | |||
788 | bfin_write_MDMA_D0_START_ADDR(buf); | ||
789 | bfin_write_MDMA_D0_X_COUNT(len); | ||
790 | bfin_write_MDMA_D0_X_MODIFY(2); | ||
791 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
792 | |||
793 | bfin_write_MDMA_S0_START_ADDR(addr); | ||
794 | bfin_write_MDMA_S0_X_COUNT(len); | ||
795 | bfin_write_MDMA_S0_X_MODIFY(0); | ||
796 | bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
797 | |||
798 | bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16); | ||
799 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16); | ||
800 | |||
801 | SSYNC(); | ||
802 | |||
803 | while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)); | ||
804 | |||
805 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
806 | |||
807 | bfin_write_MDMA_S0_CONFIG(0); | ||
808 | bfin_write_MDMA_D0_CONFIG(0); | ||
809 | local_irq_restore(flags); | ||
810 | |||
811 | } | 390 | } |
812 | EXPORT_SYMBOL(dma_insw); | 391 | EXPORT_SYMBOL(safe_dma_memcpy); |
813 | 392 | ||
814 | void dma_outsl(unsigned long addr, const void *buf, unsigned short len) | 393 | static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len, |
394 | u16 size, u16 dma_size) | ||
815 | { | 395 | { |
816 | unsigned long flags; | 396 | blackfin_dcache_flush_range(buf, buf + len * size); |
817 | 397 | __dma_memcpy(addr, 0, buf, size, len, dma_size); | |
818 | local_irq_save(flags); | ||
819 | |||
820 | blackfin_dcache_flush_range((unsigned int)buf, | ||
821 | (unsigned int)(buf) + len * sizeof(long)); | ||
822 | |||
823 | bfin_write_MDMA_D0_START_ADDR(addr); | ||
824 | bfin_write_MDMA_D0_X_COUNT(len); | ||
825 | bfin_write_MDMA_D0_X_MODIFY(0); | ||
826 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
827 | |||
828 | bfin_write_MDMA_S0_START_ADDR(buf); | ||
829 | bfin_write_MDMA_S0_X_COUNT(len); | ||
830 | bfin_write_MDMA_S0_X_MODIFY(4); | ||
831 | bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
832 | |||
833 | bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_32); | ||
834 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_32); | ||
835 | |||
836 | SSYNC(); | ||
837 | |||
838 | while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)); | ||
839 | |||
840 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
841 | |||
842 | bfin_write_MDMA_S0_CONFIG(0); | ||
843 | bfin_write_MDMA_D0_CONFIG(0); | ||
844 | local_irq_restore(flags); | ||
845 | |||
846 | } | 398 | } |
847 | EXPORT_SYMBOL(dma_outsl); | ||
848 | 399 | ||
849 | void dma_insl(unsigned long addr, void *buf, unsigned short len) | 400 | static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len, |
401 | u16 size, u16 dma_size) | ||
850 | { | 402 | { |
851 | unsigned long flags; | 403 | blackfin_dcache_invalidate_range(buf, buf + len * size); |
852 | 404 | __dma_memcpy(buf, size, addr, 0, len, dma_size); | |
853 | blackfin_dcache_invalidate_range((unsigned int)buf, | ||
854 | (unsigned int)(buf) + len * sizeof(long)); | ||
855 | |||
856 | local_irq_save(flags); | ||
857 | |||
858 | bfin_write_MDMA_D0_START_ADDR(buf); | ||
859 | bfin_write_MDMA_D0_X_COUNT(len); | ||
860 | bfin_write_MDMA_D0_X_MODIFY(4); | ||
861 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
862 | |||
863 | bfin_write_MDMA_S0_START_ADDR(addr); | ||
864 | bfin_write_MDMA_S0_X_COUNT(len); | ||
865 | bfin_write_MDMA_S0_X_MODIFY(0); | ||
866 | bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
867 | |||
868 | bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_32); | ||
869 | bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_32); | ||
870 | |||
871 | SSYNC(); | ||
872 | |||
873 | while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)); | ||
874 | |||
875 | bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); | ||
876 | |||
877 | bfin_write_MDMA_S0_CONFIG(0); | ||
878 | bfin_write_MDMA_D0_CONFIG(0); | ||
879 | local_irq_restore(flags); | ||
880 | |||
881 | } | 405 | } |
882 | EXPORT_SYMBOL(dma_insl); | 406 | |
407 | #define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \ | ||
408 | void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \ | ||
409 | { \ | ||
410 | _dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \ | ||
411 | } \ | ||
412 | EXPORT_SYMBOL(dma_##io##s##bwl) | ||
413 | MAKE_DMA_IO(out, b, 1, 8, const); | ||
414 | MAKE_DMA_IO(in, b, 1, 8, ); | ||
415 | MAKE_DMA_IO(out, w, 2, 16, const); | ||
416 | MAKE_DMA_IO(in, w, 2, 16, ); | ||
417 | MAKE_DMA_IO(out, l, 4, 32, const); | ||
418 | MAKE_DMA_IO(in, l, 4, 32, ); | ||
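For reference, the first MAKE_DMA_IO() instance above, MAKE_DMA_IO(out, b, 1, 8, const), should preprocess to roughly the following, mirroring the hand-written dma_outsb() that this commit removes:

void dma_outsb(unsigned long addr, const void *buf, unsigned short len)
{
	/* 8-bit MDMA out: source modify is the element size (1), dest modify 0 */
	_dma_out(addr, (unsigned long)buf, len, 1, WDSIZE_8);
}
EXPORT_SYMBOL(dma_outsb);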