Diffstat (limited to 'include/asm-arm/hardware/iop3xx-adma.h')
-rw-r--r--  include/asm-arm/hardware/iop3xx-adma.h  892
1 file changed, 892 insertions, 0 deletions
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
new file mode 100644
index 000000000000..10834b54f681
--- /dev/null
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -0,0 +1,892 @@
1/*
2 * Copyright © 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 */
18#ifndef _ADMA_H
19#define _ADMA_H
20#include <linux/types.h>
21#include <linux/io.h>
22#include <asm/hardware.h>
23#include <asm/hardware/iop_adma.h>
24
25/* Memory copy units */
26#define DMA_CCR(chan) (chan->mmr_base + 0x0)
27#define DMA_CSR(chan) (chan->mmr_base + 0x4)
28#define DMA_DAR(chan) (chan->mmr_base + 0xc)
29#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
30#define DMA_PADR(chan) (chan->mmr_base + 0x14)
31#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
32#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
33#define DMA_BCR(chan) (chan->mmr_base + 0x20)
34#define DMA_DCR(chan) (chan->mmr_base + 0x24)
35
36/* Application accelerator unit */
37#define AAU_ACR(chan) (chan->mmr_base + 0x0)
38#define AAU_ASR(chan) (chan->mmr_base + 0x4)
39#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
40#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
41#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
42#define AAU_DAR(chan) (chan->mmr_base + 0x20)
43#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
44#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
45#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
46#define AAU_EDCR0_IDX 8
47#define AAU_EDCR1_IDX 17
48#define AAU_EDCR2_IDX 26
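/*
 * Note: unlike the other accessors above, AAU_SAR_EDCR() does not take the
 * channel as a macro parameter; it expands to an expression that expects a
 * local variable named 'chan' to be in scope at the call site.
 */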
49
50#define DMA0_ID 0
51#define DMA1_ID 1
52#define AAU_ID 2
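/*
 * The iop3xx parts expose two memory-to-memory DMA channels and one
 * application accelerator (AAU) channel.  chan->device->id carries one of
 * the ids above, and the helpers below switch on it to pick the matching
 * register map and hardware descriptor layout.
 */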
53
54struct iop3xx_aau_desc_ctrl {
55 unsigned int int_en:1;
56 unsigned int blk1_cmd_ctrl:3;
57 unsigned int blk2_cmd_ctrl:3;
58 unsigned int blk3_cmd_ctrl:3;
59 unsigned int blk4_cmd_ctrl:3;
60 unsigned int blk5_cmd_ctrl:3;
61 unsigned int blk6_cmd_ctrl:3;
62 unsigned int blk7_cmd_ctrl:3;
63 unsigned int blk8_cmd_ctrl:3;
64 unsigned int blk_ctrl:2;
65 unsigned int dual_xor_en:1;
66 unsigned int tx_complete:1;
67 unsigned int zero_result_err:1;
68 unsigned int zero_result_en:1;
69 unsigned int dest_write_en:1;
70};
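/*
 * This and the following bit-field structs mirror individual 32-bit control
 * words of the hardware descriptors.  They are only ever accessed through
 * unions with a plain u32 (e.g. desc_ctrl / desc_ctrl_field below), so a
 * control word can also be built up or stored as a single 32-bit value.
 */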
71
72struct iop3xx_aau_e_desc_ctrl {
73 unsigned int reserved:1;
74 unsigned int blk1_cmd_ctrl:3;
75 unsigned int blk2_cmd_ctrl:3;
76 unsigned int blk3_cmd_ctrl:3;
77 unsigned int blk4_cmd_ctrl:3;
78 unsigned int blk5_cmd_ctrl:3;
79 unsigned int blk6_cmd_ctrl:3;
80 unsigned int blk7_cmd_ctrl:3;
81 unsigned int blk8_cmd_ctrl:3;
82 unsigned int reserved2:7;
83};
84
85struct iop3xx_dma_desc_ctrl {
86 unsigned int pci_transaction:4;
87 unsigned int int_en:1;
88 unsigned int dac_cycle_en:1;
89 unsigned int mem_to_mem_en:1;
90 unsigned int crc_data_tx_en:1;
91 unsigned int crc_gen_en:1;
92 unsigned int crc_seed_dis:1;
93 unsigned int reserved:21;
94 unsigned int crc_tx_complete:1;
95};
96
97struct iop3xx_desc_dma {
98 u32 next_desc;
99 union {
100 u32 pci_src_addr;
101 u32 pci_dest_addr;
102 u32 src_addr;
103 };
104 union {
105 u32 upper_pci_src_addr;
106 u32 upper_pci_dest_addr;
107 };
108 union {
109 u32 local_pci_src_addr;
110 u32 local_pci_dest_addr;
111 u32 dest_addr;
112 };
113 u32 byte_count;
114 union {
115 u32 desc_ctrl;
116 struct iop3xx_dma_desc_ctrl desc_ctrl_field;
117 };
118 u32 crc_addr;
119};
120
121struct iop3xx_desc_aau {
122 u32 next_desc;
123 u32 src[4];
124 u32 dest_addr;
125 u32 byte_count;
126 union {
127 u32 desc_ctrl;
128 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
129 };
130 union {
131 u32 src_addr;
132 u32 e_desc_ctrl;
133 struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
134 } src_edc[31];
135};
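/*
 * In an AAU descriptor the first four source addresses live in src[]; any
 * further sources, together with the extended descriptor control words,
 * share the trailing src_edc[] array.  __desc_idx() below maps a source
 * index to the right src_edc[] slot.
 */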
136
137struct iop3xx_aau_gfmr {
138 unsigned int gfmr1:8;
139 unsigned int gfmr2:8;
140 unsigned int gfmr3:8;
141 unsigned int gfmr4:8;
142};
143
144struct iop3xx_desc_pq_xor {
145 u32 next_desc;
146 u32 src[3];
147 union {
148 u32 data_mult1;
149 struct iop3xx_aau_gfmr data_mult1_field;
150 };
151 u32 dest_addr;
152 u32 byte_count;
153 union {
154 u32 desc_ctrl;
155 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
156 };
157 union {
158 u32 src_addr;
159 u32 e_desc_ctrl;
160 struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
161 u32 data_multiplier;
162 struct iop3xx_aau_gfmr data_mult_field;
163 u32 reserved;
164 } src_edc_gfmr[19];
165};
166
167struct iop3xx_desc_dual_xor {
168 u32 next_desc;
169 u32 src0_addr;
170 u32 src1_addr;
171 u32 h_src_addr;
172 u32 d_src_addr;
173 u32 h_dest_addr;
174 u32 byte_count;
175 union {
176 u32 desc_ctrl;
177 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
178 };
179 u32 d_dest_addr;
180};
181
182union iop3xx_desc {
183 struct iop3xx_desc_aau *aau;
184 struct iop3xx_desc_dma *dma;
185 struct iop3xx_desc_pq_xor *pq_xor;
186 struct iop3xx_desc_dual_xor *dual_xor;
187 void *ptr;
188};
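/*
 * The accessors below view a slot's raw hw_desc memory through this union
 * and pick the member that matches the channel type, roughly:
 *
 *	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
 *	if (chan->device->id == AAU_ID)
 *		hw_desc.aau->byte_count = len;
 *	else
 *		hw_desc.dma->byte_count = len;
 */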
189
190static inline int iop_adma_get_max_xor(void)
191{
192 return 32;
193}
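/*
 * 32 is the largest source count the AAU descriptor format (and the
 * slot-count tables below) can describe in a single chained descriptor.
 */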
194
195static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
196{
197 int id = chan->device->id;
198
199 switch (id) {
200 case DMA0_ID:
201 case DMA1_ID:
202 return __raw_readl(DMA_DAR(chan));
203 case AAU_ID:
204 return __raw_readl(AAU_ADAR(chan));
205 default:
206 BUG();
207 }
208 return 0;
209}
210
211static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
212 u32 next_desc_addr)
213{
214 int id = chan->device->id;
215
216 switch (id) {
217 case DMA0_ID:
218 case DMA1_ID:
219 __raw_writel(next_desc_addr, DMA_NDAR(chan));
220 break;
221 case AAU_ID:
222 __raw_writel(next_desc_addr, AAU_ANDAR(chan));
223 break;
224 }
225
226}
227
228#define IOP_ADMA_STATUS_BUSY (1 << 10)
229#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
230#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
231#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
232
233static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
234{
235 u32 status = __raw_readl(DMA_CSR(chan));
236 return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
237}
238
239static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
240 int num_slots)
241{
242 /* num_slots will only ever be 1, 2, 4, or 8 */
243 return (desc->idx & (num_slots - 1)) ? 0 : 1;
244}
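/*
 * Since num_slots is always a power of two, the mask test above is a cheap
 * "is idx a multiple of num_slots" check: with num_slots == 4, only slot
 * indexes 0, 4, 8, ... count as aligned.
 */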
245
246/* to do: support large (i.e. > hw max) buffer sizes */
247static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
248{
249 *slots_per_op = 1;
250 return 1;
251}
252
253/* to do: support large (i.e. > hw max) buffer sizes */
254static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
255{
256 *slots_per_op = 1;
257 return 1;
258}
259
260static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
261 int *slots_per_op)
262{
263 static const int slot_count_table[] = { 0,
264 1, 1, 1, 1, /* 01 - 04 */
265 2, 2, 2, 2, /* 05 - 08 */
266 4, 4, 4, 4, /* 09 - 12 */
267 4, 4, 4, 4, /* 13 - 16 */
268 8, 8, 8, 8, /* 17 - 20 */
269 8, 8, 8, 8, /* 21 - 24 */
270 8, 8, 8, 8, /* 25 - 28 */
271 8, 8, 8, 8, /* 29 - 32 */
272 };
273 *slots_per_op = slot_count_table[src_cnt];
274 return *slots_per_op;
275}
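/*
 * Descriptor slots are 32 bytes apart (see the 'i << 5' stride used when
 * chaining below).  A base AAU descriptor covering up to four sources fits
 * in one slot; operations with more sources spill into the mini/extended
 * descriptor words and therefore occupy 2, 4, or 8 contiguous slots.
 */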
276
277static inline int
278iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
279{
280 switch (chan->device->id) {
281 case DMA0_ID:
282 case DMA1_ID:
283 return iop_chan_memcpy_slot_count(0, slots_per_op);
284 case AAU_ID:
285 return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
286 default:
287 BUG();
288 }
289 return 0;
290}
291
292static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
293 int *slots_per_op)
294{
295 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
296
297 if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
298 return slot_cnt;
299
300 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
301 while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
302 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
303 slot_cnt += *slots_per_op;
304 }
305
306 if (len)
307 slot_cnt += *slots_per_op;
308
309 return slot_cnt;
310}
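/*
 * Transfers larger than the 16 MB per-descriptor limit are split across
 * chained descriptors, each consuming another slots_per_op slots.  For
 * example, a 40 MB XOR is carved into 16 MB + 16 MB + 8 MB pieces and so
 * needs 3 * slots_per_op slots in total.
 */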
311
312/* zero sum on iop3xx is limited to 1KB per descriptor, so larger lengths
313 * require multiple chained descriptors
314 */
315static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
316 int *slots_per_op)
317{
318 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
319
320 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
321 return slot_cnt;
322
323 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
324 while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
325 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
326 slot_cnt += *slots_per_op;
327 }
328
329 if (len)
330 slot_cnt += *slots_per_op;
331
332 return slot_cnt;
333}
334
335static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
336 struct iop_adma_chan *chan)
337{
338 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
339
340 switch (chan->device->id) {
341 case DMA0_ID:
342 case DMA1_ID:
343 return hw_desc.dma->dest_addr;
344 case AAU_ID:
345 return hw_desc.aau->dest_addr;
346 default:
347 BUG();
348 }
349 return 0;
350}
351
352static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
353 struct iop_adma_chan *chan)
354{
355 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
356
357 switch (chan->device->id) {
358 case DMA0_ID:
359 case DMA1_ID:
360 return hw_desc.dma->byte_count;
361 case AAU_ID:
362 return hw_desc.aau->byte_count;
363 default:
364 BUG();
365 }
366 return 0;
367}
368
369/* translate the src_idx to a descriptor word index */
370static inline int __desc_idx(int src_idx)
371{
372 static const int desc_idx_table[] = { 0, 0, 0, 0,
373 0, 1, 2, 3,
374 5, 6, 7, 8,
375 9, 10, 11, 12,
376 14, 15, 16, 17,
377 18, 19, 20, 21,
378 23, 24, 25, 26,
379 27, 28, 29, 30,
380 };
381
382 return desc_idx_table[src_idx];
383}
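/*
 * Source indexes 0-3 never reach this helper (callers use src[] directly
 * for them), hence the four placeholder zeroes at the start of the table.
 * From source 4 upwards the table skips over the src_edc[] words that hold
 * something other than a source address.
 */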
384
385static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
386 struct iop_adma_chan *chan,
387 int src_idx)
388{
389 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
390
391 switch (chan->device->id) {
392 case DMA0_ID:
393 case DMA1_ID:
394 return hw_desc.dma->src_addr;
395 case AAU_ID:
396 break;
397 default:
398 BUG();
399 }
400
401 if (src_idx < 4)
402 return hw_desc.aau->src[src_idx];
403 else
404 return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
405}
406
407static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
408 int src_idx, dma_addr_t addr)
409{
410 if (src_idx < 4)
411 hw_desc->src[src_idx] = addr;
412 else
413 hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
414}
415
416static inline void
417iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
418{
419 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
420 union {
421 u32 value;
422 struct iop3xx_dma_desc_ctrl field;
423 } u_desc_ctrl;
424
425 u_desc_ctrl.value = 0;
426 u_desc_ctrl.field.mem_to_mem_en = 1;
427 u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
428 u_desc_ctrl.field.int_en = int_en;
429 hw_desc->desc_ctrl = u_desc_ctrl.value;
430 hw_desc->upper_pci_src_addr = 0;
431 hw_desc->crc_addr = 0;
432}
433
434static inline void
435iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
436{
437 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
438 union {
439 u32 value;
440 struct iop3xx_aau_desc_ctrl field;
441 } u_desc_ctrl;
442
443 u_desc_ctrl.value = 0;
444 u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
445 u_desc_ctrl.field.dest_write_en = 1;
446 u_desc_ctrl.field.int_en = int_en;
447 hw_desc->desc_ctrl = u_desc_ctrl.value;
448}
449
450static inline u32
451iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
452{
453 int i, shift;
454 u32 edcr;
455 union {
456 u32 value;
457 struct iop3xx_aau_desc_ctrl field;
458 } u_desc_ctrl;
459
460 u_desc_ctrl.value = 0;
461 switch (src_cnt) {
462 case 25 ... 32:
463 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
464 edcr = 0;
465 shift = 1;
466 for (i = 24; i < src_cnt; i++) {
467 edcr |= (1 << shift);
468 shift += 3;
469 }
470 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
471 src_cnt = 24;
472 /* fall through */
473 case 17 ... 24:
474 if (!u_desc_ctrl.field.blk_ctrl) {
475 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
476 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
477 }
478 edcr = 0;
479 shift = 1;
480 for (i = 16; i < src_cnt; i++) {
481 edcr |= (1 << shift);
482 shift += 3;
483 }
484 hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
485 src_cnt = 16;
486 /* fall through */
487 case 9 ... 16:
488 if (!u_desc_ctrl.field.blk_ctrl)
489 u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
490 edcr = 0;
491 shift = 1;
492 for (i = 8; i < src_cnt; i++) {
493 edcr |= (1 << shift);
494 shift += 3;
495 }
496 hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
497 src_cnt = 8;
498 /* fall through */
499 case 2 ... 8:
500 shift = 1;
501 for (i = 0; i < src_cnt; i++) {
502 u_desc_ctrl.value |= (1 << shift);
503 shift += 3;
504 }
505
506 if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
507 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
508 }
509
510 u_desc_ctrl.field.dest_write_en = 1;
511 u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
512 u_desc_ctrl.field.int_en = int_en;
513 hw_desc->desc_ctrl = u_desc_ctrl.value;
514
515 return u_desc_ctrl.value;
516}
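/*
 * The switch above falls through from the largest source-count range to the
 * smallest: each enabled source gets a non-zero 3-bit block command (0x1),
 * written either into the main control word or into the extended control
 * words selected by blk_ctrl.  Block 1 is then overridden to 0x7, the
 * direct-fill command noted above, so the first source seeds the store
 * queue, and dest_write_en flushes the accumulated result to dest_addr.
 */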
517
518static inline void
519iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
520{
521 iop3xx_desc_init_xor(desc->hw_desc, src_cnt, int_en);
522}
523
524/* return the number of operations */
525static inline int
526iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
527{
528 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
529 struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
530 union {
531 u32 value;
532 struct iop3xx_aau_desc_ctrl field;
533 } u_desc_ctrl;
534 int i, j;
535
536 hw_desc = desc->hw_desc;
537
538 for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
539 i += slots_per_op, j++) {
540 iter = iop_hw_desc_slot_idx(hw_desc, i);
541 u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, int_en);
542 u_desc_ctrl.field.dest_write_en = 0;
543 u_desc_ctrl.field.zero_result_en = 1;
544 u_desc_ctrl.field.int_en = int_en;
545 iter->desc_ctrl = u_desc_ctrl.value;
546
547 /* for the subsequent descriptors preserve the store queue
548 * and chain them together
549 */
550 if (i) {
551 prev_hw_desc =
552 iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
553 prev_hw_desc->next_desc =
554 (u32) (desc->async_tx.phys + (i << 5));
555 }
556 }
557
558 return j;
559}
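/*
 * Zero-sum reuses the XOR descriptor setup but clears dest_write_en and
 * sets zero_result_en, so the AAU only records whether the XOR of all
 * sources is zero instead of writing the result out.  One hardware
 * descriptor is built per slots_per_op group of 32-byte slots and the
 * descriptors are chained through next_desc; the return value is the
 * number of descriptors built.
 */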
560
561static inline void
562iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
563{
564 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
565 union {
566 u32 value;
567 struct iop3xx_aau_desc_ctrl field;
568 } u_desc_ctrl;
569
570 u_desc_ctrl.value = 0;
571 switch (src_cnt) {
572 case 25 ... 32:
573 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
574 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
575 /* fall through */
576 case 17 ... 24:
577 if (!u_desc_ctrl.field.blk_ctrl) {
578 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
579 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
580 }
581 hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
582 /* fall through */
583 case 9 ... 16:
584 if (!u_desc_ctrl.field.blk_ctrl)
585 u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
586 hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
587 /* fall through */
588 case 1 ... 8:
589 if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
590 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
591 }
592
593 u_desc_ctrl.field.dest_write_en = 0;
594 u_desc_ctrl.field.int_en = int_en;
595 hw_desc->desc_ctrl = u_desc_ctrl.value;
596}
597
598static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
599 struct iop_adma_chan *chan,
600 u32 byte_count)
601{
602 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
603
604 switch (chan->device->id) {
605 case DMA0_ID:
606 case DMA1_ID:
607 hw_desc.dma->byte_count = byte_count;
608 break;
609 case AAU_ID:
610 hw_desc.aau->byte_count = byte_count;
611 break;
612 default:
613 BUG();
614 }
615}
616
617static inline void
618iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
619 struct iop_adma_chan *chan)
620{
621 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
622
623 switch (chan->device->id) {
624 case DMA0_ID:
625 case DMA1_ID:
626 iop_desc_init_memcpy(desc, 1);
627 hw_desc.dma->byte_count = 0;
628 hw_desc.dma->dest_addr = 0;
629 hw_desc.dma->src_addr = 0;
630 break;
631 case AAU_ID:
632 iop_desc_init_null_xor(desc, 2, 1);
633 hw_desc.aau->byte_count = 0;
634 hw_desc.aau->dest_addr = 0;
635 hw_desc.aau->src[0] = 0;
636 hw_desc.aau->src[1] = 0;
637 break;
638 default:
639 BUG();
640 }
641}
642
643static inline void
644iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
645{
646 int slots_per_op = desc->slots_per_op;
647 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
648 int i = 0;
649
650 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
651 hw_desc->byte_count = len;
652 } else {
653 do {
654 iter = iop_hw_desc_slot_idx(hw_desc, i);
655 iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
656 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
657 i += slots_per_op;
658 } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
659
660 if (len) {
661 iter = iop_hw_desc_slot_idx(hw_desc, i);
662 iter->byte_count = len;
663 }
664 }
665}
666
667static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
668 struct iop_adma_chan *chan,
669 dma_addr_t addr)
670{
671 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
672
673 switch (chan->device->id) {
674 case DMA0_ID:
675 case DMA1_ID:
676 hw_desc.dma->dest_addr = addr;
677 break;
678 case AAU_ID:
679 hw_desc.aau->dest_addr = addr;
680 break;
681 default:
682 BUG();
683 }
684}
685
686static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
687 dma_addr_t addr)
688{
689 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
690 hw_desc->src_addr = addr;
691}
692
693static inline void
694iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
695 dma_addr_t addr)
696{
697
698 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
699 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
700 int i;
701
702 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
703 i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
704 iter = iop_hw_desc_slot_idx(hw_desc, i);
705 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
706 }
707}
708
709static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
710 int src_idx, dma_addr_t addr)
711{
712
713 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
714 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
715 int i;
716
717 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
718 i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
719 iter = iop_hw_desc_slot_idx(hw_desc, i);
720 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
721 }
722}
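/*
 * For operations that were split across several chained descriptors, the
 * two helpers above program the same source index into every descriptor in
 * the chain, advancing the address by the per-descriptor maximum byte count
 * so that each descriptor reads its chunk from the right offset.
 */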
723
724static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
725 u32 next_desc_addr)
726{
727 /* hw_desc->next_desc is the same location for all channels */
728 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
729 BUG_ON(hw_desc.dma->next_desc);
730 hw_desc.dma->next_desc = next_desc_addr;
731}
732
733static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
734{
735 /* hw_desc->next_desc is the same location for all channels */
736 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
737 return hw_desc.dma->next_desc;
738}
739
740static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
741{
742 /* hw_desc->next_desc is the same location for all channels */
743 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
744 hw_desc.dma->next_desc = 0;
745}
746
747static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
748 u32 val)
749{
750 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
751 hw_desc->src[0] = val;
752}
753
754static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
755{
756 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
757 struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
758
759 BUG_ON(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
760 return desc_ctrl.zero_result_err;
761}
762
763static inline void iop_chan_append(struct iop_adma_chan *chan)
764{
765 u32 dma_chan_ctrl;
766 /* work around dropped interrupts on iop3xx */
767 mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3));
768
769 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
770 dma_chan_ctrl |= 0x2;
771 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
772}
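/*
 * Setting bit 1 of the channel control register (chain resume) tells an
 * already-enabled engine to re-read its next descriptor address register
 * and continue down the chain.  The control and status registers of the DMA
 * and AAU units share offsets 0x0 and 0x4, which is why the DMA_CCR() and
 * DMA_CSR() accessors are also used for the AAU channel here.
 */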
773
774static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
775{
776 if (!busy)
777 del_timer(&chan->cleanup_watchdog);
778}
779
780static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
781{
782 return __raw_readl(DMA_CSR(chan));
783}
784
785static inline void iop_chan_disable(struct iop_adma_chan *chan)
786{
787 u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
788 dma_chan_ctrl &= ~1;
789 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
790}
791
792static inline void iop_chan_enable(struct iop_adma_chan *chan)
793{
794 u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
795
796 dma_chan_ctrl |= 1;
797 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
798}
799
800static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
801{
802 u32 status = __raw_readl(DMA_CSR(chan));
803 status &= (1 << 9);
804 __raw_writel(status, DMA_CSR(chan));
805}
806
807static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
808{
809 u32 status = __raw_readl(DMA_CSR(chan));
810 status &= (1 << 8);
811 __raw_writel(status, DMA_CSR(chan));
812}
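/*
 * The two helpers above acknowledge the end-of-transfer (bit 9) and
 * end-of-chain (bit 8) conditions by writing only that bit back to the
 * status register; all other status bits are masked off and left untouched.
 */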
813
814static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
815{
816 u32 status = __raw_readl(DMA_CSR(chan));
817
818 switch (chan->device->id) {
819 case DMA0_ID:
820 case DMA1_ID:
821 status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
822 break;
823 case AAU_ID:
824 status &= (1 << 5);
825 break;
826 default:
827 BUG();
828 }
829
830 __raw_writel(status, DMA_CSR(chan));
831}
832
833static inline int
834iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
835{
836 return 0;
837}
838
839static inline int
840iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
841{
842 return 0;
843}
844
845static inline int
846iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
847{
848 return 0;
849}
850
851static inline int
852iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
853{
854 return test_bit(5, &status);
855}
856
857static inline int
858iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
859{
860 switch (chan->device->id) {
861 case DMA0_ID:
862 case DMA1_ID:
863 return test_bit(2, &status);
864 default:
865 return 0;
866 }
867}
868
869static inline int
870iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
871{
872 switch (chan->device->id) {
873 case DMA0_ID:
874 case DMA1_ID:
875 return test_bit(3, &status);
876 default:
877 return 0;
878 }
879}
880
881static inline int
882iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
883{
884 switch (chan->device->id) {
885 case DMA0_ID:
886 case DMA1_ID:
887 return test_bit(1, &status);
888 default:
889 return 0;
890 }
891}
892#endif /* _ADMA_H */