author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-13 13:52:27 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-13 13:52:27 -0400
commit		e030dbf91a87da7e8be3be3ca781558695bea683 (patch)
tree		4ff2e01621a888be4098ca48c404775e56a55a0d /include
parent		12a22960549979c10a95cc97f8ec63b461c55692 (diff)
parent		3039f0735a280b54c7364fbfe6a9287f7f0b510a (diff)
Merge branch 'ioat-md-accel-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop
* 'ioat-md-accel-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop: (28 commits)
  ioatdma: add the unisys "i/oat" pci vendor/device id
  ARM: Add drivers/dma to arch/arm/Kconfig
  iop3xx: surface the iop3xx DMA and AAU units to the iop-adma driver
  iop13xx: surface the iop13xx adma units to the iop-adma driver
  dmaengine: driver for the iop32x, iop33x, and iop13xx raid engines
  md: remove raid5 compute_block and compute_parity5
  md: handle_stripe5 - request io processing in raid5_run_ops
  md: handle_stripe5 - add request/completion logic for async expand ops
  md: handle_stripe5 - add request/completion logic for async read ops
  md: handle_stripe5 - add request/completion logic for async check ops
  md: handle_stripe5 - add request/completion logic for async compute ops
  md: handle_stripe5 - add request/completion logic for async write ops
  md: common infrastructure for running operations with raid5_run_ops
  md: raid5_run_ops - run stripe operations outside sh->lock
  raid5: replace custom debug PRINTKs with standard pr_debug
  raid5: refactor handle_stripe5 and handle_stripe6 (v3)
  async_tx: add the async_tx api
  xor: make 'xor_blocks' a library routine for use with async_tx
  dmaengine: make clients responsible for managing channels
  dmaengine: refactor dmaengine around dma_async_tx_descriptor
  ...
Diffstat (limited to 'include')
 -rw-r--r--	include/asm-arm/arch-iop13xx/adma.h	| 544
 -rw-r--r--	include/asm-arm/arch-iop13xx/iop13xx.h	|  38
 -rw-r--r--	include/asm-arm/arch-iop32x/adma.h	|   5
 -rw-r--r--	include/asm-arm/arch-iop33x/adma.h	|   5
 -rw-r--r--	include/asm-arm/hardware/iop3xx-adma.h	| 892
 -rw-r--r--	include/asm-arm/hardware/iop3xx.h	|  68
 -rw-r--r--	include/asm-arm/hardware/iop_adma.h	| 118
 -rw-r--r--	include/linux/async_tx.h	| 156
 -rw-r--r--	include/linux/dmaengine.h	| 293
 -rw-r--r--	include/linux/pci_ids.h	|   3
 -rw-r--r--	include/linux/raid/raid5.h	|  97
 -rw-r--r--	include/linux/raid/xor.h	|   5
12 files changed, 2027 insertions(+), 197 deletions(-)
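The centerpiece of the merge is the async_tx api (include/linux/async_tx.h below): a client submits offloadable operations, orders them through dma_async_tx_descriptor dependencies, and the api falls back to a synchronous software path when no dma channel offers the needed capability. As a rough sketch of the calling convention — assuming the async_memcpy()/async_xor() signatures added elsewhere in this series; the page and callback names here are illustrative, not from the patch:

	#include <linux/async_tx.h>

	static void parity_done(void *ctx)
	{
		/* runs once the xor, and everything it depends on, completes */
	}

	static void build_parity(struct page *parity, struct page **blocks,
				 int cnt, struct page *fresh)
	{
		struct dma_async_tx_descriptor *tx;

		/* stage 1: pull new data into blocks[0]; no ASYNC_TX_ACK,
		 * since a dependent operation will be chained onto it
		 */
		tx = async_memcpy(blocks[0], fresh, 0, 0, PAGE_SIZE,
				  0, NULL, NULL, NULL);

		/* stage 2: xor all blocks into parity, ordered after the
		 * memcpy; ASYNC_TX_XOR_ZERO_DST because the destination is
		 * not also a source, ASYNC_TX_DEP_ACK to ack stage 1
		 */
		tx = async_xor(parity, blocks, 0, cnt, PAGE_SIZE,
			       ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_DEP_ACK,
			       tx, parity_done, NULL);

		/* kick all channels with pending work */
		async_tx_issue_pending_all();
	}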
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
new file mode 100644
index 000000000000..04006c1c5fd7
--- /dev/null
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright(c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _ADMA_H
+#define _ADMA_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <asm/hardware.h>
+#include <asm/hardware/iop_adma.h>
+
+#define ADMA_ACCR(chan)	(chan->mmr_base + 0x0)
+#define ADMA_ACSR(chan)	(chan->mmr_base + 0x4)
+#define ADMA_ADAR(chan)	(chan->mmr_base + 0x8)
+#define ADMA_IIPCR(chan)	(chan->mmr_base + 0x18)
+#define ADMA_IIPAR(chan)	(chan->mmr_base + 0x1c)
+#define ADMA_IIPUAR(chan)	(chan->mmr_base + 0x20)
+#define ADMA_ANDAR(chan)	(chan->mmr_base + 0x24)
+#define ADMA_ADCR(chan)	(chan->mmr_base + 0x28)
+#define ADMA_CARMD(chan)	(chan->mmr_base + 0x2c)
+#define ADMA_ABCR(chan)	(chan->mmr_base + 0x30)
+#define ADMA_DLADR(chan)	(chan->mmr_base + 0x34)
+#define ADMA_DUADR(chan)	(chan->mmr_base + 0x38)
+#define ADMA_SLAR(src, chan)	(chan->mmr_base + (0x3c + (src << 3)))
+#define ADMA_SUAR(src, chan)	(chan->mmr_base + (0x40 + (src << 3)))
+
+struct iop13xx_adma_src {
+	u32 src_addr;
+	union {
+		u32 upper_src_addr;
+		struct {
+			unsigned int pq_upper_src_addr:24;
+			unsigned int pq_dmlt:8;
+		};
+	};
+};
+
+struct iop13xx_adma_desc_ctrl {
+	unsigned int int_en:1;
+	unsigned int xfer_dir:2;
+	unsigned int src_select:4;
+	unsigned int zero_result:1;
+	unsigned int block_fill_en:1;
+	unsigned int crc_gen_en:1;
+	unsigned int crc_xfer_dis:1;
+	unsigned int crc_seed_fetch_dis:1;
+	unsigned int status_write_back_en:1;
+	unsigned int endian_swap_en:1;
+	unsigned int reserved0:2;
+	unsigned int pq_update_xfer_en:1;
+	unsigned int dual_xor_en:1;
+	unsigned int pq_xfer_en:1;
+	unsigned int p_xfer_dis:1;
+	unsigned int reserved1:10;
+	unsigned int relax_order_en:1;
+	unsigned int no_snoop_en:1;
+};
+
+struct iop13xx_adma_byte_count {
+	unsigned int byte_count:24;
+	unsigned int host_if:3;
+	unsigned int reserved:2;
+	unsigned int zero_result_err_q:1;
+	unsigned int zero_result_err:1;
+	unsigned int tx_complete:1;
+};
+
+struct iop13xx_adma_desc_hw {
+	u32 next_desc;
+	union {
+		u32 desc_ctrl;
+		struct iop13xx_adma_desc_ctrl desc_ctrl_field;
+	};
+	union {
+		u32 crc_addr;
+		u32 block_fill_data;
+		u32 q_dest_addr;
+	};
+	union {
+		u32 byte_count;
+		struct iop13xx_adma_byte_count byte_count_field;
+	};
+	union {
+		u32 dest_addr;
+		u32 p_dest_addr;
+	};
+	union {
+		u32 upper_dest_addr;
+		u32 pq_upper_dest_addr;
+	};
+	struct iop13xx_adma_src src[1];
+};
+
+struct iop13xx_adma_desc_dual_xor {
+	u32 next_desc;
+	u32 desc_ctrl;
+	u32 reserved;
+	u32 byte_count;
+	u32 h_dest_addr;
+	u32 h_upper_dest_addr;
+	u32 src0_addr;
+	u32 upper_src0_addr;
+	u32 src1_addr;
+	u32 upper_src1_addr;
+	u32 h_src_addr;
+	u32 h_upper_src_addr;
+	u32 d_src_addr;
+	u32 d_upper_src_addr;
+	u32 d_dest_addr;
+	u32 d_upper_dest_addr;
+};
+
+struct iop13xx_adma_desc_pq_update {
+	u32 next_desc;
+	u32 desc_ctrl;
+	u32 reserved;
+	u32 byte_count;
+	u32 p_dest_addr;
+	u32 p_upper_dest_addr;
+	u32 src0_addr;
+	u32 upper_src0_addr;
+	u32 src1_addr;
+	u32 upper_src1_addr;
+	u32 p_src_addr;
+	u32 p_upper_src_addr;
+	u32 q_src_addr;
+	struct {
+		unsigned int q_upper_src_addr:24;
+		unsigned int q_dmlt:8;
+	};
+	u32 q_dest_addr;
+	u32 q_upper_dest_addr;
+};
+
+static inline int iop_adma_get_max_xor(void)
+{
+	return 16;
+}
+
+static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
+{
+	return __raw_readl(ADMA_ADAR(chan));
+}
+
+static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
+						u32 next_desc_addr)
+{
+	__raw_writel(next_desc_addr, ADMA_ANDAR(chan));
+}
+
+#define ADMA_STATUS_BUSY (1 << 13)
+
+static inline char iop_chan_is_busy(struct iop_adma_chan *chan)
+{
+	if (__raw_readl(ADMA_ACSR(chan)) &
+	    ADMA_STATUS_BUSY)
+		return 1;
+	else
+		return 0;
+}
+
+static inline int
+iop_chan_get_desc_align(struct iop_adma_chan *chan, int num_slots)
+{
+	return 1;
+}
+#define iop_desc_is_aligned(x, y) 1
+
+static inline int
+iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
+{
+	*slots_per_op = 1;
+	return 1;
+}
+
+#define iop_chan_interrupt_slot_count(s, c) iop_chan_memcpy_slot_count(0, s)
+
+static inline int
+iop_chan_memset_slot_count(size_t len, int *slots_per_op)
+{
+	*slots_per_op = 1;
+	return 1;
+}
+
+static inline int
+iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+	int num_slots;
+	/* slots_to_find = 1 for basic descriptor + 1 per 4 sources above 1
+	 * (1 source => 8 bytes) (1 slot => 32 bytes)
+	 */
+	num_slots = 1 + (((src_cnt - 1) << 3) >> 5);
+	if (((src_cnt - 1) << 3) & 0x1f)
+		num_slots++;
+
+	*slots_per_op = num_slots;
+
+	return num_slots;
+}
+
+#define ADMA_MAX_BYTE_COUNT	(16 * 1024 * 1024)
+#define IOP_ADMA_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
+#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
+#define IOP_ADMA_XOR_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
+#define iop_chan_zero_sum_slot_count(l, s, o) iop_chan_xor_slot_count(l, s, o)
+
+static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
+					 struct iop_adma_chan *chan)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	return hw_desc->dest_addr;
+}
+
+static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
+					  struct iop_adma_chan *chan)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	return hw_desc->byte_count_field.byte_count;
+}
+
+static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
+					struct iop_adma_chan *chan,
+					int src_idx)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	return hw_desc->src[src_idx].src_addr;
+}
+
+static inline u32 iop_desc_get_src_count(struct iop_adma_desc_slot *desc,
+					 struct iop_adma_chan *chan)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	return hw_desc->desc_ctrl_field.src_select + 1;
+}
+
+static inline void
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop13xx_adma_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
+	u_desc_ctrl.field.int_en = int_en;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+	hw_desc->crc_addr = 0;
+}
+
+static inline void
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop13xx_adma_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
+	u_desc_ctrl.field.block_fill_en = 1;
+	u_desc_ctrl.field.int_en = int_en;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+	hw_desc->crc_addr = 0;
+}
+
+/* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
+static inline void
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop13xx_adma_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	u_desc_ctrl.field.src_select = src_cnt - 1;
+	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
+	u_desc_ctrl.field.int_en = int_en;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+	hw_desc->crc_addr = 0;
+
+}
+#define iop_desc_init_null_xor(d, s, i) iop_desc_init_xor(d, s, i)
+
+/* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
+static inline int
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop13xx_adma_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	u_desc_ctrl.field.src_select = src_cnt - 1;
+	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
+	u_desc_ctrl.field.zero_result = 1;
+	u_desc_ctrl.field.status_write_back_en = 1;
+	u_desc_ctrl.field.int_en = int_en;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+	hw_desc->crc_addr = 0;
+
+	return 1;
+}
+
+static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
+					   struct iop_adma_chan *chan,
+					   u32 byte_count)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	hw_desc->byte_count = byte_count;
+}
+
+static inline void
+iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+	int slots_per_op = desc->slots_per_op;
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter;
+	int i = 0;
+
+	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+		hw_desc->byte_count = len;
+	} else {
+		do {
+			iter = iop_hw_desc_slot_idx(hw_desc, i);
+			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+			len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+			i += slots_per_op;
+		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
+
+		if (len) {
+			iter = iop_hw_desc_slot_idx(hw_desc, i);
+			iter->byte_count = len;
+		}
+	}
+}
+
+
+static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
+					  struct iop_adma_chan *chan,
+					  dma_addr_t addr)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	hw_desc->dest_addr = addr;
+	hw_desc->upper_dest_addr = 0;
+}
+
+static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
+						dma_addr_t addr)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	hw_desc->src[0].src_addr = addr;
+	hw_desc->src[0].upper_src_addr = 0;
+}
+
+static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
+					     int src_idx, dma_addr_t addr)
+{
+	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter;
+	int i = 0;
+
+	do {
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iter->src[src_idx].src_addr = addr;
+		iter->src[src_idx].upper_src_addr = 0;
+		slot_cnt -= slots_per_op;
+		if (slot_cnt) {
+			i += slots_per_op;
+			addr += IOP_ADMA_XOR_MAX_BYTE_COUNT;
+		}
+	} while (slot_cnt);
+}
+
+static inline void
+iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
+			struct iop_adma_chan *chan)
+{
+	iop_desc_init_memcpy(desc, 1);
+	iop_desc_set_byte_count(desc, chan, 0);
+	iop_desc_set_dest_addr(desc, chan, 0);
+	iop_desc_set_memcpy_src_addr(desc, 0);
+}
+
+#define iop_desc_set_zero_sum_src_addr iop_desc_set_xor_src_addr
+
+static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
+					  u32 next_desc_addr)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	BUG_ON(hw_desc->next_desc);
+	hw_desc->next_desc = next_desc_addr;
+}
+
+static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	return hw_desc->next_desc;
+}
+
+static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	hw_desc->next_desc = 0;
+}
+
+static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
+					       u32 val)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	hw_desc->block_fill_data = val;
+}
+
+static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+{
+	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+	struct iop13xx_adma_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
+	struct iop13xx_adma_byte_count byte_count = hw_desc->byte_count_field;
+
+	BUG_ON(!(byte_count.tx_complete && desc_ctrl.zero_result));
+
+	if (desc_ctrl.pq_xfer_en)
+		return byte_count.zero_result_err_q;
+	else
+		return byte_count.zero_result_err;
+}
+
+static inline void iop_chan_append(struct iop_adma_chan *chan)
+{
+	u32 adma_accr;
+
+	adma_accr = __raw_readl(ADMA_ACCR(chan));
+	adma_accr |= 0x2;
+	__raw_writel(adma_accr, ADMA_ACCR(chan));
+}
+
+static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
+{
+	do { } while (0);
+}
+
+static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
+{
+	return __raw_readl(ADMA_ACSR(chan));
+}
+
+static inline void iop_chan_disable(struct iop_adma_chan *chan)
+{
+	u32 adma_chan_ctrl = __raw_readl(ADMA_ACCR(chan));
+	adma_chan_ctrl &= ~0x1;
+	__raw_writel(adma_chan_ctrl, ADMA_ACCR(chan));
+}
+
+static inline void iop_chan_enable(struct iop_adma_chan *chan)
+{
+	u32 adma_chan_ctrl;
+
+	adma_chan_ctrl = __raw_readl(ADMA_ACCR(chan));
+	adma_chan_ctrl |= 0x1;
+	__raw_writel(adma_chan_ctrl, ADMA_ACCR(chan));
+}
+
+static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(ADMA_ACSR(chan));
+	status &= (1 << 12);
+	__raw_writel(status, ADMA_ACSR(chan));
+}
+
+static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(ADMA_ACSR(chan));
+	status &= (1 << 11);
+	__raw_writel(status, ADMA_ACSR(chan));
+}
+
+static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(ADMA_ACSR(chan));
+	status &= (1 << 9) | (1 << 5) | (1 << 4) | (1 << 3);
+	__raw_writel(status, ADMA_ACSR(chan));
+}
+
+static inline int
+iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
+{
+	return test_bit(9, &status);
+}
+
+static inline int
+iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return test_bit(5, &status);
+}
+
+static inline int
+iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return test_bit(4, &status);
+}
+
+static inline int
+iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return test_bit(3, &status);
+}
+
+static inline int
+iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return 0;
+}
+
+static inline int
+iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return 0;
+}
+
+static inline int
+iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
+{
+	return 0;
+}
+
+#endif /* _ADMA_H */
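A note on the xor slot arithmetic in iop_chan_xor_slot_count() above: each source occupies an 8-byte address pair and a slot is 32 bytes, so the first slot carries the descriptor header plus source 0, and every four additional sources cost one more slot. A hedged userspace re-derivation of that math (plain C with asserts, not kernel code):

	#include <assert.h>

	/* mirror of the iop13xx xor slot math: 1 slot for the basic
	 * descriptor, plus one slot per four extra sources
	 * (8 bytes per source, 32 bytes per slot)
	 */
	static int xor_slot_count(int src_cnt)
	{
		int num_slots = 1 + (((src_cnt - 1) << 3) >> 5);

		if (((src_cnt - 1) << 3) & 0x1f)
			num_slots++;
		return num_slots;
	}

	int main(void)
	{
		assert(xor_slot_count(1) == 1);	/* fits the base descriptor */
		assert(xor_slot_count(5) == 2);	/* 4 extras -> 32 bytes -> +1 */
		assert(xor_slot_count(16) == 5); /* 15 extras -> 120 bytes -> +4 */
		return 0;
	}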
diff --git a/include/asm-arm/arch-iop13xx/iop13xx.h b/include/asm-arm/arch-iop13xx/iop13xx.h
index e6736c3d1f7f..d4e4f828577c 100644
--- a/include/asm-arm/arch-iop13xx/iop13xx.h
+++ b/include/asm-arm/arch-iop13xx/iop13xx.h
@@ -166,12 +166,22 @@ static inline int iop13xx_cpu_id(void)
 #define IOP13XX_INIT_I2C_1	(1 << 1)
 #define IOP13XX_INIT_I2C_2	(1 << 2)
 
-#define IQ81340_NUM_UART	2
-#define IQ81340_NUM_I2C		3
-#define IQ81340_NUM_PHYS_MAP_FLASH	1
-#define IQ81340_MAX_PLAT_DEVICES (IQ81340_NUM_UART +\
-	IQ81340_NUM_I2C +\
-	IQ81340_NUM_PHYS_MAP_FLASH)
+/* ADMA selection flags */
+/* INIT_ADMA_DEFAULT = Rely on CONFIG_IOP13XX_ADMA* */
+#define IOP13XX_INIT_ADMA_DEFAULT	(0)
+#define IOP13XX_INIT_ADMA_0		(1 << 0)
+#define IOP13XX_INIT_ADMA_1		(1 << 1)
+#define IOP13XX_INIT_ADMA_2		(1 << 2)
+
+/* Platform devices */
+#define IQ81340_NUM_UART	2
+#define IQ81340_NUM_I2C		3
+#define IQ81340_NUM_PHYS_MAP_FLASH	1
+#define IQ81340_NUM_ADMA	3
+#define IQ81340_MAX_PLAT_DEVICES (IQ81340_NUM_UART + \
+	IQ81340_NUM_I2C + \
+	IQ81340_NUM_PHYS_MAP_FLASH + \
+	IQ81340_NUM_ADMA)
 
 /*========================== PMMR offsets for key registers ============*/
 #define IOP13XX_ATU0_PMMR_OFFSET	0x00048000
@@ -444,22 +454,6 @@ static inline int iop13xx_cpu_id(void)
 /*==============================ADMA UNITS===============================*/
 #define IOP13XX_ADMA_PHYS_BASE(chan)	IOP13XX_REG_ADDR32_PHYS((chan << 9))
 #define IOP13XX_ADMA_UPPER_PA(chan)	(IOP13XX_ADMA_PHYS_BASE(chan) + 0xc0)
-#define IOP13XX_ADMA_OFFSET(chan, ofs)	IOP13XX_REG_ADDR32((chan << 9) + (ofs))
-
-#define IOP13XX_ADMA_ACCR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x0)
-#define IOP13XX_ADMA_ACSR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x4)
-#define IOP13XX_ADMA_ADAR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x8)
-#define IOP13XX_ADMA_IIPCR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x18)
-#define IOP13XX_ADMA_IIPAR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x1c)
-#define IOP13XX_ADMA_IIPUAR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x20)
-#define IOP13XX_ADMA_ANDAR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x24)
-#define IOP13XX_ADMA_ADCR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x28)
-#define IOP13XX_ADMA_CARMD(chan)	IOP13XX_ADMA_OFFSET(chan, 0x2c)
-#define IOP13XX_ADMA_ABCR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x30)
-#define IOP13XX_ADMA_DLADR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x34)
-#define IOP13XX_ADMA_DUADR(chan)	IOP13XX_ADMA_OFFSET(chan, 0x38)
-#define IOP13XX_ADMA_SLAR(src, chan)	IOP13XX_ADMA_OFFSET(chan, 0x3c + (src <<3))
-#define IOP13XX_ADMA_SUAR(src, chan)	IOP13XX_ADMA_OFFSET(chan, 0x40 + (src <<3))
 
 /*==============================XSI BRIDGE===============================*/
 #define IOP13XX_XBG_BECSR	IOP13XX_REG_ADDR32(0x178c)
diff --git a/include/asm-arm/arch-iop32x/adma.h b/include/asm-arm/arch-iop32x/adma.h
new file mode 100644
index 000000000000..5ed92037dd10
--- /dev/null
+++ b/include/asm-arm/arch-iop32x/adma.h
@@ -0,0 +1,5 @@
+#ifndef IOP32X_ADMA_H
+#define IOP32X_ADMA_H
+#include <asm/hardware/iop3xx-adma.h>
+#endif
+
diff --git a/include/asm-arm/arch-iop33x/adma.h b/include/asm-arm/arch-iop33x/adma.h
new file mode 100644
index 000000000000..4b92f795f90e
--- /dev/null
+++ b/include/asm-arm/arch-iop33x/adma.h
@@ -0,0 +1,5 @@
+#ifndef IOP33X_ADMA_H
+#define IOP33X_ADMA_H
+#include <asm/hardware/iop3xx-adma.h>
+#endif
+
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
new file mode 100644
index 000000000000..10834b54f681
--- /dev/null
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -0,0 +1,892 @@
+/*
+ * Copyright(c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _ADMA_H
+#define _ADMA_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <asm/hardware.h>
+#include <asm/hardware/iop_adma.h>
+
+/* Memory copy units */
+#define DMA_CCR(chan)	(chan->mmr_base + 0x0)
+#define DMA_CSR(chan)	(chan->mmr_base + 0x4)
+#define DMA_DAR(chan)	(chan->mmr_base + 0xc)
+#define DMA_NDAR(chan)	(chan->mmr_base + 0x10)
+#define DMA_PADR(chan)	(chan->mmr_base + 0x14)
+#define DMA_PUADR(chan)	(chan->mmr_base + 0x18)
+#define DMA_LADR(chan)	(chan->mmr_base + 0x1c)
+#define DMA_BCR(chan)	(chan->mmr_base + 0x20)
+#define DMA_DCR(chan)	(chan->mmr_base + 0x24)
+
+/* Application accelerator unit */
+#define AAU_ACR(chan)	(chan->mmr_base + 0x0)
+#define AAU_ASR(chan)	(chan->mmr_base + 0x4)
+#define AAU_ADAR(chan)	(chan->mmr_base + 0x8)
+#define AAU_ANDAR(chan)	(chan->mmr_base + 0xc)
+#define AAU_SAR(src, chan)	(chan->mmr_base + (0x10 + ((src) << 2)))
+#define AAU_DAR(chan)	(chan->mmr_base + 0x20)
+#define AAU_ABCR(chan)	(chan->mmr_base + 0x24)
+#define AAU_ADCR(chan)	(chan->mmr_base + 0x28)
+#define AAU_SAR_EDCR(src_edc)	(chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
+#define AAU_EDCR0_IDX	8
+#define AAU_EDCR1_IDX	17
+#define AAU_EDCR2_IDX	26
+
+#define DMA0_ID 0
+#define DMA1_ID 1
+#define AAU_ID 2
+
+struct iop3xx_aau_desc_ctrl {
+	unsigned int int_en:1;
+	unsigned int blk1_cmd_ctrl:3;
+	unsigned int blk2_cmd_ctrl:3;
+	unsigned int blk3_cmd_ctrl:3;
+	unsigned int blk4_cmd_ctrl:3;
+	unsigned int blk5_cmd_ctrl:3;
+	unsigned int blk6_cmd_ctrl:3;
+	unsigned int blk7_cmd_ctrl:3;
+	unsigned int blk8_cmd_ctrl:3;
+	unsigned int blk_ctrl:2;
+	unsigned int dual_xor_en:1;
+	unsigned int tx_complete:1;
+	unsigned int zero_result_err:1;
+	unsigned int zero_result_en:1;
+	unsigned int dest_write_en:1;
+};
+
+struct iop3xx_aau_e_desc_ctrl {
+	unsigned int reserved:1;
+	unsigned int blk1_cmd_ctrl:3;
+	unsigned int blk2_cmd_ctrl:3;
+	unsigned int blk3_cmd_ctrl:3;
+	unsigned int blk4_cmd_ctrl:3;
+	unsigned int blk5_cmd_ctrl:3;
+	unsigned int blk6_cmd_ctrl:3;
+	unsigned int blk7_cmd_ctrl:3;
+	unsigned int blk8_cmd_ctrl:3;
+	unsigned int reserved2:7;
+};
+
+struct iop3xx_dma_desc_ctrl {
+	unsigned int pci_transaction:4;
+	unsigned int int_en:1;
+	unsigned int dac_cycle_en:1;
+	unsigned int mem_to_mem_en:1;
+	unsigned int crc_data_tx_en:1;
+	unsigned int crc_gen_en:1;
+	unsigned int crc_seed_dis:1;
+	unsigned int reserved:21;
+	unsigned int crc_tx_complete:1;
+};
+
+struct iop3xx_desc_dma {
+	u32 next_desc;
+	union {
+		u32 pci_src_addr;
+		u32 pci_dest_addr;
+		u32 src_addr;
+	};
+	union {
+		u32 upper_pci_src_addr;
+		u32 upper_pci_dest_addr;
+	};
+	union {
+		u32 local_pci_src_addr;
+		u32 local_pci_dest_addr;
+		u32 dest_addr;
+	};
+	u32 byte_count;
+	union {
+		u32 desc_ctrl;
+		struct iop3xx_dma_desc_ctrl desc_ctrl_field;
+	};
+	u32 crc_addr;
+};
+
+struct iop3xx_desc_aau {
+	u32 next_desc;
+	u32 src[4];
+	u32 dest_addr;
+	u32 byte_count;
+	union {
+		u32 desc_ctrl;
+		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+	};
+	union {
+		u32 src_addr;
+		u32 e_desc_ctrl;
+		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
+	} src_edc[31];
+};
+
+struct iop3xx_aau_gfmr {
+	unsigned int gfmr1:8;
+	unsigned int gfmr2:8;
+	unsigned int gfmr3:8;
+	unsigned int gfmr4:8;
+};
+
+struct iop3xx_desc_pq_xor {
+	u32 next_desc;
+	u32 src[3];
+	union {
+		u32 data_mult1;
+		struct iop3xx_aau_gfmr data_mult1_field;
+	};
+	u32 dest_addr;
+	u32 byte_count;
+	union {
+		u32 desc_ctrl;
+		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+	};
+	union {
+		u32 src_addr;
+		u32 e_desc_ctrl;
+		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
+		u32 data_multiplier;
+		struct iop3xx_aau_gfmr data_mult_field;
+		u32 reserved;
+	} src_edc_gfmr[19];
+};
+
+struct iop3xx_desc_dual_xor {
+	u32 next_desc;
+	u32 src0_addr;
+	u32 src1_addr;
+	u32 h_src_addr;
+	u32 d_src_addr;
+	u32 h_dest_addr;
+	u32 byte_count;
+	union {
+		u32 desc_ctrl;
+		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+	};
+	u32 d_dest_addr;
+};
+
+union iop3xx_desc {
+	struct iop3xx_desc_aau *aau;
+	struct iop3xx_desc_dma *dma;
+	struct iop3xx_desc_pq_xor *pq_xor;
+	struct iop3xx_desc_dual_xor *dual_xor;
+	void *ptr;
+};
+
+static inline int iop_adma_get_max_xor(void)
+{
+	return 32;
+}
+
+static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
+{
+	int id = chan->device->id;
+
+	switch (id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return __raw_readl(DMA_DAR(chan));
+	case AAU_ID:
+		return __raw_readl(AAU_ADAR(chan));
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
+						u32 next_desc_addr)
+{
+	int id = chan->device->id;
+
+	switch (id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		__raw_writel(next_desc_addr, DMA_NDAR(chan));
+		break;
+	case AAU_ID:
+		__raw_writel(next_desc_addr, AAU_ANDAR(chan));
+		break;
+	}
+
+}
+
+#define IOP_ADMA_STATUS_BUSY (1 << 10)
+#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
+#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
+#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
+
+static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(DMA_CSR(chan));
+	return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
+}
+
+static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
+				      int num_slots)
+{
+	/* num_slots will only ever be 1, 2, 4, or 8 */
+	return (desc->idx & (num_slots - 1)) ? 0 : 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
+{
+	*slots_per_op = 1;
+	return 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
+{
+	*slots_per_op = 1;
+	return 1;
+}
+
+static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
+					    int *slots_per_op)
+{
+	const static int slot_count_table[] = { 0,
+						1, 1, 1, 1, /* 01 - 04 */
+						2, 2, 2, 2, /* 05 - 08 */
+						4, 4, 4, 4, /* 09 - 12 */
+						4, 4, 4, 4, /* 13 - 16 */
+						8, 8, 8, 8, /* 17 - 20 */
+						8, 8, 8, 8, /* 21 - 24 */
+						8, 8, 8, 8, /* 25 - 28 */
+						8, 8, 8, 8, /* 29 - 32 */
+					      };
+	*slots_per_op = slot_count_table[src_cnt];
+	return *slots_per_op;
+}
+
+static inline int
+iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
+{
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return iop_chan_memcpy_slot_count(0, slots_per_op);
+	case AAU_ID:
+		return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
+					  int *slots_per_op)
+{
+	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+	if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
+		return slot_cnt;
+
+	len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+	while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
+		len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+		slot_cnt += *slots_per_op;
+	}
+
+	if (len)
+		slot_cnt += *slots_per_op;
+
+	return slot_cnt;
+}
+
+/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
+ * descriptors
+ */
+static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
+					       int *slots_per_op)
+{
+	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
+		return slot_cnt;
+
+	len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+	while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+		len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+		slot_cnt += *slots_per_op;
+	}
+
+	if (len)
+		slot_cnt += *slots_per_op;
+
+	return slot_cnt;
+}
+
+static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
+					 struct iop_adma_chan *chan)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return hw_desc.dma->dest_addr;
+	case AAU_ID:
+		return hw_desc.aau->dest_addr;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
+					  struct iop_adma_chan *chan)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return hw_desc.dma->byte_count;
+	case AAU_ID:
+		return hw_desc.aau->byte_count;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+/* translate the src_idx to a descriptor word index */
+static inline int __desc_idx(int src_idx)
+{
+	const static int desc_idx_table[] = { 0, 0, 0, 0,
+					      0, 1, 2, 3,
+					      5, 6, 7, 8,
+					      9, 10, 11, 12,
+					      14, 15, 16, 17,
+					      18, 19, 20, 21,
+					      23, 24, 25, 26,
+					      27, 28, 29, 30,
+					    };
+
+	return desc_idx_table[src_idx];
+}
+
+static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
+					struct iop_adma_chan *chan,
+					int src_idx)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return hw_desc.dma->src_addr;
+	case AAU_ID:
+		break;
+	default:
+		BUG();
+	}
+
+	if (src_idx < 4)
+		return hw_desc.aau->src[src_idx];
+	else
+		return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
+}
+
+static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
+						int src_idx, dma_addr_t addr)
+{
+	if (src_idx < 4)
+		hw_desc->src[src_idx] = addr;
+	else
+		hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
+}
+
+static inline void
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+{
+	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop3xx_dma_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	u_desc_ctrl.field.mem_to_mem_en = 1;
+	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
+	u_desc_ctrl.field.int_en = int_en;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+	hw_desc->upper_pci_src_addr = 0;
+	hw_desc->crc_addr = 0;
+}
+
+static inline void
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+{
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop3xx_aau_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
+	u_desc_ctrl.field.dest_write_en = 1;
+	u_desc_ctrl.field.int_en = int_en;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline u32
+iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
+{
+	int i, shift;
+	u32 edcr;
+	union {
+		u32 value;
+		struct iop3xx_aau_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	switch (src_cnt) {
+	case 25 ... 32:
+		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+		edcr = 0;
+		shift = 1;
+		for (i = 24; i < src_cnt; i++) {
+			edcr |= (1 << shift);
+			shift += 3;
+		}
+		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
+		src_cnt = 24;
+		/* fall through */
+	case 17 ... 24:
+		if (!u_desc_ctrl.field.blk_ctrl) {
+			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+		}
+		edcr = 0;
+		shift = 1;
+		for (i = 16; i < src_cnt; i++) {
+			edcr |= (1 << shift);
+			shift += 3;
+		}
+		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
+		src_cnt = 16;
+		/* fall through */
+	case 9 ... 16:
+		if (!u_desc_ctrl.field.blk_ctrl)
+			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
+		edcr = 0;
+		shift = 1;
+		for (i = 8; i < src_cnt; i++) {
+			edcr |= (1 << shift);
+			shift += 3;
+		}
+		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
+		src_cnt = 8;
+		/* fall through */
+	case 2 ... 8:
+		shift = 1;
+		for (i = 0; i < src_cnt; i++) {
+			u_desc_ctrl.value |= (1 << shift);
+			shift += 3;
+		}
+
+		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
+			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
+	}
+
+	u_desc_ctrl.field.dest_write_en = 1;
+	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
+	u_desc_ctrl.field.int_en = int_en;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+
+	return u_desc_ctrl.value;
+}
+
+static inline void
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+{
+	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, int_en);
+}
+
+/* return the number of operations */
+static inline int
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+{
+	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
+	union {
+		u32 value;
+		struct iop3xx_aau_desc_ctrl field;
+	} u_desc_ctrl;
+	int i, j;
+
+	hw_desc = desc->hw_desc;
+
+	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
+	     i += slots_per_op, j++) {
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, int_en);
+		u_desc_ctrl.field.dest_write_en = 0;
+		u_desc_ctrl.field.zero_result_en = 1;
+		u_desc_ctrl.field.int_en = int_en;
+		iter->desc_ctrl = u_desc_ctrl.value;
+
+		/* for the subsequent descriptors preserve the store queue
+		 * and chain them together
+		 */
+		if (i) {
+			prev_hw_desc =
+				iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
+			prev_hw_desc->next_desc =
+				(u32) (desc->async_tx.phys + (i << 5));
+		}
+	}
+
+	return j;
+}
+
+static inline void
+iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+{
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop3xx_aau_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	switch (src_cnt) {
+	case 25 ... 32:
+		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+		/* fall through */
+	case 17 ... 24:
+		if (!u_desc_ctrl.field.blk_ctrl) {
+			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+		}
+		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
+		/* fall through */
+	case 9 ... 16:
+		if (!u_desc_ctrl.field.blk_ctrl)
+			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
+		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
+		/* fall through */
+	case 1 ... 8:
+		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
+			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
+	}
+
+	u_desc_ctrl.field.dest_write_en = 0;
+	u_desc_ctrl.field.int_en = int_en;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
+					   struct iop_adma_chan *chan,
+					   u32 byte_count)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		hw_desc.dma->byte_count = byte_count;
+		break;
+	case AAU_ID:
+		hw_desc.aau->byte_count = byte_count;
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline void
+iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
+			struct iop_adma_chan *chan)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		iop_desc_init_memcpy(desc, 1);
+		hw_desc.dma->byte_count = 0;
+		hw_desc.dma->dest_addr = 0;
+		hw_desc.dma->src_addr = 0;
+		break;
+	case AAU_ID:
+		iop_desc_init_null_xor(desc, 2, 1);
+		hw_desc.aau->byte_count = 0;
+		hw_desc.aau->dest_addr = 0;
+		hw_desc.aau->src[0] = 0;
+		hw_desc.aau->src[1] = 0;
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline void
+iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+	int slots_per_op = desc->slots_per_op;
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+	int i = 0;
+
+	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+		hw_desc->byte_count = len;
+	} else {
+		do {
+			iter = iop_hw_desc_slot_idx(hw_desc, i);
+			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+			len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+			i += slots_per_op;
+		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
+
+		if (len) {
+			iter = iop_hw_desc_slot_idx(hw_desc, i);
+			iter->byte_count = len;
+		}
+	}
+}
+
+static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
+					  struct iop_adma_chan *chan,
+					  dma_addr_t addr)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		hw_desc.dma->dest_addr = addr;
+		break;
+	case AAU_ID:
+		hw_desc.aau->dest_addr = addr;
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
+						dma_addr_t addr)
+{
+	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
+	hw_desc->src_addr = addr;
+}
+
+static inline void
+iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+			       dma_addr_t addr)
+{
+
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+	int i;
+
+	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
+	     i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
+	}
+}
+
+static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
+					     int src_idx, dma_addr_t addr)
+{
+
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+	int i;
+
+	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
+	     i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
+	}
+}
+
+static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
+					  u32 next_desc_addr)
+{
+	/* hw_desc->next_desc is the same location for all channels */
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+	BUG_ON(hw_desc.dma->next_desc);
+	hw_desc.dma->next_desc = next_desc_addr;
+}
+
+static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
+{
+	/* hw_desc->next_desc is the same location for all channels */
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+	return hw_desc.dma->next_desc;
+}
+
+static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
+{
+	/* hw_desc->next_desc is the same location for all channels */
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+	hw_desc.dma->next_desc = 0;
+}
+
+static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
+					       u32 val)
+{
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+	hw_desc->src[0] = val;
+}
+
+static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+{
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
+
+	BUG_ON(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
+	return desc_ctrl.zero_result_err;
+}
+
+static inline void iop_chan_append(struct iop_adma_chan *chan)
+{
+	u32 dma_chan_ctrl;
+	/* workaround dropped interrupts on 3xx */
+	mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3));
+
+	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+	dma_chan_ctrl |= 0x2;
+	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
+{
+	if (!busy)
+		del_timer(&chan->cleanup_watchdog);
+}
+
+static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
+{
+	return __raw_readl(DMA_CSR(chan));
+}
+
+static inline void iop_chan_disable(struct iop_adma_chan *chan)
+{
+	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+	dma_chan_ctrl &= ~1;
+	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline void iop_chan_enable(struct iop_adma_chan *chan)
+{
+	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+
+	dma_chan_ctrl |= 1;
+	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(DMA_CSR(chan));
+	status &= (1 << 9);
+	__raw_writel(status, DMA_CSR(chan));
+}
+
+static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(DMA_CSR(chan));
+	status &= (1 << 8);
+	__raw_writel(status, DMA_CSR(chan));
+}
+
+static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(DMA_CSR(chan));
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
+		break;
+	case AAU_ID:
+		status &= (1 << 5);
+		break;
+	default:
+		BUG();
+	}
+
+	__raw_writel(status, DMA_CSR(chan));
+}
+
+static inline int
+iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
+{
+	return 0;
+}
+
+static inline int
+iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return 0;
+}
+
+static inline int
+iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return 0;
+}
+
+static inline int
+iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return test_bit(5, &status);
+}
+
+static inline int
+iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return test_bit(2, &status);
+	default:
+		return 0;
+	}
+}
+
+static inline int
+iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return test_bit(3, &status);
+	default:
+		return 0;
+	}
+}
+
+static inline int
+iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
+{
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return test_bit(1, &status);
+	default:
+		return 0;
+	}
+}
+#endif /* _ADMA_H */
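Both adma headers compose hardware control words through an on-stack union of a u32 and a bitfield struct, then store the result with a single write. A standalone sketch of that idiom follows; note the usual caveat, which these little-endian ARM headers can assume but portable code cannot: bitfield allocation order is compiler- and endianness-specific (values below assume LSB-first allocation).

	#include <stdint.h>
	#include <stdio.h>

	/* trimmed-down stand-in for struct iop3xx_dma_desc_ctrl */
	struct demo_desc_ctrl {
		unsigned int pci_transaction:4;
		unsigned int int_en:1;
		unsigned int dac_cycle_en:1;
		unsigned int mem_to_mem_en:1;
		unsigned int pad:25;
	};

	int main(void)
	{
		union {
			uint32_t value;
			struct demo_desc_ctrl field;
		} u = { .value = 0 };

		u.field.mem_to_mem_en = 1;
		u.field.pci_transaction = 0xe;	/* memory read block */
		u.field.int_en = 1;

		/* with LSB-first allocation this prints 0x5e:
		 * bits 0-3 = 0xe, bit 4 (int_en), bit 6 (mem_to_mem_en)
		 */
		printf("desc_ctrl = %#x\n", u.value);
		return 0;
	}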
diff --git a/include/asm-arm/hardware/iop3xx.h b/include/asm-arm/hardware/iop3xx.h
index 63feceb7ede5..81ca5d3e2bff 100644
--- a/include/asm-arm/hardware/iop3xx.h
+++ b/include/asm-arm/hardware/iop3xx.h
@@ -144,24 +144,9 @@ extern int init_atu;
 #define IOP3XX_IAR	(volatile u32 *)IOP3XX_REG_ADDR(0x0380)
 
 /* DMA Controller  */
-#define IOP3XX_DMA0_CCR	(volatile u32 *)IOP3XX_REG_ADDR(0x0400)
-#define IOP3XX_DMA0_CSR	(volatile u32 *)IOP3XX_REG_ADDR(0x0404)
-#define IOP3XX_DMA0_DAR	(volatile u32 *)IOP3XX_REG_ADDR(0x040c)
-#define IOP3XX_DMA0_NDAR	(volatile u32 *)IOP3XX_REG_ADDR(0x0410)
-#define IOP3XX_DMA0_PADR	(volatile u32 *)IOP3XX_REG_ADDR(0x0414)
-#define IOP3XX_DMA0_PUADR	(volatile u32 *)IOP3XX_REG_ADDR(0x0418)
-#define IOP3XX_DMA0_LADR	(volatile u32 *)IOP3XX_REG_ADDR(0x041c)
-#define IOP3XX_DMA0_BCR	(volatile u32 *)IOP3XX_REG_ADDR(0x0420)
-#define IOP3XX_DMA0_DCR	(volatile u32 *)IOP3XX_REG_ADDR(0x0424)
-#define IOP3XX_DMA1_CCR	(volatile u32 *)IOP3XX_REG_ADDR(0x0440)
-#define IOP3XX_DMA1_CSR	(volatile u32 *)IOP3XX_REG_ADDR(0x0444)
-#define IOP3XX_DMA1_DAR	(volatile u32 *)IOP3XX_REG_ADDR(0x044c)
-#define IOP3XX_DMA1_NDAR	(volatile u32 *)IOP3XX_REG_ADDR(0x0450)
-#define IOP3XX_DMA1_PADR	(volatile u32 *)IOP3XX_REG_ADDR(0x0454)
-#define IOP3XX_DMA1_PUADR	(volatile u32 *)IOP3XX_REG_ADDR(0x0458)
-#define IOP3XX_DMA1_LADR	(volatile u32 *)IOP3XX_REG_ADDR(0x045c)
-#define IOP3XX_DMA1_BCR	(volatile u32 *)IOP3XX_REG_ADDR(0x0460)
-#define IOP3XX_DMA1_DCR	(volatile u32 *)IOP3XX_REG_ADDR(0x0464)
+#define IOP3XX_DMA_PHYS_BASE(chan)	(IOP3XX_PERIPHERAL_PHYS_BASE + \
+					(0x400 + (chan << 6)))
+#define IOP3XX_DMA_UPPER_PA(chan)	(IOP3XX_DMA_PHYS_BASE(chan) + 0x27)
 
 /* Peripheral bus interface  */
 #define IOP3XX_PBCR	(volatile u32 *)IOP3XX_REG_ADDR(0x0680)
@@ -210,48 +195,8 @@ extern int init_atu;
 #define IOP_TMR_RATIO_1_1	0x00
 
 /* Application accelerator unit  */
-#define IOP3XX_AAU_ACR	(volatile u32 *)IOP3XX_REG_ADDR(0x0800)
-#define IOP3XX_AAU_ASR	(volatile u32 *)IOP3XX_REG_ADDR(0x0804)
-#define IOP3XX_AAU_ADAR	(volatile u32 *)IOP3XX_REG_ADDR(0x0808)
-#define IOP3XX_AAU_ANDAR	(volatile u32 *)IOP3XX_REG_ADDR(0x080c)
-#define IOP3XX_AAU_SAR1	(volatile u32 *)IOP3XX_REG_ADDR(0x0810)
-#define IOP3XX_AAU_SAR2	(volatile u32 *)IOP3XX_REG_ADDR(0x0814)
-#define IOP3XX_AAU_SAR3	(volatile u32 *)IOP3XX_REG_ADDR(0x0818)
-#define IOP3XX_AAU_SAR4	(volatile u32 *)IOP3XX_REG_ADDR(0x081c)
-#define IOP3XX_AAU_DAR	(volatile u32 *)IOP3XX_REG_ADDR(0x0820)
-#define IOP3XX_AAU_ABCR	(volatile u32 *)IOP3XX_REG_ADDR(0x0824)
-#define IOP3XX_AAU_ADCR	(volatile u32 *)IOP3XX_REG_ADDR(0x0828)
-#define IOP3XX_AAU_SAR5	(volatile u32 *)IOP3XX_REG_ADDR(0x082c)
-#define IOP3XX_AAU_SAR6	(volatile u32 *)IOP3XX_REG_ADDR(0x0830)
-#define IOP3XX_AAU_SAR7	(volatile u32 *)IOP3XX_REG_ADDR(0x0834)
-#define IOP3XX_AAU_SAR8	(volatile u32 *)IOP3XX_REG_ADDR(0x0838)
-#define IOP3XX_AAU_EDCR0	(volatile u32 *)IOP3XX_REG_ADDR(0x083c)
-#define IOP3XX_AAU_SAR9	(volatile u32 *)IOP3XX_REG_ADDR(0x0840)
-#define IOP3XX_AAU_SAR10	(volatile u32 *)IOP3XX_REG_ADDR(0x0844)
-#define IOP3XX_AAU_SAR11	(volatile u32 *)IOP3XX_REG_ADDR(0x0848)
-#define IOP3XX_AAU_SAR12	(volatile u32 *)IOP3XX_REG_ADDR(0x084c)
-#define IOP3XX_AAU_SAR13	(volatile u32 *)IOP3XX_REG_ADDR(0x0850)
-#define IOP3XX_AAU_SAR14	(volatile u32 *)IOP3XX_REG_ADDR(0x0854)
-#define IOP3XX_AAU_SAR15	(volatile u32 *)IOP3XX_REG_ADDR(0x0858)
-#define IOP3XX_AAU_SAR16	(volatile u32 *)IOP3XX_REG_ADDR(0x085c)
-#define IOP3XX_AAU_EDCR1	(volatile u32 *)IOP3XX_REG_ADDR(0x0860)
-#define IOP3XX_AAU_SAR17	(volatile u32 *)IOP3XX_REG_ADDR(0x0864)
-#define IOP3XX_AAU_SAR18	(volatile u32 *)IOP3XX_REG_ADDR(0x0868)
-#define IOP3XX_AAU_SAR19	(volatile u32 *)IOP3XX_REG_ADDR(0x086c)
-#define IOP3XX_AAU_SAR20	(volatile u32 *)IOP3XX_REG_ADDR(0x0870)
-#define IOP3XX_AAU_SAR21	(volatile u32 *)IOP3XX_REG_ADDR(0x0874)
-#define IOP3XX_AAU_SAR22	(volatile u32 *)IOP3XX_REG_ADDR(0x0878)
-#define IOP3XX_AAU_SAR23	(volatile u32 *)IOP3XX_REG_ADDR(0x087c)
-#define IOP3XX_AAU_SAR24	(volatile u32 *)IOP3XX_REG_ADDR(0x0880)
-#define IOP3XX_AAU_EDCR2	(volatile u32 *)IOP3XX_REG_ADDR(0x0884)
-#define IOP3XX_AAU_SAR25	(volatile u32 *)IOP3XX_REG_ADDR(0x0888)
-#define IOP3XX_AAU_SAR26	(volatile u32 *)IOP3XX_REG_ADDR(0x088c)
-#define IOP3XX_AAU_SAR27	(volatile u32 *)IOP3XX_REG_ADDR(0x0890)
-#define IOP3XX_AAU_SAR28	(volatile u32 *)IOP3XX_REG_ADDR(0x0894)
-#define IOP3XX_AAU_SAR29	(volatile u32 *)IOP3XX_REG_ADDR(0x0898)
-#define IOP3XX_AAU_SAR30	(volatile u32 *)IOP3XX_REG_ADDR(0x089c)
-#define IOP3XX_AAU_SAR31	(volatile u32 *)IOP3XX_REG_ADDR(0x08a0)
-#define IOP3XX_AAU_SAR32	(volatile u32 *)IOP3XX_REG_ADDR(0x08a4)
+#define IOP3XX_AAU_PHYS_BASE	(IOP3XX_PERIPHERAL_PHYS_BASE + 0x800)
+#define IOP3XX_AAU_UPPER_PA	(IOP3XX_AAU_PHYS_BASE + 0xa7)
 
 /* I2C bus interface unit  */
 #define IOP3XX_ICR0	(volatile u32 *)IOP3XX_REG_ADDR(0x1680)
@@ -329,6 +274,9 @@ static inline void write_tisr(u32 val)
 	asm volatile("mcr p6, 0, %0, c6, c1, 0" : : "r" (val));
 }
 
+extern struct platform_device iop3xx_dma_0_channel;
+extern struct platform_device iop3xx_dma_1_channel;
+extern struct platform_device iop3xx_aau_channel;
 extern struct platform_device iop3xx_i2c0_device;
 extern struct platform_device iop3xx_i2c1_device;
 
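The consolidated IOP3XX_DMA_PHYS_BASE()/IOP3XX_AAU_PHYS_BASE() macros above fold the old fixed register tables into a base plus a 0x40-byte per-channel stride, so no address actually moves. A quick hedged check of that arithmetic, as a standalone C11 illustration (not kernel code; offsets shown relative to the peripheral base, as in the deleted IOP3XX_REG_ADDR() table):

	/* chan << 6 is a 0x40 stride, reproducing the deleted addresses */
	#define DMA_BASE_OFF(chan)	(0x400 + ((chan) << 6))

	_Static_assert(DMA_BASE_OFF(0) == 0x400, "matches old IOP3XX_DMA0_CCR");
	_Static_assert(DMA_BASE_OFF(1) == 0x440, "matches old IOP3XX_DMA1_CCR");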
diff --git a/include/asm-arm/hardware/iop_adma.h b/include/asm-arm/hardware/iop_adma.h
new file mode 100644
index 000000000000..ca8e71f44346
--- /dev/null
+++ b/include/asm-arm/hardware/iop_adma.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright(c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef IOP_ADMA_H
+#define IOP_ADMA_H
+#include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#define IOP_ADMA_SLOT_SIZE 32
+#define IOP_ADMA_THRESHOLD 4
+
+/**
+ * struct iop_adma_device - internal representation of an ADMA device
+ * @pdev: Platform device
+ * @id: HW ADMA Device selector
+ * @dma_desc_pool: base of DMA descriptor region (DMA address)
+ * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
+ * @common: embedded struct dma_device
+ */
+struct iop_adma_device {
+	struct platform_device *pdev;
+	int id;
+	dma_addr_t dma_desc_pool;
+	void *dma_desc_pool_virt;
+	struct dma_device common;
+};
+
+/**
+ * struct iop_adma_chan - internal representation of an ADMA channel
+ * @pending: allows batching of hardware operations
+ * @completed_cookie: identifier for the most recently completed operation
+ * @lock: serializes enqueue/dequeue operations to the slot pool
+ * @mmr_base: memory mapped register base
+ * @chain: device chain view of the descriptors
+ * @device: parent device
+ * @common: common dmaengine channel object members
+ * @last_used: place holder for allocation to continue from where it left off
+ * @all_slots: complete domain of slots usable by the channel
+ * @cleanup_watchdog: workaround missed interrupts on iop3xx
+ * @slots_allocated: records the actual size of the descriptor slot pool
+ * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
+ */
+struct iop_adma_chan {
+	int pending;
+	dma_cookie_t completed_cookie;
+	spinlock_t lock; /* protects the descriptor slot pool */
+	void __iomem *mmr_base;
+	struct list_head chain;
+	struct iop_adma_device *device;
+	struct dma_chan common;
+	struct iop_adma_desc_slot *last_used;
+	struct list_head all_slots;
+	struct timer_list cleanup_watchdog;
+	int slots_allocated;
+	struct tasklet_struct irq_tasklet;
+};
+
+/**
+ * struct iop_adma_desc_slot - IOP-ADMA software descriptor
+ * @slot_node: node on the iop_adma_chan.all_slots list
+ * @chain_node: node on the iop_adma_chan.chain list
+ * @hw_desc: virtual address of the hardware descriptor chain
+ * @phys: hardware address of the hardware descriptor chain
+ * @group_head: first operation in a transaction
+ * @slot_cnt: total slots used in a transaction (group of operations)
+ * @slots_per_op: number of slots per operation
+ * @idx: pool index
+ * @unmap_src_cnt: number of xor sources
+ * @unmap_len: transaction bytecount
+ * @async_tx: support for the async_tx api
+ * @group_list: list of slots that make up a multi-descriptor transaction
+ *	for example transfer lengths larger than the supported hw max
+ * @xor_check_result: result of zero sum
+ * @crc32_result: result crc calculation
+ */
+struct iop_adma_desc_slot {
+	struct list_head slot_node;
+	struct list_head chain_node;
+	void *hw_desc;
+	struct iop_adma_desc_slot *group_head;
+	u16 slot_cnt;
+	u16 slots_per_op;
+	u16 idx;
+	u16 unmap_src_cnt;
+	size_t unmap_len;
+	struct dma_async_tx_descriptor async_tx;
+	union {
+		u32 *xor_check_result;
+		u32 *crc32_result;
+	};
+};
+
+struct iop_adma_platform_data {
+	int hw_id;
+	dma_cap_mask_t cap_mask;
+	size_t pool_size;
+};
+
+#define to_iop_sw_desc(addr_hw_desc) \
+	container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc)
+#define iop_hw_desc_slot_idx(hw_desc, idx) \
+	( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) )
+#endif
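The iop_hw_desc_slot_idx() macro closing this header is the pool stepping used throughout both adma headers: idx << 5 lands idx * IOP_ADMA_SLOT_SIZE (32) bytes past the first hardware descriptor. A hedged userspace sketch of the same pointer math:

	#include <assert.h>
	#include <stddef.h>

	#define SLOT_SIZE 32	/* IOP_ADMA_SLOT_SIZE */

	/* same arithmetic as iop_hw_desc_slot_idx(): slot idx lives
	 * idx * 32 bytes past the first hardware descriptor
	 */
	static void *hw_desc_slot_idx(void *hw_desc, int idx)
	{
		return (void *)((unsigned long)hw_desc +
				((size_t)idx << 5));
	}

	int main(void)
	{
		char pool[8 * SLOT_SIZE];

		assert(hw_desc_slot_idx(pool, 0) == (void *)&pool[0]);
		assert(hw_desc_slot_idx(pool, 3) == (void *)&pool[3 * SLOT_SIZE]);
		return 0;
	}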
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
new file mode 100644
index 000000000000..ff1255079fa1
--- /dev/null
+++ b/include/linux/async_tx.h
@@ -0,0 +1,156 @@
1/*
2 * Copyright © 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 */
18#ifndef _ASYNC_TX_H_
19#define _ASYNC_TX_H_
20#include <linux/dmaengine.h>
21#include <linux/spinlock.h>
22#include <linux/interrupt.h>
23
24/**
25 * dma_chan_ref - object used to manage dma channels received from the
26 * dmaengine core.
27 * @chan - the channel being tracked
28 * @node - node for the channel to be placed on async_tx_master_list
29 * @rcu - for list_del_rcu
30 * @count - number of times this channel is listed in the pool
31 * (for channels with multiple capabilities)
32 */
33struct dma_chan_ref {
34 struct dma_chan *chan;
35 struct list_head node;
36 struct rcu_head rcu;
37 atomic_t count;
38};
39
40/**
41 * async_tx_flags - modifiers for the async_* calls
42 * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
43 * destination address is not a source. The asynchronous case handles this
44 * implicitly, while the synchronous case needs to zero the destination block.
45 * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
46 * also one of the source addresses. In the synchronous case the destination
47 * address is an implied source, whereas in the asynchronous case it must be
48 * listed as a source. The destination address must be the first address in
49 * the source array.
50 * @ASYNC_TX_ASSUME_COHERENT: skip cache maintenance operations
51 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
52 * dependency chain
53 * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining.
54 * @ASYNC_TX_KMAP_SRC: if the transaction is to be performed synchronously
55 * take an atomic mapping (KM_USER0) on the source page(s)
56 * @ASYNC_TX_KMAP_DST: if the transaction is to be performed synchronously
57 * take an atomic mapping (KM_USER0) on the dest page(s)
58 */
59enum async_tx_flags {
60 ASYNC_TX_XOR_ZERO_DST = (1 << 0),
61 ASYNC_TX_XOR_DROP_DST = (1 << 1),
62 ASYNC_TX_ASSUME_COHERENT = (1 << 2),
63 ASYNC_TX_ACK = (1 << 3),
64 ASYNC_TX_DEP_ACK = (1 << 4),
65 ASYNC_TX_KMAP_SRC = (1 << 5),
66 ASYNC_TX_KMAP_DST = (1 << 6),
67};
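Picking between the two xor destination flags comes down to whether the
destination block also carries input data. A standalone sketch (the enum
values mirror the header above; pick_xor_flags() is a hypothetical helper,
not part of the api):

#include <stdio.h>

enum async_tx_flags {
        ASYNC_TX_XOR_ZERO_DST = (1 << 0),
        ASYNC_TX_XOR_DROP_DST = (1 << 1),
};

static enum async_tx_flags pick_xor_flags(int dest_is_also_source)
{
        if (dest_is_also_source)
                return ASYNC_TX_XOR_DROP_DST;   /* dest must be src_list[0] */

        return ASYNC_TX_XOR_ZERO_DST;   /* sync path zeroes dest first */
}

int main(void)
{
        printf("flags: %#x\n", pick_xor_flags(1));
        return 0;
}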
68
69#ifdef CONFIG_DMA_ENGINE
70void async_tx_issue_pending_all(void);
71enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
72void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
73struct dma_chan *
74async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
75 enum dma_transaction_type tx_type);
76#else
77static inline void async_tx_issue_pending_all(void)
78{
79 do { } while (0);
80}
81
82static inline enum dma_status
83dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
84{
85 return DMA_SUCCESS;
86}
87
88static inline void
89async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
90{
91 do { } while (0);
92}
93
94
95static inline struct dma_chan *
96async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
97 enum dma_transaction_type tx_type)
98{
99 return NULL;
100}
101#endif
102
103/**
104 * async_tx_sync_epilog - actions to take if an operation is run synchronously
105 * @flags: async_tx flags
106 * @depend_tx: transaction depends on depend_tx
107 * @cb_fn: function to call when the transaction completes
108 * @cb_fn_param: parameter to pass to the callback routine
109 */
110static inline void
111async_tx_sync_epilog(unsigned long flags,
112 struct dma_async_tx_descriptor *depend_tx,
113 dma_async_tx_callback cb_fn, void *cb_fn_param)
114{
115 if (cb_fn)
116 cb_fn(cb_fn_param);
117
118 if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
119 async_tx_ack(depend_tx);
120}
121
122void
123async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
124 enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
125 dma_async_tx_callback cb_fn, void *cb_fn_param);
126
127struct dma_async_tx_descriptor *
128async_xor(struct page *dest, struct page **src_list, unsigned int offset,
129 int src_cnt, size_t len, enum async_tx_flags flags,
130 struct dma_async_tx_descriptor *depend_tx,
131 dma_async_tx_callback cb_fn, void *cb_fn_param);
132
133struct dma_async_tx_descriptor *
134async_xor_zero_sum(struct page *dest, struct page **src_list,
135 unsigned int offset, int src_cnt, size_t len,
136 u32 *result, enum async_tx_flags flags,
137 struct dma_async_tx_descriptor *depend_tx,
138 dma_async_tx_callback cb_fn, void *cb_fn_param);
139
140struct dma_async_tx_descriptor *
141async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
142 unsigned int src_offset, size_t len, enum async_tx_flags flags,
143 struct dma_async_tx_descriptor *depend_tx,
144 dma_async_tx_callback cb_fn, void *cb_fn_param);
145
146struct dma_async_tx_descriptor *
147async_memset(struct page *dest, int val, unsigned int offset,
148 size_t len, enum async_tx_flags flags,
149 struct dma_async_tx_descriptor *depend_tx,
150 dma_async_tx_callback cb_fn, void *cb_fn_param);
151
152struct dma_async_tx_descriptor *
153async_trigger_callback(enum async_tx_flags flags,
154 struct dma_async_tx_descriptor *depend_tx,
155 dma_async_tx_callback cb_fn, void *cb_fn_param);
156#endif /* _ASYNC_TX_H_ */
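A sketch of how a caller might chain the operations declared above: copy a bio
page into the stripe cache, then xor the cache page into the parity page, with
the xor ordered after the copy and acking it via ASYNC_TX_DEP_ACK. This is
kernel-context pseudocode built only from the prototypes in this header;
update_parity() and parity_done() are assumed names, not part of the api:

#include <linux/async_tx.h>
#include <linux/mm.h>

static void parity_done(void *unused)
{
        /* runs once the xor (and therefore the copy) has completed */
}

static void update_parity(struct page *parity, struct page *cache,
                          struct page *bio_page)
{
        struct dma_async_tx_descriptor *tx;
        struct page *srcs[2] = { parity, cache };

        tx = async_memcpy(cache, bio_page, 0, 0, PAGE_SIZE,
                          0, NULL, NULL, NULL);

        /* parity is both the destination and srcs[0], hence DROP_DST */
        async_xor(parity, srcs, 0, 2, PAGE_SIZE,
                  ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK,
                  tx, parity_done, NULL);
}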
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c94d8f1d62e5..a3b6035b6c86 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -21,29 +21,40 @@
21#ifndef DMAENGINE_H 21#ifndef DMAENGINE_H
22#define DMAENGINE_H 22#define DMAENGINE_H
23 23
24#ifdef CONFIG_DMA_ENGINE
25
26#include <linux/device.h> 24#include <linux/device.h>
27#include <linux/uio.h> 25#include <linux/uio.h>
28#include <linux/kref.h> 26#include <linux/kref.h>
29#include <linux/completion.h> 27#include <linux/completion.h>
30#include <linux/rcupdate.h> 28#include <linux/rcupdate.h>
29#include <linux/dma-mapping.h>
31 30
32/** 31/**
33 * enum dma_event - resource PNP/power management events 32 * enum dma_state - resource PNP/power management state
34 * @DMA_RESOURCE_SUSPEND: DMA device going into low power state 33 * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
35 * @DMA_RESOURCE_RESUME: DMA device returning to full power 34 * @DMA_RESOURCE_RESUME: DMA device returning to full power
36 * @DMA_RESOURCE_ADDED: DMA device added to the system 35 * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
37 * @DMA_RESOURCE_REMOVED: DMA device removed from the system 36 * @DMA_RESOURCE_REMOVED: DMA device removed from the system
38 */ 37 */
39enum dma_event { 38enum dma_state {
40 DMA_RESOURCE_SUSPEND, 39 DMA_RESOURCE_SUSPEND,
41 DMA_RESOURCE_RESUME, 40 DMA_RESOURCE_RESUME,
42 DMA_RESOURCE_ADDED, 41 DMA_RESOURCE_AVAILABLE,
43 DMA_RESOURCE_REMOVED, 42 DMA_RESOURCE_REMOVED,
44}; 43};
45 44
46/** 45/**
46 * enum dma_state_client - state of the channel in the client
47 * @DMA_ACK: client would like to use, or was using this channel
48 * @DMA_DUP: client has already seen this channel, or is not using this channel
49 * @DMA_NAK: client does not want to see any more channels
50 */
51enum dma_state_client {
52 DMA_ACK,
53 DMA_DUP,
54 DMA_NAK,
55};
56
57/**
47 * typedef dma_cookie_t - an opaque DMA cookie 58 * typedef dma_cookie_t - an opaque DMA cookie
48 * 59 *
49 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code 60 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
@@ -65,6 +76,31 @@ enum dma_status {
65}; 76};
66 77
67/** 78/**
79 * enum dma_transaction_type - DMA transaction types/indexes
80 */
81enum dma_transaction_type {
82 DMA_MEMCPY,
83 DMA_XOR,
84 DMA_PQ_XOR,
85 DMA_DUAL_XOR,
86 DMA_PQ_UPDATE,
87 DMA_ZERO_SUM,
88 DMA_PQ_ZERO_SUM,
89 DMA_MEMSET,
90 DMA_MEMCPY_CRC32C,
91 DMA_INTERRUPT,
92};
93
94/* last transaction type for creation of the capabilities mask */
95#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
96
97/**
98 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
99 * See linux/cpumask.h
100 */
101typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
102
103/**
68 * struct dma_chan_percpu - the per-CPU part of struct dma_chan 104 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
69 * @refcount: local_t used for open-coded "bigref" counting 105 * @refcount: local_t used for open-coded "bigref" counting
70 * @memcpy_count: transaction counter 106 * @memcpy_count: transaction counter
@@ -80,7 +116,6 @@ struct dma_chan_percpu {
80 116
81/** 117/**
82 * struct dma_chan - devices supply DMA channels, clients use them 118 * struct dma_chan - devices supply DMA channels, clients use them
83 * @client: ptr to the client user of this chan, will be %NULL when unused
84 * @device: ptr to the dma device who supplies this channel, always !%NULL 119 * @device: ptr to the dma device who supplies this channel, always !%NULL
85 * @cookie: last cookie value returned to client 120 * @cookie: last cookie value returned to client
86 * @chan_id: channel ID for sysfs 121 * @chan_id: channel ID for sysfs
@@ -88,12 +123,10 @@ struct dma_chan_percpu {
88 * @refcount: kref, used in "bigref" slow-mode 123 * @refcount: kref, used in "bigref" slow-mode
89 * @slow_ref: indicates that the DMA channel is free 124 * @slow_ref: indicates that the DMA channel is free
90 * @rcu: the DMA channel's RCU head 125 * @rcu: the DMA channel's RCU head
91 * @client_node: used to add this to the client chan list
92 * @device_node: used to add this to the device chan list 126 * @device_node: used to add this to the device chan list
93 * @local: per-cpu pointer to a struct dma_chan_percpu 127 * @local: per-cpu pointer to a struct dma_chan_percpu
94 */ 128 */
95struct dma_chan { 129struct dma_chan {
96 struct dma_client *client;
97 struct dma_device *device; 130 struct dma_device *device;
98 dma_cookie_t cookie; 131 dma_cookie_t cookie;
99 132
@@ -105,11 +138,11 @@ struct dma_chan {
105 int slow_ref; 138 int slow_ref;
106 struct rcu_head rcu; 139 struct rcu_head rcu;
107 140
108 struct list_head client_node;
109 struct list_head device_node; 141 struct list_head device_node;
110 struct dma_chan_percpu *local; 142 struct dma_chan_percpu *local;
111}; 143};
112 144
145
113void dma_chan_cleanup(struct kref *kref); 146void dma_chan_cleanup(struct kref *kref);
114 147
115static inline void dma_chan_get(struct dma_chan *chan) 148static inline void dma_chan_get(struct dma_chan *chan)
@@ -134,169 +167,206 @@ static inline void dma_chan_put(struct dma_chan *chan)
134 167
135/* 168/*
136 * typedef dma_event_callback - function pointer to a DMA event callback 169 * typedef dma_event_callback - function pointer to a DMA event callback
170 * For each channel added to the system this routine is called for each client.
171 * If the client would like to use the channel it returns DMA_ACK to signal
172 * the dmaengine core to take out a reference on the channel and its
173 * corresponding device. A client must not 'ack' an available channel more
174 * than once. When a channel is removed all clients are notified. If a client
175 * is using the channel it must 'ack' the removal. A client must not 'ack' a
176 * removed channel more than once.
177 * @client - 'this' pointer for the client context
178 * @chan - channel to be acted upon
179 * @state - available or removed
137 */ 180 */
138typedef void (*dma_event_callback) (struct dma_client *client, 181struct dma_client;
139 struct dma_chan *chan, enum dma_event event); 182typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
183 struct dma_chan *chan, enum dma_state state);
140 184
141/** 185/**
142 * struct dma_client - info on the entity making use of DMA services 186 * struct dma_client - info on the entity making use of DMA services
143 * @event_callback: func ptr to call when something happens 187 * @event_callback: func ptr to call when something happens
144 * @chan_count: number of chans allocated 188 * @cap_mask: only return channels that satisfy the requested capabilities
145 * @chans_desired: number of chans requested. Can be +/- chan_count 189 * a value of zero corresponds to any capability
146 * @lock: protects access to the channels list
147 * @channels: the list of DMA channels allocated
148 * @global_node: list_head for global dma_client_list 190 * @global_node: list_head for global dma_client_list
149 */ 191 */
150struct dma_client { 192struct dma_client {
151 dma_event_callback event_callback; 193 dma_event_callback event_callback;
152 unsigned int chan_count; 194 dma_cap_mask_t cap_mask;
153 unsigned int chans_desired;
154
155 spinlock_t lock;
156 struct list_head channels;
157 struct list_head global_node; 195 struct list_head global_node;
158}; 196};
159 197
198typedef void (*dma_async_tx_callback)(void *dma_async_param);
199/**
200 * struct dma_async_tx_descriptor - async transaction descriptor
201 * ---dma generic offload fields---
202 * @cookie: tracking cookie for this transaction, set to -EBUSY if
203 * this tx is sitting on a dependency list
204 * @ack: the descriptor cannot be reused until the client acknowledges
205 * receipt, i.e. has had a chance to establish any dependency chains
206 * @phys: physical address of the descriptor
207 * @tx_list: driver common field for operations that require multiple
208 * descriptors
209 * @chan: target channel for this operation
210 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
211 * @tx_set_dest: set a destination address in a hardware descriptor
212 * @tx_set_src: set a source address in a hardware descriptor
213 * @callback: routine to call after this operation is complete
214 * @callback_param: general parameter to pass to the callback routine
215 * ---async_tx api specific fields---
216 * @depend_list: at completion this list of transactions is submitted
217 * @depend_node: allow this transaction to be executed after another
218 * transaction has completed, possibly on another channel
219 * @parent: pointer to the next level up in the dependency chain
220 * @lock: protect the dependency list
221 */
222struct dma_async_tx_descriptor {
223 dma_cookie_t cookie;
224 int ack;
225 dma_addr_t phys;
226 struct list_head tx_list;
227 struct dma_chan *chan;
228 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
229 void (*tx_set_dest)(dma_addr_t addr,
230 struct dma_async_tx_descriptor *tx, int index);
231 void (*tx_set_src)(dma_addr_t addr,
232 struct dma_async_tx_descriptor *tx, int index);
233 dma_async_tx_callback callback;
234 void *callback_param;
235 struct list_head depend_list;
236 struct list_head depend_node;
237 struct dma_async_tx_descriptor *parent;
238 spinlock_t lock;
239};
240
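The fields above imply a simple descriptor lifecycle: a client obtains a
descriptor from a prep routine, points it at bus addresses with
tx_set_dest()/tx_set_src(), submits it, and finally kicks the engine. A hedged
kernel-context sketch (do_one_memcpy() is an assumed caller, not part of the
api):

#include <linux/dmaengine.h>

static dma_cookie_t do_one_memcpy(struct dma_chan *chan, dma_addr_t dst,
                                  dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = chan->device->device_prep_dma_memcpy(chan, len, 0 /* !int_en */);
        if (!tx)
                return -ENOMEM;

        tx->tx_set_dest(dst, tx, 0);    /* index 0: single destination */
        tx->tx_set_src(src, tx, 0);     /* index 0: single source */
        cookie = tx->tx_submit(tx);     /* appended, not yet running */

        chan->device->device_issue_pending(chan);       /* doorbell */
        return cookie;
}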
160/** 241/**
161 * struct dma_device - info on the entity supplying DMA services 242 * struct dma_device - info on the entity supplying DMA services
162 * @chancnt: how many DMA channels are supported 243 * @chancnt: how many DMA channels are supported
163 * @channels: the list of struct dma_chan 244 * @channels: the list of struct dma_chan
164 * @global_node: list_head for global dma_device_list 245 * @global_node: list_head for global dma_device_list
246 * @cap_mask: one or more dma_capability flags
247 * @max_xor: maximum number of xor sources, 0 if no capability
165 * @refcount: reference count 248 * @refcount: reference count
166 * @done: IO completion struct 249 * @done: IO completion struct
167 * @dev_id: unique device ID 250 * @dev_id: unique device ID
251 * @dev: struct device reference for dma mapping api
168 * @device_alloc_chan_resources: allocate resources and return the 252 * @device_alloc_chan_resources: allocate resources and return the
169 * number of allocated descriptors 253 * number of allocated descriptors
170 * @device_free_chan_resources: release DMA channel's resources 254 * @device_free_chan_resources: release DMA channel's resources
171 * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer 255 * @device_prep_dma_memcpy: prepares a memcpy operation
172 * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page 256 * @device_prep_dma_xor: prepares a xor operation
173 * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset 257 * @device_prep_dma_zero_sum: prepares a zero_sum operation
174 * @device_memcpy_complete: poll the status of an IOAT DMA transaction 258 * @device_prep_dma_memset: prepares a memset operation
175 * @device_memcpy_issue_pending: push appended descriptors to hardware 259 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
260 * @device_dependency_added: async_tx notifies the channel about new deps
261 * @device_issue_pending: push pending transactions to hardware
176 */ 262 */
177struct dma_device { 263struct dma_device {
178 264
179 unsigned int chancnt; 265 unsigned int chancnt;
180 struct list_head channels; 266 struct list_head channels;
181 struct list_head global_node; 267 struct list_head global_node;
268 dma_cap_mask_t cap_mask;
269 int max_xor;
182 270
183 struct kref refcount; 271 struct kref refcount;
184 struct completion done; 272 struct completion done;
185 273
186 int dev_id; 274 int dev_id;
275 struct device *dev;
187 276
188 int (*device_alloc_chan_resources)(struct dma_chan *chan); 277 int (*device_alloc_chan_resources)(struct dma_chan *chan);
189 void (*device_free_chan_resources)(struct dma_chan *chan); 278 void (*device_free_chan_resources)(struct dma_chan *chan);
190 dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan, 279
191 void *dest, void *src, size_t len); 280 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
192 dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan, 281 struct dma_chan *chan, size_t len, int int_en);
193 struct page *page, unsigned int offset, void *kdata, 282 struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
194 size_t len); 283 struct dma_chan *chan, unsigned int src_cnt, size_t len,
195 dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan, 284 int int_en);
196 struct page *dest_pg, unsigned int dest_off, 285 struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
197 struct page *src_pg, unsigned int src_off, size_t len); 286 struct dma_chan *chan, unsigned int src_cnt, size_t len,
198 enum dma_status (*device_memcpy_complete)(struct dma_chan *chan, 287 u32 *result, int int_en);
288 struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
289 struct dma_chan *chan, int value, size_t len, int int_en);
290 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
291 struct dma_chan *chan);
292
293 void (*device_dependency_added)(struct dma_chan *chan);
294 enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
199 dma_cookie_t cookie, dma_cookie_t *last, 295 dma_cookie_t cookie, dma_cookie_t *last,
200 dma_cookie_t *used); 296 dma_cookie_t *used);
201 void (*device_memcpy_issue_pending)(struct dma_chan *chan); 297 void (*device_issue_pending)(struct dma_chan *chan);
202}; 298};
203 299
204/* --- public DMA engine API --- */ 300/* --- public DMA engine API --- */
205 301
206struct dma_client *dma_async_client_register(dma_event_callback event_callback); 302void dma_async_client_register(struct dma_client *client);
207void dma_async_client_unregister(struct dma_client *client); 303void dma_async_client_unregister(struct dma_client *client);
208void dma_async_client_chan_request(struct dma_client *client, 304void dma_async_client_chan_request(struct dma_client *client);
209 unsigned int number); 305dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
306 void *dest, void *src, size_t len);
307dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
308 struct page *page, unsigned int offset, void *kdata, size_t len);
309dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
310 struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
311 unsigned int src_off, size_t len);
312void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
313 struct dma_chan *chan);
210 314
211/** 315static inline void
212 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses 316async_tx_ack(struct dma_async_tx_descriptor *tx)
213 * @chan: DMA channel to offload copy to
214 * @dest: destination address (virtual)
215 * @src: source address (virtual)
216 * @len: length
217 *
218 * Both @dest and @src must be mappable to a bus address according to the
219 * DMA mapping API rules for streaming mappings.
220 * Both @dest and @src must stay memory resident (kernel memory or locked
221 * user space pages).
222 */
223static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
224 void *dest, void *src, size_t len)
225{ 317{
226 int cpu = get_cpu(); 318 tx->ack = 1;
227 per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
228 per_cpu_ptr(chan->local, cpu)->memcpy_count++;
229 put_cpu();
230
231 return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len);
232} 319}
233 320
234/** 321#define first_dma_cap(mask) __first_dma_cap(&(mask))
235 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page 322static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
236 * @chan: DMA channel to offload copy to
237 * @page: destination page
238 * @offset: offset in page to copy to
239 * @kdata: source address (virtual)
240 * @len: length
241 *
242 * Both @page/@offset and @kdata must be mappable to a bus address according
243 * to the DMA mapping API rules for streaming mappings.
244 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
245 * locked user space pages)
246 */
247static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
248 struct page *page, unsigned int offset, void *kdata, size_t len)
249{ 323{
250 int cpu = get_cpu(); 324 return min_t(int, DMA_TX_TYPE_END,
251 per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; 325 find_first_bit(srcp->bits, DMA_TX_TYPE_END));
252 per_cpu_ptr(chan->local, cpu)->memcpy_count++; 326}
253 put_cpu();
254 327
255 return chan->device->device_memcpy_buf_to_pg(chan, page, offset, 328#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
256 kdata, len); 329static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
330{
331 return min_t(int, DMA_TX_TYPE_END,
332 find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
257} 333}
258 334
259/** 335#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
260 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page 336static inline void
261 * @chan: DMA channel to offload copy to 337__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
262 * @dest_pg: destination page
263 * @dest_off: offset in page to copy to
264 * @src_pg: source page
265 * @src_off: offset in page to copy from
266 * @len: length
267 *
268 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
269 * address according to the DMA mapping API rules for streaming mappings.
270 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
271 * (kernel memory or locked user space pages).
272 */
273static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
274 struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
275 unsigned int src_off, size_t len)
276{ 338{
277 int cpu = get_cpu(); 339 set_bit(tx_type, dstp->bits);
278 per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; 340}
279 per_cpu_ptr(chan->local, cpu)->memcpy_count++;
280 put_cpu();
281 341
282 return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off, 342#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
283 src_pg, src_off, len); 343static inline int
344__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
345{
346 return test_bit(tx_type, srcp->bits);
284} 347}
285 348
349#define for_each_dma_cap_mask(cap, mask) \
350 for ((cap) = first_dma_cap(mask); \
351 (cap) < DMA_TX_TYPE_END; \
352 (cap) = next_dma_cap((cap), (mask)))
353
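A short kernel-context sketch of the mask helpers in use: a client that wants
memcpy and xor channels sets both bits and can walk the result with
for_each_dma_cap_mask() (init_client_caps() is an assumed name):

#include <linux/kernel.h>
#include <linux/dmaengine.h>

static void init_client_caps(struct dma_client *client)
{
        enum dma_transaction_type cap;

        dma_cap_set(DMA_MEMCPY, client->cap_mask);
        dma_cap_set(DMA_XOR, client->cap_mask);

        for_each_dma_cap_mask(cap, client->cap_mask)
                pr_debug("requested capability %d\n", cap);
}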
286/** 354/**
287 * dma_async_memcpy_issue_pending - flush pending copies to HW 355 * dma_async_issue_pending - flush pending transactions to HW
288 * @chan: target DMA channel 356 * @chan: target DMA channel
289 * 357 *
290 * This allows drivers to push copies to HW in batches, 358 * This allows drivers to push copies to HW in batches,
291 * reducing MMIO writes where possible. 359 * reducing MMIO writes where possible.
292 */ 360 */
293static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan) 361static inline void dma_async_issue_pending(struct dma_chan *chan)
294{ 362{
295 return chan->device->device_memcpy_issue_pending(chan); 363 return chan->device->device_issue_pending(chan);
296} 364}
297 365
366#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
367
298/** 368/**
299 * dma_async_memcpy_complete - poll for transaction completion 369 * dma_async_is_tx_complete - poll for transaction completion
300 * @chan: DMA channel 370 * @chan: DMA channel
301 * @cookie: transaction identifier to check status of 371 * @cookie: transaction identifier to check status of
302 * @last: returns last completed cookie, can be NULL 372 * @last: returns last completed cookie, can be NULL
@@ -306,12 +376,15 @@ static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
306 * internal state and can be used with dma_async_is_complete() to check 376 * internal state and can be used with dma_async_is_complete() to check
307 * the status of multiple cookies without re-checking hardware state. 377 * the status of multiple cookies without re-checking hardware state.
308 */ 378 */
309static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan, 379static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
310 dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) 380 dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
311{ 381{
312 return chan->device->device_memcpy_complete(chan, cookie, last, used); 382 return chan->device->device_is_tx_complete(chan, cookie, last, used);
313} 383}
314 384
385#define dma_async_memcpy_complete(chan, cookie, last, used)\
386 dma_async_is_tx_complete(chan, cookie, last, used)
387
315/** 388/**
316 * dma_async_is_complete - test a cookie against chan state 389 * dma_async_is_complete - test a cookie against chan state
317 * @cookie: transaction identifier to test status of 390 * @cookie: transaction identifier to test status of
@@ -334,6 +407,7 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
334 return DMA_IN_PROGRESS; 407 return DMA_IN_PROGRESS;
335} 408}
336 409
410enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
337 411
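The split between dma_async_is_tx_complete() and dma_async_is_complete() lets
one hardware query answer for many cookies. A hedged kernel-context sketch
(both_done() is an assumed caller):

#include <linux/dmaengine.h>

static int both_done(struct dma_chan *chan, dma_cookie_t c1, dma_cookie_t c2)
{
        dma_cookie_t last, used;

        /* one trip to the hardware ... */
        dma_async_is_tx_complete(chan, c1, &last, &used);

        /* ... then any number of cookies tested against the cached state */
        return dma_async_is_complete(c1, last, used) == DMA_SUCCESS &&
               dma_async_is_complete(c2, last, used) == DMA_SUCCESS;
}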
338/* --- DMA device --- */ 412/* --- DMA device --- */
339 413
@@ -362,5 +436,4 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
362 struct dma_pinned_list *pinned_list, struct page *page, 436 struct dma_pinned_list *pinned_list, struct page *page,
363 unsigned int offset, size_t len); 437 unsigned int offset, size_t len);
364 438
365#endif /* CONFIG_DMA_ENGINE */
366#endif /* DMAENGINE_H */ 439#endif /* DMAENGINE_H */
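Under the reworked client model above, a client fills in a dma_client,
registers it, and acks or declines channels from its event callback. A hedged
kernel-context sketch (my_chan, my_event() and my_init() are assumptions, not
from this patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmaengine.h>

static struct dma_chan *my_chan;

static enum dma_state_client
my_event(struct dma_client *client, struct dma_chan *chan,
         enum dma_state state)
{
        switch (state) {
        case DMA_RESOURCE_AVAILABLE:
                if (my_chan)
                        return DMA_DUP;         /* already have one */
                my_chan = chan;
                return DMA_ACK;                 /* take a reference */
        case DMA_RESOURCE_REMOVED:
                if (chan != my_chan)
                        return DMA_DUP;
                my_chan = NULL;
                return DMA_ACK;                 /* ack the removal */
        default:
                return DMA_DUP;
        }
}

static struct dma_client my_client = {
        .event_callback = my_event,
};

static int __init my_init(void)
{
        dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
        dma_async_client_register(&my_client);
        dma_async_client_chan_request(&my_client);
        return 0;
}
module_init(my_init);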
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9366182fffa7..2c7add169539 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -479,6 +479,9 @@
479#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 479#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
480#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 480#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252
481 481
482#define PCI_VENDOR_ID_UNISYS 0x1018
483#define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C
484
482#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */ 485#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */
483#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005 486#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005
484 487
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index d8286db60b96..93678f57ccbe 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -116,13 +116,46 @@
116 * attach a request to an active stripe (add_stripe_bh()) 116 * attach a request to an active stripe (add_stripe_bh())
117 * lockdev attach-buffer unlockdev 117 * lockdev attach-buffer unlockdev
118 * handle a stripe (handle_stripe()) 118 * handle a stripe (handle_stripe())
119 * lockstripe clrSTRIPE_HANDLE ... (lockdev check-buffers unlockdev) .. change-state .. record io needed unlockstripe schedule io 119 * lockstripe clrSTRIPE_HANDLE ...
120 * (lockdev check-buffers unlockdev) ..
121 * change-state ..
122 * record io/ops needed unlockstripe schedule io/ops
120 * release an active stripe (release_stripe()) 123 * release an active stripe (release_stripe())
121 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev 124 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
122 * 125 *
123 * The refcount counts each thread that has activated the stripe, 126 * The refcount counts each thread that has activated the stripe,
124 * plus raid5d if it is handling it, plus one for each active request 127 * plus raid5d if it is handling it, plus one for each active request
125 * on a cached buffer. 128 * on a cached buffer, and plus one if the stripe is undergoing stripe
129 * operations.
130 *
131 * Stripe operations are performed outside the stripe lock;
132 * the stripe operations are:
133 * -copying data between the stripe cache and user application buffers
134 * -computing blocks to save a disk access, or to recover a missing block
135 * -updating the parity on a write operation (reconstruct write and
136 * read-modify-write)
137 * -checking parity correctness
138 * -running i/o to disk
139 * These operations are carried out by raid5_run_ops which uses the async_tx
140 * api to (optionally) offload operations to dedicated hardware engines.
141 * When requesting an operation handle_stripe sets the pending bit for the
142 * operation and increments the count. raid5_run_ops is then run whenever
143 * the count is non-zero.
144 * There are some critical dependencies between the operations that prevent some
145 * from being requested while another is in flight.
146 * 1/ Parity check operations destroy the in-cache version of the parity block,
147 * so we prevent parity dependent operations like writes and compute_blocks
148 * from starting while a check is in progress. Some dma engines can perform
149 * the check without damaging the parity block, in these cases the parity
150 * block is re-marked up to date (assuming the check was successful) and is
151 * not re-read from disk.
152 * 2/ When a write operation is requested we immediately lock the affected
153 * blocks, and mark them as not up to date. This causes new read requests
154 * to be held off, as well as parity checks and compute block operations.
155 * 3/ Once a compute block operation has been requested handle_stripe treats
156 * that block as if it is up to date. raid5_run_ops guarantees that any
157 * operation that is dependent on the compute block result is initiated after
158 * the compute block completes.
126 */ 159 */
127 160
128struct stripe_head { 161struct stripe_head {
@@ -136,15 +169,46 @@ struct stripe_head {
136 spinlock_t lock; 169 spinlock_t lock;
137 int bm_seq; /* sequence number for bitmap flushes */ 170 int bm_seq; /* sequence number for bitmap flushes */
138 int disks; /* disks in stripe */ 171 int disks; /* disks in stripe */
172 /* stripe_operations
173 * @pending - pending ops flags (set for request->issue->complete)
174 * @ack - submitted ops flags (set for issue->complete)
175 * @complete - completed ops flags (set for complete)
176 * @target - STRIPE_OP_COMPUTE_BLK target
177 * @count - raid5_run_ops is set to run when this is non-zero
178 */
179 struct stripe_operations {
180 unsigned long pending;
181 unsigned long ack;
182 unsigned long complete;
183 int target;
184 int count;
185 u32 zero_sum_result;
186 } ops;
139 struct r5dev { 187 struct r5dev {
140 struct bio req; 188 struct bio req;
141 struct bio_vec vec; 189 struct bio_vec vec;
142 struct page *page; 190 struct page *page;
143 struct bio *toread, *towrite, *written; 191 struct bio *toread, *read, *towrite, *written;
144 sector_t sector; /* sector of this page */ 192 sector_t sector; /* sector of this page */
145 unsigned long flags; 193 unsigned long flags;
146 } dev[1]; /* allocated with extra space depending of RAID geometry */ 194 } dev[1]; /* allocated with extra space depending of RAID geometry */
147}; 195};
196
197/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
198 * for handle_stripe. It is only valid under spin_lock(sh->lock).
199 */
200struct stripe_head_state {
201 int syncing, expanding, expanded;
202 int locked, uptodate, to_read, to_write, failed, written;
203 int to_fill, compute, req_compute, non_overwrite;
204 int failed_num;
205};
206
207/* r6_state - extra state data only relevant to r6 */
208struct r6_state {
209 int p_failed, q_failed, qd_idx, failed_num[2];
210};
211
148/* Flags */ 212/* Flags */
149#define R5_UPTODATE 0 /* page contains current data */ 213#define R5_UPTODATE 0 /* page contains current data */
150#define R5_LOCKED 1 /* IO has been submitted on "req" */ 214#define R5_LOCKED 1 /* IO has been submitted on "req" */
@@ -158,6 +222,15 @@ struct stripe_head {
158#define R5_ReWrite 9 /* have tried to over-write the readerror */ 222#define R5_ReWrite 9 /* have tried to over-write the readerror */
159 223
160#define R5_Expanded 10 /* This block now has post-expand data */ 224#define R5_Expanded 10 /* This block now has post-expand data */
225#define R5_Wantcompute 11 /* compute_block in progress treat as
226 * uptodate
227 */
228#define R5_Wantfill 12 /* dev->toread contains a bio that needs
229 * filling
230 */
231#define R5_Wantprexor 13 /* distinguish blocks ready for rmw from
232 * other "towrites"
233 */
161/* 234/*
162 * Write method 235 * Write method
163 */ 236 */
@@ -180,6 +253,24 @@ struct stripe_head {
180#define STRIPE_EXPAND_SOURCE 10 253#define STRIPE_EXPAND_SOURCE 10
181#define STRIPE_EXPAND_READY 11 254#define STRIPE_EXPAND_READY 11
182/* 255/*
256 * Operations flags (in issue order)
257 */
258#define STRIPE_OP_BIOFILL 0
259#define STRIPE_OP_COMPUTE_BLK 1
260#define STRIPE_OP_PREXOR 2
261#define STRIPE_OP_BIODRAIN 3
262#define STRIPE_OP_POSTXOR 4
263#define STRIPE_OP_CHECK 5
264#define STRIPE_OP_IO 6
265
266/* modifiers to the base operations
267 * STRIPE_OP_MOD_REPAIR_PD - compute the parity block and write it back
268 * STRIPE_OP_MOD_DMA_CHECK - parity is not corrupted by the check
269 */
270#define STRIPE_OP_MOD_REPAIR_PD 7
271#define STRIPE_OP_MOD_DMA_CHECK 8
272
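The three bitmaps in struct stripe_operations encode where each op sits in the
request->issue->complete flow. A hedged sketch of the transitions (request_op()
and friends are illustrative helpers; the real transitions live in
handle_stripe and raid5_run_ops):

#include <linux/raid/raid5.h>

static void request_op(struct stripe_head *sh, int op)
{
        set_bit(op, &sh->ops.pending);
        sh->ops.count++;        /* raid5_run_ops runs while this is non-zero */
}

static void issue_op(struct stripe_head *sh, int op)
{
        set_bit(op, &sh->ops.ack);      /* now in flight */
}

static void complete_op(struct stripe_head *sh, int op)
{
        set_bit(op, &sh->ops.complete);
}

/* an op is fully retired once it has passed through all three states */
static int op_retired(struct stripe_head *sh, int op)
{
        return test_bit(op, &sh->ops.pending) &&
               test_bit(op, &sh->ops.ack) &&
               test_bit(op, &sh->ops.complete);
}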
273/*
183 * Plugging: 274 * Plugging:
184 * 275 *
185 * To improve write throughput, we need to delay the handling of some 276 * To improve write throughput, we need to delay the handling of some
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index f0d67cbdea40..3e120587eada 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -3,9 +3,10 @@
3 3
4#include <linux/raid/md.h> 4#include <linux/raid/md.h>
5 5
6#define MAX_XOR_BLOCKS 5 6#define MAX_XOR_BLOCKS 4
7 7
8extern void xor_block(unsigned int count, unsigned int bytes, void **ptr); 8extern void xor_blocks(unsigned int count, unsigned int bytes,
9 void *dest, void **srcs);
9 10
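With the new signature the destination is passed separately and 'count' covers
only the sources (at most MAX_XOR_BLOCKS per call). A hedged kernel-context
sketch of xoring three source blocks into a parity block (xor_three() is an
assumed caller):

#include <linux/raid/xor.h>

static void xor_three(void *parity, void *a, void *b, void *c,
                      unsigned int bytes)
{
        void *srcs[3] = { a, b, c };

        xor_blocks(3, bytes, parity, srcs);
}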
10struct xor_block_template { 11struct xor_block_template {
11 struct xor_block_template *next; 12 struct xor_block_template *next;