path: root/drivers/dma
author	Xuelin Shi <xuelin.shi@freescale.com>	2015-03-03 01:26:22 -0500
committer	Vinod Koul <vinod.koul@intel.com>	2015-04-02 06:40:27 -0400
commit	ad80da658bbcaaac1d3617ea6cb0f4d5e16da422 (patch)
tree	3808e3df04cafb9dc9088ffdad7e6b40dff6cb38 /drivers/dma
parent	a3f92e8ebe1547705dfda9c04bacb31417510692 (diff)
dmaengine: Driver support for FSL RaidEngine device.
The RaidEngine is a new FSL hardware block used for RAID5/6 acceleration. This patch enables the RaidEngine functionality and provides hardware offloading capability for memcpy, xor and pq computation. It works with async_tx.

Signed-off-by: Harninder Rai <harninder.rai@freescale.com>
Signed-off-by: Xuelin Shi <xuelin.shi@freescale.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
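For reference, a minimal, hypothetical sketch (not part of this patch) of how an upper layer such as MD exercises this offload through the async_tx API; the demo_* names and the page/length parameters are placeholders, and channel selection is left entirely to async_tx:

#include <linux/async_tx.h>

/* Hypothetical completion callback, invoked once the XOR finishes. */
static void demo_xor_done(void *param)
{
}

/*
 * XOR src_cnt source pages into dest. With CONFIG_FSL_RAID enabled and a
 * RAID Engine channel registered, async_xor() may offload the operation to
 * a job ring; otherwise it falls back to the synchronous software path.
 */
static struct dma_async_tx_descriptor *demo_xor(struct page *dest,
						struct page **srcs,
						int src_cnt, size_t len)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, demo_xor_done,
			  NULL, NULL);
	return async_xor(dest, srcs, 0, src_cnt, len, &submit);
}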
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig	11
-rw-r--r--	drivers/dma/Makefile	1
-rw-r--r--	drivers/dma/fsl_raid.c	904
-rw-r--r--	drivers/dma/fsl_raid.h	306
4 files changed, 1222 insertions, 0 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 4be766f43aa9..b674683de24b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -125,6 +125,17 @@ config FSL_DMA
 	  EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
 	  some Txxx and Bxxx parts.
 
+config FSL_RAID
+	tristate "Freescale RAID engine Support"
+	depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	select DMA_ENGINE
+	select DMA_ENGINE_RAID
+	---help---
+	  Enable support for Freescale RAID Engine. RAID Engine is
+	  available on some QorIQ SoCs (like P5020/P5040). It has
+	  the capability to offload memcpy, xor and pq computation
+	  for raid5/6.
+
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
 	depends on PPC_MPC512x || PPC_MPC831x
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 1dab9ef196b0..345ec4758b9d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
 obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
 obj-y += xilinx/
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
new file mode 100644
index 000000000000..12778bd6446b
--- /dev/null
+++ b/drivers/dma/fsl_raid.c
@@ -0,0 +1,904 @@
1/*
2 * drivers/dma/fsl_raid.c
3 *
4 * Freescale RAID Engine device driver
5 *
6 * Author:
7 * Harninder Rai <harninder.rai@freescale.com>
8 * Naveen Burmi <naveenburmi@freescale.com>
9 *
10 * Rewrite:
11 * Xuelin Shi <xuelin.shi@freescale.com>
12 *
13 * Copyright (c) 2010-2014 Freescale Semiconductor, Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions are met:
17 * * Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * * Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * * Neither the name of Freescale Semiconductor nor the
23 * names of its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written permission.
25 *
26 * ALTERNATIVELY, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") as published by the Free Software
28 * Foundation, either version 2 of that License or (at your option) any
29 * later version.
30 *
31 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
32 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
33 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
34 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
35 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
36 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
38 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
39 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
40 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 *
42 * Theory of operation:
43 *
44 * General capabilities:
 45 * The RAID Engine (RE) block is capable of offloading the XOR, memcpy and
 46 * P/Q calculations required in RAID5 and RAID6 operations. The RE driver
 47 * registers with Linux's ASYNC layer as a DMA driver. The RE hardware
 48 * maintains strict ordering of the requests through chained
 49 * command queueing.
50 *
51 * Data flow:
 52 * The software RAID layer of Linux (the MD layer) maintains RAID partitions,
 53 * strips, stripes etc. It sends requests to the underlying ASYNC layer,
 54 * which in turn passes them to the RE driver. The ASYNC layer decides which
 55 * request goes to which job ring of the RE hardware. For every request
 56 * processed by the RAID Engine, the driver gets an interrupt unless
 57 * coalescing is set. The per job ring interrupt handler checks the status
 58 * register for errors, clears the interrupt and leaves the post-interrupt
 59 * processing to the tasklet.
60 */
61#include <linux/interrupt.h>
62#include <linux/module.h>
63#include <linux/of_irq.h>
64#include <linux/of_address.h>
65#include <linux/of_platform.h>
66#include <linux/dma-mapping.h>
67#include <linux/dmapool.h>
68#include <linux/dmaengine.h>
69#include <linux/io.h>
70#include <linux/spinlock.h>
71#include <linux/slab.h>
72
73#include "dmaengine.h"
74#include "fsl_raid.h"
75
76#define FSL_RE_MAX_XOR_SRCS 16
77#define FSL_RE_MAX_PQ_SRCS 16
78#define FSL_RE_MIN_DESCS 256
79#define FSL_RE_MAX_DESCS (4 * FSL_RE_MIN_DESCS)
80#define FSL_RE_FRAME_FORMAT 0x1
81#define FSL_RE_MAX_DATA_LEN (1024*1024)
82
83#define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)
84
85/* Add descriptors into per chan software queue - submit_q */
86static dma_cookie_t fsl_re_tx_submit(struct dma_async_tx_descriptor *tx)
87{
88 struct fsl_re_desc *desc;
89 struct fsl_re_chan *re_chan;
90 dma_cookie_t cookie;
91 unsigned long flags;
92
93 desc = to_fsl_re_dma_desc(tx);
94 re_chan = container_of(tx->chan, struct fsl_re_chan, chan);
95
96 spin_lock_irqsave(&re_chan->desc_lock, flags);
97 cookie = dma_cookie_assign(tx);
98 list_add_tail(&desc->node, &re_chan->submit_q);
99 spin_unlock_irqrestore(&re_chan->desc_lock, flags);
100
101 return cookie;
102}
103
104/* Copy descriptor from per chan software queue into hardware job ring */
105static void fsl_re_issue_pending(struct dma_chan *chan)
106{
107 struct fsl_re_chan *re_chan;
108 int avail;
109 struct fsl_re_desc *desc, *_desc;
110 unsigned long flags;
111
112 re_chan = container_of(chan, struct fsl_re_chan, chan);
113
114 spin_lock_irqsave(&re_chan->desc_lock, flags);
115 avail = FSL_RE_SLOT_AVAIL(
116 in_be32(&re_chan->jrregs->inbring_slot_avail));
117
118 list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) {
119 if (!avail)
120 break;
121
122 list_move_tail(&desc->node, &re_chan->active_q);
123
124 memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count],
125 &desc->hwdesc, sizeof(struct fsl_re_hw_desc));
126
127 re_chan->inb_count = (re_chan->inb_count + 1) &
128 FSL_RE_RING_SIZE_MASK;
129 out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1));
130 avail--;
131 }
132 spin_unlock_irqrestore(&re_chan->desc_lock, flags);
133}
134
135static void fsl_re_desc_done(struct fsl_re_desc *desc)
136{
137 dma_async_tx_callback callback;
138 void *callback_param;
139
140 dma_cookie_complete(&desc->async_tx);
141
142 callback = desc->async_tx.callback;
143 callback_param = desc->async_tx.callback_param;
144 if (callback)
145 callback(callback_param);
146
147 dma_descriptor_unmap(&desc->async_tx);
148}
149
150static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
151{
152 struct fsl_re_desc *desc, *_desc;
153 unsigned long flags;
154
155 spin_lock_irqsave(&re_chan->desc_lock, flags);
156 list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) {
157 if (async_tx_test_ack(&desc->async_tx))
158 list_move_tail(&desc->node, &re_chan->free_q);
159 }
160 spin_unlock_irqrestore(&re_chan->desc_lock, flags);
161
162 fsl_re_issue_pending(&re_chan->chan);
163}
164
165static void fsl_re_dequeue(unsigned long data)
166{
167 struct fsl_re_chan *re_chan;
168 struct fsl_re_desc *desc, *_desc;
169 struct fsl_re_hw_desc *hwdesc;
170 unsigned long flags;
171 unsigned int count, oub_count;
172 int found;
173
174 re_chan = dev_get_drvdata((struct device *)data);
175
176 fsl_re_cleanup_descs(re_chan);
177
178 spin_lock_irqsave(&re_chan->desc_lock, flags);
179 count = FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full));
180 while (count--) {
181 found = 0;
182 hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count];
183 list_for_each_entry_safe(desc, _desc, &re_chan->active_q,
184 node) {
 185 /* compare the hw dma addr to find the completed descriptor */
186 if (desc->hwdesc.lbea32 == hwdesc->lbea32 &&
187 desc->hwdesc.addr_low == hwdesc->addr_low) {
188 found = 1;
189 break;
190 }
191 }
192
193 if (found) {
194 fsl_re_desc_done(desc);
195 list_move_tail(&desc->node, &re_chan->ack_q);
196 } else {
197 dev_err(re_chan->dev,
198 "found hwdesc not in sw queue, discard it\n");
199 }
200
201 oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK;
202 re_chan->oub_count = oub_count;
203
204 out_be32(&re_chan->jrregs->oubring_job_rmvd,
205 FSL_RE_RMVD_JOB(1));
206 }
207 spin_unlock_irqrestore(&re_chan->desc_lock, flags);
208}
209
210/* Per Job Ring interrupt handler */
211static irqreturn_t fsl_re_isr(int irq, void *data)
212{
213 struct fsl_re_chan *re_chan;
214 u32 irqstate, status;
215
216 re_chan = dev_get_drvdata((struct device *)data);
217
218 irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status);
219 if (!irqstate)
220 return IRQ_NONE;
221
222 /*
 223 * There's no way in the upper layer (read: the MD layer) to recover from
 224 * error conditions except restarting everything. In the long term we
 225 * need to do something more than just crashing.
226 */
227 if (irqstate & FSL_RE_ERROR) {
228 status = in_be32(&re_chan->jrregs->jr_status);
229 dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n",
230 irqstate, status);
231 }
232
233 /* Clear interrupt */
234 out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR);
235
236 tasklet_schedule(&re_chan->irqtask);
237
238 return IRQ_HANDLED;
239}
240
241static enum dma_status fsl_re_tx_status(struct dma_chan *chan,
242 dma_cookie_t cookie,
243 struct dma_tx_state *txstate)
244{
245 return dma_cookie_status(chan, cookie, txstate);
246}
247
248static void fill_cfd_frame(struct fsl_re_cmpnd_frame *cf, u8 index,
249 size_t length, dma_addr_t addr, bool final)
250{
251 u32 efrl = length & FSL_RE_CF_LENGTH_MASK;
252
253 efrl |= final << FSL_RE_CF_FINAL_SHIFT;
254 cf[index].efrl32 = efrl;
255 cf[index].addr_high = upper_32_bits(addr);
256 cf[index].addr_low = lower_32_bits(addr);
257}
258
259static struct fsl_re_desc *fsl_re_init_desc(struct fsl_re_chan *re_chan,
260 struct fsl_re_desc *desc,
261 void *cf, dma_addr_t paddr)
262{
263 desc->re_chan = re_chan;
264 desc->async_tx.tx_submit = fsl_re_tx_submit;
265 dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);
266 INIT_LIST_HEAD(&desc->node);
267
268 desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT;
269 desc->hwdesc.lbea32 = upper_32_bits(paddr);
270 desc->hwdesc.addr_low = lower_32_bits(paddr);
271 desc->cf_addr = cf;
272 desc->cf_paddr = paddr;
273
274 desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE);
275 desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE;
276
277 return desc;
278}
279
280static struct fsl_re_desc *fsl_re_chan_alloc_desc(struct fsl_re_chan *re_chan,
281 unsigned long flags)
282{
283 struct fsl_re_desc *desc = NULL;
284 void *cf;
285 dma_addr_t paddr;
286 unsigned long lock_flag;
287
288 fsl_re_cleanup_descs(re_chan);
289
290 spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
291 if (!list_empty(&re_chan->free_q)) {
292 /* take one desc from free_q */
293 desc = list_first_entry(&re_chan->free_q,
294 struct fsl_re_desc, node);
295 list_del(&desc->node);
296
297 desc->async_tx.flags = flags;
298 }
299 spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
300
301 if (!desc) {
302 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
303 if (!desc)
304 return NULL;
305
306 cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT,
307 &paddr);
308 if (!cf) {
309 kfree(desc);
310 return NULL;
311 }
312
313 desc = fsl_re_init_desc(re_chan, desc, cf, paddr);
314 desc->async_tx.flags = flags;
315
316 spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
317 re_chan->alloc_count++;
318 spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
319 }
320
321 return desc;
322}
323
324static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
325 struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
326 unsigned int src_cnt, const unsigned char *scf, size_t len,
327 unsigned long flags)
328{
329 struct fsl_re_chan *re_chan;
330 struct fsl_re_desc *desc;
331 struct fsl_re_xor_cdb *xor;
332 struct fsl_re_cmpnd_frame *cf;
333 u32 cdb;
334 unsigned int i, j;
335 unsigned int save_src_cnt = src_cnt;
336 int cont_q = 0;
337
338 re_chan = container_of(chan, struct fsl_re_chan, chan);
339 if (len > FSL_RE_MAX_DATA_LEN) {
340 dev_err(re_chan->dev, "genq tx length %lu, max length %d\n",
341 len, FSL_RE_MAX_DATA_LEN);
342 return NULL;
343 }
344
345 desc = fsl_re_chan_alloc_desc(re_chan, flags);
 346 if (!desc)
347 return NULL;
348
349 if (scf && (flags & DMA_PREP_CONTINUE)) {
350 cont_q = 1;
351 src_cnt += 1;
352 }
353
354 /* Filling xor CDB */
355 cdb = FSL_RE_XOR_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
356 cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
357 cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
358 cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
359 cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
360 xor = desc->cdb_addr;
361 xor->cdb32 = cdb;
362
363 if (scf) {
364 /* compute q = src0*coef0^src1*coef1^..., * is GF(8) mult */
365 for (i = 0; i < save_src_cnt; i++)
366 xor->gfm[i] = scf[i];
367 if (cont_q)
368 xor->gfm[i++] = 1;
369 } else {
370 /* compute P, that is XOR all srcs */
371 for (i = 0; i < src_cnt; i++)
372 xor->gfm[i] = 1;
373 }
374
375 /* Filling frame 0 of compound frame descriptor with CDB */
376 cf = desc->cf_addr;
377 fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0);
378
379 /* Fill CFD's 1st frame with dest buffer */
380 fill_cfd_frame(cf, 1, len, dest, 0);
381
382 /* Fill CFD's rest of the frames with source buffers */
383 for (i = 2, j = 0; j < save_src_cnt; i++, j++)
384 fill_cfd_frame(cf, i, len, src[j], 0);
385
386 if (cont_q)
387 fill_cfd_frame(cf, i++, len, dest, 0);
388
389 /* Setting the final bit in the last source buffer frame in CFD */
390 cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;
391
392 return &desc->async_tx;
393}
394
395/*
 396 * Prep function for P parity calculation. In RAID Engine terminology,
 397 * XOR calculation is called GenQ calculation, done through the GenQ command.
398 */
399static struct dma_async_tx_descriptor *fsl_re_prep_dma_xor(
400 struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
401 unsigned int src_cnt, size_t len, unsigned long flags)
402{
 403 /* A NULL scf lets genq take all coefficients as 1 */
404 return fsl_re_prep_dma_genq(chan, dest, src, src_cnt, NULL, len, flags);
405}
406
407/*
 408 * Prep function for P/Q parity calculation. In RAID Engine terminology,
 409 * P/Q calculation is called GenQQ, done through the GenQQ command.
410 */
411static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
412 struct dma_chan *chan, dma_addr_t *dest, dma_addr_t *src,
413 unsigned int src_cnt, const unsigned char *scf, size_t len,
414 unsigned long flags)
415{
416 struct fsl_re_chan *re_chan;
417 struct fsl_re_desc *desc;
418 struct fsl_re_pq_cdb *pq;
419 struct fsl_re_cmpnd_frame *cf;
420 u32 cdb;
421 u8 *p;
422 int gfmq_len, i, j;
423 unsigned int save_src_cnt = src_cnt;
424
425 re_chan = container_of(chan, struct fsl_re_chan, chan);
426 if (len > FSL_RE_MAX_DATA_LEN) {
427 dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n",
428 len, FSL_RE_MAX_DATA_LEN);
429 return NULL;
430 }
431
432 /*
 433 * The RE requires at least 2 sources. If given only one source, we pass
 434 * the same source twice as the second one.
 435 * With only one source, generating P is meaningless; only Q is generated.
436 */
437 if (src_cnt == 1) {
438 struct dma_async_tx_descriptor *tx;
439 dma_addr_t dma_src[2];
440 unsigned char coef[2];
441
442 dma_src[0] = *src;
443 coef[0] = *scf;
444 dma_src[1] = *src;
445 coef[1] = 0;
446 tx = fsl_re_prep_dma_genq(chan, dest[1], dma_src, 2, coef, len,
447 flags);
448 if (tx)
449 desc = to_fsl_re_dma_desc(tx);
450
451 return tx;
452 }
453
454 /*
 455 * During RAID6 array creation, Linux's MD layer gets P and Q
 456 * calculated separately in two steps. But our RAID Engine has
 457 * the capability to calculate both P and Q with a single command.
 458 * Hence, to merge well with the MD layer, we provide a hook here
 459 * and call fsl_re_prep_dma_genq().
460 */
461
462 if (flags & DMA_PREP_PQ_DISABLE_P)
463 return fsl_re_prep_dma_genq(chan, dest[1], src, src_cnt,
464 scf, len, flags);
465
466 if (flags & DMA_PREP_CONTINUE)
467 src_cnt += 3;
468
469 desc = fsl_re_chan_alloc_desc(re_chan, flags);
 470 if (!desc)
471 return NULL;
472
473 /* Filling GenQQ CDB */
474 cdb = FSL_RE_PQ_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
475 cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
476 cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
477 cdb |= FSL_RE_BUFFER_OUTPUT << FSL_RE_CDB_BUFFER_SHIFT;
478 cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
479
480 pq = desc->cdb_addr;
481 pq->cdb32 = cdb;
482
483 p = pq->gfm_q1;
484 /* Init gfm_q1[] */
485 for (i = 0; i < src_cnt; i++)
486 p[i] = 1;
487
488 /* Align gfm[] to 32bit */
489 gfmq_len = ALIGN(src_cnt, 4);
490
491 /* Init gfm_q2[] */
492 p += gfmq_len;
493 for (i = 0; i < src_cnt; i++)
494 p[i] = scf[i];
495
496 /* Filling frame 0 of compound frame descriptor with CDB */
497 cf = desc->cf_addr;
498 fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0);
499
500 /* Fill CFD's 1st & 2nd frame with dest buffers */
501 for (i = 1, j = 0; i < 3; i++, j++)
502 fill_cfd_frame(cf, i, len, dest[j], 0);
503
504 /* Fill CFD's rest of the frames with source buffers */
505 for (i = 3, j = 0; j < save_src_cnt; i++, j++)
506 fill_cfd_frame(cf, i, len, src[j], 0);
507
508 /* PQ computation continuation */
509 if (flags & DMA_PREP_CONTINUE) {
510 if (src_cnt - save_src_cnt == 3) {
511 p[save_src_cnt] = 0;
512 p[save_src_cnt + 1] = 0;
513 p[save_src_cnt + 2] = 1;
514 fill_cfd_frame(cf, i++, len, dest[0], 0);
515 fill_cfd_frame(cf, i++, len, dest[1], 0);
516 fill_cfd_frame(cf, i++, len, dest[1], 0);
517 } else {
518 dev_err(re_chan->dev, "PQ tx continuation error!\n");
519 return NULL;
520 }
521 }
522
523 /* Setting the final bit in the last source buffer frame in CFD */
524 cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;
525
526 return &desc->async_tx;
527}
528
529/*
 530 * Prep function for memcpy. In the RAID Engine, memcpy is done through the
 531 * MOVE command. The logic of this function will need to be modified once
 532 * multipage support is added in Linux's MD/ASYNC layer.
533 */
534static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
535 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
536 size_t len, unsigned long flags)
537{
538 struct fsl_re_chan *re_chan;
539 struct fsl_re_desc *desc;
540 size_t length;
541 struct fsl_re_cmpnd_frame *cf;
542 struct fsl_re_move_cdb *move;
543 u32 cdb;
544
545 re_chan = container_of(chan, struct fsl_re_chan, chan);
546
547 if (len > FSL_RE_MAX_DATA_LEN) {
548 dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n",
549 len, FSL_RE_MAX_DATA_LEN);
550 return NULL;
551 }
552
553 desc = fsl_re_chan_alloc_desc(re_chan, flags);
 554 if (!desc)
555 return NULL;
556
557 /* Filling move CDB */
558 cdb = FSL_RE_MOVE_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
559 cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
560 cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
561 cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
562
563 move = desc->cdb_addr;
564 move->cdb32 = cdb;
565
566 /* Filling frame 0 of CFD with move CDB */
567 cf = desc->cf_addr;
568 fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0);
569
570 length = min_t(size_t, len, FSL_RE_MAX_DATA_LEN);
571
572 /* Fill CFD's 1st frame with dest buffer */
573 fill_cfd_frame(cf, 1, length, dest, 0);
574
575 /* Fill CFD's 2nd frame with src buffer */
576 fill_cfd_frame(cf, 2, length, src, 1);
577
578 return &desc->async_tx;
579}
580
581static int fsl_re_alloc_chan_resources(struct dma_chan *chan)
582{
583 struct fsl_re_chan *re_chan;
584 struct fsl_re_desc *desc;
585 void *cf;
586 dma_addr_t paddr;
587 int i;
588
589 re_chan = container_of(chan, struct fsl_re_chan, chan);
590 for (i = 0; i < FSL_RE_MIN_DESCS; i++) {
591 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
592 if (!desc)
593 break;
594
595 cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_KERNEL,
596 &paddr);
597 if (!cf) {
598 kfree(desc);
599 break;
600 }
601
602 INIT_LIST_HEAD(&desc->node);
603 fsl_re_init_desc(re_chan, desc, cf, paddr);
604
605 list_add_tail(&desc->node, &re_chan->free_q);
606 re_chan->alloc_count++;
607 }
608 return re_chan->alloc_count;
609}
610
611static void fsl_re_free_chan_resources(struct dma_chan *chan)
612{
613 struct fsl_re_chan *re_chan;
614 struct fsl_re_desc *desc;
615
616 re_chan = container_of(chan, struct fsl_re_chan, chan);
617 while (re_chan->alloc_count--) {
618 desc = list_first_entry(&re_chan->free_q,
619 struct fsl_re_desc,
620 node);
621
622 list_del(&desc->node);
623 dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr,
624 desc->cf_paddr);
625 kfree(desc);
626 }
627
628 if (!list_empty(&re_chan->free_q))
629 dev_err(re_chan->dev, "chan resource cannot be cleaned!\n");
630}
631
632int fsl_re_chan_probe(struct platform_device *ofdev,
633 struct device_node *np, u8 q, u32 off)
634{
635 struct device *dev, *chandev;
636 struct fsl_re_drv_private *re_priv;
637 struct fsl_re_chan *chan;
638 struct dma_device *dma_dev;
639 u32 ptr;
640 u32 status;
641 int ret = 0, rc;
642 struct platform_device *chan_ofdev;
643
644 dev = &ofdev->dev;
645 re_priv = dev_get_drvdata(dev);
646 dma_dev = &re_priv->dma_dev;
647
648 chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
649 if (!chan)
650 return -ENOMEM;
651
652 /* create platform device for chan node */
653 chan_ofdev = of_platform_device_create(np, NULL, dev);
654 if (!chan_ofdev) {
655 dev_err(dev, "Not able to create ofdev for jr %d\n", q);
656 ret = -EINVAL;
657 goto err_free;
658 }
659
660 /* read reg property from dts */
661 rc = of_property_read_u32(np, "reg", &ptr);
662 if (rc) {
663 dev_err(dev, "Reg property not found in jr %d\n", q);
664 ret = -ENODEV;
665 goto err_free;
666 }
667
668 chan->jrregs = (struct fsl_re_chan_cfg *)((u8 *)re_priv->re_regs +
669 off + ptr);
670
671 /* read irq property from dts */
672 chan->irq = irq_of_parse_and_map(np, 0);
673 if (chan->irq == NO_IRQ) {
674 dev_err(dev, "No IRQ defined for JR %d\n", q);
675 ret = -ENODEV;
676 goto err_free;
677 }
678
679 snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);
680
681 chandev = &chan_ofdev->dev;
682 tasklet_init(&chan->irqtask, fsl_re_dequeue, (unsigned long)chandev);
683
684 ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev);
685 if (ret) {
686 dev_err(dev, "Unable to register interrupt for JR %d\n", q);
687 ret = -EINVAL;
688 goto err_free;
689 }
690
691 re_priv->re_jrs[q] = chan;
692 chan->chan.device = dma_dev;
693 chan->chan.private = chan;
694 chan->dev = chandev;
695 chan->re_dev = re_priv;
696
697 spin_lock_init(&chan->desc_lock);
698 INIT_LIST_HEAD(&chan->ack_q);
699 INIT_LIST_HEAD(&chan->active_q);
700 INIT_LIST_HEAD(&chan->submit_q);
701 INIT_LIST_HEAD(&chan->free_q);
702
703 chan->inb_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
704 GFP_KERNEL, &chan->inb_phys_addr);
705 if (!chan->inb_ring_virt_addr) {
706 dev_err(dev, "No dma memory for inb_ring_virt_addr\n");
707 ret = -ENOMEM;
708 goto err_free;
709 }
710
711 chan->oub_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
712 GFP_KERNEL, &chan->oub_phys_addr);
713 if (!chan->oub_ring_virt_addr) {
714 dev_err(dev, "No dma memory for oub_ring_virt_addr\n");
715 ret = -ENOMEM;
716 goto err_free_1;
717 }
718
719 /* Program the Inbound/Outbound ring base addresses and size */
720 out_be32(&chan->jrregs->inbring_base_h,
721 chan->inb_phys_addr & FSL_RE_ADDR_BIT_MASK);
722 out_be32(&chan->jrregs->oubring_base_h,
723 chan->oub_phys_addr & FSL_RE_ADDR_BIT_MASK);
724 out_be32(&chan->jrregs->inbring_base_l,
725 chan->inb_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
726 out_be32(&chan->jrregs->oubring_base_l,
727 chan->oub_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
728 out_be32(&chan->jrregs->inbring_size,
729 FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
730 out_be32(&chan->jrregs->oubring_size,
731 FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
732
733 /* Read LIODN value from u-boot */
734 status = in_be32(&chan->jrregs->jr_config_1) & FSL_RE_REG_LIODN_MASK;
735
736 /* Program the CFG reg */
737 out_be32(&chan->jrregs->jr_config_1,
738 FSL_RE_CFG1_CBSI | FSL_RE_CFG1_CBS0 | status);
739
740 dev_set_drvdata(chandev, chan);
741
742 /* Enable RE/CHAN */
743 out_be32(&chan->jrregs->jr_command, FSL_RE_ENABLE);
744
745 return 0;
746
747err_free_1:
748 dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
749 chan->inb_phys_addr);
750err_free:
751 return ret;
752}
753
754/* Probe function for RAID Engine */
755static int fsl_re_probe(struct platform_device *ofdev)
756{
757 struct fsl_re_drv_private *re_priv;
758 struct device_node *np;
759 struct device_node *child;
760 u32 off;
761 u8 ridx = 0;
762 struct dma_device *dma_dev;
763 struct resource *res;
764 int rc;
765 struct device *dev = &ofdev->dev;
766
767 re_priv = devm_kzalloc(dev, sizeof(*re_priv), GFP_KERNEL);
768 if (!re_priv)
769 return -ENOMEM;
770
771 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
772 if (!res)
773 return -ENODEV;
774
775 /* IOMAP the entire RAID Engine region */
776 re_priv->re_regs = devm_ioremap(dev, res->start, resource_size(res));
777 if (!re_priv->re_regs)
778 return -EBUSY;
779
780 /* Program the RE mode */
781 out_be32(&re_priv->re_regs->global_config, FSL_RE_NON_DPAA_MODE);
782
783 /* Program Galois Field polynomial */
784 out_be32(&re_priv->re_regs->galois_field_config, FSL_RE_GFM_POLY);
785
786 dev_info(dev, "version %x, mode %x, gfp %x\n",
787 in_be32(&re_priv->re_regs->re_version_id),
788 in_be32(&re_priv->re_regs->global_config),
789 in_be32(&re_priv->re_regs->galois_field_config));
790
791 dma_dev = &re_priv->dma_dev;
792 dma_dev->dev = dev;
793 INIT_LIST_HEAD(&dma_dev->channels);
794 dma_set_mask(dev, DMA_BIT_MASK(40));
795
796 dma_dev->device_alloc_chan_resources = fsl_re_alloc_chan_resources;
797 dma_dev->device_tx_status = fsl_re_tx_status;
798 dma_dev->device_issue_pending = fsl_re_issue_pending;
799
800 dma_dev->max_xor = FSL_RE_MAX_XOR_SRCS;
801 dma_dev->device_prep_dma_xor = fsl_re_prep_dma_xor;
802 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
803
804 dma_dev->max_pq = FSL_RE_MAX_PQ_SRCS;
805 dma_dev->device_prep_dma_pq = fsl_re_prep_dma_pq;
806 dma_cap_set(DMA_PQ, dma_dev->cap_mask);
807
808 dma_dev->device_prep_dma_memcpy = fsl_re_prep_dma_memcpy;
809 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
810
811 dma_dev->device_free_chan_resources = fsl_re_free_chan_resources;
812
813 re_priv->total_chans = 0;
814
815 re_priv->cf_desc_pool = dmam_pool_create("fsl_re_cf_desc_pool", dev,
816 FSL_RE_CF_CDB_SIZE,
817 FSL_RE_CF_CDB_ALIGN, 0);
818
819 if (!re_priv->cf_desc_pool) {
820 dev_err(dev, "No memory for fsl re_cf desc pool\n");
821 return -ENOMEM;
822 }
823
824 re_priv->hw_desc_pool = dmam_pool_create("fsl_re_hw_desc_pool", dev,
825 sizeof(struct fsl_re_hw_desc) * FSL_RE_RING_SIZE,
826 FSL_RE_FRAME_ALIGN, 0);
827 if (!re_priv->hw_desc_pool) {
828 dev_err(dev, "No memory for fsl re_hw desc pool\n");
829 return -ENOMEM;
830 }
831
832 dev_set_drvdata(dev, re_priv);
833
834 /* Parse Device tree to find out the total number of JQs present */
835 for_each_compatible_node(np, NULL, "fsl,raideng-v1.0-job-queue") {
836 rc = of_property_read_u32(np, "reg", &off);
837 if (rc) {
838 dev_err(dev, "Reg property not found in JQ node\n");
839 return -ENODEV;
840 }
841 /* Find out the Job Rings present under each JQ */
842 for_each_child_of_node(np, child) {
843 rc = of_device_is_compatible(child,
844 "fsl,raideng-v1.0-job-ring");
845 if (rc) {
846 fsl_re_chan_probe(ofdev, child, ridx++, off);
847 re_priv->total_chans++;
848 }
849 }
850 }
851
852 dma_async_device_register(dma_dev);
853
854 return 0;
855}
856
857static void fsl_re_remove_chan(struct fsl_re_chan *chan)
858{
859 dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
860 chan->inb_phys_addr);
861
862 dma_pool_free(chan->re_dev->hw_desc_pool, chan->oub_ring_virt_addr,
863 chan->oub_phys_addr);
864}
865
866static int fsl_re_remove(struct platform_device *ofdev)
867{
868 struct fsl_re_drv_private *re_priv;
869 struct device *dev;
870 int i;
871
872 dev = &ofdev->dev;
873 re_priv = dev_get_drvdata(dev);
874
875 /* Cleanup chan related memory areas */
876 for (i = 0; i < re_priv->total_chans; i++)
877 fsl_re_remove_chan(re_priv->re_jrs[i]);
878
879 /* Unregister the driver */
880 dma_async_device_unregister(&re_priv->dma_dev);
881
882 return 0;
883}
884
885static struct of_device_id fsl_re_ids[] = {
886 { .compatible = "fsl,raideng-v1.0", },
887 {}
888};
889
890static struct platform_driver fsl_re_driver = {
891 .driver = {
892 .name = "fsl-raideng",
893 .owner = THIS_MODULE,
894 .of_match_table = fsl_re_ids,
895 },
896 .probe = fsl_re_probe,
897 .remove = fsl_re_remove,
898};
899
900module_platform_driver(fsl_re_driver);
901
902MODULE_AUTHOR("Harninder Rai <harninder.rai@freescale.com>");
903MODULE_LICENSE("GPL v2");
904MODULE_DESCRIPTION("Freescale RAID Engine Device Driver");
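The theory-of-operation comment above describes the request flow: a prep routine builds the CFD/CDB pair, tx_submit queues the descriptor on submit_q, issue_pending copies it to the inbound job ring, and the tasklet completes it from the outbound ring. A hedged, illustration-only sketch of that flow at the dmaengine level (not part of the patch; chan is assumed to be a RAID Engine channel obtained via dma_request_channel(), and dest/srcs are already DMA-mapped):

#include <linux/dmaengine.h>

static int demo_offload_xor(struct dma_chan *chan, dma_addr_t dest,
			    dma_addr_t *srcs, int src_cnt, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* fsl_re_prep_dma_xor(): builds the CFD/CDB and returns a descriptor */
	tx = chan->device->device_prep_dma_xor(chan, dest, srcs, src_cnt,
					       len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* fsl_re_tx_submit(): assigns a cookie and queues on submit_q */
	cookie = dmaengine_submit(tx);

	/* fsl_re_issue_pending(): moves descriptors to the inbound job ring */
	dma_async_issue_pending(chan);

	return dma_submit_error(cookie);
}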
diff --git a/drivers/dma/fsl_raid.h b/drivers/dma/fsl_raid.h
new file mode 100644
index 000000000000..69d743c04973
--- /dev/null
+++ b/drivers/dma/fsl_raid.h
@@ -0,0 +1,306 @@
1/*
2 * drivers/dma/fsl_raid.h
3 *
4 * Freescale RAID Engine device driver
5 *
6 * Author:
7 * Harninder Rai <harninder.rai@freescale.com>
8 * Naveen Burmi <naveenburmi@freescale.com>
9 *
10 * Rewrite:
11 * Xuelin Shi <xuelin.shi@freescale.com>
 12 *
13 * Copyright (c) 2010-2012 Freescale Semiconductor, Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions are met:
17 * * Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * * Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * * Neither the name of Freescale Semiconductor nor the
23 * names of its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written permission.
25 *
26 * ALTERNATIVELY, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") as published by the Free Software
28 * Foundation, either version 2 of that License or (at your option) any
29 * later version.
30 *
31 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
32 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
33 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
34 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
35 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
36 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
38 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
39 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
40 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 *
42 */
43
44#define FSL_RE_MAX_CHANS 4
45#define FSL_RE_DPAA_MODE BIT(30)
46#define FSL_RE_NON_DPAA_MODE BIT(31)
47#define FSL_RE_GFM_POLY 0x1d000000
48#define FSL_RE_ADD_JOB(x) ((x) << 16)
49#define FSL_RE_RMVD_JOB(x) ((x) << 16)
50#define FSL_RE_CFG1_CBSI 0x08000000
51#define FSL_RE_CFG1_CBS0 0x00080000
52#define FSL_RE_SLOT_FULL_SHIFT 8
53#define FSL_RE_SLOT_FULL(x) ((x) >> FSL_RE_SLOT_FULL_SHIFT)
54#define FSL_RE_SLOT_AVAIL_SHIFT 8
55#define FSL_RE_SLOT_AVAIL(x) ((x) >> FSL_RE_SLOT_AVAIL_SHIFT)
56#define FSL_RE_PQ_OPCODE 0x1B
57#define FSL_RE_XOR_OPCODE 0x1A
58#define FSL_RE_MOVE_OPCODE 0x8
59#define FSL_RE_FRAME_ALIGN 16
60#define FSL_RE_BLOCK_SIZE 0x3 /* 4096 bytes */
61#define FSL_RE_CACHEABLE_IO 0x0
62#define FSL_RE_BUFFER_OUTPUT 0x0
63#define FSL_RE_INTR_ON_ERROR 0x1
64#define FSL_RE_DATA_DEP 0x1
65#define FSL_RE_ENABLE_DPI 0x0
66#define FSL_RE_RING_SIZE 0x400
67#define FSL_RE_RING_SIZE_MASK (FSL_RE_RING_SIZE - 1)
68#define FSL_RE_RING_SIZE_SHIFT 8
69#define FSL_RE_ADDR_BIT_SHIFT 4
70#define FSL_RE_ADDR_BIT_MASK (BIT(FSL_RE_ADDR_BIT_SHIFT) - 1)
71#define FSL_RE_ERROR 0x40000000
72#define FSL_RE_INTR 0x80000000
73#define FSL_RE_CLR_INTR 0x80000000
74#define FSL_RE_PAUSE 0x80000000
75#define FSL_RE_ENABLE 0x80000000
76#define FSL_RE_REG_LIODN_MASK 0x00000FFF
77
78#define FSL_RE_CDB_OPCODE_MASK 0xF8000000
79#define FSL_RE_CDB_OPCODE_SHIFT 27
80#define FSL_RE_CDB_EXCLEN_MASK 0x03000000
81#define FSL_RE_CDB_EXCLEN_SHIFT 24
82#define FSL_RE_CDB_EXCLQ1_MASK 0x00F00000
83#define FSL_RE_CDB_EXCLQ1_SHIFT 20
84#define FSL_RE_CDB_EXCLQ2_MASK 0x000F0000
85#define FSL_RE_CDB_EXCLQ2_SHIFT 16
86#define FSL_RE_CDB_BLKSIZE_MASK 0x0000C000
87#define FSL_RE_CDB_BLKSIZE_SHIFT 14
88#define FSL_RE_CDB_CACHE_MASK 0x00003000
89#define FSL_RE_CDB_CACHE_SHIFT 12
90#define FSL_RE_CDB_BUFFER_MASK 0x00000800
91#define FSL_RE_CDB_BUFFER_SHIFT 11
92#define FSL_RE_CDB_ERROR_MASK 0x00000400
93#define FSL_RE_CDB_ERROR_SHIFT 10
94#define FSL_RE_CDB_NRCS_MASK 0x0000003C
95#define FSL_RE_CDB_NRCS_SHIFT 6
96#define FSL_RE_CDB_DEPEND_MASK 0x00000008
97#define FSL_RE_CDB_DEPEND_SHIFT 3
98#define FSL_RE_CDB_DPI_MASK 0x00000004
99#define FSL_RE_CDB_DPI_SHIFT 2
100
101/*
 102 * The largest CF block is 19 * sizeof(struct fsl_re_cmpnd_frame): 304 bytes,
 103 * where 19 = 1 (cdb) + 2 (dest) + 16 (src); aligned to 64 bytes, that is 320.
 104 * The largest CDB block, struct fsl_re_pq_cdb, is 180 bytes; added to the CF
 105 * block, 320 + 180 = 500; aligned to 64 bytes, that is 512 bytes.
106 */
107#define FSL_RE_CF_DESC_SIZE 320
108#define FSL_RE_CF_CDB_SIZE 512
109#define FSL_RE_CF_CDB_ALIGN 64
110
111struct fsl_re_ctrl {
112 /* General Configuration Registers */
113 __be32 global_config; /* Global Configuration Register */
114 u8 rsvd1[4];
115 __be32 galois_field_config; /* Galois Field Configuration Register */
116 u8 rsvd2[4];
117 __be32 jq_wrr_config; /* WRR Configuration register */
118 u8 rsvd3[4];
119 __be32 crc_config; /* CRC Configuration register */
120 u8 rsvd4[228];
121 __be32 system_reset; /* System Reset Register */
122 u8 rsvd5[252];
123 __be32 global_status; /* Global Status Register */
124 u8 rsvd6[832];
125 __be32 re_liodn_base; /* LIODN Base Register */
126 u8 rsvd7[1712];
127 __be32 re_version_id; /* Version ID register of RE */
128 __be32 re_version_id_2; /* Version ID 2 register of RE */
129 u8 rsvd8[512];
130 __be32 host_config; /* Host I/F Configuration Register */
131};
132
133struct fsl_re_chan_cfg {
134 /* Registers for JR interface */
135 __be32 jr_config_0; /* Job Queue Configuration 0 Register */
136 __be32 jr_config_1; /* Job Queue Configuration 1 Register */
137 __be32 jr_interrupt_status; /* Job Queue Interrupt Status Register */
138 u8 rsvd1[4];
139 __be32 jr_command; /* Job Queue Command Register */
140 u8 rsvd2[4];
141 __be32 jr_status; /* Job Queue Status Register */
142 u8 rsvd3[228];
143
144 /* Input Ring */
145 __be32 inbring_base_h; /* Inbound Ring Base Address Register - High */
146 __be32 inbring_base_l; /* Inbound Ring Base Address Register - Low */
147 __be32 inbring_size; /* Inbound Ring Size Register */
148 u8 rsvd4[4];
149 __be32 inbring_slot_avail; /* Inbound Ring Slot Available Register */
150 u8 rsvd5[4];
151 __be32 inbring_add_job; /* Inbound Ring Add Job Register */
152 u8 rsvd6[4];
153 __be32 inbring_cnsmr_indx; /* Inbound Ring Consumer Index Register */
154 u8 rsvd7[220];
155
156 /* Output Ring */
157 __be32 oubring_base_h; /* Outbound Ring Base Address Register - High */
158 __be32 oubring_base_l; /* Outbound Ring Base Address Register - Low */
159 __be32 oubring_size; /* Outbound Ring Size Register */
160 u8 rsvd8[4];
161 __be32 oubring_job_rmvd; /* Outbound Ring Job Removed Register */
162 u8 rsvd9[4];
163 __be32 oubring_slot_full; /* Outbound Ring Slot Full Register */
164 u8 rsvd10[4];
165 __be32 oubring_prdcr_indx; /* Outbound Ring Producer Index */
166};
167
168/*
169 * Command Descriptor Block (CDB) for unicast move command.
170 * In RAID Engine terms, memcpy is done through move command
171 */
172struct fsl_re_move_cdb {
173 __be32 cdb32;
174};
175
176/* Data protection/integrity related fields */
177#define FSL_RE_DPI_APPS_MASK 0xC0000000
178#define FSL_RE_DPI_APPS_SHIFT 30
179#define FSL_RE_DPI_REF_MASK 0x30000000
180#define FSL_RE_DPI_REF_SHIFT 28
181#define FSL_RE_DPI_GUARD_MASK 0x0C000000
182#define FSL_RE_DPI_GUARD_SHIFT 26
183#define FSL_RE_DPI_ATTR_MASK 0x03000000
184#define FSL_RE_DPI_ATTR_SHIFT 24
185#define FSL_RE_DPI_META_MASK 0x0000FFFF
186
187struct fsl_re_dpi {
188 __be32 dpi32;
189 __be32 ref;
190};
191
192/*
193 * CDB for GenQ command. In RAID Engine terminology, XOR is
194 * done through this command
195 */
196struct fsl_re_xor_cdb {
197 __be32 cdb32;
198 u8 gfm[16];
199 struct fsl_re_dpi dpi_dest_spec;
200 struct fsl_re_dpi dpi_src_spec[16];
201};
202
203/* CDB for no-op command */
204struct fsl_re_noop_cdb {
205 __be32 cdb32;
206};
207
208/*
209 * CDB for GenQQ command. In RAID Engine terminology, P/Q is
210 * done through this command
211 */
212struct fsl_re_pq_cdb {
213 __be32 cdb32;
214 u8 gfm_q1[16];
215 u8 gfm_q2[16];
216 struct fsl_re_dpi dpi_dest_spec[2];
217 struct fsl_re_dpi dpi_src_spec[16];
218};
219
220/* Compound frame */
221#define FSL_RE_CF_ADDR_HIGH_MASK 0x000000FF
222#define FSL_RE_CF_EXT_MASK 0x80000000
223#define FSL_RE_CF_EXT_SHIFT 31
224#define FSL_RE_CF_FINAL_MASK 0x40000000
225#define FSL_RE_CF_FINAL_SHIFT 30
226#define FSL_RE_CF_LENGTH_MASK 0x000FFFFF
227#define FSL_RE_CF_BPID_MASK 0x00FF0000
228#define FSL_RE_CF_BPID_SHIFT 16
229#define FSL_RE_CF_OFFSET_MASK 0x00001FFF
230
231struct fsl_re_cmpnd_frame {
232 __be32 addr_high;
233 __be32 addr_low;
234 __be32 efrl32;
235 __be32 rbro32;
236};
237
238/* Frame descriptor */
239#define FSL_RE_HWDESC_LIODN_MASK 0x3F000000
240#define FSL_RE_HWDESC_LIODN_SHIFT 24
241#define FSL_RE_HWDESC_BPID_MASK 0x00FF0000
242#define FSL_RE_HWDESC_BPID_SHIFT 16
243#define FSL_RE_HWDESC_ELIODN_MASK 0x0000F000
244#define FSL_RE_HWDESC_ELIODN_SHIFT 12
245#define FSL_RE_HWDESC_FMT_SHIFT 29
246#define FSL_RE_HWDESC_FMT_MASK (0x3 << FSL_RE_HWDESC_FMT_SHIFT)
247
248struct fsl_re_hw_desc {
249 __be32 lbea32;
250 __be32 addr_low;
251 __be32 fmt32;
252 __be32 status;
253};
254
255/* Raid Engine device private data */
256struct fsl_re_drv_private {
257 u8 total_chans;
258 struct dma_device dma_dev;
259 struct fsl_re_ctrl *re_regs;
260 struct fsl_re_chan *re_jrs[FSL_RE_MAX_CHANS];
261 struct dma_pool *cf_desc_pool;
262 struct dma_pool *hw_desc_pool;
263};
264
265/* Per job ring data structure */
266struct fsl_re_chan {
267 char name[16];
268 spinlock_t desc_lock; /* queue lock */
 269 struct list_head ack_q; /* queue of descriptors waiting to be acked */
270 struct list_head active_q; /* already issued on hw, not completed */
271 struct list_head submit_q;
272 struct list_head free_q; /* alloc available queue */
273 struct device *dev;
274 struct fsl_re_drv_private *re_dev;
275 struct dma_chan chan;
276 struct fsl_re_chan_cfg *jrregs;
277 int irq;
278 struct tasklet_struct irqtask;
279 u32 alloc_count;
280
 281 /* hw descriptor ring for inbound queue */
282 dma_addr_t inb_phys_addr;
283 struct fsl_re_hw_desc *inb_ring_virt_addr;
284 u32 inb_count;
285
286 /* hw descriptor ring for outbound queue */
287 dma_addr_t oub_phys_addr;
288 struct fsl_re_hw_desc *oub_ring_virt_addr;
289 u32 oub_count;
290};
291
292/* Async transaction descriptor */
293struct fsl_re_desc {
294 struct dma_async_tx_descriptor async_tx;
295 struct list_head node;
296 struct fsl_re_hw_desc hwdesc;
297 struct fsl_re_chan *re_chan;
298
299 /* hwdesc will point to cf_addr */
300 void *cf_addr;
301 dma_addr_t cf_paddr;
302
303 void *cdb_addr;
304 dma_addr_t cdb_paddr;
305 int status;
306};