author		Andy Shevchenko <andriy.shevchenko@linux.intel.com>	2015-03-03 15:41:21 -0500
committer	Tejun Heo <tj@kernel.org>	2015-03-24 13:50:35 -0400
commit		8b3444852a2b58129ee68a8dd69fef81ceb902a1 (patch)
tree		69704b14b8aa21f86f65862beac037fc4882180a /drivers/ata
parent		d578514b271e7c8cab8d6910075a2d137a9f0df8 (diff)
sata_dwc_460ex: move to generic DMA driver
The SATA implementation is based on two actually different devices, i.e.
SATA and DMA controllers. For the Synopsys DesignWare DMA we already have
a generic implementation of the driver. Thus, this patch converts the code
to use the DMAEngine framework and the dw_dmac driver. In the future it
would be better to split the devices inside the DTS as well, as is done on
other platforms.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'drivers/ata')
-rw-r--r--	drivers/ata/sata_dwc_460ex.c	736
1 file changed, 122 insertions(+), 614 deletions(-)
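For readers unfamiliar with the consumer side of the DMAEngine API that this
patch switches to, the overall flow is: request a slave channel (with a filter
function that matches the dw_dmac device and hands a struct dw_dma_slave to
the driver via chan->private), configure it with dmaengine_slave_config(),
build a descriptor from a scatterlist with dmaengine_prep_slave_sg(), then
queue it with dmaengine_submit() and kick the engine with
dma_async_issue_pending(). A minimal sketch of that pattern follows; the
names my_filter/my_start_tx and the burst/width values are illustrative and
not part of the patch:

	#include <linux/dmaengine.h>
	#include <linux/platform_data/dma-dw.h>

	/* Hypothetical filter: claim only a channel of our dw_dmac instance. */
	static bool my_filter(struct dma_chan *chan, void *param)
	{
		struct dw_dma_slave *dws = param;

		if (dws->dma_dev != chan->device->dev)
			return false;

		chan->private = dws;	/* hand request line/master IDs to dw_dmac */
		return true;
	}

	/* Hypothetical transmit path: scatterlist -> descriptor -> hardware. */
	static int my_start_tx(struct dw_dma_slave *dws, struct scatterlist *sgl,
			       unsigned int nents, dma_addr_t fifo_addr)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;
		struct dma_slave_config sconf = { };
		struct dma_async_tx_descriptor *desc;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* Acquire a slave channel; my_filter rejects foreign DMACs */
		chan = dma_request_channel(mask, my_filter, dws);
		if (!chan)
			return -EAGAIN;

		sconf.direction = DMA_MEM_TO_DEV;
		sconf.dst_addr = fifo_addr;	/* device data register */
		sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		sconf.dst_maxburst = 64;	/* illustrative burst size */
		dmaengine_slave_config(chan, &sconf);

		/* The dmaengine core builds the LLI chain from the SG list */
		desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		dmaengine_submit(desc);		/* queue on the channel */
		dma_async_issue_pending(chan);	/* start if idle */
		return 0;
	}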
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 7bc0c12882b9..08cd63fae7ef 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -36,11 +36,16 @@
 #include <linux/platform_device.h>
 #include <linux/libata.h>
 #include <linux/slab.h>
+
 #include "libata.h"
 
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 
+/* Supported DMA engine drivers */
+#include <linux/platform_data/dma-dw.h>
+#include <linux/dma/dw.h>
+
 /* These two are defined in "libata.h" */
 #undef	DRV_NAME
 #undef	DRV_VERSION
@@ -60,153 +65,9 @@
 #define NO_IRQ		0
 #endif
 
-/* SATA DMA driver Globals */
-#define DMA_NUM_CHANS		1
-#define DMA_NUM_CHAN_REGS	8
-
-/* SATA DMA Register definitions */
 #define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length*/
 
-struct dmareg {
-	u32 low;		/* Low bits 0-31 */
-	u32 high;		/* High bits 32-63 */
-};
-
-/* DMA Per Channel registers */
-struct dma_chan_regs {
-	struct dmareg sar;	/* Source Address */
-	struct dmareg dar;	/* Destination address */
-	struct dmareg llp;	/* Linked List Pointer */
-	struct dmareg ctl;	/* Control */
-	struct dmareg sstat;	/* Source Status not implemented in core */
-	struct dmareg dstat;	/* Destination Status not implemented in core*/
-	struct dmareg sstatar;	/* Source Status Address not impl in core */
-	struct dmareg dstatar;	/* Destination Status Address not implemente */
-	struct dmareg cfg;	/* Config */
-	struct dmareg sgr;	/* Source Gather */
-	struct dmareg dsr;	/* Destination Scatter */
-};
-
-/* Generic Interrupt Registers */
-struct dma_interrupt_regs {
-	struct dmareg tfr;	/* Transfer Interrupt */
-	struct dmareg block;	/* Block Interrupt */
-	struct dmareg srctran;	/* Source Transfer Interrupt */
-	struct dmareg dsttran;	/* Dest Transfer Interrupt */
-	struct dmareg error;	/* Error */
-};
-
-struct ahb_dma_regs {
-	struct dma_chan_regs	chan_regs[DMA_NUM_CHAN_REGS];
-	struct dma_interrupt_regs interrupt_raw;	/* Raw Interrupt */
-	struct dma_interrupt_regs interrupt_status;	/* Interrupt Status */
-	struct dma_interrupt_regs interrupt_mask;	/* Interrupt Mask */
-	struct dma_interrupt_regs interrupt_clear;	/* Interrupt Clear */
-	struct dmareg		statusInt;	/* Interrupt combined*/
-	struct dmareg		rq_srcreg;	/* Src Trans Req */
-	struct dmareg		rq_dstreg;	/* Dst Trans Req */
-	struct dmareg		rq_sgl_srcreg;	/* Sngl Src Trans Req*/
-	struct dmareg		rq_sgl_dstreg;	/* Sngl Dst Trans Req*/
-	struct dmareg		rq_lst_srcreg;	/* Last Src Trans Req*/
-	struct dmareg		rq_lst_dstreg;	/* Last Dst Trans Req*/
-	struct dmareg		dma_cfg;	/* DMA Config */
-	struct dmareg		dma_chan_en;	/* DMA Channel Enable*/
-	struct dmareg		dma_id;		/* DMA ID */
-	struct dmareg		dma_test;	/* DMA Test */
-	struct dmareg		res1;		/* reserved */
-	struct dmareg		res2;		/* reserved */
-	/*
-	 * DMA Comp Params
-	 * Param 6 = dma_param[0], Param 5 = dma_param[1],
-	 * Param 4 = dma_param[2] ...
-	 */
-	struct dmareg		dma_params[6];
-};
-
-/* Data structure for linked list item */
-struct lli {
-	u32	sar;		/* Source Address */
-	u32	dar;		/* Destination address */
-	u32	llp;		/* Linked List Pointer */
-	struct	dmareg ctl;	/* Control */
-	struct	dmareg dstat;	/* Destination Status */
-};
-
-enum {
-	SATA_DWC_DMAC_LLI_SZ =	(sizeof(struct lli)),
-	SATA_DWC_DMAC_LLI_NUM =	256,
-	SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \
-					SATA_DWC_DMAC_LLI_NUM),
-	SATA_DWC_DMAC_TWIDTH_BYTES = 4,
-	SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \
-						SATA_DWC_DMAC_TWIDTH_BYTES),
-};
-
-/* DMA Register Operation Bits */
 enum {
-	DMA_EN	=		0x00000001, /* Enable AHB DMA */
-	DMA_CTL_LLP_SRCEN =	0x10000000, /* Blk chain enable Src */
-	DMA_CTL_LLP_DSTEN =	0x08000000, /* Blk chain enable Dst */
-};
-
-#define	DMA_CTL_BLK_TS(size)	((size) & 0x000000FFF)	/* Blk Transfer size */
-#define DMA_CHANNEL(ch)		(0x00000001 << (ch))	/* Select channel */
-	/* Enable channel */
-#define	DMA_ENABLE_CHAN(ch)	((0x00000001 << (ch)) |	\
-				((0x000000001 << (ch)) << 8))
-	/* Disable channel */
-#define	DMA_DISABLE_CHAN(ch)	(0x00000000 | ((0x000000001 << (ch)) << 8))
-	/* Transfer Type & Flow Controller */
-#define	DMA_CTL_TTFC(type)	(((type) & 0x7) << 20)
-#define	DMA_CTL_SMS(num)	(((num) & 0x3) << 25) /* Src Master Select */
-#define	DMA_CTL_DMS(num)	(((num) & 0x3) << 23)/* Dst Master Select */
-	/* Src Burst Transaction Length */
-#define	DMA_CTL_SRC_MSIZE(size)	(((size) & 0x7) << 14)
-	/* Dst Burst Transaction Length */
-#define	DMA_CTL_DST_MSIZE(size)	(((size) & 0x7) << 11)
-	/* Source Transfer Width */
-#define	DMA_CTL_SRC_TRWID(size)	(((size) & 0x7) << 4)
-	/* Destination Transfer Width */
-#define	DMA_CTL_DST_TRWID(size)	(((size) & 0x7) << 1)
-
-/* Assign HW handshaking interface (x) to destination / source peripheral */
-#define	DMA_CFG_HW_HS_DEST(int_num)	(((int_num) & 0xF) << 11)
-#define	DMA_CFG_HW_HS_SRC(int_num)	(((int_num) & 0xF) << 7)
-#define	DMA_CFG_HW_CH_PRIOR(int_num)	(((int_num) & 0xF) << 5)
-#define	DMA_LLP_LMS(addr, master)	(((addr) & 0xfffffffc) | (master))
-
-/*
- * This define is used to set block chaining disabled in the control low
- * register. It is already in little endian format so it can be &'d dirctly.
- * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
- */
-enum {
-	DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7,
-	DMA_CTL_TTFC_P2M_DMAC =	0x00000002, /* Per to mem, DMAC cntr */
-	DMA_CTL_TTFC_M2P_PER =	0x00000003, /* Mem to per, peripheral cntr */
-	DMA_CTL_SINC_INC =	0x00000000, /* Source Address Increment */
-	DMA_CTL_SINC_DEC =	0x00000200,
-	DMA_CTL_SINC_NOCHANGE =	0x00000400,
-	DMA_CTL_DINC_INC =	0x00000000, /* Destination Address Increment */
-	DMA_CTL_DINC_DEC =	0x00000080,
-	DMA_CTL_DINC_NOCHANGE =	0x00000100,
-	DMA_CTL_INT_EN =	0x00000001, /* Interrupt Enable */
-
-/* Channel Configuration Register high bits */
-	DMA_CFG_FCMOD_REQ =	0x00000001, /* Flow Control - request based */
-	DMA_CFG_PROTCTL	=	(0x00000003 << 2),/* Protection Control */
-
-/* Channel Configuration Register low bits */
-	DMA_CFG_RELD_DST =	0x80000000, /* Reload Dest / Src Addr */
-	DMA_CFG_RELD_SRC =	0x40000000,
-	DMA_CFG_HS_SELSRC =	0x00000800, /* Software handshake Src/ Dest */
-	DMA_CFG_HS_SELDST =	0x00000400,
-	DMA_CFG_FIFOEMPTY =	(0x00000001 << 9), /* FIFO Empty bit */
-
-/* Channel Linked List Pointer Register */
-	DMA_LLP_AHBMASTER1 =	0, /* List Master Select */
-	DMA_LLP_AHBMASTER2 =	1,
-
 	SATA_DWC_MAX_PORTS = 1,
 
 	SATA_DWC_SCR_OFFSET = 0x24,
@@ -287,7 +148,7 @@ struct sata_dwc_device {
 	struct ata_host *host;
 	u8 __iomem *reg_base;
 	struct sata_dwc_regs *sata_dwc_regs;	/* DW Synopsys SATA specific */
-	int irq_dma;
+	struct dw_dma_chip *dma;
 };
 
 #define SATA_DWC_QCMD_MAX	32
@@ -295,10 +156,13 @@ struct sata_dwc_device {
 struct sata_dwc_device_port {
 	struct sata_dwc_device	*hsdev;
 	int			cmd_issued[SATA_DWC_QCMD_MAX];
-	struct lli		*llit[SATA_DWC_QCMD_MAX];  /* DMA LLI table */
-	dma_addr_t		llit_dma[SATA_DWC_QCMD_MAX];
-	u32			dma_chan[SATA_DWC_QCMD_MAX];
 	int			dma_pending[SATA_DWC_QCMD_MAX];
+
+	/* DMA info */
+	struct dw_dma_slave		*dws;
+	struct dma_chan			*chan;
+	struct dma_async_tx_descriptor	*desc[SATA_DWC_QCMD_MAX];
+	u32				dma_interrupt_count;
 };
 
 /*
@@ -330,14 +194,18 @@ struct sata_dwc_host_priv {
 	void	__iomem	 *scr_addr_sstatus;
 	u32	sata_dwc_sactive_issued ;
 	u32	sata_dwc_sactive_queued ;
-	u32	dma_interrupt_count;
-	struct	ahb_dma_regs	*sata_dma_regs;
 	struct	device	*dwc_dev;
-	int	dma_channel;
 };
 
 static struct sata_dwc_host_priv host_pvt;
 
+static struct dw_dma_slave sata_dwc_dma_dws = {
+	.src_id = 0,
+	.dst_id = 0,
+	.src_master = 0,
+	.dst_master = 1,
+};
+
 /*
  * Prototypes
  */
@@ -347,12 +215,6 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
 static void sata_dwc_port_stop(struct ata_port *ap);
 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
-static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq);
-static void dma_dwc_exit(struct sata_dwc_device *hsdev);
-static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
-			      struct lli *lli, dma_addr_t dma_lli,
-			      void __iomem *addr, int dir);
-static void dma_dwc_xfer_start(int dma_ch);
 
 static const char *get_prot_descript(u8 protocol)
 {
@@ -405,76 +267,8 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf)
 		tf->hob_lbah);
 }
 
-/*
- * Function: get_burst_length_encode
- * arguments: datalength: length in bytes of data
- * returns value to be programmed in register corresponding to data length
- * This value is effectively the log(base 2) of the length
- */
-static int get_burst_length_encode(int datalength)
-{
-	int items = datalength >> 2;	/* div by 4 to get lword count */
-
-	if (items >= 64)
-		return 5;
-
-	if (items >= 32)
-		return 4;
-
-	if (items >= 16)
-		return 3;
-
-	if (items >= 8)
-		return 2;
-
-	if (items >= 4)
-		return 1;
-
-	return 0;
-}
-
-static void clear_chan_interrupts(int c)
+static void dma_dwc_xfer_done(void *hsdev_instance)
 {
-	out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low),
-		 DMA_CHANNEL(c));
-	out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low),
-		 DMA_CHANNEL(c));
-	out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low),
-		 DMA_CHANNEL(c));
-	out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low),
-		 DMA_CHANNEL(c));
-	out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low),
-		 DMA_CHANNEL(c));
-}
-
-/*
- * Function: dma_request_channel
- * arguments: None
- * returns channel number if available else -1
- * This function assigns the next available DMA channel from the list to the
- * requester
- */
-static int dma_request_channel(void)
-{
-	/* Check if the channel is not currently in use */
-	if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &
-		DMA_CHANNEL(host_pvt.dma_channel)))
-		return host_pvt.dma_channel;
-	dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n",
-		__func__, host_pvt.dma_channel);
-	return -1;
-}
-
-/*
- * Function: dma_dwc_interrupt
- * arguments: irq, dev_id, pt_regs
- * returns channel number if available else -1
- * Interrupt Handler for DW AHB SATA DMA
- */
-static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
-{
-	int chan;
-	u32 tfr_reg, err_reg;
 	unsigned long flags;
 	struct sata_dwc_device *hsdev = hsdev_instance;
 	struct ata_host *host = (struct ata_host *)hsdev->host;
@@ -488,341 +282,65 @@ static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
 	hsdevp = HSDEVP_FROM_AP(ap);
 	tag = ap->link.active_tag;
 
-	tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\
-			.low));
-	err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\
-			.low));
-
-	dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
-		tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
-
-	chan = host_pvt.dma_channel;
-	if (chan >= 0) {
-		/* Check for end-of-transfer interrupt. */
-		if (tfr_reg & DMA_CHANNEL(chan)) {
-			/*
-			 * Each DMA command produces 2 interrupts. Only
-			 * complete the command after both interrupts have been
-			 * seen. (See sata_dwc_isr())
-			 */
-			host_pvt.dma_interrupt_count++;
-			sata_dwc_clear_dmacr(hsdevp, tag);
-
-			if (hsdevp->dma_pending[tag] ==
-			    SATA_DWC_DMA_PENDING_NONE) {
-				dev_err(ap->dev, "DMA not pending eot=0x%08x "
-					"err=0x%08x tag=0x%02x pending=%d\n",
-					tfr_reg, err_reg, tag,
-					hsdevp->dma_pending[tag]);
-			}
-
-			if ((host_pvt.dma_interrupt_count % 2) == 0)
-				sata_dwc_dma_xfer_complete(ap, 1);
-
-			/* Clear the interrupt */
-			out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
-				.tfr.low),
-				 DMA_CHANNEL(chan));
-		}
-
-		/* Check for error interrupt. */
-		if (err_reg & DMA_CHANNEL(chan)) {
-			/* TODO Need error handler ! */
-			dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
-				err_reg);
-
-			/* Clear the interrupt. */
-			out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
-				.error.low),
-				DMA_CHANNEL(chan));
-		}
-	}
-	spin_unlock_irqrestore(&host->lock, flags);
-	return IRQ_HANDLED;
-}
-
-/*
- * Function: dma_request_interrupts
- * arguments: hsdev
- * returns status
- * This function registers ISR for a particular DMA channel interrupt
- */
-static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
-{
-	int retval = 0;
-	int chan = host_pvt.dma_channel;
-
-	if (chan >= 0) {
-		/* Unmask error interrupt */
-		out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low,
-			 DMA_ENABLE_CHAN(chan));
-
-		/* Unmask end-of-transfer interrupt */
-		out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low,
-			 DMA_ENABLE_CHAN(chan));
-	}
-
-	retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev);
-	if (retval) {
-		dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n",
-			__func__, irq);
-		return -ENODEV;
-	}
-
-	/* Mark this interrupt as requested */
-	hsdev->irq_dma = irq;
-	return 0;
-}
-
-/*
- * Function: map_sg_to_lli
- * The Synopsis driver has a comment proposing that better performance
- * is possible by only enabling interrupts on the last item in the linked list.
- * However, it seems that could be a problem if an error happened on one of the
- * first items. The transfer would halt, but no error interrupt would occur.
- * Currently this function sets interrupts enabled for each linked list item:
- * DMA_CTL_INT_EN.
- */
-static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
-			struct lli *lli, dma_addr_t dma_lli,
-			void __iomem *dmadr_addr, int dir)
-{
-	int i, idx = 0;
-	int fis_len = 0;
-	dma_addr_t next_llp;
-	int bl;
-	int sms_val, dms_val;
-
-	sms_val = 0;
-	dms_val = 1 + host_pvt.dma_channel;
-	dev_dbg(host_pvt.dwc_dev,
-		"%s: sg=%p nelem=%d lli=%p dma_lli=0x%pad dmadr=0x%p\n",
-		__func__, sg, num_elems, lli, &dma_lli, dmadr_addr);
-
-	bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
-
-	for (i = 0; i < num_elems; i++, sg++) {
-		u32 addr, offset;
-		u32 sg_len, len;
-
-		addr = (u32) sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
-			"=%d\n", __func__, i, addr, sg_len);
-
-		while (sg_len) {
-			if (idx >= SATA_DWC_DMAC_LLI_NUM) {
-				/* The LLI table is not large enough. */
-				dev_err(host_pvt.dwc_dev, "LLI table overrun "
-					"(idx=%d)\n", idx);
-				break;
-			}
-			len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
-				SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
-
-			offset = addr & 0xffff;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			/*
-			 * Make sure a LLI block is not created that will span
-			 * 8K max FIS boundary. If the block spans such a FIS
-			 * boundary, there is a chance that a DMA burst will
-			 * cross that boundary -- this results in an error in
-			 * the host controller.
-			 */
-			if (fis_len + len > 8192) {
-				dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
-					"%d(0x%x) len=%d(0x%x)\n", fis_len,
-					fis_len, len, len);
-				len = 8192 - fis_len;
-				fis_len = 0;
-			} else {
-				fis_len += len;
-			}
-			if (fis_len == 8192)
-				fis_len = 0;
-
-			/*
-			 * Set DMA addresses and lower half of control register
-			 * based on direction.
-			 */
-			if (dir == DMA_FROM_DEVICE) {
-				lli[idx].dar = cpu_to_le32(addr);
-				lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
-
-				lli[idx].ctl.low = cpu_to_le32(
-					DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
-					DMA_CTL_SMS(sms_val) |
-					DMA_CTL_DMS(dms_val) |
-					DMA_CTL_SRC_MSIZE(bl) |
-					DMA_CTL_DST_MSIZE(bl) |
-					DMA_CTL_SINC_NOCHANGE |
-					DMA_CTL_SRC_TRWID(2) |
-					DMA_CTL_DST_TRWID(2) |
-					DMA_CTL_INT_EN |
-					DMA_CTL_LLP_SRCEN |
-					DMA_CTL_LLP_DSTEN);
-			} else {	/* DMA_TO_DEVICE */
-				lli[idx].sar = cpu_to_le32(addr);
-				lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
-
-				lli[idx].ctl.low = cpu_to_le32(
-					DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
-					DMA_CTL_SMS(dms_val) |
-					DMA_CTL_DMS(sms_val) |
-					DMA_CTL_SRC_MSIZE(bl) |
-					DMA_CTL_DST_MSIZE(bl) |
-					DMA_CTL_DINC_NOCHANGE |
-					DMA_CTL_SRC_TRWID(2) |
-					DMA_CTL_DST_TRWID(2) |
-					DMA_CTL_INT_EN |
-					DMA_CTL_LLP_SRCEN |
-					DMA_CTL_LLP_DSTEN);
-			}
-
-			dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
-				"0x%08x val: 0x%08x\n", __func__,
-				len, DMA_CTL_BLK_TS(len / 4));
-
-			/* Program the LLI CTL high register */
-			lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\
-						(len / 4));
-
-			/* Program the next pointer. The next pointer must be
-			 * the physical address, not the virtual address.
-			 */
-			next_llp = (dma_lli + ((idx + 1) * sizeof(struct \
-							lli)));
-
-			/* The last 2 bits encode the list master select. */
-			next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
-
-			lli[idx].llp = cpu_to_le32(next_llp);
-			idx++;
-			sg_len -= len;
-			addr += len;
-		}
-	}
-
 	/*
-	 * The last next ptr has to be zero and the last control low register
-	 * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
-	 * and destination enable) set back to 0 (disabled.) This is what tells
-	 * the core that this is the last item in the linked list.
+	 * Each DMA command produces 2 interrupts. Only
+	 * complete the command after both interrupts have been
+	 * seen. (See sata_dwc_isr())
 	 */
-	if (idx) {
-		lli[idx-1].llp = 0x00000000;
-		lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
+	hsdevp->dma_interrupt_count++;
+	sata_dwc_clear_dmacr(hsdevp, tag);
 
-		/* Flush cache to memory */
-		dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
-			       DMA_BIDIRECTIONAL);
+	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
+		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
+			tag, hsdevp->dma_pending[tag]);
 	}
 
-	return idx;
-}
+	if ((hsdevp->dma_interrupt_count % 2) == 0)
+		sata_dwc_dma_xfer_complete(ap, 1);
 
-/*
- * Function: dma_dwc_xfer_start
- * arguments: Channel number
- * Return : None
- * Enables the DMA channel
- */
-static void dma_dwc_xfer_start(int dma_ch)
-{
-	/* Enable the DMA channel */
-	out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low),
-		 in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) |
-		 DMA_ENABLE_CHAN(dma_ch));
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
-			      struct lli *lli, dma_addr_t dma_lli,
-			      void __iomem *addr, int dir)
+static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
 {
-	int dma_ch;
-	int num_lli;
-	/* Acquire DMA channel */
-	dma_ch = dma_request_channel();
-	if (dma_ch == -1) {
-		dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n",
-			__func__);
-		return -EAGAIN;
+	struct ata_port *ap = qc->ap;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	dma_addr_t addr = (dma_addr_t)&hsdev->sata_dwc_regs->dmadr;
+	struct dma_slave_config sconf;
+	struct dma_async_tx_descriptor *desc;
+
+	if (qc->dma_dir == DMA_DEV_TO_MEM) {
+		sconf.src_addr = addr;
+		sconf.device_fc = true;
+	} else {	/* DMA_MEM_TO_DEV */
+		sconf.dst_addr = addr;
+		sconf.device_fc = false;
 	}
 
-	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
-	num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir);
-
-	dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:"
-		" 0x%0xlx addr: %p lli count: %d\n", __func__, sg, num_elems,
-		 lli, (u32)dma_lli, addr, num_lli);
-
-	clear_chan_interrupts(dma_ch);
-
-	/* Program the CFG register. */
-	out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high),
-		 DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) |
-		 DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
-	out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low),
-		 DMA_CFG_HW_CH_PRIOR(dma_ch));
+	sconf.direction = qc->dma_dir;
+	sconf.src_maxburst = AHB_DMA_BRST_DFLT;
+	sconf.dst_maxburst = AHB_DMA_BRST_DFLT;
+	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-	/* Program the address of the linked list */
-	out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low),
-		 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
+	dmaengine_slave_config(hsdevp->chan, &sconf);
 
-	/* Program the CTL register with src enable / dst enable */
-	out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
-		 DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
-	return dma_ch;
-}
+	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
+	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
+				       qc->dma_dir,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
-/*
- * Function: dma_dwc_exit
- * arguments: None
- * returns status
- * This function exits the SATA DMA driver
- */
-static void dma_dwc_exit(struct sata_dwc_device *hsdev)
-{
-	dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
-	if (host_pvt.sata_dma_regs) {
-		iounmap((void __iomem *)host_pvt.sata_dma_regs);
-		host_pvt.sata_dma_regs = NULL;
-	}
+	if (!desc)
+		return NULL;
 
-	if (hsdev->irq_dma) {
-		free_irq(hsdev->irq_dma, hsdev);
-		hsdev->irq_dma = 0;
-	}
-}
+	desc->callback = dma_dwc_xfer_done;
+	desc->callback_param = hsdev;
 
-/*
- * Function: dma_dwc_init
- * arguments: hsdev
- * returns status
- * This function initializes the SATA DMA driver
- */
-static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
-{
-	int err;
-
-	err = dma_request_interrupts(hsdev, irq);
-	if (err) {
-		dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
-			" %d\n", __func__, err);
-		return err;
-	}
-
-	/* Enabe DMA */
-	out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN);
+	dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d addr: %pad\n",
+		__func__, qc->sg, qc->n_elem, &addr);
 
-	dev_notice(host_pvt.dwc_dev, "DMA initialized\n");
-	dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\
-		sata_dma_regs);
-
-	return 0;
+	return desc;
 }
 
 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
@@ -892,21 +410,18 @@ static void sata_dwc_error_intr(struct ata_port *ap,
 	struct ata_queued_cmd *qc;
 	u32 serror;
 	u8 status, tag;
-	u32 err_reg;
 
 	ata_ehi_clear_desc(ehi);
 
 	serror = core_scr_read(SCR_ERROR);
 	status = ap->ops->sff_check_status(ap);
 
-	err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\
-			low));
 	tag = ap->link.active_tag;
 
-	dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x "
-		"dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n",
-		__func__, serror, intpr, status, host_pvt.dma_interrupt_count,
-		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg);
+	dev_err(ap->dev,
+		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
+		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
+		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);
 
 	/* Clear error register and interrupt bit */
 	clear_serror();
@@ -1033,7 +548,7 @@ DRVSTILLBUSY:
 		 * operation done interrupt. The command should be
 		 * completed only after both interrupts are seen.
 		 */
-		host_pvt.dma_interrupt_count++;
+		hsdevp->dma_interrupt_count++;
 		if (hsdevp->dma_pending[tag] == \
 			SATA_DWC_DMA_PENDING_NONE) {
 			dev_err(ap->dev,
@@ -1042,7 +557,7 @@ DRVSTILLBUSY:
 				hsdevp->dma_pending[tag]);
 		}
 
-		if ((host_pvt.dma_interrupt_count % 2) == 0)
+		if ((hsdevp->dma_interrupt_count % 2) == 0)
 			sata_dwc_dma_xfer_complete(ap, 1);
 	} else if (ata_is_pio(qc->tf.protocol)) {
 		ata_sff_hsm_move(ap, qc, status, 0);
@@ -1116,12 +631,12 @@ DRVSTILLBUSY:
 		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
 			get_prot_descript(qc->tf.protocol));
 		if (ata_is_dma(qc->tf.protocol)) {
-			host_pvt.dma_interrupt_count++;
+			hsdevp->dma_interrupt_count++;
 			if (hsdevp->dma_pending[tag] == \
 					SATA_DWC_DMA_PENDING_NONE)
 				dev_warn(ap->dev, "%s: DMA not pending?\n",
 					__func__);
-			if ((host_pvt.dma_interrupt_count % 2) == 0)
+			if ((hsdevp->dma_interrupt_count % 2) == 0)
 				sata_dwc_dma_xfer_complete(ap, 1);
 		} else {
 			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
@@ -1269,6 +784,18 @@ static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
 		in_le32(&hsdev->sata_dwc_regs->errmr));
 }
 
+static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
+{
+	struct sata_dwc_device_port *hsdevp = param;
+	struct dw_dma_slave *dws = hsdevp->dws;
+
+	if (dws->dma_dev != chan->device->dev)
+		return false;
+
+	chan->private = dws;
+	return true;
+}
+
 static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
 {
 	port->cmd_addr = (void __iomem *)base + 0x00;
@@ -1303,6 +830,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
 	struct sata_dwc_device *hsdev;
 	struct sata_dwc_device_port *hsdevp = NULL;
 	struct device *pdev;
+	dma_cap_mask_t mask;
 	int i;
 
 	hsdev = HSDEV_FROM_AP(ap);
@@ -1326,29 +854,27 @@ static int sata_dwc_port_start(struct ata_port *ap)
 	}
 	hsdevp->hsdev = hsdev;
 
+	hsdevp->dws = &sata_dwc_dma_dws;
+	hsdevp->dws->dma_dev = host_pvt.dwc_dev;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Acquire DMA channel */
+	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
+	if (!hsdevp->chan) {
+		dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n",
+			__func__);
+		err = -EAGAIN;
+		goto CLEANUP_ALLOC;
+	}
+
 	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
 		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
 
 	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
 	ap->bmdma_prd_dma = 0;
 
-	/*
-	 * DMA - Assign scatter gather LLI table. We can't use the libata
-	 * version since it's PRD is IDE PCI specific.
-	 */
-	for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
-		hsdevp->llit[i] = dma_alloc_coherent(pdev,
-						     SATA_DWC_DMAC_LLI_TBL_SZ,
-						     &(hsdevp->llit_dma[i]),
-						     GFP_ATOMIC);
-		if (!hsdevp->llit[i]) {
-			dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
-				__func__);
-			err = -ENOMEM;
-			goto CLEANUP_ALLOC;
-		}
-	}
-
 	if (ap->port_no == 0) {
 		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
 			__func__);
@@ -1377,22 +903,14 @@ CLEANUP:
 
 static void sata_dwc_port_stop(struct ata_port *ap)
 {
-	int i;
-	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
 
 	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
 
-	if (hsdevp && hsdev) {
-		/* deallocate LLI table */
-		for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
-			dma_free_coherent(ap->host->dev,
-					  SATA_DWC_DMAC_LLI_TBL_SZ,
-					  hsdevp->llit[i], hsdevp->llit_dma[i]);
-		}
+	dmaengine_terminate_all(hsdevp->chan);
+	dma_release_channel(hsdevp->chan);
 
-		kfree(hsdevp);
-	}
+	kfree(hsdevp);
 	ap->private_data = NULL;
 }
 
@@ -1448,12 +966,12 @@ static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
 {
 	int start_dma;
-	u32 reg, dma_chan;
+	u32 reg;
 	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
 	struct ata_port *ap = qc->ap;
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
 	int dir = qc->dma_dir;
-	dma_chan = hsdevp->dma_chan[tag];
 
 	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
 		start_dma = 1;
@@ -1489,7 +1007,8 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
 				SATA_DWC_DMACR_RXCHEN);
 
 		/* Enable AHB DMA transfer on the specified channel */
-		dma_dwc_xfer_start(dma_chan);
+		dmaengine_submit(desc);
+		dma_async_issue_pending(hsdevp->chan);
 	}
 }
 
@@ -1515,26 +1034,21 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
  */
 static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
 {
-	struct scatterlist *sg = qc->sg;
+	struct dma_async_tx_descriptor *desc;
 	struct ata_port *ap = qc->ap;
-	int dma_chan;
-	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
 
 	dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
 		__func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
 		 qc->n_elem);
 
-	dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
-				      hsdevp->llit_dma[tag],
-				      (void __iomem *)&hsdev->sata_dwc_regs->dmadr,
-				      qc->dma_dir);
-	if (dma_chan < 0) {
-		dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
-			__func__, dma_chan);
+	desc = dma_dwc_xfer_setup(qc);
+	if (!desc) {
+		dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns NULL\n",
+			__func__);
 		return;
 	}
-	hsdevp->dma_chan[tag] = dma_chan;
+	hsdevp->desc[tag] = desc;
 }
 
 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
@@ -1678,7 +1192,6 @@ static int sata_dwc_probe(struct platform_device *ofdev)
 	struct ata_port_info pi = sata_dwc_port_info[0];
 	const struct ata_port_info *ppi[] = { &pi, NULL };
 	struct device_node *np = ofdev->dev.of_node;
-	u32 dma_chan;
 
 	/* Allocate DWC SATA device */
 	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
@@ -1688,13 +1201,6 @@ static int sata_dwc_probe(struct platform_device *ofdev)
 
 	host->private_data = hsdev;
 
-	if (of_property_read_u32(np, "dma-channel", &dma_chan)) {
-		dev_warn(&ofdev->dev, "no dma-channel property set."
-			 " Use channel 0\n");
-		dma_chan = 0;
-	}
-	host_pvt.dma_channel = dma_chan;
-
 	/* Ioremap SATA registers */
 	base = of_iomap(np, 0);
 	if (!base) {
@@ -1721,16 +1227,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
 		   idr, ver[0], ver[1], ver[2]);
 
 	/* Get SATA DMA interrupt number */
-	irq = irq_of_parse_and_map(np, 1);
-	if (irq == NO_IRQ) {
+	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
+	if (hsdev->dma->irq == NO_IRQ) {
 		dev_err(&ofdev->dev, "no SATA DMA irq\n");
 		err = -ENODEV;
 		goto error_iomap;
 	}
 
 	/* Get physical SATA DMA register base address */
-	host_pvt.sata_dma_regs = (void *)of_iomap(np, 1);
-	if (!(host_pvt.sata_dma_regs)) {
+	hsdev->dma->regs = of_iomap(np, 1);
+	if (!hsdev->dma->regs) {
 		dev_err(&ofdev->dev,
 			"ioremap failed for AHBDMA register address\n");
 		err = -ENODEV;
@@ -1740,8 +1246,10 @@ static int sata_dwc_probe(struct platform_device *ofdev)
 	/* Save dev for later use in dev_xxx() routines */
 	host_pvt.dwc_dev = &ofdev->dev;
 
+	hsdev->dma->dev = &ofdev->dev;
+
 	/* Initialize AHB DMAC */
-	err = dma_dwc_init(hsdev, irq);
+	err = dw_dma_probe(hsdev->dma, NULL);
 	if (err)
 		goto error_dma_iomap;
 
@@ -1770,9 +1278,9 @@ static int sata_dwc_probe(struct platform_device *ofdev)
 
 error_out:
 	/* Free SATA DMA resources */
-	dma_dwc_exit(hsdev);
+	dw_dma_remove(hsdev->dma);
 error_dma_iomap:
-	iounmap((void __iomem *)host_pvt.sata_dma_regs);
+	iounmap(hsdev->dma->regs);
 error_iomap:
 	iounmap(base);
 	return err;
@@ -1787,9 +1295,9 @@ static int sata_dwc_remove(struct platform_device *ofdev)
 	ata_host_detach(host);
 
 	/* Free SATA DMA resources */
-	dma_dwc_exit(hsdev);
+	dw_dma_remove(hsdev->dma);
 
-	iounmap((void __iomem *)host_pvt.sata_dma_regs);
+	iounmap(hsdev->dma->regs);
 	iounmap(hsdev->reg_base);
 	dev_dbg(&ofdev->dev, "done\n");
 	return 0;