author    David Daney <david.daney@cavium.com>  2012-04-26 14:10:28 -0400
committer Ralf Baechle <ralf@linux-mips.org>    2012-12-13 12:15:24 -0500
commit    43f01da0f2794b464ade2ffe1f780c69d7ce7b75 (patch)
tree      dcbf33bffe33aa06d967c451df48e0b78c32ebdb /drivers/ata/pata_octeon_cf.c
parent    f772cdb2bd544eeb3e83a8bb42629d155c1b53fd (diff)
MIPS/OCTEON/ata: Convert pata_octeon_cf.c to use device tree.
The patch needs to eliminate the definition of OCTEON_IRQ_BOOTDMA so that the
device tree code can map the interrupt; to avoid temporarily breaking things,
we patch both the interrupt registration code and the pata_octeon_cf driver in
a single commit. Also rolled in are a conversion to hrtimers and corrections
to the timing calculations.

Acked-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: David Daney <david.daney@cavium.com>
Diffstat (limited to 'drivers/ata/pata_octeon_cf.c')
-rw-r--r--  drivers/ata/pata_octeon_cf.c  |  419
1 file changed, 284 insertions(+), 135 deletions(-)
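For orientation, the device tree node this driver now binds against looks roughly like the sketch below. It is assembled from the properties the probe code reads ("reg", "cavium,bus-width", "cavium,true-ide", "cavium,dma-engine-handle") and the "cavium,ebt3000-compact-flash" compatible string added at the bottom of the diff; the chip-select numbers, region sizes and the &dma0 label are illustrative placeholders, not values taken from this patch:

        compact-flash@5,0 {
                compatible = "cavium,ebt3000-compact-flash";
                /* Boot-bus chip selects: CS0, and CS1 when wired for True IDE. */
                reg = <5 0 0x10000>, <6 0 0x10000>;
                cavium,bus-width = <16>;        /* 8 or 16 bit interface */
                cavium,true-ide;                /* present => True IDE mode */
                cavium,dma-engine-handle = <&dma0>;
        };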
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 1d61d5d278fa..652d035aa833 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -5,17 +5,19 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2005 - 2009 Cavium Networks
+ * Copyright (C) 2005 - 2012 Cavium Inc.
  * Copyright (C) 2008 Wind River Systems
  */

 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/libata.h>
-#include <linux/irq.h>
+#include <linux/hrtimer.h>
 #include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
-#include <linux/workqueue.h>
 #include <scsi/scsi_host.h>

 #include <asm/octeon/octeon.h>
@@ -34,20 +36,36 @@
  */

 #define DRV_NAME        "pata_octeon_cf"
-#define DRV_VERSION     "2.1"
+#define DRV_VERSION     "2.2"
+
+/* Poll interval in nS. */
+#define OCTEON_CF_BUSY_POLL_INTERVAL 500000

+#define DMA_CFG 0
+#define DMA_TIM 0x20
+#define DMA_INT 0x38
+#define DMA_INT_EN 0x50

 struct octeon_cf_port {
-        struct workqueue_struct *wq;
-        struct delayed_work delayed_finish;
+        struct hrtimer delayed_finish;
         struct ata_port *ap;
         int dma_finished;
+        void *c0;
+        unsigned int cs0;
+        unsigned int cs1;
+        bool is_true_ide;
+        u64 dma_base;
 };

 static struct scsi_host_template octeon_cf_sht = {
         ATA_PIO_SHT(DRV_NAME),
 };

+static int enable_dma;
+module_param(enable_dma, int, 0444);
+MODULE_PARM_DESC(enable_dma,
+                 "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
+
 /**
  * Convert nanosecond based time to setting used in the
  * boot bus timing register, based on timing multiple
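Note that with this change DMA becomes opt-in: the probe code further down sets ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0, so a True IDE interface runs PIO-only unless the module is loaded with enable_dma=1 (modprobe pata_octeon_cf enable_dma=1, or pata_octeon_cf.enable_dma=1 on the kernel command line when the driver is built in).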
@@ -66,12 +84,29 @@ static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
         return val;
 }

-static void octeon_cf_set_boot_reg_cfg(int cs)
+static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
 {
         union cvmx_mio_boot_reg_cfgx reg_cfg;
+        unsigned int tim_mult;
+
+        switch (multiplier) {
+        case 8:
+                tim_mult = 3;
+                break;
+        case 4:
+                tim_mult = 0;
+                break;
+        case 2:
+                tim_mult = 2;
+                break;
+        default:
+                tim_mult = 1;
+                break;
+        }
+
         reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
         reg_cfg.s.dmack = 0;    /* Don't assert DMACK on access */
-        reg_cfg.s.tim_mult = 2; /* Timing multiplier 2x */
+        reg_cfg.s.tim_mult = tim_mult;  /* Timing multiplier */
         reg_cfg.s.rd_dly = 0;   /* Sample on falling edge of BOOT_OE */
         reg_cfg.s.sam = 0;      /* Don't combine write and output enable */
         reg_cfg.s.we_ext = 0;   /* No write enable extension */
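For reference, the switch above maps the requested multiplier onto the encoding of the boot-bus tim_mult field, which is not monotonic: field value 0 selects 4x, 1 selects 1x, 2 selects 2x (the value the old code hard-wired), and 3 selects 8x.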
@@ -92,12 +127,12 @@ static void octeon_cf_set_boot_reg_cfg(int cs)
  */
 static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
 {
-        struct octeon_cf_data *ocd = ap->dev->platform_data;
+        struct octeon_cf_port *cf_port = ap->private_data;
         union cvmx_mio_boot_reg_timx reg_tim;
-        int cs = ocd->base_region;
         int T;
         struct ata_timing timing;

+        unsigned int div;
         int use_iordy;
         int trh;
         int pause;
@@ -106,7 +141,15 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
         int t2;
         int t2i;

-        T = (int)(2000000000000LL / octeon_get_clock_rate());
+        /*
+         * A divisor value of four will overflow the timing fields at
+         * clock rates greater than 800MHz
+         */
+        if (octeon_get_io_clock_rate() <= 800000000)
+                div = 4;
+        else
+                div = 8;
+        T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());

         if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
                 BUG();
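A quick check of the new timing base: T = (10^12 * div) / f_io is, reading off the 10^12 constant, the length of div I/O-clock cycles in picoseconds. At f_io = 800 MHz with div = 4 this gives T = 4 * 10^12 / (8 * 10^8) = 5000; at, say, 1.2 GHz the code switches to div = 8 and T = 8 * 10^12 / (1.2 * 10^9) ~= 6667. A faster clock means more cycles per nanosecond, so the coarser eight-cycle unit keeps the quantized values small enough for the timing register fields, which is the overflow the comment above warns about.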
@@ -121,23 +164,26 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
         if (t2i)
                 t2i--;

-        trh = ns_to_tim_reg(2, 20);
+        trh = ns_to_tim_reg(div, 20);
         if (trh)
                 trh--;

-        pause = timing.cycle - timing.active - timing.setup - trh;
+        pause = (int)timing.cycle - (int)timing.active -
+                (int)timing.setup - trh;
+        if (pause < 0)
+                pause = 0;
         if (pause)
                 pause--;

-        octeon_cf_set_boot_reg_cfg(cs);
-        if (ocd->dma_engine >= 0)
+        octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
+        if (cf_port->is_true_ide)
                 /* True IDE mode, program both chip selects. */
-                octeon_cf_set_boot_reg_cfg(cs + 1);
+                octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);


         use_iordy = ata_pio_need_iordy(dev);

-        reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
+        reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
         /* Disable page mode */
         reg_tim.s.pagem = 0;
         /* Enable dynamic timing */
@@ -161,20 +207,22 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
         /* How long read enable is asserted */
         reg_tim.s.oe = t2;
         /* Time after CE that read/write starts */
-        reg_tim.s.ce = ns_to_tim_reg(2, 5);
+        reg_tim.s.ce = ns_to_tim_reg(div, 5);
         /* Time before CE that address is valid */
         reg_tim.s.adr = 0;

         /* Program the bootbus region timing for the data port chip select. */
-        cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
-        if (ocd->dma_engine >= 0)
+        cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
+        if (cf_port->is_true_ide)
                 /* True IDE mode, program both chip selects. */
-                cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
+                cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
+                               reg_tim.u64);
 }

 static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
 {
-        struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
+        struct octeon_cf_port *cf_port = ap->private_data;
+        union cvmx_mio_boot_pin_defs pin_defs;
         union cvmx_mio_boot_dma_timx dma_tim;
         unsigned int oe_a;
         unsigned int oe_n;
@@ -183,6 +231,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
         unsigned int pause;
         unsigned int T0, Tkr, Td;
         unsigned int tim_mult;
+        int c;

         const struct ata_timing *timing;

@@ -199,13 +248,19 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
         /* not spec'ed, value in eclocks, not affected by tim_mult */
         dma_arq = 8;
         pause = 25 - dma_arq * 1000 /
-                (octeon_get_clock_rate() / 1000000); /* Tz */
+                (octeon_get_io_clock_rate() / 1000000); /* Tz */

         oe_a = Td;
         /* Tkr from cf spec, lengthened to meet T0 */
         oe_n = max(T0 - oe_a, Tkr);

-        dma_tim.s.dmack_pi = 1;
+        pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);
+
+        /* DMA channel number. */
+        c = (cf_port->dma_base & 8) >> 3;
+
+        /* Invert the polarity if the default is 0 */
+        dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1;

         dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
         dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
@@ -228,14 +283,11 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)

         pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
                  ns_to_tim_reg(tim_mult, 60));
-        pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
-                 "%d, dmarq: %d, pause: %d\n",
+        pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
                  dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
                  dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);

-        cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
-                       dma_tim.u64);
-
+        cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
 }

 /**
@@ -489,15 +541,10 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
         ata_wait_idle(ap);
 }

-static void octeon_cf_irq_on(struct ata_port *ap)
+static void octeon_cf_ata_port_noaction(struct ata_port *ap)
 {
 }

-static void octeon_cf_irq_clear(struct ata_port *ap)
-{
-        return;
-}
-
 static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
@@ -519,7 +566,7 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
  */
 static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
 {
-        struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
+        struct octeon_cf_port *cf_port = qc->ap->private_data;
         union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
         union cvmx_mio_boot_dma_intx mio_boot_dma_int;
         struct scatterlist *sg;
@@ -535,12 +582,10 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
          */
         mio_boot_dma_int.u64 = 0;
         mio_boot_dma_int.s.done = 1;
-        cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
-                       mio_boot_dma_int.u64);
+        cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64);

         /* Enable the interrupt. */
-        cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
-                       mio_boot_dma_int.u64);
+        cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64);

         /* Set the direction of the DMA */
         mio_boot_dma_cfg.u64 = 0;
@@ -569,8 +614,7 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
                  (mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
                  (void *)(unsigned long)mio_boot_dma_cfg.s.adr);

-        cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
-                       mio_boot_dma_cfg.u64);
+        cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
 }

 /**
@@ -583,10 +627,9 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
                                            struct ata_queued_cmd *qc)
 {
         struct ata_eh_info *ehi = &ap->link.eh_info;
-        struct octeon_cf_data *ocd = ap->dev->platform_data;
+        struct octeon_cf_port *cf_port = ap->private_data;
         union cvmx_mio_boot_dma_cfgx dma_cfg;
         union cvmx_mio_boot_dma_intx dma_int;
-        struct octeon_cf_port *cf_port;
         u8 status;

         VPRINTK("ata%u: protocol %d task_state %d\n",
@@ -596,9 +639,7 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
         if (ap->hsm_task_state != HSM_ST_LAST)
                 return 0;

-        cf_port = ap->private_data;
-
-        dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+        dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
         if (dma_cfg.s.size != 0xfffff) {
                 /* Error, the transfer was not complete. */
                 qc->err_mask |= AC_ERR_HOST_BUS;
@@ -608,15 +649,15 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
         /* Stop and clear the dma engine. */
         dma_cfg.u64 = 0;
         dma_cfg.s.size = -1;
-        cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);
+        cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);

         /* Disable the interrupt. */
         dma_int.u64 = 0;
-        cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);
+        cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);

         /* Clear the DMA complete status */
         dma_int.s.done = 1;
-        cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);
+        cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);

         status = ap->ops->sff_check_status(ap);

@@ -649,69 +690,68 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
                 struct ata_queued_cmd *qc;
                 union cvmx_mio_boot_dma_intx dma_int;
                 union cvmx_mio_boot_dma_cfgx dma_cfg;
-                struct octeon_cf_data *ocd;

                 ap = host->ports[i];
-                ocd = ap->dev->platform_data;
                 cf_port = ap->private_data;
-                dma_int.u64 =
-                        cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
-                dma_cfg.u64 =
-                        cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+
+                dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT);
+                dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);

                 qc = ata_qc_from_tag(ap, ap->link.active_tag);

-                if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
-                        if (dma_int.s.done && !dma_cfg.s.en) {
-                                if (!sg_is_last(qc->cursg)) {
-                                        qc->cursg = sg_next(qc->cursg);
-                                        handled = 1;
-                                        octeon_cf_dma_start(qc);
-                                        continue;
-                                } else {
-                                        cf_port->dma_finished = 1;
-                                }
-                        }
-                        if (!cf_port->dma_finished)
-                                continue;
-                        status = ioread8(ap->ioaddr.altstatus_addr);
-                        if (status & (ATA_BUSY | ATA_DRQ)) {
-                                /*
-                                 * We are busy, try to handle it
-                                 * later. This is the DMA finished
-                                 * interrupt, and it could take a
-                                 * little while for the card to be
-                                 * ready for more commands.
-                                 */
-                                /* Clear DMA irq. */
-                                dma_int.u64 = 0;
-                                dma_int.s.done = 1;
-                                cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
-                                               dma_int.u64);
-
-                                queue_delayed_work(cf_port->wq,
-                                                   &cf_port->delayed_finish, 1);
-                                handled = 1;
-                        } else {
-                                handled |= octeon_cf_dma_finished(ap, qc);
-                        }
-                }
+                if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
+                        continue;
+
+                if (dma_int.s.done && !dma_cfg.s.en) {
+                        if (!sg_is_last(qc->cursg)) {
+                                qc->cursg = sg_next(qc->cursg);
+                                handled = 1;
+                                octeon_cf_dma_start(qc);
+                                continue;
+                        } else {
+                                cf_port->dma_finished = 1;
+                        }
+                }
+                if (!cf_port->dma_finished)
+                        continue;
+                status = ioread8(ap->ioaddr.altstatus_addr);
+                if (status & (ATA_BUSY | ATA_DRQ)) {
+                        /*
+                         * We are busy, try to handle it later. This
+                         * is the DMA finished interrupt, and it could
+                         * take a little while for the card to be
+                         * ready for more commands.
+                         */
+                        /* Clear DMA irq. */
+                        dma_int.u64 = 0;
+                        dma_int.s.done = 1;
+                        cvmx_write_csr(cf_port->dma_base + DMA_INT,
+                                       dma_int.u64);
+                        hrtimer_start_range_ns(&cf_port->delayed_finish,
+                                ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL),
+                                OCTEON_CF_BUSY_POLL_INTERVAL / 5,
+                                HRTIMER_MODE_REL);
+                        handled = 1;
+                } else {
+                        handled |= octeon_cf_dma_finished(ap, qc);
+                }
         }
         spin_unlock_irqrestore(&host->lock, flags);
         DPRINTK("EXIT\n");
         return IRQ_RETVAL(handled);
 }

-static void octeon_cf_delayed_finish(struct work_struct *work)
+static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt)
 {
-        struct octeon_cf_port *cf_port = container_of(work,
+        struct octeon_cf_port *cf_port = container_of(hrt,
                                                       struct octeon_cf_port,
-                                                      delayed_finish.work);
+                                                      delayed_finish);
         struct ata_port *ap = cf_port->ap;
         struct ata_host *host = ap->host;
         struct ata_queued_cmd *qc;
         unsigned long flags;
         u8 status;
+        enum hrtimer_restart rv = HRTIMER_NORESTART;

         spin_lock_irqsave(&host->lock, flags);

@@ -726,15 +766,17 @@ static void octeon_cf_delayed_finish(struct work_struct *work)
         status = ioread8(ap->ioaddr.altstatus_addr);
         if (status & (ATA_BUSY | ATA_DRQ)) {
                 /* Still busy, try again. */
-                queue_delayed_work(cf_port->wq,
-                                   &cf_port->delayed_finish, 1);
+                hrtimer_forward_now(hrt,
+                        ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
+                rv = HRTIMER_RESTART;
                 goto out;
         }
         qc = ata_qc_from_tag(ap, ap->link.active_tag);
-        if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
+        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                 octeon_cf_dma_finished(ap, qc);
 out:
         spin_unlock_irqrestore(&host->lock, flags);
+        return rv;
 }

 static void octeon_cf_dev_config(struct ata_device *dev)
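The numbers behind the new polling: OCTEON_CF_BUSY_POLL_INTERVAL is 500000 ns, so the busy poll fires every 500 us with a slack of OCTEON_CF_BUSY_POLL_INTERVAL / 5 = 100 us passed to hrtimer_start_range_ns(), and octeon_cf_delayed_finish() re-arms itself via hrtimer_forward_now() for as long as the device reports BSY or DRQ. The old code used queue_delayed_work(..., 1), i.e. a one-jiffy delay whose length depends on HZ.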
@@ -786,8 +828,8 @@ static struct ata_port_operations octeon_cf_ops = {
         .qc_prep                = ata_noop_qc_prep,
         .qc_issue               = octeon_cf_qc_issue,
         .sff_dev_select         = octeon_cf_dev_select,
-        .sff_irq_on             = octeon_cf_irq_on,
-        .sff_irq_clear          = octeon_cf_irq_clear,
+        .sff_irq_on             = octeon_cf_ata_port_noaction,
+        .sff_irq_clear          = octeon_cf_ata_port_noaction,
         .cable_detect           = ata_cable_40wire,
         .set_piomode            = octeon_cf_set_piomode,
         .set_dmamode            = octeon_cf_set_dmamode,
@@ -798,46 +840,113 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
 {
         struct resource *res_cs0, *res_cs1;

+        bool is_16bit;
+        const __be32 *cs_num;
+        struct property *reg_prop;
+        int n_addr, n_size, reg_len;
+        struct device_node *node;
+        const void *prop;
         void __iomem *cs0;
         void __iomem *cs1 = NULL;
         struct ata_host *host;
         struct ata_port *ap;
-        struct octeon_cf_data *ocd;
         int irq = 0;
         irq_handler_t irq_handler = NULL;
         void __iomem *base;
         struct octeon_cf_port *cf_port;
-        char version[32];
+        int rv = -ENOMEM;

-        res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);

-        if (!res_cs0)
+        node = pdev->dev.of_node;
+        if (node == NULL)
                 return -EINVAL;

-        ocd = pdev->dev.platform_data;
+        cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
+        if (!cf_port)
+                return -ENOMEM;

-        cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
-                                   resource_size(res_cs0));
+        cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL);

-        if (!cs0)
-                return -ENOMEM;
+        prop = of_get_property(node, "cavium,bus-width", NULL);
+        if (prop)
+                is_16bit = (be32_to_cpup(prop) == 16);
+        else
+                is_16bit = false;

-        /* Determine from availability of DMA if True IDE mode or not */
-        if (ocd->dma_engine >= 0) {
-                res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-                if (!res_cs1)
-                        return -EINVAL;
+        n_addr = of_n_addr_cells(node);
+        n_size = of_n_size_cells(node);

+        reg_prop = of_find_property(node, "reg", &reg_len);
+        if (!reg_prop || reg_len < sizeof(__be32)) {
+                rv = -EINVAL;
+                goto free_cf_port;
+        }
+        cs_num = reg_prop->value;
+        cf_port->cs0 = be32_to_cpup(cs_num);
+
+        if (cf_port->is_true_ide) {
+                struct device_node *dma_node;
+                dma_node = of_parse_phandle(node,
+                                            "cavium,dma-engine-handle", 0);
+                if (dma_node) {
+                        struct platform_device *dma_dev;
+                        dma_dev = of_find_device_by_node(dma_node);
+                        if (dma_dev) {
+                                struct resource *res_dma;
+                                int i;
+                                res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
+                                if (!res_dma) {
+                                        of_node_put(dma_node);
+                                        rv = -EINVAL;
+                                        goto free_cf_port;
+                                }
+                                cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
+                                                                              resource_size(res_dma));
+
+                                if (!cf_port->dma_base) {
+                                        of_node_put(dma_node);
+                                        rv = -EINVAL;
+                                        goto free_cf_port;
+                                }
+
+                                irq_handler = octeon_cf_interrupt;
+                                i = platform_get_irq(dma_dev, 0);
+                                if (i > 0)
+                                        irq = i;
+                        }
+                        of_node_put(dma_node);
+                }
+                res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+                if (!res_cs1) {
+                        rv = -EINVAL;
+                        goto free_cf_port;
+                }
                 cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
-                                           resource_size(res_cs1));
+                                           res_cs1->end - res_cs1->start + 1);

                 if (!cs1)
-                        return -ENOMEM;
+                        goto free_cf_port;
+
+                if (reg_len < (n_addr + n_size + 1) * sizeof(__be32)) {
+                        rv = -EINVAL;
+                        goto free_cf_port;
+                }
+                cs_num += n_addr + n_size;
+                cf_port->cs1 = be32_to_cpup(cs_num);
         }

-        cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
-        if (!cf_port)
-                return -ENOMEM;
+        res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+        if (!res_cs0) {
+                rv = -EINVAL;
+                goto free_cf_port;
+        }
+
+        cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+                                   resource_size(res_cs0));
+
+        if (!cs0)
+                goto free_cf_port;

         /* allocate host */
         host = ata_host_alloc(&pdev->dev, 1);
@@ -846,21 +955,22 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)

         ap = host->ports[0];
         ap->private_data = cf_port;
+        pdev->dev.platform_data = cf_port;
         cf_port->ap = ap;
         ap->ops = &octeon_cf_ops;
         ap->pio_mask = ATA_PIO6;
         ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;

-        base = cs0 + ocd->base_region_bias;
-        if (!ocd->is16bit) {
+        if (!is_16bit) {
+                base = cs0 + 0x800;
                 ap->ioaddr.cmd_addr = base;
                 ata_sff_std_ports(&ap->ioaddr);

                 ap->ioaddr.altstatus_addr = base + 0xe;
                 ap->ioaddr.ctl_addr = base + 0xe;
                 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
-        } else if (cs1) {
-                /* Presence of cs1 indicates True IDE mode. */
+        } else if (cf_port->is_true_ide) {
+                base = cs0;
                 ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
                 ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
                 ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
@@ -876,19 +986,15 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
                 ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
                 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;

-                ap->mwdma_mask = ATA_MWDMA4;
-                irq = platform_get_irq(pdev, 0);
-                irq_handler = octeon_cf_interrupt;
-
-                /* True IDE mode needs delayed work to poll for not-busy. */
-                cf_port->wq = create_singlethread_workqueue(DRV_NAME);
-                if (!cf_port->wq)
-                        goto free_cf_port;
-                INIT_DELAYED_WORK(&cf_port->delayed_finish,
-                                  octeon_cf_delayed_finish);
+                ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;

+                /* True IDE mode needs a timer to poll for not-busy. */
+                hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
+                             HRTIMER_MODE_REL);
+                cf_port->delayed_finish.function = octeon_cf_delayed_finish;
         } else {
                 /* 16 bit but not True IDE */
+                base = cs0 + 0x800;
                 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
                 octeon_cf_ops.softreset = octeon_cf_softreset16;
                 octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
@@ -902,28 +1008,71 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
                 ap->ioaddr.ctl_addr = base + 0xe;
                 ap->ioaddr.altstatus_addr = base + 0xe;
         }
+        cf_port->c0 = ap->ioaddr.ctl_addr;
+
+        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+        pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

         ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);


-        snprintf(version, sizeof(version), "%s %d bit%s",
-                 DRV_VERSION,
-                 (ocd->is16bit) ? 16 : 8,
-                 (cs1) ? ", True IDE" : "");
-        ata_print_version_once(&pdev->dev, version);
+        dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
+                 is_16bit ? 16 : 8,
+                 cf_port->is_true_ide ? ", True IDE" : "");

-        return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);
+        return ata_host_activate(host, irq, irq_handler,
+                                 IRQF_SHARED, &octeon_cf_sht);

 free_cf_port:
         kfree(cf_port);
-        return -ENOMEM;
+        return rv;
+}
+
+static void octeon_cf_shutdown(struct device *dev)
+{
+        union cvmx_mio_boot_dma_cfgx dma_cfg;
+        union cvmx_mio_boot_dma_intx dma_int;
+
+        struct octeon_cf_port *cf_port = dev->platform_data;
+
+        if (cf_port->dma_base) {
+                /* Stop and clear the dma engine. */
+                dma_cfg.u64 = 0;
+                dma_cfg.s.size = -1;
+                cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
+
+                /* Disable the interrupt. */
+                dma_int.u64 = 0;
+                cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
+
+                /* Clear the DMA complete status */
+                dma_int.s.done = 1;
+                cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
+
+                __raw_writeb(0, cf_port->c0);
+                udelay(20);
+                __raw_writeb(ATA_SRST, cf_port->c0);
+                udelay(20);
+                __raw_writeb(0, cf_port->c0);
+                mdelay(100);
+        }
 }

+static struct of_device_id octeon_cf_match[] = {
+        {
+                .compatible = "cavium,ebt3000-compact-flash",
+        },
+        {},
+};
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
1068
922static struct platform_driver octeon_cf_driver = { 1069static struct platform_driver octeon_cf_driver = {
923 .probe = octeon_cf_probe, 1070 .probe = octeon_cf_probe,
924 .driver = { 1071 .driver = {
925 .name = DRV_NAME, 1072 .name = DRV_NAME,
926 .owner = THIS_MODULE, 1073 .owner = THIS_MODULE,
1074 .of_match_table = octeon_cf_match,
1075 .shutdown = octeon_cf_shutdown
927 }, 1076 },
928}; 1077};
929 1078