author David Daney <ddaney@caviumnetworks.com> 2009-01-15 20:45:32 -0500
committer Jeff Garzik <jgarzik@redhat.com> 2009-01-16 10:23:39 -0500
commit 3c929c6f5aa7501790586a38dd8faca8fed9a158 (patch)
tree 772ff335f9a9da2d10a40cff5a4070e080d181dc /drivers/ata/pata_octeon_cf.c
parent 3ada9c126499dd4700dcdbd5b9fe8110ad17f578 (diff)
libata: New driver for OCTEON SOC Compact Flash interface (v7).
Cavium OCTEON processor support was recently merged, so now we have this CF driver for your consideration. Most OCTEON variants have *no* DMA or interrupt support on the CF interface, so for those only PIO is supported; where DMA is available, we do take advantage of it.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/ata/pata_octeon_cf.c')
-rw-r--r--	drivers/ata/pata_octeon_cf.c	965
1 file changed, 965 insertions(+), 0 deletions(-)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
new file mode 100644
index 000000000000..0fe4ef309c62
--- /dev/null
+++ b/drivers/ata/pata_octeon_cf.c
@@ -0,0 +1,965 @@
/*
 * Driver for the Octeon bootbus compact flash.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 - 2009 Cavium Networks
 * Copyright (C) 2008 Wind River Systems
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>

#include <asm/octeon/octeon.h>

/*
 * The Octeon bootbus compact flash interface is connected in at least
 * 3 different configurations on various evaluation boards:
 *
 * -- 8 bits no irq, no DMA
 * -- 16 bits no irq, no DMA
 * -- 16 bits True IDE mode with DMA, but no irq.
 *
 * In the last case the DMA engine can generate an interrupt when the
 * transfer is complete.  For the first two cases only PIO is supported.
 */

#define DRV_NAME "pata_octeon_cf"
#define DRV_VERSION "2.1"

struct octeon_cf_port {
	struct workqueue_struct *wq;
	struct delayed_work delayed_finish;
	struct ata_port *ap;
	int dma_finished;
};

static struct scsi_host_template octeon_cf_sht = {
	ATA_PIO_SHT(DRV_NAME),
};

/**
 * ns_to_tim_reg - convert a nanosecond based time to the value used in
 * the boot bus timing register, based on the timing multiple.
 * @tim_mult: the timing multiplier in effect for the region
 * @nsecs: duration to convert, in nanoseconds
 */
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
	unsigned int val;

	/*
	 * Compute # of eclock periods to get desired duration in
	 * nanoseconds.
	 */
	val = DIV_ROUND_UP(nsecs * (octeon_get_clock_rate() / 1000000),
			   1000 * tim_mult);
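	/*
	 * For example, assuming a hypothetical 500 MHz eclock and
	 * tim_mult == 2, a request for 60 ns gives
	 * DIV_ROUND_UP(60 * 500, 2000) = 15 ticks, i.e. 30 eclocks,
	 * which is exactly 60 ns.
	 */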

	return val;
}

static void octeon_cf_set_boot_reg_cfg(int cs)
{
	union cvmx_mio_boot_reg_cfgx reg_cfg;
	reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
	reg_cfg.s.dmack = 0;	/* Don't assert DMACK on access */
	reg_cfg.s.tim_mult = 2;	/* Timing multiplier 2x */
	reg_cfg.s.rd_dly = 0;	/* Sample on falling edge of BOOT_OE */
	reg_cfg.s.sam = 0;	/* Don't combine write and output enable */
	reg_cfg.s.we_ext = 0;	/* No write enable extension */
	reg_cfg.s.oe_ext = 0;	/* No read enable extension */
	reg_cfg.s.en = 1;	/* Enable this region */
	reg_cfg.s.orbit = 0;	/* Don't combine with previous region */
	reg_cfg.s.ale = 0;	/* Don't do address multiplexing */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
}

/**
 * octeon_cf_set_piomode - program the bootbus regions for PIO timing
 * @ap: ATA port information
 * @dev: ATA device
 *
 * Called after libata determines the needed PIO mode.  This function
 * programs the Octeon bootbus regions to support the timing
 * requirements of the PIO mode.
 */
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int cs = ocd->base_region;
	int T;
	struct ata_timing timing;

	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t1;
	int t2;
	int t2i;

	T = (int)(2000000000000LL / octeon_get_clock_rate());
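	/*
	 * T is the length of one timing-register tick in picoseconds;
	 * with tim_mult == 2 each tick spans two eclock periods.
	 * ata_timing_compute() quantizes the nanosecond based ATA
	 * timings into these ticks.
	 */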

	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
		BUG();

	t1 = timing.setup;
	if (t1)
		t1--;
	t2 = timing.active;
	if (t2)
		t2--;
	t2i = timing.act8b;
	if (t2i)
		t2i--;

	trh = ns_to_tim_reg(2, 20);
	if (trh)
		trh--;

	pause = timing.cycle - timing.active - timing.setup - trh;
	if (pause)
		pause--;
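	/*
	 * The boot bus timing fields appear to use minus-one notation
	 * (a programmed value of n yields n + 1 ticks), hence the
	 * decrement of each nonzero value above.
	 */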

	octeon_cf_set_boot_reg_cfg(cs);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		octeon_cf_set_boot_reg_cfg(cs + 1);

	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Not used */
	reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* How long write enable is asserted */
	reg_tim.s.we = t2;
	/* How long read enable is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(2, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
}

static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_dma_timx dma_tim;
	unsigned int oe_a;
	unsigned int oe_n;
	unsigned int dma_ackh;
	unsigned int dma_arq;
	unsigned int pause;
	unsigned int T0, Tkr, Td;
	unsigned int tim_mult;

	const struct ata_timing *timing;

	timing = ata_timing_find_mode(dev->dma_mode);
	T0 = timing->cycle;
	Td = timing->active;
	Tkr = timing->recover;
	dma_ackh = timing->dmack_hold;

	dma_tim.u64 = 0;
	/* dma_tim.s.tim_mult = 0 --> 4x */
	tim_mult = 4;

	/* not spec'ed, value in eclocks, not affected by tim_mult */
	dma_arq = 8;
	pause = 25 - dma_arq * 1000 /
		(octeon_get_clock_rate() / 1000000); /* Tz */

	oe_a = Td;
	/* Tkr from cf spec, lengthened to meet T0 */
	oe_n = max(T0 - oe_a, Tkr);
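	/*
	 * Example: for MWDMA2 (T0 = 120 ns, Td = 70 ns, Tkr = 25 ns)
	 * this gives oe_a = 70 ns and oe_n = max(50, 25) = 50 ns, so
	 * active plus recovery spans the full cycle time.
	 */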

	dma_tim.s.dmack_pi = 1;

	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

	/*
	 * This is tI; the CF spec says 0, but the Sony CF card
	 * requires more, so we use 20 ns.
	 */
	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

	dma_tim.s.dmarq = dma_arq;
	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

	dma_tim.s.rd_dly = 0;	/* Sample right on edge */

	/* writes only */
	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
		 ns_to_tim_reg(tim_mult, 60));
	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
		 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
		       dma_tim.u64);
}

/**
 * octeon_cf_data_xfer8 - Handle an 8 bit I/O request.
 *
 * @dev: Device to access
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw: True to write.
 */
static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
					 unsigned char *buffer,
					 unsigned int buflen,
					 int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite8(*buffer, data_addr);
			buffer++;
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		ioread8_rep(data_addr, buffer, words);
	}
	return buflen;
}

/**
 * octeon_cf_data_xfer16 - Handle a 16 bit I/O request.
 *
 * @dev: Device to access
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw: True to write.
 */
static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
					  unsigned char *buffer,
					  unsigned int buflen,
					  int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen / 2;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite16(*(uint16_t *)buffer, data_addr);
			buffer += sizeof(uint16_t);
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		while (words--) {
			*(uint16_t *)buffer = ioread16(data_addr);
			buffer += sizeof(uint16_t);
		}
	}
	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };

		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(buffer, align_buf, 1);
		} else {
			memcpy(align_buf, buffer, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}
	return buflen;
}

/**
 * octeon_cf_tf_read16 - Read the taskfile for 16bit non-True IDE only.
 */
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
{
	u16 blob;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;
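	/*
	 * The taskfile registers are packed two per 16-bit word:
	 * offset 0xc holds error/feature in its high byte, offset 2
	 * holds nsect (low) and lbal (high), offset 4 holds lbam (low)
	 * and lbah (high), and offset 6 holds device (low) and
	 * command/status (high).
	 */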

	blob = __raw_readw(base + 0xc);
	tf->feature = blob >> 8;

	blob = __raw_readw(base + 2);
	tf->nsect = blob & 0xff;
	tf->lbal = blob >> 8;

	blob = __raw_readw(base + 4);
	tf->lbam = blob & 0xff;
	tf->lbah = blob >> 8;

	blob = __raw_readw(base + 6);
	tf->device = blob & 0xff;
	tf->command = blob >> 8;

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ap->ioaddr.ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);

			blob = __raw_readw(base + 0xc);
			tf->hob_feature = blob >> 8;

			blob = __raw_readw(base + 2);
			tf->hob_nsect = blob & 0xff;
			tf->hob_lbal = blob >> 8;

			blob = __raw_readw(base + 4);
			tf->hob_lbam = blob & 0xff;
			tf->hob_lbah = blob >> 8;

			iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
			ap->last_ctl = tf->ctl;
		} else {
			WARN_ON(1);
		}
	}
}

static u8 octeon_cf_check_status16(struct ata_port *ap)
{
	u16 blob;
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 6);
	return blob >> 8;
}

static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *base = ap->ioaddr.data_addr;
	int rc;
	u8 err;

	DPRINTK("about to softreset\n");
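	/*
	 * Drive the SRST sequence by hand with 16-bit writes to the
	 * device control word at offset 0xe: deassert, assert, then
	 * deassert SRST, with short settling delays in between.
	 */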
	__raw_writew(ap->ctl, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl, base + 0xe);

	rc = ata_sff_wait_after_reset(link, 1, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}

/**
 * octeon_cf_tf_load16 - Load the taskfile for 16bit non-True IDE only.
 *
 * The device register is not loaded here; that is done as part of
 * octeon_cf_exec_command16().
 */
static void octeon_cf_tf_load16(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		__raw_writew(tf->hob_feature << 8, base + 0xc);
		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}
	if (is_addr) {
		__raw_writew(tf->feature << 8, base + 0xc);
		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
		VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}
	ata_wait_idle(ap);
}

static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
{
	/* There is only one device, do nothing. */
	return;
}

/*
 * Issue ATA command to host controller.  The device_addr is also sent
 * as it must be written in a combined write with the command.
 */
static void octeon_cf_exec_command16(struct ata_port *ap,
				     const struct ata_taskfile *tf)
{
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;
	u16 blob;

	if (tf->flags & ATA_TFLAG_DEVICE) {
		VPRINTK("device 0x%X\n", tf->device);
		blob = tf->device;
	} else {
		blob = 0;
	}

	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
	blob |= (tf->command << 8);
	__raw_writew(blob, base + 6);

	ata_wait_idle(ap);
}

static u8 octeon_cf_irq_on(struct ata_port *ap)
{
	return 0;
}

static void octeon_cf_irq_clear(struct ata_port *ap)
{
	return;
}

static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct octeon_cf_port *cf_port;

	cf_port = ap->private_data;
	DPRINTK("ENTER\n");
	/* issue r/w command */
	qc->cursg = qc->sg;
	cf_port->dma_finished = 0;
	ap->ops->sff_exec_command(ap, &qc->tf);
	DPRINTK("EXIT\n");
}

/**
 * octeon_cf_dma_start - start a DMA transfer that was already set up
 *
 * @qc: Information about the DMA
 */
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
	struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
	struct scatterlist *sg;

	VPRINTK("%d scatterlists\n", qc->n_elem);

	/* Get the scatter list entry we need to DMA into */
	sg = qc->cursg;
	BUG_ON(!sg);

	/*
	 * Clear the DMA complete status.
	 */
	mio_boot_dma_int.u64 = 0;
	mio_boot_dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Enable the interrupt. */
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Set the direction of the DMA */
	mio_boot_dma_cfg.u64 = 0;
	mio_boot_dma_cfg.s.en = 1;
	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);

	/*
	 * Don't stop the DMA if the device deasserts DMARQ.  Many
	 * compact flashes deassert DMARQ for a short time between
	 * sectors.  Instead of stopping and restarting the DMA, we'll
	 * let the hardware do it.  If the DMA is really stopped early
	 * due to an error condition, a later timeout will force us to
	 * stop.
	 */
	mio_boot_dma_cfg.s.clr = 0;

	/* Size is specified in 16bit words and minus one notation */
	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;

	/* We need to swap the high and low bytes of every 16 bits */
	mio_boot_dma_cfg.s.swap8 = 1;

	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);

	VPRINTK("%s %d bytes address=%p\n",
		(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
		(void *)(unsigned long)mio_boot_dma_cfg.s.adr);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
		       mio_boot_dma_cfg.u64);
}

/**
 * octeon_cf_dma_finished - handle the end of a DMA transfer
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
					   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;
	struct octeon_cf_port *cf_port;
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	if (ap->hsm_task_state != HSM_ST_LAST)
		return 0;

	cf_port = ap->private_data;

	dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
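	/*
	 * The 20-bit size field reads back as all ones (0xfffff) once
	 * the transfer has fully completed; any other value means the
	 * engine stopped before the end of the data.
	 */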
	if (dma_cfg.s.size != 0xfffff) {
		/* Error, the transfer was not complete. */
		qc->err_mask |= AC_ERR_HOST_BUS;
		ap->hsm_task_state = HSM_ST_ERR;
	}

	/* Stop and clear the dma engine. */
	dma_cfg.u64 = 0;
	dma_cfg.s.size = -1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);

	/* Disable the interrupt. */
	dma_int.u64 = 0;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);

	/* Clear the DMA complete status */
	dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);

	status = ap->ops->sff_check_status(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);

	return 1;
}

/*
 * Check if any queued commands have more DMAs, if so start the next
 * transfer, else do end of transfer handling.
 */
static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct octeon_cf_port *cf_port;
	int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	DPRINTK("ENTER\n");
	for (i = 0; i < host->n_ports; i++) {
		u8 status;
		struct ata_port *ap;
		struct ata_queued_cmd *qc;
		union cvmx_mio_boot_dma_intx dma_int;
		union cvmx_mio_boot_dma_cfgx dma_cfg;
		struct octeon_cf_data *ocd;

		ap = host->ports[i];
		if (!ap || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		ocd = ap->dev->platform_data;
		cf_port = ap->private_data;
		dma_int.u64 =
			cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
		dma_cfg.u64 =
			cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));

		qc = ata_qc_from_tag(ap, ap->link.active_tag);

		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
		    (qc->flags & ATA_QCFLAG_ACTIVE)) {
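			/*
			 * The engine signals 'done' and drops its
			 * enable bit when a scatterlist element
			 * completes; if more elements remain, restart
			 * it on the next one instead of finishing the
			 * command.
			 */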
			if (dma_int.s.done && !dma_cfg.s.en) {
				if (!sg_is_last(qc->cursg)) {
					qc->cursg = sg_next(qc->cursg);
					handled = 1;
					octeon_cf_dma_start(qc);
					continue;
				} else {
					cf_port->dma_finished = 1;
				}
			}
			if (!cf_port->dma_finished)
				continue;
			status = ioread8(ap->ioaddr.altstatus_addr);
			if (status & (ATA_BUSY | ATA_DRQ)) {
				/*
				 * We are busy, try to handle it
				 * later.  This is the DMA finished
				 * interrupt, and it could take a
				 * little while for the card to be
				 * ready for more commands.
				 */
				/* Clear DMA irq. */
				dma_int.u64 = 0;
				dma_int.s.done = 1;
				cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
					       dma_int.u64);

				queue_delayed_work(cf_port->wq,
						   &cf_port->delayed_finish, 1);
				handled = 1;
			} else {
				handled |= octeon_cf_dma_finished(ap, qc);
			}
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);
	DPRINTK("EXIT\n");
	return IRQ_RETVAL(handled);
}

static void octeon_cf_delayed_finish(struct work_struct *work)
{
	struct octeon_cf_port *cf_port = container_of(work,
						      struct octeon_cf_port,
						      delayed_finish.work);
	struct ata_port *ap = cf_port->ap;
	struct ata_host *host = ap->host;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If the port is not waiting for completion, it must have
	 * handled it previously.  The hsm_task_state is
	 * protected by host->lock.
	 */
	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
		goto out;

	status = ioread8(ap->ioaddr.altstatus_addr);
	if (status & (ATA_BUSY | ATA_DRQ)) {
		/* Still busy, try again. */
		queue_delayed_work(cf_port->wq,
				   &cf_port->delayed_finish, 1);
		goto out;
	}
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
	    (qc->flags & ATA_QCFLAG_ACTIVE))
		octeon_cf_dma_finished(ap, qc);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static void octeon_cf_dev_config(struct ata_device *dev)
{
	/*
	 * A maximum of 2^20 - 1 16 bit transfers are possible with
	 * the bootbus DMA, so we throttle max_sectors to 2^12 - 1 ==
	 * 4095 to ensure that this limit can never be exceeded.
	 */
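	/*
	 * (4095 sectors * 256 words/sector = 1,048,320 words, safely
	 * below the 2^20 - 1 = 1,048,575 word limit.)
	 */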
	dev->max_sectors = min(dev->max_sectors, 4095U);
}

/*
 * Trap if driver tries to do standard bmdma commands.  They are not
 * supported.
 */
static void unreachable_qc(struct ata_queued_cmd *qc)
{
	BUG();
}

static u8 unreachable_port(struct ata_port *ap)
{
	BUG();
}

/*
 * We don't do ATAPI DMA so return 0.
 */
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 0;
}

static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		octeon_cf_dma_setup(qc);	    /* set up dma */
		octeon_cf_dma_start(qc);	    /* initiate dma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		dev_err(ap->dev, "Error, ATAPI not supported\n");
		BUG();

	default:
		return ata_sff_qc_issue(qc);
	}

	return 0;
}

static struct ata_port_operations octeon_cf_ops = {
	.inherits = &ata_sff_port_ops,
	.check_atapi_dma = octeon_cf_check_atapi_dma,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = octeon_cf_qc_issue,
	.sff_dev_select = octeon_cf_dev_select,
	.sff_irq_on = octeon_cf_irq_on,
	.sff_irq_clear = octeon_cf_irq_clear,
	.bmdma_setup = unreachable_qc,
	.bmdma_start = unreachable_qc,
	.bmdma_stop = unreachable_qc,
	.bmdma_status = unreachable_port,
	.cable_detect = ata_cable_40wire,
	.set_piomode = octeon_cf_set_piomode,
	.set_dmamode = octeon_cf_set_dmamode,
	.dev_config = octeon_cf_dev_config,
};

static int __devinit octeon_cf_probe(struct platform_device *pdev)
{
	struct resource *res_cs0, *res_cs1;

	void __iomem *cs0;
	void __iomem *cs1 = NULL;
	struct ata_host *host;
	struct ata_port *ap;
	struct octeon_cf_data *ocd;
	int irq = 0;
	irq_handler_t irq_handler = NULL;
	void __iomem *base;
	struct octeon_cf_port *cf_port;

	res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_cs0)
		return -EINVAL;

	ocd = pdev->dev.platform_data;

	cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
				   res_cs0->end - res_cs0->start + 1);
	if (!cs0)
		return -ENOMEM;

	/* Determine from availability of DMA if True IDE mode or not */
	if (ocd->dma_engine >= 0) {
		res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res_cs1)
			return -EINVAL;

		cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
					   res_cs1->end - res_cs1->start + 1);
		if (!cs1)
			return -ENOMEM;
	}

	cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
	if (!cf_port)
		return -ENOMEM;

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		goto free_cf_port;

	ap = host->ports[0];
	ap->private_data = cf_port;
	cf_port->ap = ap;
	ap->ops = &octeon_cf_ops;
	ap->pio_mask = 0x7f; /* Support PIO 0-6 */
	ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
		  | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;

	base = cs0 + ocd->base_region_bias;
	if (!ocd->is16bit) {
		ap->ioaddr.cmd_addr = base;
		ata_sff_std_ports(&ap->ioaddr);

		ap->ioaddr.altstatus_addr = base + 0xe;
		ap->ioaddr.ctl_addr = base + 0xe;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
	} else if (cs1) {
		/* Presence of cs1 indicates True IDE mode. */
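		/*
		 * Each byte-wide taskfile register occupies the odd
		 * byte of its 16-bit word on the boot bus, hence the
		 * (reg << 1) + 1 addressing; the 16-bit data register
		 * keeps its natural even offset.
		 */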
		ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
		ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
		ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1;
		ap->ioaddr.nsect_addr = base + (ATA_REG_NSECT << 1) + 1;
		ap->ioaddr.lbal_addr = base + (ATA_REG_LBAL << 1) + 1;
		ap->ioaddr.lbam_addr = base + (ATA_REG_LBAM << 1) + 1;
		ap->ioaddr.lbah_addr = base + (ATA_REG_LBAH << 1) + 1;
		ap->ioaddr.device_addr = base + (ATA_REG_DEVICE << 1) + 1;
		ap->ioaddr.status_addr = base + (ATA_REG_STATUS << 1) + 1;
		ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
		ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;

		ap->mwdma_mask = 0x1f; /* Support MWDMA 0-4 */
		irq = platform_get_irq(pdev, 0);
		irq_handler = octeon_cf_interrupt;

		/* True IDE mode needs delayed work to poll for not-busy. */
		cf_port->wq = create_singlethread_workqueue(DRV_NAME);
		if (!cf_port->wq)
			goto free_cf_port;
		INIT_DELAYED_WORK(&cf_port->delayed_finish,
				  octeon_cf_delayed_finish);

	} else {
		/* 16 bit but not True IDE */
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
		octeon_cf_ops.softreset = octeon_cf_softreset16;
		octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
		octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16;
		octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16;
		octeon_cf_ops.sff_exec_command = octeon_cf_exec_command16;

		ap->ioaddr.data_addr = base + ATA_REG_DATA;
		ap->ioaddr.nsect_addr = base + ATA_REG_NSECT;
		ap->ioaddr.lbal_addr = base + ATA_REG_LBAL;
		ap->ioaddr.ctl_addr = base + 0xe;
		ap->ioaddr.altstatus_addr = base + 0xe;
	}

	ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);

	dev_info(&pdev->dev, "version " DRV_VERSION " %d bit%s.\n",
		 (ocd->is16bit) ? 16 : 8,
		 (cs1) ? ", True IDE" : "");

	return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);

free_cf_port:
	kfree(cf_port);
	return -ENOMEM;
}

static struct platform_driver octeon_cf_driver = {
	.probe = octeon_cf_probe,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};

static int __init octeon_cf_init(void)
{
	return platform_driver_register(&octeon_cf_driver);
}

MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(octeon_cf_init);