path: root/drivers/ide/scc_pata.c
Diffstat (limited to 'drivers/ide/scc_pata.c')
-rw-r--r--   drivers/ide/scc_pata.c   966
1 file changed, 966 insertions, 0 deletions
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
new file mode 100644
index 000000000000..49f163aa51e3
--- /dev/null
+++ b/drivers/ide/scc_pata.c
@@ -0,0 +1,966 @@
1/*
2 * Support for IDE interfaces on Celleb platform
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This code is based on drivers/ide/pci/siimage.c:
7 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
8 * Copyright (C) 2003 Red Hat <alan@redhat.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 */
24
25#include <linux/types.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28#include <linux/delay.h>
29#include <linux/ide.h>
30#include <linux/init.h>
31
32#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
33
34#define SCC_PATA_NAME "scc IDE"
35
36#define TDVHSEL_MASTER 0x00000001
37#define TDVHSEL_SLAVE 0x00000004
38
39#define MODE_JCUSFEN 0x00000080
40
41#define CCKCTRL_ATARESET 0x00040000
42#define CCKCTRL_BUFCNT 0x00020000
43#define CCKCTRL_CRST 0x00010000
44#define CCKCTRL_OCLKEN 0x00000100
45#define CCKCTRL_ATACLKOEN 0x00000002
46#define CCKCTRL_LCLKEN 0x00000001
47
48#define QCHCD_IOS_SS 0x00000001
49
50#define QCHSD_STPDIAG 0x00020000
51
52#define INTMASK_MSK 0xD1000012
53#define INTSTS_SERROR 0x80000000
54#define INTSTS_PRERR 0x40000000
55#define INTSTS_RERR 0x10000000
56#define INTSTS_ICERR 0x01000000
57#define INTSTS_BMSINT 0x00000010
58#define INTSTS_BMHE 0x00000008
59#define INTSTS_IOIRQS 0x00000004
60#define INTSTS_INTRQ 0x00000002
61#define INTSTS_ACTEINT 0x00000001
62
63#define ECMODE_VALUE 0x01
64
65static struct scc_ports {
66 unsigned long ctl, dma;
67 struct ide_host *host; /* for removing port from system */
68} scc_ports[MAX_HWIFS];
69
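/*
 * Every timing table below is indexed as [clock][mode]: the first index
 * selects the ATA clock (0 = 100MHz, 1 = 133MHz, derived from
 * CCKCTRL_ATACLKOEN in scc_set_pio_mode()/scc_set_dma_mode()), and the
 * second index is the PIO or UDMA mode number.
 */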
70/* PIO transfer mode table */
71/* JCHST */
72static unsigned long JCHSTtbl[2][7] = {
73 {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
74 {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
75};
76
77/* JCHHT */
78static unsigned long JCHHTtbl[2][7] = {
79 {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
80 {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
81};
82
83/* JCHCT */
84static unsigned long JCHCTtbl[2][7] = {
85 {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
86 {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
87};
88
89
90/* DMA transfer mode table */
91/* JCHDCTM/JCHDCTS */
92static unsigned long JCHDCTxtbl[2][7] = {
93 {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
94 {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
95};
96
97/* JCSTWTM/JCSTWTS */
98static unsigned long JCSTWTxtbl[2][7] = {
99 {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
100 {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
101};
102
103/* JCTSS */
104static unsigned long JCTSStbl[2][7] = {
105 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
106 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
107};
108
109/* JCENVT */
110static unsigned long JCENVTtbl[2][7] = {
111 {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
112 {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
113};
114
115/* JCACTSELS/JCACTSELM */
116static unsigned long JCACTSELtbl[2][7] = {
117 {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
118 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
119};
120
121
122static u8 scc_ide_inb(unsigned long port)
123{
124 u32 data = in_be32((void*)port);
125 return (u8)data;
126}
127
128static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
129{
130 out_be32((void *)hwif->io_ports.command_addr, cmd);
131 eieio();
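	/*
	 * The read-back below appears to flush the posted command write;
	 * scc_set_irq() relies on the same pattern.
	 */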
132 in_be32((void *)(hwif->dma_base + 0x01c));
133 eieio();
134}
135
136static u8 scc_read_status(ide_hwif_t *hwif)
137{
138 return (u8)in_be32((void *)hwif->io_ports.status_addr);
139}
140
141static u8 scc_read_altstatus(ide_hwif_t *hwif)
142{
143 return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
144}
145
146static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
147{
148 return (u8)in_be32((void *)(hwif->dma_base + 4));
149}
150
151static void scc_set_irq(ide_hwif_t *hwif, int on)
152{
153 u8 ctl = ATA_DEVCTL_OBS;
154
155 if (on == 4) { /* hack for SRST */
156 ctl |= 4;
157 on &= ~4;
158 }
159
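	/* bit 1 of the device control register is nIEN: set it when "on"
	   is 0 to mask the drive interrupt */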
160 ctl |= on ? 0 : 2;
161
162 out_be32((void *)hwif->io_ports.ctl_addr, ctl);
163 eieio();
164 in_be32((void *)(hwif->dma_base + 0x01c));
165 eieio();
166}
167
168static void scc_ide_insw(unsigned long port, void *addr, u32 count)
169{
170 u16 *ptr = (u16 *)addr;
171 while (count--) {
172 *ptr++ = le16_to_cpu(in_be32((void*)port));
173 }
174}
175
176static void scc_ide_insl(unsigned long port, void *addr, u32 count)
177{
178 u16 *ptr = (u16 *)addr;
179 while (count--) {
180 *ptr++ = le16_to_cpu(in_be32((void*)port));
181 *ptr++ = le16_to_cpu(in_be32((void*)port));
182 }
183}
184
185static void scc_ide_outb(u8 addr, unsigned long port)
186{
187 out_be32((void*)port, addr);
188}
189
190static void
191scc_ide_outsw(unsigned long port, void *addr, u32 count)
192{
193 u16 *ptr = (u16 *)addr;
194 while (count--) {
195 out_be32((void*)port, cpu_to_le16(*ptr++));
196 }
197}
198
199static void
200scc_ide_outsl(unsigned long port, void *addr, u32 count)
201{
202 u16 *ptr = (u16 *)addr;
203 while (count--) {
204 out_be32((void*)port, cpu_to_le16(*ptr++));
205 out_be32((void*)port, cpu_to_le16(*ptr++));
206 }
207}
208
209/**
210 * scc_set_pio_mode - set host controller for PIO mode
211 * @drive: drive
212 * @pio: PIO mode number
213 *
214 * Load the timing settings for this device mode into the
215 * controller.
216 */
217
218static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
219{
220 ide_hwif_t *hwif = HWIF(drive);
221 struct scc_ports *ports = ide_get_hwifdata(hwif);
222 unsigned long ctl_base = ports->ctl;
223 unsigned long cckctrl_port = ctl_base + 0xff0;
224 unsigned long piosht_port = ctl_base + 0x000;
225 unsigned long pioct_port = ctl_base + 0x004;
226 unsigned long reg;
227 int offset;
228
229 reg = in_be32((void __iomem *)cckctrl_port);
230 if (reg & CCKCTRL_ATACLKOEN) {
231 offset = 1; /* 133MHz */
232 } else {
233 offset = 0; /* 100MHz */
234 }
235 reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
236 out_be32((void __iomem *)piosht_port, reg);
237 reg = JCHCTtbl[offset][pio];
238 out_be32((void __iomem *)pioct_port, reg);
239}
240
241/**
242 * scc_set_dma_mode - set host controller for DMA mode
243 * @drive: drive
244 * @speed: DMA mode
245 *
246 * Load the timing settings for this device mode into the
247 * controller.
248 */
249
250static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
251{
252 ide_hwif_t *hwif = HWIF(drive);
253 struct scc_ports *ports = ide_get_hwifdata(hwif);
254 unsigned long ctl_base = ports->ctl;
255 unsigned long cckctrl_port = ctl_base + 0xff0;
256 unsigned long mdmact_port = ctl_base + 0x008;
257 unsigned long mcrcst_port = ctl_base + 0x00c;
258 unsigned long sdmact_port = ctl_base + 0x010;
259 unsigned long scrcst_port = ctl_base + 0x014;
260 unsigned long udenvt_port = ctl_base + 0x018;
261 unsigned long tdvhsel_port = ctl_base + 0x020;
262 int is_slave = (&hwif->drives[1] == drive);
263 int offset, idx;
264 unsigned long reg;
265 unsigned long jcactsel;
266
267 reg = in_be32((void __iomem *)cckctrl_port);
268 if (reg & CCKCTRL_ATACLKOEN) {
269 offset = 1; /* 133MHz */
270 } else {
271 offset = 0; /* 100MHz */
272 }
273
274 idx = speed - XFER_UDMA_0;
275
276 jcactsel = JCACTSELtbl[offset][idx];
277 if (is_slave) {
278 out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
279 out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
280 jcactsel = jcactsel << 2;
281 out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
282 } else {
283 out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
284 out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
285 out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
286 }
287 reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
288 out_be32((void __iomem *)udenvt_port, reg);
289}
290
291static void scc_dma_host_set(ide_drive_t *drive, int on)
292{
293 ide_hwif_t *hwif = drive->hwif;
294 u8 unit = drive->dn & 1;
295 u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);
296
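	/* bits 5 and 6 of the bus-master status register are the
	   drive 0/drive 1 DMA-enable flags */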
297 if (on)
298 dma_stat |= (1 << (5 + unit));
299 else
300 dma_stat &= ~(1 << (5 + unit));
301
302 scc_ide_outb(dma_stat, hwif->dma_base + 4);
303}
304
305/**
 306 * scc_dma_setup - begin a DMA phase
307 * @drive: target device
308 *
309 * Build an IDE DMA PRD (IDE speak for scatter gather table)
310 * and then set up the DMA transfer registers.
311 *
312 * Returns 0 on success. If a PIO fallback is required then 1
313 * is returned.
314 */
315
316static int scc_dma_setup(ide_drive_t *drive)
317{
318 ide_hwif_t *hwif = drive->hwif;
319 struct request *rq = HWGROUP(drive)->rq;
320 unsigned int reading;
321 u8 dma_stat;
322
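	/* bit 3 of the bus-master command register selects the direction:
	   set for device-to-memory (a read), clear for a write */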
323 if (rq_data_dir(rq))
324 reading = 0;
325 else
326 reading = 1 << 3;
327
328 /* fall back to pio! */
329 if (!ide_build_dmatable(drive, rq)) {
330 ide_map_sg(drive, rq);
331 return 1;
332 }
333
334 /* PRD table */
335 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
336
337 /* specify r/w */
338 out_be32((void __iomem *)hwif->dma_base, reading);
339
340 /* read DMA status for INTR & ERROR flags */
341 dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));
342
343 /* clear INTR & ERROR flags */
344 out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
345 drive->waiting_for_dma = 1;
346 return 0;
347}
348
349static void scc_dma_start(ide_drive_t *drive)
350{
351 ide_hwif_t *hwif = drive->hwif;
352 u8 dma_cmd = scc_ide_inb(hwif->dma_base);
353
354 /* start DMA */
355 scc_ide_outb(dma_cmd | 1, hwif->dma_base);
356 wmb();
357}
358
359static int __scc_dma_end(ide_drive_t *drive)
360{
361 ide_hwif_t *hwif = drive->hwif;
362 u8 dma_stat, dma_cmd;
363
364 drive->waiting_for_dma = 0;
365 /* get DMA command mode */
366 dma_cmd = scc_ide_inb(hwif->dma_base);
367 /* stop DMA */
368 scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
369 /* get DMA status */
370 dma_stat = scc_ide_inb(hwif->dma_base + 4);
371 /* clear the INTR & ERROR bits */
372 scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
373 /* purge DMA mappings */
374 ide_destroy_dmatable(drive);
375 /* verify good DMA status */
376 wmb();
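	/* bits 2..0 of dma_stat are interrupt/error/active; only 0x04
	   (interrupt alone) indicates a clean completion */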
377 return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
378}
379
380/**
381 * scc_dma_end - Stop DMA
382 * @drive: IDE drive
383 *
384 * Check and clear INT Status register.
385 * Then call __scc_dma_end().
386 */
387
388static int scc_dma_end(ide_drive_t *drive)
389{
390 ide_hwif_t *hwif = HWIF(drive);
391 void __iomem *dma_base = (void __iomem *)hwif->dma_base;
392 unsigned long intsts_port = hwif->dma_base + 0x014;
393 u32 reg;
394 int dma_stat, data_loss = 0;
395 static int retry = 0;
396
397 /* errata A308 workaround: Step5 (check data loss) */
 398 /* non ide_disk devices are not checked because they are limited to UDMA4 */
399 if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
400 & ATA_ERR) &&
401 drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
402 reg = in_be32((void __iomem *)intsts_port);
403 if (!(reg & INTSTS_ACTEINT)) {
404 printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
405 drive->name);
406 data_loss = 1;
407 if (retry++) {
408 struct request *rq = HWGROUP(drive)->rq;
409 int unit;
410 /* ERROR_RESET and drive->crc_count are needed
411 * to reduce DMA transfer mode in retry process.
412 */
413 if (rq)
414 rq->errors |= ERROR_RESET;
415 for (unit = 0; unit < MAX_DRIVES; unit++) {
416 ide_drive_t *drive = &hwif->drives[unit];
417 drive->crc_count++;
418 }
419 }
420 }
421 }
422
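	/* drain INTSTS: acknowledge every pending error/interrupt condition
	   and re-read until the register is quiet */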
423 while (1) {
424 reg = in_be32((void __iomem *)intsts_port);
425
426 if (reg & INTSTS_SERROR) {
427 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
428 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
429
430 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
431 continue;
432 }
433
434 if (reg & INTSTS_PRERR) {
435 u32 maea0, maec0;
436 unsigned long ctl_base = hwif->config_data;
437
438 maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
439 maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));
440
441 printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);
442
443 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
444
445 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
446 continue;
447 }
448
449 if (reg & INTSTS_RERR) {
450 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
451 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
452
453 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
454 continue;
455 }
456
457 if (reg & INTSTS_ICERR) {
458 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
459
460 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
461 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
462 continue;
463 }
464
465 if (reg & INTSTS_BMSINT) {
466 printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
467 out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);
468
469 ide_do_reset(drive);
470 continue;
471 }
472
473 if (reg & INTSTS_BMHE) {
474 out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
475 continue;
476 }
477
478 if (reg & INTSTS_ACTEINT) {
479 out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
480 continue;
481 }
482
483 if (reg & INTSTS_IOIRQS) {
484 out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
485 continue;
486 }
487 break;
488 }
489
490 dma_stat = __scc_dma_end(drive);
491 if (data_loss)
492 dma_stat |= 2; /* emulate DMA error (to retry command) */
493 return dma_stat;
494}
495
496/* returns 1 if dma irq issued, 0 otherwise */
497static int scc_dma_test_irq(ide_drive_t *drive)
498{
499 ide_hwif_t *hwif = HWIF(drive);
500 u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
501
502 /* SCC errata A252,A308 workaround: Step4 */
503 if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
504 & ATA_ERR) &&
505 (int_stat & INTSTS_INTRQ))
506 return 1;
507
508 /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
509 if (int_stat & INTSTS_IOIRQS)
510 return 1;
511
512 return 0;
513}
514
515static u8 scc_udma_filter(ide_drive_t *drive)
516{
517 ide_hwif_t *hwif = drive->hwif;
518 u8 mask = hwif->ultra_mask;
519
 520 /* errata A308 workaround: limit non ide_disk drives to UDMA4 */
521 if ((drive->media != ide_disk) && (mask & 0xE0)) {
522 printk(KERN_INFO "%s: limit %s to UDMA4\n",
523 SCC_PATA_NAME, drive->name);
524 mask = ATA_UDMA4;
525 }
526
527 return mask;
528}
529
530/**
531 * setup_mmio_scc - map CTRL/BMID region
532 * @dev: PCI device we are configuring
533 * @name: device name
534 *
535 */
536
537static int setup_mmio_scc (struct pci_dev *dev, const char *name)
538{
539 unsigned long ctl_base = pci_resource_start(dev, 0);
540 unsigned long dma_base = pci_resource_start(dev, 1);
541 unsigned long ctl_size = pci_resource_len(dev, 0);
542 unsigned long dma_size = pci_resource_len(dev, 1);
543 void __iomem *ctl_addr;
544 void __iomem *dma_addr;
545 int i, ret;
546
547 for (i = 0; i < MAX_HWIFS; i++) {
548 if (scc_ports[i].ctl == 0)
549 break;
550 }
551 if (i >= MAX_HWIFS)
552 return -ENOMEM;
553
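	/* (1 << 2) - 1 == 0x3: reserve BARs 0 (CTRL) and 1 (BMID) only */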
554 ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
555 if (ret < 0) {
556 printk(KERN_ERR "%s: can't reserve resources\n", name);
557 return ret;
558 }
559
560 if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
561 goto fail_0;
562
563 if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
564 goto fail_1;
565
566 pci_set_master(dev);
567 scc_ports[i].ctl = (unsigned long)ctl_addr;
568 scc_ports[i].dma = (unsigned long)dma_addr;
569 pci_set_drvdata(dev, (void *) &scc_ports[i]);
570
571 return 1;
572
573 fail_1:
574 iounmap(ctl_addr);
575 fail_0:
576 return -ENOMEM;
577}
578
579static int scc_ide_setup_pci_device(struct pci_dev *dev,
580 const struct ide_port_info *d)
581{
582 struct scc_ports *ports = pci_get_drvdata(dev);
583 struct ide_host *host;
584 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
585 int i, rc;
586
587 memset(&hw, 0, sizeof(hw));
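	/* the nine task-file/control registers live in the BMID (dma)
	   region starting at offset 0x20, one 32-bit word each */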
588 for (i = 0; i <= 8; i++)
589 hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
590 hw.irq = dev->irq;
591 hw.dev = &dev->dev;
592 hw.chipset = ide_pci;
593
594 rc = ide_host_add(d, hws, &host);
595 if (rc)
596 return rc;
597
598 ports->host = host;
599
600 return 0;
601}
602
603/**
604 * init_setup_scc - set up an SCC PATA Controller
605 * @dev: PCI device
606 * @d: IDE port info
607 *
 608 * Perform the initial setup for this device.
609 */
610
611static int __devinit init_setup_scc(struct pci_dev *dev,
612 const struct ide_port_info *d)
613{
614 unsigned long ctl_base;
615 unsigned long dma_base;
616 unsigned long cckctrl_port;
617 unsigned long intmask_port;
618 unsigned long mode_port;
619 unsigned long ecmode_port;
620 u32 reg = 0;
621 struct scc_ports *ports;
622 int rc;
623
624 rc = pci_enable_device(dev);
625 if (rc)
626 goto end;
627
628 rc = setup_mmio_scc(dev, d->name);
629 if (rc < 0)
630 goto end;
631
632 ports = pci_get_drvdata(dev);
633 ctl_base = ports->ctl;
634 dma_base = ports->dma;
635 cckctrl_port = ctl_base + 0xff0;
636 intmask_port = dma_base + 0x010;
637 mode_port = ctl_base + 0x024;
638 ecmode_port = ctl_base + 0xf00;
639
640 /* controller initialization */
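	/*
	 * Bring-up order: enable the ATA clock output, then the local and
	 * output clocks, assert CRST and poll until it reads back, and
	 * finally set ATARESET before programming ECMODE/MODE/INTMASK.
	 */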
641 reg = 0;
642 out_be32((void*)cckctrl_port, reg);
643 reg |= CCKCTRL_ATACLKOEN;
644 out_be32((void*)cckctrl_port, reg);
645 reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
646 out_be32((void*)cckctrl_port, reg);
647 reg |= CCKCTRL_CRST;
648 out_be32((void*)cckctrl_port, reg);
649
650 for (;;) {
651 reg = in_be32((void*)cckctrl_port);
652 if (reg & CCKCTRL_CRST)
653 break;
654 udelay(5000);
655 }
656
657 reg |= CCKCTRL_ATARESET;
658 out_be32((void*)cckctrl_port, reg);
659
660 out_be32((void*)ecmode_port, ECMODE_VALUE);
661 out_be32((void*)mode_port, MODE_JCUSFEN);
662 out_be32((void*)intmask_port, INTMASK_MSK);
663
664 rc = scc_ide_setup_pci_device(dev, d);
665
666 end:
667 return rc;
668}
669
670static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
671{
672 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
673 struct ide_taskfile *tf = &task->tf;
674 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
675
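	/* HIHI masks the device register: 0xE0 drops the low nibble for
	   LBA48, 0xEF keeps the CHS/LBA28 head bits, and 0xFF (flagged
	   taskfile) passes the value through untouched */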
676 if (task->tf_flags & IDE_TFLAG_FLAGGED)
677 HIHI = 0xFF;
678
679 if (task->tf_flags & IDE_TFLAG_OUT_DATA)
680 out_be32((void *)io_ports->data_addr,
681 (tf->hob_data << 8) | tf->data);
682
683 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
684 scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
685 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
686 scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
687 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
688 scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
689 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
690 scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
691 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
692 scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);
693
694 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
695 scc_ide_outb(tf->feature, io_ports->feature_addr);
696 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
697 scc_ide_outb(tf->nsect, io_ports->nsect_addr);
698 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
699 scc_ide_outb(tf->lbal, io_ports->lbal_addr);
700 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
701 scc_ide_outb(tf->lbam, io_ports->lbam_addr);
702 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
703 scc_ide_outb(tf->lbah, io_ports->lbah_addr);
704
705 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
706 scc_ide_outb((tf->device & HIHI) | drive->select,
707 io_ports->device_addr);
708}
709
710static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
711{
712 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
713 struct ide_taskfile *tf = &task->tf;
714
715 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
716 u16 data = (u16)in_be32((void *)io_ports->data_addr);
717
718 tf->data = data & 0xff;
719 tf->hob_data = (data >> 8) & 0xff;
720 }
721
722 /* be sure we're looking at the low order bits */
723 scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
724
725 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
726 tf->feature = scc_ide_inb(io_ports->feature_addr);
727 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
728 tf->nsect = scc_ide_inb(io_ports->nsect_addr);
729 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
730 tf->lbal = scc_ide_inb(io_ports->lbal_addr);
731 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
732 tf->lbam = scc_ide_inb(io_ports->lbam_addr);
733 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
734 tf->lbah = scc_ide_inb(io_ports->lbah_addr);
735 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
736 tf->device = scc_ide_inb(io_ports->device_addr);
737
738 if (task->tf_flags & IDE_TFLAG_LBA48) {
739 scc_ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
740
741 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
742 tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
743 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
744 tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
745 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
746 tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
747 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
748 tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
749 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
750 tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
751 }
752}
753
754static void scc_input_data(ide_drive_t *drive, struct request *rq,
755 void *buf, unsigned int len)
756{
757 unsigned long data_addr = drive->hwif->io_ports.data_addr;
758
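	/* bump len so an odd byte count still transfers its trailing byte
	   as part of the final 16-bit word */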
759 len++;
760
761 if (drive->io_32bit) {
762 scc_ide_insl(data_addr, buf, len / 4);
763
764 if ((len & 3) >= 2)
765 scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
766 } else
767 scc_ide_insw(data_addr, buf, len / 2);
768}
769
770static void scc_output_data(ide_drive_t *drive, struct request *rq,
771 void *buf, unsigned int len)
772{
773 unsigned long data_addr = drive->hwif->io_ports.data_addr;
774
775 len++;
776
777 if (drive->io_32bit) {
778 scc_ide_outsl(data_addr, buf, len / 4);
779
780 if ((len & 3) >= 2)
781 scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
782 } else
783 scc_ide_outsw(data_addr, buf, len / 2);
784}
785
786/**
787 * init_mmio_iops_scc - set up the iops for MMIO
788 * @hwif: interface to set up
789 *
790 */
791
792static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
793{
794 struct pci_dev *dev = to_pci_dev(hwif->dev);
795 struct scc_ports *ports = pci_get_drvdata(dev);
796 unsigned long dma_base = ports->dma;
797
798 ide_set_hwifdata(hwif, ports);
799
800 hwif->dma_base = dma_base;
801 hwif->config_data = ports->ctl;
802}
803
804/**
805 * init_iops_scc - set up iops
806 * @hwif: interface to set up
807 *
808 * Do the basic setup for the SCC hardware interface
809 * and then do the MMIO setup.
810 */
811
812static void __devinit init_iops_scc(ide_hwif_t *hwif)
813{
814 struct pci_dev *dev = to_pci_dev(hwif->dev);
815
816 hwif->hwif_data = NULL;
817 if (pci_get_drvdata(dev) == NULL)
818 return;
819 init_mmio_iops_scc(hwif);
820}
821
822static int __devinit scc_init_dma(ide_hwif_t *hwif,
823 const struct ide_port_info *d)
824{
825 return ide_allocate_dma_engine(hwif);
826}
827
828static u8 scc_cable_detect(ide_hwif_t *hwif)
829{
830 return ATA_CBL_PATA80;
831}
832
833/**
834 * init_hwif_scc - set up hwif
835 * @hwif: interface to set up
836 *
 837 * We do the basic setup of the interface structure. The SCC
838 * requires several custom handlers so we override the default
839 * ide DMA handlers appropriately.
840 */
841
842static void __devinit init_hwif_scc(ide_hwif_t *hwif)
843{
844 /* PTERADD */
845 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
846
847 if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
848 hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
849 else
850 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
851}
852
853static const struct ide_tp_ops scc_tp_ops = {
854 .exec_command = scc_exec_command,
855 .read_status = scc_read_status,
856 .read_altstatus = scc_read_altstatus,
857 .read_sff_dma_status = scc_read_sff_dma_status,
858
859 .set_irq = scc_set_irq,
860
861 .tf_load = scc_tf_load,
862 .tf_read = scc_tf_read,
863
864 .input_data = scc_input_data,
865 .output_data = scc_output_data,
866};
867
868static const struct ide_port_ops scc_port_ops = {
869 .set_pio_mode = scc_set_pio_mode,
870 .set_dma_mode = scc_set_dma_mode,
871 .udma_filter = scc_udma_filter,
872 .cable_detect = scc_cable_detect,
873};
874
875static const struct ide_dma_ops scc_dma_ops = {
876 .dma_host_set = scc_dma_host_set,
877 .dma_setup = scc_dma_setup,
878 .dma_exec_cmd = ide_dma_exec_cmd,
879 .dma_start = scc_dma_start,
880 .dma_end = scc_dma_end,
881 .dma_test_irq = scc_dma_test_irq,
882 .dma_lost_irq = ide_dma_lost_irq,
883 .dma_timeout = ide_dma_timeout,
884};
885
886#define DECLARE_SCC_DEV(name_str) \
887 { \
888 .name = name_str, \
889 .init_iops = init_iops_scc, \
890 .init_dma = scc_init_dma, \
891 .init_hwif = init_hwif_scc, \
892 .tp_ops = &scc_tp_ops, \
893 .port_ops = &scc_port_ops, \
894 .dma_ops = &scc_dma_ops, \
895 .host_flags = IDE_HFLAG_SINGLE, \
896 .pio_mask = ATA_PIO4, \
897 }
898
899static const struct ide_port_info scc_chipsets[] __devinitdata = {
900 /* 0 */ DECLARE_SCC_DEV("sccIDE"),
901};
902
903/**
904 * scc_init_one - pci layer discovery entry
905 * @dev: PCI device
906 * @id: ident table entry
907 *
908 * Called by the PCI code when it finds an SCC PATA controller.
909 * We then use the IDE PCI generic helper to do most of the work.
910 */
911
912static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
913{
914 return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
915}
916
917/**
918 * scc_remove - pci layer remove entry
919 * @dev: PCI device
920 *
921 * Called by the PCI code when it removes an SCC PATA controller.
922 */
923
924static void __devexit scc_remove(struct pci_dev *dev)
925{
926 struct scc_ports *ports = pci_get_drvdata(dev);
927 struct ide_host *host = ports->host;
928
929 ide_host_remove(host);
930
931 iounmap((void*)ports->dma);
932 iounmap((void*)ports->ctl);
933 pci_release_selected_regions(dev, (1 << 2) - 1);
934 memset(ports, 0, sizeof(*ports));
935}
936
937static const struct pci_device_id scc_pci_tbl[] = {
938 { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
939 { 0, },
940};
941MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
942
943static struct pci_driver scc_pci_driver = {
944 .name = "SCC IDE",
945 .id_table = scc_pci_tbl,
946 .probe = scc_init_one,
947 .remove = __devexit_p(scc_remove),
948};
949
950static int scc_ide_init(void)
951{
952 return ide_pci_register_driver(&scc_pci_driver);
953}
954
955module_init(scc_ide_init);
956/* -- No exit code?
957static void scc_ide_exit(void)
958{
959 ide_pci_unregister_driver(&scc_pci_driver);
960}
961module_exit(scc_ide_exit);
962 */
963
964
965MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
966MODULE_LICENSE("GPL");