Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig          138
-rw-r--r--  drivers/scsi/Makefile          16
-rw-r--r--  drivers/scsi/ahci.c          1473
-rw-r--r--  drivers/scsi/ata_piix.c      1040
-rw-r--r--  drivers/scsi/libata-bmdma.c  1149
-rw-r--r--  drivers/scsi/libata-core.c   6020
-rw-r--r--  drivers/scsi/libata-eh.c     2245
-rw-r--r--  drivers/scsi/libata-scsi.c   3173
-rw-r--r--  drivers/scsi/libata.h         117
-rw-r--r--  drivers/scsi/pdc_adma.c       740
-rw-r--r--  drivers/scsi/sata_mv.c       2467
-rw-r--r--  drivers/scsi/sata_nv.c        595
-rw-r--r--  drivers/scsi/sata_promise.c   844
-rw-r--r--  drivers/scsi/sata_promise.h   157
-rw-r--r--  drivers/scsi/sata_qstor.c     730
-rw-r--r--  drivers/scsi/sata_sil.c       727
-rw-r--r--  drivers/scsi/sata_sil24.c    1222
-rw-r--r--  drivers/scsi/sata_sis.c       347
-rw-r--r--  drivers/scsi/sata_svw.c       508
-rw-r--r--  drivers/scsi/sata_sx4.c      1502
-rw-r--r--  drivers/scsi/sata_uli.c       300
-rw-r--r--  drivers/scsi/sata_via.c       502
-rw-r--r--  drivers/scsi/sata_vsc.c       482
23 files changed, 0 insertions, 26494 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a6f920d218a0..c4dfcc91ddda 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -494,67 +494,6 @@ config SCSI_ARCMSR
 
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 
-config SCSI_SATA
-	tristate "Serial ATA (SATA) support"
-	depends on SCSI
-	help
-	  This driver family supports Serial ATA host controllers
-	  and devices.
-
-	  If unsure, say N.
-
-config SCSI_SATA_AHCI
-	tristate "AHCI SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for AHCI Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SVW
-	tristate "ServerWorks Frodo / Apple K2 SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Broadcom/Serverworks/Apple K2
-	  SATA support.
-
-	  If unsure, say N.
-
-config SCSI_ATA_PIIX
-	tristate "Intel PIIX/ICH SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for ICH5/6/7/8 Serial ATA.
-	  If PATA support was enabled previously, this enables
-	  support for select Intel PIIX/ICH PATA host controllers.
-
-	  If unsure, say N.
-
-config SCSI_SATA_MV
-	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for the Marvell Serial ATA family.
-	  Currently supports 88SX[56]0[48][01] chips.
-
-	  If unsure, say N.
-
-config SCSI_SATA_NV
-	tristate "NVIDIA SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for NVIDIA Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_PDC_ADMA
-	tristate "Pacific Digital ADMA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Pacific Digital ADMA controllers
-
-	  If unsure, say N.
-
 config SCSI_HPTIOP
 	tristate "HighPoint RocketRAID 3xxx Controller support"
 	depends on SCSI && PCI
@@ -565,83 +504,6 @@ config SCSI_HPTIOP
 	  To compile this driver as a module, choose M here; the module
 	  will be called hptiop. If unsure, say N.
 
-config SCSI_SATA_QSTOR
-	tristate "Pacific Digital SATA QStor support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Pacific Digital Serial ATA QStor.
-
-	  If unsure, say N.
-
-config SCSI_SATA_PROMISE
-	tristate "Promise SATA TX2/TX4 support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Promise Serial ATA TX2/TX4.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SX4
-	tristate "Promise SATA SX4 support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for Promise Serial ATA SX4.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SIL
-	tristate "Silicon Image SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for Silicon Image Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SIL24
-	tristate "Silicon Image 3124/3132 SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for Silicon Image 3124/3132 Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_SIS
-	tristate "SiS 964/180 SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for SiS Serial ATA 964/180.
-
-	  If unsure, say N.
-
-config SCSI_SATA_ULI
-	tristate "ULi Electronics SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for ULi Electronics SATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_VIA
-	tristate "VIA SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for VIA Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_VITESSE
-	tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
-	depends on SCSI_SATA && PCI
-	help
-	  This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
-
-	  If unsure, say N.
-
-config SCSI_SATA_INTEL_COMBINED
-	bool
-	depends on IDE=y && !BLK_DEV_IDE_SATA && (SCSI_SATA_AHCI || SCSI_ATA_PIIX)
-	default y
-
 config SCSI_BUSLOGIC
 	tristate "BusLogic SCSI support"
 	depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 8fc2c594b537..1ef951be7a5d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -125,21 +125,6 @@ obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
 obj-$(CONFIG_SCSI_NSP32)	+= nsp32.o
 obj-$(CONFIG_SCSI_IPR)		+= ipr.o
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
-obj-$(CONFIG_SCSI_SATA_AHCI)	+= libata.o ahci.o
-obj-$(CONFIG_SCSI_SATA_SVW)	+= libata.o sata_svw.o
-obj-$(CONFIG_SCSI_ATA_PIIX)	+= libata.o ata_piix.o
-obj-$(CONFIG_SCSI_SATA_PROMISE)	+= libata.o sata_promise.o
-obj-$(CONFIG_SCSI_SATA_QSTOR)	+= libata.o sata_qstor.o
-obj-$(CONFIG_SCSI_SATA_SIL)	+= libata.o sata_sil.o
-obj-$(CONFIG_SCSI_SATA_SIL24)	+= libata.o sata_sil24.o
-obj-$(CONFIG_SCSI_SATA_VIA)	+= libata.o sata_via.o
-obj-$(CONFIG_SCSI_SATA_VITESSE)	+= libata.o sata_vsc.o
-obj-$(CONFIG_SCSI_SATA_SIS)	+= libata.o sata_sis.o
-obj-$(CONFIG_SCSI_SATA_SX4)	+= libata.o sata_sx4.o
-obj-$(CONFIG_SCSI_SATA_NV)	+= libata.o sata_nv.o
-obj-$(CONFIG_SCSI_SATA_ULI)	+= libata.o sata_uli.o
-obj-$(CONFIG_SCSI_SATA_MV)	+= libata.o sata_mv.o
-obj-$(CONFIG_SCSI_PDC_ADMA)	+= libata.o pdc_adma.o
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
 
@@ -171,7 +156,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs		:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
deleted file mode 100644
index 904c25fb4ba4..000000000000
--- a/drivers/scsi/ahci.c
+++ /dev/null
@@ -1,1473 +0,0 @@
-/*
- * ahci.c - AHCI SATA support
- *
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
- * Please ALWAYS copy linux-ide@vger.kernel.org
- * on emails.
- *
- * Copyright 2004-2005 Red Hat, Inc.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * libata documentation is available via 'make {ps|pdf}docs',
- * as Documentation/DocBook/libata.*
- *
- * AHCI hardware documentation:
- * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
- * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-#include <linux/device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_cmnd.h>
-#include <linux/libata.h>
-#include <asm/io.h>
-
-#define DRV_NAME "ahci"
-#define DRV_VERSION "2.0"
-
-
-enum {
-	AHCI_PCI_BAR = 5,
-	AHCI_MAX_SG = 168, /* hardware max is 64K */
-	AHCI_DMA_BOUNDARY = 0xffffffff,
-	AHCI_USE_CLUSTERING = 0,
-	AHCI_MAX_CMDS = 32,
-	AHCI_CMD_SZ = 32,
-	AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
-	AHCI_RX_FIS_SZ = 256,
-	AHCI_CMD_TBL_CDB = 0x40,
-	AHCI_CMD_TBL_HDR_SZ = 0x80,
-	AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
-	AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
-	AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
-				AHCI_RX_FIS_SZ,
-	AHCI_IRQ_ON_SG = (1 << 31),
-	AHCI_CMD_ATAPI = (1 << 5),
-	AHCI_CMD_WRITE = (1 << 6),
-	AHCI_CMD_PREFETCH = (1 << 7),
-	AHCI_CMD_RESET = (1 << 8),
-	AHCI_CMD_CLR_BUSY = (1 << 10),
-
-	RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
-	RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
-
-	board_ahci = 0,
-	board_ahci_vt8251 = 1,
-
-	/* global controller registers */
-	HOST_CAP = 0x00, /* host capabilities */
-	HOST_CTL = 0x04, /* global host control */
-	HOST_IRQ_STAT = 0x08, /* interrupt status */
-	HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
-	HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
-
-	/* HOST_CTL bits */
-	HOST_RESET = (1 << 0), /* reset controller; self-clear */
-	HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
-	HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
-
-	/* HOST_CAP bits */
-	HOST_CAP_CLO = (1 << 24), /* Command List Override support */
-	HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
-	HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
-
-	/* registers for each SATA port */
-	PORT_LST_ADDR = 0x00, /* command list DMA addr */
-	PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
-	PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
-	PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
-	PORT_IRQ_STAT = 0x10, /* interrupt status */
-	PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
-	PORT_CMD = 0x18, /* port command */
-	PORT_TFDATA = 0x20, /* taskfile data */
-	PORT_SIG = 0x24, /* device TF signature */
-	PORT_CMD_ISSUE = 0x38, /* command issue */
-	PORT_SCR = 0x28, /* SATA phy register block */
-	PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
-	PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
-	PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
-	PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
-
-	/* PORT_IRQ_{STAT,MASK} bits */
-	PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
-	PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
-	PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
-	PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
-	PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
-	PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
-	PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
-	PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
-
-	PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
-	PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
-	PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
-	PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
-	PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
-	PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
-	PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
-	PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
-	PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
-
-	PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
-			  PORT_IRQ_IF_ERR |
-			  PORT_IRQ_CONNECT |
-			  PORT_IRQ_PHYRDY |
-			  PORT_IRQ_UNK_FIS,
-	PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
-			 PORT_IRQ_TF_ERR |
-			 PORT_IRQ_HBUS_DATA_ERR,
-	DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
-		       PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
-		       PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
-
-	/* PORT_CMD bits */
-	PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
-	PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
-	PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
-	PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
-	PORT_CMD_CLO = (1 << 3), /* Command list override */
-	PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
-	PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
-	PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
-
-	PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
-	PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
-	PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
-
-	/* hpriv->flags bits */
-	AHCI_FLAG_MSI = (1 << 0),
-
-	/* ap->flags bits */
-	AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
-	AHCI_FLAG_NO_NCQ = (1 << 25),
-};
-
-struct ahci_cmd_hdr {
-	u32 opts;
-	u32 status;
-	u32 tbl_addr;
-	u32 tbl_addr_hi;
-	u32 reserved[4];
-};
-
-struct ahci_sg {
-	u32 addr;
-	u32 addr_hi;
-	u32 reserved;
-	u32 flags_size;
-};
-
-struct ahci_host_priv {
-	unsigned long flags;
-	u32 cap; /* cache of HOST_CAP register */
-	u32 port_map; /* cache of HOST_PORTS_IMPL reg */
-};
-
-struct ahci_port_priv {
-	struct ahci_cmd_hdr *cmd_slot;
-	dma_addr_t cmd_slot_dma;
-	void *cmd_tbl;
-	dma_addr_t cmd_tbl_dma;
-	void *rx_fis;
-	dma_addr_t rx_fis_dma;
-};
-
-static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
-static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
-static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static void ahci_irq_clear(struct ata_port *ap);
-static int ahci_port_start(struct ata_port *ap);
-static void ahci_port_stop(struct ata_port *ap);
-static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
-static u8 ahci_check_status(struct ata_port *ap);
-static void ahci_freeze(struct ata_port *ap);
-static void ahci_thaw(struct ata_port *ap);
-static void ahci_error_handler(struct ata_port *ap);
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
-static void ahci_remove_one (struct pci_dev *pdev);
-
-static struct scsi_host_template ahci_sht = {
-	.module = THIS_MODULE,
-	.name = DRV_NAME,
-	.ioctl = ata_scsi_ioctl,
-	.queuecommand = ata_scsi_queuecmd,
-	.change_queue_depth = ata_scsi_change_queue_depth,
-	.can_queue = AHCI_MAX_CMDS - 1,
-	.this_id = ATA_SHT_THIS_ID,
-	.sg_tablesize = AHCI_MAX_SG,
-	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
-	.emulated = ATA_SHT_EMULATED,
-	.use_clustering = AHCI_USE_CLUSTERING,
-	.proc_name = DRV_NAME,
-	.dma_boundary = AHCI_DMA_BOUNDARY,
-	.slave_configure = ata_scsi_slave_config,
-	.slave_destroy = ata_scsi_slave_destroy,
-	.bios_param = ata_std_bios_param,
-};
-
-static const struct ata_port_operations ahci_ops = {
-	.port_disable = ata_port_disable,
-
-	.check_status = ahci_check_status,
-	.check_altstatus = ahci_check_status,
-	.dev_select = ata_noop_dev_select,
-
-	.tf_read = ahci_tf_read,
-
-	.qc_prep = ahci_qc_prep,
-	.qc_issue = ahci_qc_issue,
-
-	.irq_handler = ahci_interrupt,
-	.irq_clear = ahci_irq_clear,
-
-	.scr_read = ahci_scr_read,
-	.scr_write = ahci_scr_write,
-
-	.freeze = ahci_freeze,
-	.thaw = ahci_thaw,
-
-	.error_handler = ahci_error_handler,
-	.post_internal_cmd = ahci_post_internal_cmd,
-
-	.port_start = ahci_port_start,
-	.port_stop = ahci_port_stop,
-};
-
-static const struct ata_port_info ahci_port_info[] = {
-	/* board_ahci */
-	{
-		.sht = &ahci_sht,
-		.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-			      ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-			      ATA_FLAG_SKIP_D2H_BSY,
-		.pio_mask = 0x1f, /* pio0-4 */
-		.udma_mask = 0x7f, /* udma0-6 ; FIXME */
-		.port_ops = &ahci_ops,
-	},
-	/* board_ahci_vt8251 */
-	{
-		.sht = &ahci_sht,
-		.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-			      ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-			      ATA_FLAG_SKIP_D2H_BSY |
-			      AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
-		.pio_mask = 0x1f, /* pio0-4 */
-		.udma_mask = 0x7f, /* udma0-6 ; FIXME */
-		.port_ops = &ahci_ops,
-	},
-};
-
-static const struct pci_device_id ahci_pci_tbl[] = {
-	/* Intel */
-	{ PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH6 */
-	{ PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH6M */
-	{ PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7 */
-	{ PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7M */
-	{ PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7R */
-	{ PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ULi M5288 */
-	{ PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ESB2 */
-	{ PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ESB2 */
-	{ PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ESB2 */
-	{ PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7-M DH */
-	{ PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8 */
-	{ PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8 */
-	{ PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8 */
-	{ PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8M */
-	{ PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8M */
-
-	/* JMicron */
-	{ 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB360 */
-	{ 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB361 */
-	{ 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB363 */
-	{ 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB365 */
-	{ 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB366 */
-
-	/* ATI */
-	{ PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ATI SB600 non-raid */
-	{ PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ATI SB600 raid */
-
-	/* VIA */
-	{ PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci_vt8251 }, /* VIA VT8251 */
-
-	/* NVIDIA */
-	{ PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* MCP65 */
-	{ PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* MCP65 */
-	{ PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* MCP65 */
-	{ PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* MCP65 */
-
-	{ } /* terminate list */
-};
-
-
-static struct pci_driver ahci_pci_driver = {
-	.name = DRV_NAME,
-	.id_table = ahci_pci_tbl,
-	.probe = ahci_init_one,
-	.remove = ahci_remove_one,
-};
-
-
-static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
-{
-	return base + 0x100 + (port * 0x80);
-}
-
-static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
-{
-	return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
-}
-
-static int ahci_port_start(struct ata_port *ap)
-{
-	struct device *dev = ap->host_set->dev;
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
-	struct ahci_port_priv *pp;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	void *mem;
-	dma_addr_t mem_dma;
-	int rc;
-
-	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
-	if (!pp)
-		return -ENOMEM;
-	memset(pp, 0, sizeof(*pp));
-
-	rc = ata_pad_alloc(ap, dev);
-	if (rc) {
-		kfree(pp);
-		return rc;
-	}
-
-	mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
-	if (!mem) {
-		ata_pad_free(ap, dev);
-		kfree(pp);
-		return -ENOMEM;
-	}
-	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
-
-	/*
-	 * First item in chunk of DMA memory: 32-slot command table,
-	 * 32 bytes each in size
-	 */
-	pp->cmd_slot = mem;
-	pp->cmd_slot_dma = mem_dma;
-
-	mem += AHCI_CMD_SLOT_SZ;
-	mem_dma += AHCI_CMD_SLOT_SZ;
-
-	/*
-	 * Second item: Received-FIS area
-	 */
-	pp->rx_fis = mem;
-	pp->rx_fis_dma = mem_dma;
-
-	mem += AHCI_RX_FIS_SZ;
-	mem_dma += AHCI_RX_FIS_SZ;
-
-	/*
-	 * Third item: data area for storing a single command
-	 * and its scatter-gather table
-	 */
-	pp->cmd_tbl = mem;
-	pp->cmd_tbl_dma = mem_dma;
-
-	ap->private_data = pp;
-
-	if (hpriv->cap & HOST_CAP_64)
-		writel((pp->cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
-	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
-	readl(port_mmio + PORT_LST_ADDR); /* flush */
-
-	if (hpriv->cap & HOST_CAP_64)
-		writel((pp->rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
-	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
-	readl(port_mmio + PORT_FIS_ADDR); /* flush */
-
-	writel(PORT_CMD_ICC_ACTIVE | PORT_CMD_FIS_RX |
-	       PORT_CMD_POWER_ON | PORT_CMD_SPIN_UP |
-	       PORT_CMD_START, port_mmio + PORT_CMD);
-	readl(port_mmio + PORT_CMD); /* flush */
-
-	return 0;
-}
-
-
-static void ahci_port_stop(struct ata_port *ap)
-{
-	struct device *dev = ap->host_set->dev;
-	struct ahci_port_priv *pp = ap->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 tmp;
-
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp &= ~(PORT_CMD_START | PORT_CMD_FIS_RX);
-	writel(tmp, port_mmio + PORT_CMD);
-	readl(port_mmio + PORT_CMD); /* flush */
-
-	/* spec says 500 msecs for each PORT_CMD_{START,FIS_RX} bit, so
-	 * this is slightly incorrect.
-	 */
-	msleep(500);
-
-	ap->private_data = NULL;
-	dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
-			  pp->cmd_slot, pp->cmd_slot_dma);
-	ata_pad_free(ap, dev);
-	kfree(pp);
-}
-
-static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
-{
-	unsigned int sc_reg;
-
-	switch (sc_reg_in) {
-	case SCR_STATUS: sc_reg = 0; break;
-	case SCR_CONTROL: sc_reg = 1; break;
-	case SCR_ERROR: sc_reg = 2; break;
-	case SCR_ACTIVE: sc_reg = 3; break;
-	default:
-		return 0xffffffffU;
-	}
-
-	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
-}
-
-
-static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
-			    u32 val)
-{
-	unsigned int sc_reg;
-
-	switch (sc_reg_in) {
-	case SCR_STATUS: sc_reg = 0; break;
-	case SCR_CONTROL: sc_reg = 1; break;
-	case SCR_ERROR: sc_reg = 2; break;
-	case SCR_ACTIVE: sc_reg = 3; break;
-	default:
-		return;
-	}
-
-	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
-}
-
-static int ahci_stop_engine(struct ata_port *ap)
-{
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	int work;
-	u32 tmp;
-
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp &= ~PORT_CMD_START;
-	writel(tmp, port_mmio + PORT_CMD);
-
-	/* wait for engine to stop. TODO: this could be
-	 * as long as 500 msec
-	 */
-	work = 1000;
-	while (work-- > 0) {
-		tmp = readl(port_mmio + PORT_CMD);
-		if ((tmp & PORT_CMD_LIST_ON) == 0)
-			return 0;
-		udelay(10);
-	}
-
-	return -EIO;
-}
-
-static void ahci_start_engine(struct ata_port *ap)
-{
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 tmp;
-
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp |= PORT_CMD_START;
-	writel(tmp, port_mmio + PORT_CMD);
-	readl(port_mmio + PORT_CMD); /* flush */
-}
-
-static unsigned int ahci_dev_classify(struct ata_port *ap)
-{
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-	struct ata_taskfile tf;
-	u32 tmp;
-
-	tmp = readl(port_mmio + PORT_SIG);
-	tf.lbah = (tmp >> 24) & 0xff;
-	tf.lbam = (tmp >> 16) & 0xff;
-	tf.lbal = (tmp >> 8) & 0xff;
-	tf.nsect = (tmp) & 0xff;
-
-	return ata_dev_classify(&tf);
-}
-
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
-			       u32 opts)
-{
-	dma_addr_t cmd_tbl_dma;
-
-	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
-
-	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
-	pp->cmd_slot[tag].status = 0;
-	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
-	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
-}
-
-static int ahci_clo(struct ata_port *ap)
-{
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
-	u32 tmp;
-
-	if (!(hpriv->cap & HOST_CAP_CLO))
-		return -EOPNOTSUPP;
-
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp |= PORT_CMD_CLO;
-	writel(tmp, port_mmio + PORT_CMD);
-
-	tmp = ata_wait_register(port_mmio + PORT_CMD,
-				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
-	if (tmp & PORT_CMD_CLO)
-		return -EIO;
-
-	return 0;
-}
-
-static int ahci_prereset(struct ata_port *ap)
-{
-	if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
-	    (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
-		/* ATA_BUSY hasn't cleared, so send a CLO */
-		ahci_clo(ap);
-	}
-
-	return ata_std_prereset(ap);
-}
-
-static int ahci_softreset(struct ata_port *ap, unsigned int *class)
-{
-	struct ahci_port_priv *pp = ap->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	const u32 cmd_fis_len = 5; /* five dwords */
-	const char *reason = NULL;
-	struct ata_taskfile tf;
-	u32 tmp;
-	u8 *fis;
-	int rc;
-
-	DPRINTK("ENTER\n");
-
-	if (ata_port_offline(ap)) {
-		DPRINTK("PHY reports no device\n");
-		*class = ATA_DEV_NONE;
-		return 0;
-	}
-
-	/* prepare for SRST (AHCI-1.1 10.4.1) */
-	rc = ahci_stop_engine(ap);
-	if (rc) {
-		reason = "failed to stop engine";
-		goto fail_restart;
-	}
-
-	/* check BUSY/DRQ, perform Command List Override if necessary */
-	ahci_tf_read(ap, &tf);
-	if (tf.command & (ATA_BUSY | ATA_DRQ)) {
-		rc = ahci_clo(ap);
-
-		if (rc == -EOPNOTSUPP) {
-			reason = "port busy but CLO unavailable";
-			goto fail_restart;
-		} else if (rc) {
-			reason = "port busy but CLO failed";
-			goto fail_restart;
-		}
-	}
-
-	/* restart engine */
-	ahci_start_engine(ap);
-
-	ata_tf_init(ap->device, &tf);
-	fis = pp->cmd_tbl;
-
-	/* issue the first D2H Register FIS */
-	ahci_fill_cmd_slot(pp, 0,
-			   cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
-
-	tf.ctl |= ATA_SRST;
-	ata_tf_to_fis(&tf, fis, 0);
-	fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
-
-	writel(1, port_mmio + PORT_CMD_ISSUE);
-
-	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
-	if (tmp & 0x1) {
-		rc = -EIO;
-		reason = "1st FIS failed";
-		goto fail;
-	}
-
-	/* spec says at least 5us, but be generous and sleep for 1ms */
-	msleep(1);
-
-	/* issue the second D2H Register FIS */
-	ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
-
-	tf.ctl &= ~ATA_SRST;
-	ata_tf_to_fis(&tf, fis, 0);
-	fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
-
-	writel(1, port_mmio + PORT_CMD_ISSUE);
-	readl(port_mmio + PORT_CMD_ISSUE); /* flush */
-
-	/* spec mandates ">= 2ms" before checking status.
-	 * We wait 150ms, because that was the magic delay used for
-	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
-	 * between when the ATA command register is written, and then
-	 * status is checked. Because waiting for "a while" before
-	 * checking status is fine, post SRST, we perform this magic
-	 * delay here as well.
-	 */
-	msleep(150);
-
-	*class = ATA_DEV_NONE;
-	if (ata_port_online(ap)) {
-		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
-			rc = -EIO;
-			reason = "device not ready";
-			goto fail;
-		}
-		*class = ahci_dev_classify(ap);
-	}
-
-	DPRINTK("EXIT, class=%u\n", *class);
-	return 0;
-
- fail_restart:
-	ahci_start_engine(ap);
- fail:
-	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
-	return rc;
-}
-
-static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
-{
-	struct ahci_port_priv *pp = ap->private_data;
-	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
-	struct ata_taskfile tf;
-	int rc;
-
-	DPRINTK("ENTER\n");
-
-	ahci_stop_engine(ap);
-
-	/* clear D2H reception area to properly wait for D2H FIS */
-	ata_tf_init(ap->device, &tf);
-	tf.command = 0xff;
-	ata_tf_to_fis(&tf, d2h_fis, 0);
-
-	rc = sata_std_hardreset(ap, class);
-
-	ahci_start_engine(ap);
-
-	if (rc == 0 && ata_port_online(ap))
-		*class = ahci_dev_classify(ap);
-	if (*class == ATA_DEV_UNKNOWN)
-		*class = ATA_DEV_NONE;
-
-	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
-	return rc;
-}
-
-static void ahci_postreset(struct ata_port *ap, unsigned int *class)
-{
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-	u32 new_tmp, tmp;
-
-	ata_std_postreset(ap, class);
-
-	/* Make sure port's ATAPI bit is set appropriately */
-	new_tmp = tmp = readl(port_mmio + PORT_CMD);
-	if (*class == ATA_DEV_ATAPI)
-		new_tmp |= PORT_CMD_ATAPI;
-	else
-		new_tmp &= ~PORT_CMD_ATAPI;
-	if (new_tmp != tmp) {
-		writel(new_tmp, port_mmio + PORT_CMD);
-		readl(port_mmio + PORT_CMD); /* flush */
-	}
-}
-
-static u8 ahci_check_status(struct ata_port *ap)
-{
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-
-	return readl(mmio + PORT_TFDATA) & 0xFF;
-}
-
-static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
-	struct ahci_port_priv *pp = ap->private_data;
-	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
-
-	ata_tf_from_fis(d2h_fis, tf);
-}
-
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
-{
-	struct scatterlist *sg;
-	struct ahci_sg *ahci_sg;
-	unsigned int n_sg = 0;
-
-	VPRINTK("ENTER\n");
-
-	/*
-	 * Next, the S/G list.
-	 */
-	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
-	ata_for_each_sg(sg, qc) {
-		dma_addr_t addr = sg_dma_address(sg);
-		u32 sg_len = sg_dma_len(sg);
-
-		ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
-		ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
-		ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
-
-		ahci_sg++;
-		n_sg++;
-	}
-
-	return n_sg;
-}
-
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct ahci_port_priv *pp = ap->private_data;
-	int is_atapi = is_atapi_taskfile(&qc->tf);
-	void *cmd_tbl;
-	u32 opts;
-	const u32 cmd_fis_len = 5; /* five dwords */
-	unsigned int n_elem;
-
-	/*
-	 * Fill in command table information. First, the header,
-	 * a SATA Register - Host to Device command FIS.
-	 */
-	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
-
-	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
-	if (is_atapi) {
-		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
-		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
-	}
-
-	n_elem = 0;
-	if (qc->flags & ATA_QCFLAG_DMAMAP)
-		n_elem = ahci_fill_sg(qc, cmd_tbl);
-
-	/*
-	 * Fill in command slot information.
-	 */
-	opts = cmd_fis_len | n_elem << 16;
-	if (qc->tf.flags & ATA_TFLAG_WRITE)
-		opts |= AHCI_CMD_WRITE;
-	if (is_atapi)
-		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
-
-	ahci_fill_cmd_slot(pp, qc->tag, opts);
-}
-
-static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
-{
-	struct ahci_port_priv *pp = ap->private_data;
-	struct ata_eh_info *ehi = &ap->eh_info;
-	unsigned int err_mask = 0, action = 0;
-	struct ata_queued_cmd *qc;
-	u32 serror;
-
-	ata_ehi_clear_desc(ehi);
-
-	/* AHCI needs SError cleared; otherwise, it might lock up */
-	serror = ahci_scr_read(ap, SCR_ERROR);
-	ahci_scr_write(ap, SCR_ERROR, serror);
-
-	/* analyze @irq_stat */
-	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
-
-	if (irq_stat & PORT_IRQ_TF_ERR)
-		err_mask |= AC_ERR_DEV;
-
-	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
-		err_mask |= AC_ERR_HOST_BUS;
-		action |= ATA_EH_SOFTRESET;
-	}
-
-	if (irq_stat & PORT_IRQ_IF_ERR) {
-		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_SOFTRESET;
-		ata_ehi_push_desc(ehi, ", interface fatal error");
-	}
-
-	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
-		ata_ehi_hotplugged(ehi);
-		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
-			"connection status changed" : "PHY RDY changed");
-	}
-
-	if (irq_stat & PORT_IRQ_UNK_FIS) {
-		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
-
-		err_mask |= AC_ERR_HSM;
-		action |= ATA_EH_SOFTRESET;
-		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
-				  unk[0], unk[1], unk[2], unk[3]);
-	}
-
-	/* okay, let's hand over to EH */
-	ehi->serror |= serror;
-	ehi->action |= action;
-
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	if (qc)
-		qc->err_mask |= err_mask;
-	else
-		ehi->err_mask |= err_mask;
-
-	if (irq_stat & PORT_IRQ_FREEZE)
-		ata_port_freeze(ap);
-	else
-		ata_port_abort(ap);
-}
-
-static void ahci_host_intr(struct ata_port *ap)
-{
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	struct ata_eh_info *ehi = &ap->eh_info;
-	u32 status, qc_active;
-	int rc;
-
-	status = readl(port_mmio + PORT_IRQ_STAT);
-	writel(status, port_mmio + PORT_IRQ_STAT);
-
-	if (unlikely(status & PORT_IRQ_ERROR)) {
-		ahci_error_intr(ap, status);
-		return;
-	}
-
-	if (ap->sactive)
-		qc_active = readl(port_mmio + PORT_SCR_ACT);
-	else
-		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
-
-	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
-	if (rc > 0)
-		return;
-	if (rc < 0) {
-		ehi->err_mask |= AC_ERR_HSM;
-		ehi->action |= ATA_EH_SOFTRESET;
-		ata_port_freeze(ap);
-		return;
-	}
-
-	/* hmmm... a spurious interrupt */
-
-	/* some devices send D2H reg with I bit set during NCQ command phase */
-	if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
-		return;
-
-	/* ignore interim PIO setup fis interrupts */
-	if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
-		return;
-
-	if (ata_ratelimit())
-		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
-			"(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
-			status, ap->active_tag, ap->sactive);
-}
-
-static void ahci_irq_clear(struct ata_port *ap)
-{
-	/* TODO */
-}
-
-static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
-{
-	struct ata_host_set *host_set = dev_instance;
-	struct ahci_host_priv *hpriv;
-	unsigned int i, handled = 0;
-	void __iomem *mmio;
-	u32 irq_stat, irq_ack = 0;
-
-	VPRINTK("ENTER\n");
-
-	hpriv = host_set->private_data;
-	mmio = host_set->mmio_base;
-
-	/* sigh. 0xffffffff is a valid return from h/w */
-	irq_stat = readl(mmio + HOST_IRQ_STAT);
-	irq_stat &= hpriv->port_map;
-	if (!irq_stat)
-		return IRQ_NONE;
-
-	spin_lock(&host_set->lock);
-
-	for (i = 0; i < host_set->n_ports; i++) {
-		struct ata_port *ap;
-
-		if (!(irq_stat & (1 << i)))
-			continue;
-
-		ap = host_set->ports[i];
-		if (ap) {
-			ahci_host_intr(ap);
-			VPRINTK("port %u\n", i);
-		} else {
-			VPRINTK("port %u (no irq)\n", i);
-			if (ata_ratelimit())
-				dev_printk(KERN_WARNING, host_set->dev,
-					"interrupt on disabled port %u\n", i);
-		}
-
-		irq_ack |= (1 << i);
-	}
-
-	if (irq_ack) {
-		writel(irq_ack, mmio + HOST_IRQ_STAT);
-		handled = 1;
-	}
-
-	spin_unlock(&host_set->lock);
-
-	VPRINTK("EXIT\n");
-
-	return IRQ_RETVAL(handled);
-}
-
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-
-	if (qc->tf.protocol == ATA_PROT_NCQ)
-		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
-	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
-	readl(port_mmio + PORT_CMD_ISSUE); /* flush */
-
-	return 0;
-}
-
-static void ahci_freeze(struct ata_port *ap)
-{
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-
-	/* turn IRQ off */
-	writel(0, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_thaw(struct ata_port *ap)
-{
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 tmp;
-
-	/* clear IRQ */
-	tmp = readl(port_mmio + PORT_IRQ_STAT);
-	writel(tmp, port_mmio + PORT_IRQ_STAT);
-	writel(1 << ap->id, mmio + HOST_IRQ_STAT);
-
-	/* turn IRQ back on */
-	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_error_handler(struct ata_port *ap)
-{
-	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
-		/* restart engine */
-		ahci_stop_engine(ap);
-		ahci_start_engine(ap);
-	}
-
-	/* perform recovery */
-	ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
-		  ahci_postreset);
-}
-
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-
-	if (qc->flags & ATA_QCFLAG_FAILED)
-		qc->err_mask |= AC_ERR_OTHER;
-
-	if (qc->err_mask) {
-		/* make DMA engine forget about the failed command */
-		ahci_stop_engine(ap);
-		ahci_start_engine(ap);
-	}
-}
-
-static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
-			    unsigned int port_idx)
-{
-	VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
-	base = ahci_port_base_ul(base, port_idx);
-	VPRINTK("base now==0x%lx\n", base);
-
-	port->cmd_addr = base;
-	port->scr_addr = base + PORT_SCR;
-
-	VPRINTK("EXIT\n");
-}
-
-static int ahci_host_init(struct ata_probe_ent *probe_ent)
-{
-	struct ahci_host_priv *hpriv = probe_ent->private_data;
-	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-	void __iomem *mmio = probe_ent->mmio_base;
-	u32 tmp, cap_save;
-	unsigned int i, j, using_dac;
-	int rc;
-	void __iomem *port_mmio;
-
-	cap_save = readl(mmio + HOST_CAP);
-	cap_save &= ( (1<<28) | (1<<17) );
-	cap_save |= (1 << 27);
-
-	/* global controller reset */
-	tmp = readl(mmio + HOST_CTL);
-	if ((tmp & HOST_RESET) == 0) {
-		writel(tmp | HOST_RESET, mmio + HOST_CTL);
-		readl(mmio + HOST_CTL); /* flush */
-	}
-
-	/* reset must complete within 1 second, or
-	 * the hardware should be considered fried.
-	 */
-	ssleep(1);
-
-	tmp = readl(mmio + HOST_CTL);
-	if (tmp & HOST_RESET) {
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "controller reset failed (0x%x)\n", tmp);
-		return -EIO;
-	}
-
-	writel(HOST_AHCI_EN, mmio + HOST_CTL);
-	(void) readl(mmio + HOST_CTL); /* flush */
-	writel(cap_save, mmio + HOST_CAP);
-	writel(0xf, mmio + HOST_PORTS_IMPL);
-	(void) readl(mmio + HOST_PORTS_IMPL); /* flush */
-
-	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
-		u16 tmp16;
-
-		pci_read_config_word(pdev, 0x92, &tmp16);
-		tmp16 |= 0xf;
-		pci_write_config_word(pdev, 0x92, tmp16);
-	}
-
-	hpriv->cap = readl(mmio + HOST_CAP);
-	hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
-	probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
-
-	VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
-		hpriv->cap, hpriv->port_map, probe_ent->n_ports);
-
-	using_dac = hpriv->cap & HOST_CAP_64;
-	if (using_dac &&
-	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
-		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
-			if (rc) {
-				dev_printk(KERN_ERR, &pdev->dev,
-					   "64-bit DMA enable failed\n");
-				return rc;
-			}
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit DMA enable failed\n");
-			return rc;
-		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit consistent DMA enable failed\n");
-			return rc;
-		}
-	}
-
-	for (i = 0; i < probe_ent->n_ports; i++) {
-#if 0 /* BIOSen initialize this incorrectly */
-		if (!(hpriv->port_map & (1 << i)))
-			continue;
-#endif
-
-		port_mmio = ahci_port_base(mmio, i);
-		VPRINTK("mmio %p port_mmio %p\n", mmio, port_mmio);
-
-		ahci_setup_port(&probe_ent->port[i],
-				(unsigned long) mmio, i);
-
-		/* make sure port is not active */
-		tmp = readl(port_mmio + PORT_CMD);
-		VPRINTK("PORT_CMD 0x%x\n", tmp);
-		if (tmp & (PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
-			   PORT_CMD_FIS_RX | PORT_CMD_START)) {
-			tmp &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
-				 PORT_CMD_FIS_RX | PORT_CMD_START);
-			writel(tmp, port_mmio + PORT_CMD);
-			readl(port_mmio + PORT_CMD); /* flush */
-
-			/* spec says 500 msecs for each bit, so
-			 * this is slightly incorrect.
-			 */
-			msleep(500);
-		}
-
-		writel(PORT_CMD_SPIN_UP, port_mmio + PORT_CMD);
-
-		j = 0;
-		while (j < 100) {
-			msleep(10);
-			tmp = readl(port_mmio + PORT_SCR_STAT);
-			if ((tmp & 0xf) == 0x3)
-				break;
-			j++;
-		}
-
-		tmp = readl(port_mmio + PORT_SCR_ERR);
-		VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
-		writel(tmp, port_mmio + PORT_SCR_ERR);
-
-		/* ack any pending irq events for this port */
-		tmp = readl(port_mmio + PORT_IRQ_STAT);
-		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
-		if (tmp)
-			writel(tmp, port_mmio + PORT_IRQ_STAT);
-
-		writel(1 << i, mmio + HOST_IRQ_STAT);
-	}
-
-	tmp = readl(mmio + HOST_CTL);
-	VPRINTK("HOST_CTL 0x%x\n", tmp);
-	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
-	tmp = readl(mmio + HOST_CTL);
-	VPRINTK("HOST_CTL 0x%x\n", tmp);
-
-	pci_set_master(pdev);
-
-	return 0;
-}
-
-static void ahci_print_info(struct ata_probe_ent *probe_ent)
-{
-	struct ahci_host_priv *hpriv = probe_ent->private_data;
-	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-	void __iomem *mmio = probe_ent->mmio_base;
-	u32 vers, cap, impl, speed;
-	const char *speed_s;
-	u16 cc;
-	const char *scc_s;
-
-	vers = readl(mmio + HOST_VERSION);
-	cap = hpriv->cap;
-	impl = hpriv->port_map;
-
-	speed = (cap >> 20) & 0xf;
-	if (speed == 1)
-		speed_s = "1.5";
-	else if (speed == 2)
-		speed_s = "3";
-	else
-		speed_s = "?";
-
-	pci_read_config_word(pdev, 0x0a, &cc);
-	if (cc == 0x0101)
-		scc_s = "IDE";
-	else if (cc == 0x0106)
-		scc_s = "SATA";
-	else if (cc == 0x0104)
-		scc_s = "RAID";
-	else
-		scc_s = "unknown";
-
-	dev_printk(KERN_INFO, &pdev->dev,
-		"AHCI %02x%02x.%02x%02x "
-		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
-		,
-
-		(vers >> 24) & 0xff,
-		(vers >> 16) & 0xff,
-		(vers >> 8) & 0xff,
-		vers & 0xff,
-
-		((cap >> 8) & 0x1f) + 1,
-		(cap & 0x1f) + 1,
-		speed_s,
-		impl,
-		scc_s);
-
-	dev_printk(KERN_INFO, &pdev->dev,
-		"flags: "
-		"%s%s%s%s%s%s"
-		"%s%s%s%s%s%s%s\n"
-		,
-
-		cap & (1 << 31) ? "64bit " : "",
-		cap & (1 << 30) ? "ncq " : "",
-		cap & (1 << 28) ? "ilck " : "",
-		cap & (1 << 27) ? "stag " : "",
-		cap & (1 << 26) ? "pm " : "",
-		cap & (1 << 25) ? "led " : "",
-
-		cap & (1 << 24) ? "clo " : "",
-		cap & (1 << 19) ? "nz " : "",
-		cap & (1 << 18) ? "only " : "",
-		cap & (1 << 17) ? "pmp " : "",
-		cap & (1 << 15) ? "pio " : "",
-		cap & (1 << 14) ? "slum " : "",
-		cap & (1 << 13) ? "part " : ""
-		);
-}
-
-static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	struct ahci_host_priv *hpriv;
-	unsigned long base;
-	void __iomem *mmio_base;
-	unsigned int board_idx = (unsigned int) ent->driver_data;
-	int have_msi, pci_dev_busy = 0;
-	int rc;
-
-	VPRINTK("ENTER\n");
-
-	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
-
-	if (!printed_version++)
-		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
-
-	/* JMicron-specific fixup: make sure we're in AHCI mode */
-	/* This is protected from races with ata_jmicron by the pci probe
-	   locking */
-	if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
-		/* AHCI enable, AHCI on function 0 */
-		pci_write_config_byte(pdev, 0x41, 0xa1);
-		/* Function 1 is the PATA controller */
-		if (PCI_FUNC(pdev->devfn))
-			return -ENODEV;
-	}
-
-	rc = pci_enable_device(pdev);
-	if (rc)
-		return rc;
-
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
-
-	if (pci_enable_msi(pdev) == 0)
-		have_msi = 1;
-	else {
-		pci_intx(pdev, 1);
-		have_msi = 0;
-	}
-
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_msi;
-	}
-
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-	base = (unsigned long) mmio_base;
-
-	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv) {
-		rc = -ENOMEM;
-		goto err_out_iounmap;
-	}
-	memset(hpriv, 0, sizeof(*hpriv));
-
-	probe_ent->sht = ahci_port_info[board_idx].sht;
-	probe_ent->host_flags = ahci_port_info[board_idx].host_flags;
-	probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
-	probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
-	probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
-
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->mmio_base = mmio_base;
-	probe_ent->private_data = hpriv;
-
-	if (have_msi)
-		hpriv->flags |= AHCI_FLAG_MSI;
-
-	/* initialize adapter */
-	rc = ahci_host_init(probe_ent);
-	if (rc)
-		goto err_out_hpriv;
-
-	if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
-	    (hpriv->cap & HOST_CAP_NCQ))
-		probe_ent->host_flags |= ATA_FLAG_NCQ;
-
-	ahci_print_info(probe_ent);
-
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_hpriv:
-	kfree(hpriv);
-err_out_iounmap:
-	pci_iounmap(pdev, mmio_base);
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_msi:
-	if (have_msi)
-		pci_disable_msi(pdev);
-	else
-		pci_intx(pdev, 0);
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
-}
-
-static void ahci_remove_one (struct pci_dev *pdev)
-{
-	struct device *dev = pci_dev_to_dev(pdev);
-	struct ata_host_set *host_set = dev_get_drvdata(dev);
-	struct ahci_host_priv *hpriv = host_set->private_data;
-	unsigned int i;
-	int have_msi;
-
-	for (i = 0; i < host_set->n_ports; i++)
-		ata_port_detach(host_set->ports[i]);
-
-	have_msi = hpriv->flags & AHCI_FLAG_MSI;
-	free_irq(host_set->irq, host_set);
-
-	for (i = 0; i < host_set->n_ports; i++) {
-		struct ata_port *ap = host_set->ports[i];
-
-		ata_scsi_release(ap->host);
-		scsi_host_put(ap->host);
-	}
-
-	kfree(hpriv);
-	pci_iounmap(pdev, host_set->mmio_base);
-	kfree(host_set);
-
-	if (have_msi)
-		pci_disable_msi(pdev);
-	else
-		pci_intx(pdev, 0);
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-	dev_set_drvdata(dev, NULL);
-}
-
-static int __init ahci_init(void)
-{
-	return pci_module_init(&ahci_pci_driver);
-}
-
-static void __exit ahci_exit(void)
-{
-	pci_unregister_driver(&ahci_pci_driver);
-}
-
-
-MODULE_AUTHOR("Jeff Garzik");
-MODULE_DESCRIPTION("AHCI SATA low-level driver");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
-MODULE_VERSION(DRV_VERSION);
-
-module_init(ahci_init);
-module_exit(ahci_exit);
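
Two idioms recur throughout the ahci.c removed above: each port's register block sits at a fixed stride from the AHCI BAR (0x100 + port * 0x80, see ahci_port_base_ul()), and 64-bit DMA addresses are written as two 32-bit halves using the (addr >> 16) >> 16 split, which stays well-defined even where dma_addr_t is only 32 bits wide and a single shift by 32 would be undefined behavior. A minimal standalone sketch of both idioms follows; it is illustrative only, written for user space, and not the kernel API:

#include <stdint.h>
#include <stdio.h>

/* Offset of a port's register block, as in the deleted ahci_port_base_ul(). */
static uintptr_t port_base(uintptr_t abar, unsigned int port)
{
	return abar + 0x100 + (uintptr_t)port * 0x80;
}

/* Lo/hi split as written to PORT_LST_ADDR / PORT_LST_ADDR_HI.  Shifting
 * by 16 twice avoids undefined behavior when the address type is 32 bits. */
static void split_dma_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(addr & 0xffffffff);
	*hi = (uint32_t)((addr >> 16) >> 16);
}

int main(void)
{
	uint32_t lo, hi;

	split_dma_addr(0x123456789abcdef0ULL, &lo, &hi);
	/* Port 2 lands at ABAR + 0x200; lo/hi are 0x9abcdef0 / 0x12345678. */
	printf("port 2 regs at ABAR+0x%lx, lo=0x%08x hi=0x%08x\n",
	       (unsigned long)port_base(0, 2), lo, hi);
	return 0;
}
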
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
deleted file mode 100644
index a9bb3cb7e89b..000000000000
--- a/drivers/scsi/ata_piix.c
+++ /dev/null
@@ -1,1040 +0,0 @@
-/*
- * ata_piix.c - Intel PATA/SATA controllers
- *
- * Maintained by: Jeff Garzik <jgarzik@pobox.com>
- * Please ALWAYS copy linux-ide@vger.kernel.org
- * on emails.
- *
- *
- * Copyright 2003-2005 Red Hat Inc
- * Copyright 2003-2005 Jeff Garzik
- *
- *
- * Copyright header from piix.c:
- *
- * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
- * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * libata documentation is available via 'make {ps|pdf}docs',
- * as Documentation/DocBook/libata.*
- *
- * Hardware documentation available at http://developer.intel.com/
- *
- * Documentation
- * Publicly available from the Intel web site. Errata documentation
- * is also publicly available. As an aid to anyone hacking on this
- * driver, the list of errata that are relevant is below, going back to
- * PIIX4. Older device documentation is now a bit tricky to find.
- *
- * The chipsets all follow very much the same design. The original Triton
- * series chipsets do _not_ support independent device timings, but this
- * is fixed in Triton II. With the odd mobile exception the chips then
- * change little except in gaining more modes until SATA arrives. This
- * driver supports only the chips with independent timing (that is those
- * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
- * for the early chip drivers.
- *
- * Errata of note:
- *
- * Unfixable
- * PIIX4 errata #9 - Only on ultra obscure hw
- * ICH3 errata #13 - Not observed to affect real hw
- * by Intel
- *
- * Things we must deal with
- * PIIX4 errata #10 - BM IDE hang with non UDMA
- * (must stop/start dma to recover)
- * 440MX errata #15 - As PIIX4 errata #10
- * PIIX4 errata #15 - Must not read control registers
- * during a PIO transfer
- * 440MX errata #13 - As PIIX4 errata #15
- * ICH2 errata #21 - DMA mode 0 doesn't work right
- * ICH0/1 errata #55 - As ICH2 errata #21
- * ICH2 spec c #9 - Extra operations needed to handle
- * drive hotswap [NOT YET SUPPORTED]
- * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
- * and must be dword aligned
- * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
- *
- * Should have been BIOS fixed:
- * 450NX: errata #19 - DMA hangs on old 450NX
- * 450NX: errata #20 - DMA hangs on old 450NX
- * 450NX: errata #25 - Corruption with DMA on old 450NX
- * ICH3 errata #15 - IDE deadlock under high load
- * (BIOS must set dev 31 fn 0 bit 23)
- * ICH3 errata #18 - Don't use native mode
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-
-#define DRV_NAME "ata_piix"
-#define DRV_VERSION "2.00"
-
-enum {
-	PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
-	ICH5_PMR = 0x90, /* port mapping register */
-	ICH5_PCS = 0x92, /* port control and status */
-	PIIX_SCC = 0x0A, /* sub-class code register */
-
-	PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */
-	PIIX_FLAG_SCR = (1 << 26), /* SCR available */
-	PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
-	PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
-
-	/* combined mode. if set, PATA is channel 0.
-	 * if clear, PATA is channel 1.
-	 */
-	PIIX_PORT_ENABLED = (1 << 0),
-	PIIX_PORT_PRESENT = (1 << 4),
-
-	PIIX_80C_PRI = (1 << 5) | (1 << 4),
-	PIIX_80C_SEC = (1 << 7) | (1 << 6),
-
-	/* controller IDs */
-	piix4_pata = 0,
-	ich5_pata = 1,
-	ich5_sata = 2,
-	esb_sata = 3,
-	ich6_sata = 4,
-	ich6_sata_ahci = 5,
-	ich6m_sata_ahci = 6,
-	ich7m_sata_ahci = 7,
-	ich8_sata_ahci = 8,
-
-	/* constants for mapping table */
-	P0 = 0, /* port 0 */
-	P1 = 1, /* port 1 */
-	P2 = 2, /* port 2 */
-	P3 = 3, /* port 3 */
-	IDE = -1, /* IDE */
-	NA = -2, /* not available */
136 RV = -3, /* reserved */
137
138 PIIX_AHCI_DEVICE = 6,
139};
140
141struct piix_map_db {
142 const u32 mask;
143 const u16 port_enable;
144 const int present_shift;
145 const int map[][4];
146};
147
148struct piix_host_priv {
149 const int *map;
150 const struct piix_map_db *map_db;
151};
152
153static int piix_init_one (struct pci_dev *pdev,
154 const struct pci_device_id *ent);
155static void piix_host_stop(struct ata_host_set *host_set);
156static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
157static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
158static void piix_pata_error_handler(struct ata_port *ap);
159static void piix_sata_error_handler(struct ata_port *ap);
160
161static unsigned int in_module_init = 1;
162
163static const struct pci_device_id piix_pci_tbl[] = {
164#ifdef ATA_ENABLE_PATA
165 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
166 { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
167 { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
168 { 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
169#endif
170
171 /* NOTE: The following PCI ids must be kept in sync with the
172 * list in drivers/pci/quirks.c.
173 */
174
175 /* 82801EB (ICH5) */
176 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
177 /* 82801EB (ICH5) */
178 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
179 /* 6300ESB (ICH5 variant with broken PCS present bits) */
180 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
181 /* 6300ESB pretending RAID */
182 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
183 /* 82801FB/FW (ICH6/ICH6W) */
184 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
185 /* 82801FR/FRW (ICH6R/ICH6RW) */
186 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
187 /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
188 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
189 /* 82801GB/GR/GH (ICH7, identical to ICH6) */
190 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
191	/* 82801GBM/GHM (ICH7M, identical to ICH6M) */
192 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7m_sata_ahci },
193 /* Enterprise Southbridge 2 (where's the datasheet?) */
194 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
195 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
196 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
197 /* SATA Controller 2 IDE (ICH8, ditto) */
198 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
199 /* Mobile SATA Controller IDE (ICH8M, ditto) */
200 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
201
202 { } /* terminate list */
203};
204
205static struct pci_driver piix_pci_driver = {
206 .name = DRV_NAME,
207 .id_table = piix_pci_tbl,
208 .probe = piix_init_one,
209 .remove = ata_pci_remove_one,
210 .suspend = ata_pci_device_suspend,
211 .resume = ata_pci_device_resume,
212};
213
214static struct scsi_host_template piix_sht = {
215 .module = THIS_MODULE,
216 .name = DRV_NAME,
217 .ioctl = ata_scsi_ioctl,
218 .queuecommand = ata_scsi_queuecmd,
219 .can_queue = ATA_DEF_QUEUE,
220 .this_id = ATA_SHT_THIS_ID,
221 .sg_tablesize = LIBATA_MAX_PRD,
222 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
223 .emulated = ATA_SHT_EMULATED,
224 .use_clustering = ATA_SHT_USE_CLUSTERING,
225 .proc_name = DRV_NAME,
226 .dma_boundary = ATA_DMA_BOUNDARY,
227 .slave_configure = ata_scsi_slave_config,
228 .slave_destroy = ata_scsi_slave_destroy,
229 .bios_param = ata_std_bios_param,
230 .resume = ata_scsi_device_resume,
231 .suspend = ata_scsi_device_suspend,
232};
233
234static const struct ata_port_operations piix_pata_ops = {
235 .port_disable = ata_port_disable,
236 .set_piomode = piix_set_piomode,
237 .set_dmamode = piix_set_dmamode,
238 .mode_filter = ata_pci_default_filter,
239
240 .tf_load = ata_tf_load,
241 .tf_read = ata_tf_read,
242 .check_status = ata_check_status,
243 .exec_command = ata_exec_command,
244 .dev_select = ata_std_dev_select,
245
246 .bmdma_setup = ata_bmdma_setup,
247 .bmdma_start = ata_bmdma_start,
248 .bmdma_stop = ata_bmdma_stop,
249 .bmdma_status = ata_bmdma_status,
250 .qc_prep = ata_qc_prep,
251 .qc_issue = ata_qc_issue_prot,
252 .data_xfer = ata_pio_data_xfer,
253
254 .freeze = ata_bmdma_freeze,
255 .thaw = ata_bmdma_thaw,
256 .error_handler = piix_pata_error_handler,
257 .post_internal_cmd = ata_bmdma_post_internal_cmd,
258
259 .irq_handler = ata_interrupt,
260 .irq_clear = ata_bmdma_irq_clear,
261
262 .port_start = ata_port_start,
263 .port_stop = ata_port_stop,
264 .host_stop = piix_host_stop,
265};
266
267static const struct ata_port_operations piix_sata_ops = {
268 .port_disable = ata_port_disable,
269
270 .tf_load = ata_tf_load,
271 .tf_read = ata_tf_read,
272 .check_status = ata_check_status,
273 .exec_command = ata_exec_command,
274 .dev_select = ata_std_dev_select,
275
276 .bmdma_setup = ata_bmdma_setup,
277 .bmdma_start = ata_bmdma_start,
278 .bmdma_stop = ata_bmdma_stop,
279 .bmdma_status = ata_bmdma_status,
280 .qc_prep = ata_qc_prep,
281 .qc_issue = ata_qc_issue_prot,
282 .data_xfer = ata_pio_data_xfer,
283
284 .freeze = ata_bmdma_freeze,
285 .thaw = ata_bmdma_thaw,
286 .error_handler = piix_sata_error_handler,
287 .post_internal_cmd = ata_bmdma_post_internal_cmd,
288
289 .irq_handler = ata_interrupt,
290 .irq_clear = ata_bmdma_irq_clear,
291
292 .port_start = ata_port_start,
293 .port_stop = ata_port_stop,
294 .host_stop = piix_host_stop,
295};
296
297static const struct piix_map_db ich5_map_db = {
298 .mask = 0x7,
299 .port_enable = 0x3,
300 .present_shift = 4,
301 .map = {
302 /* PM PS SM SS MAP */
303 { P0, NA, P1, NA }, /* 000b */
304 { P1, NA, P0, NA }, /* 001b */
305 { RV, RV, RV, RV },
306 { RV, RV, RV, RV },
307 { P0, P1, IDE, IDE }, /* 100b */
308 { P1, P0, IDE, IDE }, /* 101b */
309 { IDE, IDE, P0, P1 }, /* 110b */
310 { IDE, IDE, P1, P0 }, /* 111b */
311 },
312};
313
314static const struct piix_map_db ich6_map_db = {
315 .mask = 0x3,
316 .port_enable = 0xf,
317 .present_shift = 4,
318 .map = {
319 /* PM PS SM SS MAP */
320 { P0, P2, P1, P3 }, /* 00b */
321 { IDE, IDE, P1, P3 }, /* 01b */
322 { P0, P2, IDE, IDE }, /* 10b */
323 { RV, RV, RV, RV },
324 },
325};
326
327static const struct piix_map_db ich6m_map_db = {
328 .mask = 0x3,
329 .port_enable = 0x5,
330 .present_shift = 4,
331 .map = {
332 /* PM PS SM SS MAP */
333 { P0, P2, RV, RV }, /* 00b */
334 { RV, RV, RV, RV },
335 { P0, P2, IDE, IDE }, /* 10b */
336 { RV, RV, RV, RV },
337 },
338};
339
340static const struct piix_map_db ich7m_map_db = {
341 .mask = 0x3,
342 .port_enable = 0x5,
343 .present_shift = 4,
344
345 /* Map 01b isn't specified in the doc but some notebooks use
346 * it anyway. ATM, the only case spotted carries subsystem ID
347 * 1025:0107. This is the only difference from ich6m.
348 */
349 .map = {
350 /* PM PS SM SS MAP */
351 { P0, P2, RV, RV }, /* 00b */
352 { IDE, IDE, P1, P3 }, /* 01b */
353 { P0, P2, IDE, IDE }, /* 10b */
354 { RV, RV, RV, RV },
355 },
356};
357
358static const struct piix_map_db ich8_map_db = {
359 .mask = 0x3,
360 .port_enable = 0x3,
361 .present_shift = 8,
362 .map = {
363 /* PM PS SM SS MAP */
364 { P0, NA, P1, NA }, /* 00b (hardwired) */
365 { RV, RV, RV, RV },
366 { RV, RV, RV, RV }, /* 10b (never) */
367 { RV, RV, RV, RV },
368 },
369};
370
371static const struct piix_map_db *piix_map_db_table[] = {
372 [ich5_sata] = &ich5_map_db,
373 [esb_sata] = &ich5_map_db,
374 [ich6_sata] = &ich6_map_db,
375 [ich6_sata_ahci] = &ich6_map_db,
376 [ich6m_sata_ahci] = &ich6m_map_db,
377 [ich7m_sata_ahci] = &ich7m_map_db,
378 [ich8_sata_ahci] = &ich8_map_db,
379};
380
381static struct ata_port_info piix_port_info[] = {
382 /* piix4_pata */
383 {
384 .sht = &piix_sht,
385 .host_flags = ATA_FLAG_SLAVE_POSS,
386 .pio_mask = 0x1f, /* pio0-4 */
387#if 0
388 .mwdma_mask = 0x06, /* mwdma1-2 */
389#else
390 .mwdma_mask = 0x00, /* mwdma broken */
391#endif
392 .udma_mask = ATA_UDMA_MASK_40C,
393 .port_ops = &piix_pata_ops,
394 },
395
396 /* ich5_pata */
397 {
398 .sht = &piix_sht,
399 .host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
400 .pio_mask = 0x1f, /* pio0-4 */
401#if 0
402 .mwdma_mask = 0x06, /* mwdma1-2 */
403#else
404 .mwdma_mask = 0x00, /* mwdma broken */
405#endif
406 .udma_mask = 0x3f, /* udma0-5 */
407 .port_ops = &piix_pata_ops,
408 },
409
410 /* ich5_sata */
411 {
412 .sht = &piix_sht,
413 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
414 PIIX_FLAG_IGNORE_PCS,
415 .pio_mask = 0x1f, /* pio0-4 */
416 .mwdma_mask = 0x07, /* mwdma0-2 */
417 .udma_mask = 0x7f, /* udma0-6 */
418 .port_ops = &piix_sata_ops,
419 },
420
421 /* i6300esb_sata */
422 {
423 .sht = &piix_sht,
424 .host_flags = ATA_FLAG_SATA |
425 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
426 .pio_mask = 0x1f, /* pio0-4 */
427 .mwdma_mask = 0x07, /* mwdma0-2 */
428 .udma_mask = 0x7f, /* udma0-6 */
429 .port_ops = &piix_sata_ops,
430 },
431
432 /* ich6_sata */
433 {
434 .sht = &piix_sht,
435 .host_flags = ATA_FLAG_SATA |
436 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
437 .pio_mask = 0x1f, /* pio0-4 */
438 .mwdma_mask = 0x07, /* mwdma0-2 */
439 .udma_mask = 0x7f, /* udma0-6 */
440 .port_ops = &piix_sata_ops,
441 },
442
443 /* ich6_sata_ahci */
444 {
445 .sht = &piix_sht,
446 .host_flags = ATA_FLAG_SATA |
447 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
448 PIIX_FLAG_AHCI,
449 .pio_mask = 0x1f, /* pio0-4 */
450 .mwdma_mask = 0x07, /* mwdma0-2 */
451 .udma_mask = 0x7f, /* udma0-6 */
452 .port_ops = &piix_sata_ops,
453 },
454
455 /* ich6m_sata_ahci */
456 {
457 .sht = &piix_sht,
458 .host_flags = ATA_FLAG_SATA |
459 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
460 PIIX_FLAG_AHCI,
461 .pio_mask = 0x1f, /* pio0-4 */
462 .mwdma_mask = 0x07, /* mwdma0-2 */
463 .udma_mask = 0x7f, /* udma0-6 */
464 .port_ops = &piix_sata_ops,
465 },
466
467 /* ich7m_sata_ahci */
468 {
469 .sht = &piix_sht,
470 .host_flags = ATA_FLAG_SATA |
471 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
472 PIIX_FLAG_AHCI,
473 .pio_mask = 0x1f, /* pio0-4 */
474 .mwdma_mask = 0x07, /* mwdma0-2 */
475 .udma_mask = 0x7f, /* udma0-6 */
476 .port_ops = &piix_sata_ops,
477 },
478
479 /* ich8_sata_ahci */
480 {
481 .sht = &piix_sht,
482 .host_flags = ATA_FLAG_SATA |
483 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
484 PIIX_FLAG_AHCI,
485 .pio_mask = 0x1f, /* pio0-4 */
486 .mwdma_mask = 0x07, /* mwdma0-2 */
487 .udma_mask = 0x7f, /* udma0-6 */
488 .port_ops = &piix_sata_ops,
489 },
490};
491
492static struct pci_bits piix_enable_bits[] = {
493 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
494 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
495};
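/* Annotation: these are the per-channel IDE decode-enable bits (bit 7 of
 * the high byte of the primary/secondary IDETIM registers at 0x40/0x42).
 * piix_pata_prereset() tests them via pci_test_config_bits() to see
 * whether the BIOS left the channel enabled.
 */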
496
497MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
498MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
499MODULE_LICENSE("GPL");
500MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
501MODULE_VERSION(DRV_VERSION);
502
503static int force_pcs = 0;
504module_param(force_pcs, int, 0444);
505MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
506 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
507
508/**
509 * piix_pata_cbl_detect - Probe host controller cable detect info
510 * @ap: Port for which cable detect info is desired
511 *
512 * Read 80c cable indicator from ATA PCI device's PCI config
513 * register. This register is normally set by firmware (BIOS).
514 *
515 * LOCKING:
516 * None (inherited from caller).
517 */
518static void piix_pata_cbl_detect(struct ata_port *ap)
519{
520 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
521 u8 tmp, mask;
522
523 /* no 80c support in host controller? */
524 if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
525 goto cbl40;
526
527 /* check BIOS cable detect results */
528 mask = ap->hard_port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
529 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
530 if ((tmp & mask) == 0)
531 goto cbl40;
532
533 ap->cbl = ATA_CBL_PATA80;
534 return;
535
536cbl40:
537 ap->cbl = ATA_CBL_PATA40;
538 ap->udma_mask &= ATA_UDMA_MASK_40C;
539}
540
541/**
542 * piix_pata_prereset - prereset for PATA host controller
543 * @ap: Target port
544 *
545 * Prereset including cable detection.
546 *
547 * LOCKING:
548 * None (inherited from caller).
549 */
550static int piix_pata_prereset(struct ata_port *ap)
551{
552 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
553
554 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
555 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
556 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
557 return 0;
558 }
559
560 piix_pata_cbl_detect(ap);
561
562 return ata_std_prereset(ap);
563}
564
565static void piix_pata_error_handler(struct ata_port *ap)
566{
567 ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL,
568 ata_std_postreset);
569}
570
571/**
572 * piix_sata_present_mask - determine present mask for SATA host controller
573 * @ap: Target port
574 *
575 * Reads SATA PCI device's PCI config register Port Configuration
576 * and Status (PCS) to determine port and device availability.
577 *
578 * LOCKING:
579 * None (inherited from caller).
580 *
581 * RETURNS:
582 * determined present_mask
583 */
584static unsigned int piix_sata_present_mask(struct ata_port *ap)
585{
586 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
587 struct piix_host_priv *hpriv = ap->host_set->private_data;
588 const unsigned int *map = hpriv->map;
589 int base = 2 * ap->hard_port_no;
590 unsigned int present_mask = 0;
591 int port, i;
592 u16 pcs;
593
594 pci_read_config_word(pdev, ICH5_PCS, &pcs);
595 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
596
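	/* Each host port owns two consecutive slots (master/slave) in
	 * the 4-entry map; slots not routed to a SATA port are skipped.
	 */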
597 for (i = 0; i < 2; i++) {
598 port = map[base + i];
599 if (port < 0)
600 continue;
601 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
602 (pcs & 1 << (hpriv->map_db->present_shift + port)))
603 present_mask |= 1 << i;
604 }
605
606 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
607 ap->id, pcs, present_mask);
608
609 return present_mask;
610}
611
612/**
613 * piix_sata_softreset - reset SATA host port via ATA SRST
614 * @ap: port to reset
615 * @classes: resulting classes of attached devices
616 *
617 * Reset SATA host port via ATA SRST. On controllers with
618 * reliable PCS present bits, the bits are used to determine
619 * device presence.
620 *
621 * LOCKING:
622 * Kernel thread context (may sleep)
623 *
624 * RETURNS:
625 * 0 on success, -errno otherwise.
626 */
627static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
628{
629 unsigned int present_mask;
630 int i, rc;
631
632 present_mask = piix_sata_present_mask(ap);
633
634 rc = ata_std_softreset(ap, classes);
635 if (rc)
636 return rc;
637
638 for (i = 0; i < ATA_MAX_DEVICES; i++) {
639 if (!(present_mask & (1 << i)))
640 classes[i] = ATA_DEV_NONE;
641 }
642
643 return 0;
644}
645
646static void piix_sata_error_handler(struct ata_port *ap)
647{
648 ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
649 ata_std_postreset);
650}
651
652/**
653 * piix_set_piomode - Initialize host controller PATA PIO timings
654 * @ap: Port whose timings we are configuring
655 * @adev: Device whose timings we are configuring
656 *
657 * Set PIO mode for device, in host controller PCI config space.
658 *
659 * LOCKING:
660 * None (inherited from caller).
661 */
662
663static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
664{
665 unsigned int pio = adev->pio_mode - XFER_PIO_0;
666 struct pci_dev *dev = to_pci_dev(ap->host_set->dev);
667 unsigned int is_slave = (adev->devno != 0);
668	unsigned int master_port = ap->hard_port_no ? 0x42 : 0x40;
669 unsigned int slave_port = 0x44;
670 u16 master_data;
671 u8 slave_data;
672
673 static const /* ISP RTC */
674 u8 timings[][2] = { { 0, 0 },
675 { 0, 0 },
676 { 1, 0 },
677 { 2, 1 },
678 { 2, 3 }, };
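	/* Annotation: ISP = IORDY sample point, RTC = recovery time, as
	 * encoded register field values (not raw clock counts), indexed
	 * by PIO mode; modes 0-1 fall back to compatible timing.
	 */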
679
680 pci_read_config_word(dev, master_port, &master_data);
681 if (is_slave) {
682 master_data |= 0x4000;
683 /* enable PPE, IE and TIME */
684 master_data |= 0x0070;
685 pci_read_config_byte(dev, slave_port, &slave_data);
686 slave_data &= (ap->hard_port_no ? 0x0f : 0xf0);
687 slave_data |=
688 (timings[pio][0] << 2) |
689 (timings[pio][1] << (ap->hard_port_no ? 4 : 0));
690 } else {
691 master_data &= 0xccf8;
692 /* enable PPE, IE and TIME */
693 master_data |= 0x0007;
694 master_data |=
695 (timings[pio][0] << 12) |
696 (timings[pio][1] << 8);
697 }
698 pci_write_config_word(dev, master_port, master_data);
699 if (is_slave)
700 pci_write_config_byte(dev, slave_port, slave_data);
701}
702
703/**
704 * piix_set_dmamode - Initialize host controller PATA DMA timings
705 * @ap: Port whose timings we are configuring
706 * @adev: Device whose timings we are configuring
708 *
709 * Set UDMA mode for device, in host controller PCI config space.
710 *
711 * LOCKING:
712 * None (inherited from caller).
713 */
714
715static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
716{
717 unsigned int udma = adev->dma_mode; /* FIXME: MWDMA too */
718 struct pci_dev *dev = to_pci_dev(ap->host_set->dev);
719 u8 maslave = ap->hard_port_no ? 0x42 : 0x40;
720 u8 speed = udma;
721 unsigned int drive_dn = (ap->hard_port_no ? 2 : 0) + adev->devno;
722 int a_speed = 3 << (drive_dn * 4);
723 int u_flag = 1 << drive_dn;
724 int v_flag = 0x01 << drive_dn;
725 int w_flag = 0x10 << drive_dn;
726 int u_speed = 0;
727 int sitre;
728 u16 reg4042, reg4a;
729 u8 reg48, reg54, reg55;
730
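	/* Annotation (derived from the code below): 0x48 holds per-drive
	 * UDMA enables (u_flag), 0x4a the per-drive UDMA cycle-time field
	 * (a_speed/u_speed), and 0x54/0x55 select the faster base clocks
	 * needed above UDMA2 (v_flag) and for UDMA5 (w_flag).
	 */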
731 pci_read_config_word(dev, maslave, &reg4042);
732 DPRINTK("reg4042 = 0x%04x\n", reg4042);
733 sitre = (reg4042 & 0x4000) ? 1 : 0;
734 pci_read_config_byte(dev, 0x48, &reg48);
735 pci_read_config_word(dev, 0x4a, &reg4a);
736 pci_read_config_byte(dev, 0x54, &reg54);
737 pci_read_config_byte(dev, 0x55, &reg55);
738
739	switch (speed) {
740 case XFER_UDMA_4:
741 case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break;
742 case XFER_UDMA_6:
743 case XFER_UDMA_5:
744 case XFER_UDMA_3:
745 case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break;
746 case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break;
747 case XFER_MW_DMA_2:
748 case XFER_MW_DMA_1: break;
749 default:
750 BUG();
751 return;
752 }
753
754 if (speed >= XFER_UDMA_0) {
755 if (!(reg48 & u_flag))
756 pci_write_config_byte(dev, 0x48, reg48 | u_flag);
757 if (speed == XFER_UDMA_5) {
758 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
759 } else {
760 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
761 }
762 if ((reg4a & a_speed) != u_speed)
763 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
764 if (speed > XFER_UDMA_2) {
765 if (!(reg54 & v_flag))
766 pci_write_config_byte(dev, 0x54, reg54 | v_flag);
767 } else
768 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
769 } else {
770 if (reg48 & u_flag)
771 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
772 if (reg4a & a_speed)
773 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
774 if (reg54 & v_flag)
775 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
776 if (reg55 & w_flag)
777 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
778 }
779}
780
781#define AHCI_PCI_BAR 5
782#define AHCI_GLOBAL_CTL 0x04
783#define AHCI_ENABLE (1 << 31)
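/* Annotation: AHCI_GLOBAL_CTL is the HBA's GHC register in ABAR (BAR 5);
 * clearing bit 31 (GHC.AE) drops the controller out of AHCI mode so the
 * ports respond as plain IDE.
 */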
784static int piix_disable_ahci(struct pci_dev *pdev)
785{
786 void __iomem *mmio;
787 u32 tmp;
788 int rc = 0;
789
790 /* BUG: pci_enable_device has not yet been called. This
791 * works because this device is usually set up by BIOS.
792 */
793
794 if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
795 !pci_resource_len(pdev, AHCI_PCI_BAR))
796 return 0;
797
798 mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
799 if (!mmio)
800 return -ENOMEM;
801
802 tmp = readl(mmio + AHCI_GLOBAL_CTL);
803 if (tmp & AHCI_ENABLE) {
804 tmp &= ~AHCI_ENABLE;
805 writel(tmp, mmio + AHCI_GLOBAL_CTL);
806
807 tmp = readl(mmio + AHCI_GLOBAL_CTL);
808 if (tmp & AHCI_ENABLE)
809 rc = -EIO;
810 }
811
812 pci_iounmap(pdev, mmio);
813 return rc;
814}
815
816/**
817 * piix_check_450nx_errata - Check for problem 450NX setup
818 * @ata_dev: the PCI device to check
819 *
820 * Check for the presence of 450NX errata #19 and errata #25. If
821 * they are found, return an error code so we can turn off DMA.
822 */
823
824static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
825{
826 struct pci_dev *pdev = NULL;
827 u16 cfg;
828 u8 rev;
829 int no_piix_dma = 0;
830
831	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL)
832 {
833		/* Look for 450NX PXB. Check for problem configurations.
834		   A PCI quirk checks bit 6 already. */
835 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
836 pci_read_config_word(pdev, 0x41, &cfg);
837 /* Only on the original revision: IDE DMA can hang */
838 if (rev == 0x00)
839 no_piix_dma = 1;
840 /* On all revisions below 5 PXB bus lock must be disabled for IDE */
841 else if (cfg & (1<<14) && rev < 5)
842 no_piix_dma = 2;
843 }
844 if (no_piix_dma)
845 dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
846 if (no_piix_dma == 2)
847 dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
848 return no_piix_dma;
849}
850
851static void __devinit piix_init_pcs(struct pci_dev *pdev,
852 struct ata_port_info *pinfo,
853 const struct piix_map_db *map_db)
854{
855 u16 pcs, new_pcs;
856
857 pci_read_config_word(pdev, ICH5_PCS, &pcs);
858
859 new_pcs = pcs | map_db->port_enable;
860
861 if (new_pcs != pcs) {
862 DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
863 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
864 msleep(150);
865 }
866
867 if (force_pcs == 1) {
868 dev_printk(KERN_INFO, &pdev->dev,
869 "force ignoring PCS (0x%x)\n", new_pcs);
870 pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS;
871 pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS;
872 } else if (force_pcs == 2) {
873 dev_printk(KERN_INFO, &pdev->dev,
874 "force honoring PCS (0x%x)\n", new_pcs);
875 pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
876 pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
877 }
878}
879
880static void __devinit piix_init_sata_map(struct pci_dev *pdev,
881 struct ata_port_info *pinfo,
882 const struct piix_map_db *map_db)
883{
884 struct piix_host_priv *hpriv = pinfo[0].private_data;
885 const unsigned int *map;
886 int i, invalid_map = 0;
887 u8 map_value;
888
889 pci_read_config_byte(pdev, ICH5_PMR, &map_value);
890
891 map = map_db->map[map_value & map_db->mask];
892
893 dev_printk(KERN_INFO, &pdev->dev, "MAP [");
894 for (i = 0; i < 4; i++) {
895 switch (map[i]) {
896 case RV:
897 invalid_map = 1;
898 printk(" XX");
899 break;
900
901 case NA:
902 printk(" --");
903 break;
904
905 case IDE:
906 WARN_ON((i & 1) || map[i + 1] != IDE);
907 pinfo[i / 2] = piix_port_info[ich5_pata];
908 pinfo[i / 2].private_data = hpriv;
909 i++;
910 printk(" IDE IDE");
911 break;
912
913 default:
914 printk(" P%d", map[i]);
915 if (i & 1)
916 pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
917 break;
918 }
919 }
920 printk(" ]\n");
921
922 if (invalid_map)
923 dev_printk(KERN_ERR, &pdev->dev,
924 "invalid MAP value %u\n", map_value);
925
926 hpriv->map = map;
927 hpriv->map_db = map_db;
928}
929
930/**
931 * piix_init_one - Register PIIX ATA PCI device with kernel services
932 * @pdev: PCI device to register
933 * @ent: Entry in piix_pci_tbl matching @pdev
934 *
935 * Called from kernel PCI layer. We probe for combined mode (sigh),
936 * and then hand over control to libata, for it to do the rest.
937 *
938 * LOCKING:
939 * Inherited from PCI layer (may sleep).
940 *
941 * RETURNS:
942 * Zero on success, or -ERRNO value.
943 */
944
945static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
946{
947 static int printed_version;
948 struct ata_port_info port_info[2];
949 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
950 struct piix_host_priv *hpriv;
951 unsigned long host_flags;
952
953 if (!printed_version++)
954 dev_printk(KERN_DEBUG, &pdev->dev,
955 "version " DRV_VERSION "\n");
956
957 /* no hotplugging support (FIXME) */
958 if (!in_module_init)
959 return -ENODEV;
960
961 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
962 if (!hpriv)
963 return -ENOMEM;
964
965 port_info[0] = piix_port_info[ent->driver_data];
966 port_info[1] = piix_port_info[ent->driver_data];
967 port_info[0].private_data = hpriv;
968 port_info[1].private_data = hpriv;
969
970 host_flags = port_info[0].host_flags;
971
972 if (host_flags & PIIX_FLAG_AHCI) {
973 u8 tmp;
974 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
975 if (tmp == PIIX_AHCI_DEVICE) {
976 int rc = piix_disable_ahci(pdev);
977 if (rc)
978 return rc;
979 }
980 }
981
982 /* Initialize SATA map */
983 if (host_flags & ATA_FLAG_SATA) {
984 piix_init_sata_map(pdev, port_info,
985 piix_map_db_table[ent->driver_data]);
986 piix_init_pcs(pdev, port_info,
987 piix_map_db_table[ent->driver_data]);
988 }
989
990 /* On ICH5, some BIOSen disable the interrupt using the
991 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
992 * On ICH6, this bit has the same effect, but only when
993 * MSI is disabled (and it is disabled, as we don't use
994 * message-signalled interrupts currently).
995 */
996 if (host_flags & PIIX_FLAG_CHECKINTR)
997 pci_intx(pdev, 1);
998
999 if (piix_check_450nx_errata(pdev)) {
1000 /* This writes into the master table but it does not
1001 really matter for this errata as we will apply it to
1002 all the PIIX devices on the board */
1003 port_info[0].mwdma_mask = 0;
1004 port_info[0].udma_mask = 0;
1005 port_info[1].mwdma_mask = 0;
1006 port_info[1].udma_mask = 0;
1007 }
1008 return ata_pci_init_one(pdev, ppinfo, 2);
1009}
1010
1011static void piix_host_stop(struct ata_host_set *host_set)
1012{
1013 if (host_set->next == NULL)
1014 kfree(host_set->private_data);
1015 ata_host_stop(host_set);
1016}
1017
1018static int __init piix_init(void)
1019{
1020 int rc;
1021
1022 DPRINTK("pci_module_init\n");
1023 rc = pci_module_init(&piix_pci_driver);
1024 if (rc)
1025 return rc;
1026
1027 in_module_init = 0;
1028
1029 DPRINTK("done\n");
1030 return 0;
1031}
1032
1033static void __exit piix_exit(void)
1034{
1035 pci_unregister_driver(&piix_pci_driver);
1036}
1037
1038module_init(piix_init);
1039module_exit(piix_exit);
1040
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
deleted file mode 100644
index 9ce221f25954..000000000000
--- a/drivers/scsi/libata-bmdma.c
+++ /dev/null
@@ -1,1149 +0,0 @@
1/*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/pci.h>
37#include <linux/libata.h>
38
39#include "libata.h"
40
41/**
42 * ata_tf_load_pio - send taskfile registers to host controller
43 * @ap: Port to which output is sent
44 * @tf: ATA taskfile register set
45 *
46 * Outputs ATA taskfile to standard ATA host controller.
47 *
48 * LOCKING:
49 * Inherited from caller.
50 */
51
52static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
53{
54 struct ata_ioports *ioaddr = &ap->ioaddr;
55 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
56
57 if (tf->ctl != ap->last_ctl) {
58 outb(tf->ctl, ioaddr->ctl_addr);
59 ap->last_ctl = tf->ctl;
60 ata_wait_idle(ap);
61 }
62
63 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
64 outb(tf->hob_feature, ioaddr->feature_addr);
65 outb(tf->hob_nsect, ioaddr->nsect_addr);
66 outb(tf->hob_lbal, ioaddr->lbal_addr);
67 outb(tf->hob_lbam, ioaddr->lbam_addr);
68 outb(tf->hob_lbah, ioaddr->lbah_addr);
69 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
70 tf->hob_feature,
71 tf->hob_nsect,
72 tf->hob_lbal,
73 tf->hob_lbam,
74 tf->hob_lbah);
75 }
76
77 if (is_addr) {
78 outb(tf->feature, ioaddr->feature_addr);
79 outb(tf->nsect, ioaddr->nsect_addr);
80 outb(tf->lbal, ioaddr->lbal_addr);
81 outb(tf->lbam, ioaddr->lbam_addr);
82 outb(tf->lbah, ioaddr->lbah_addr);
83 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
84 tf->feature,
85 tf->nsect,
86 tf->lbal,
87 tf->lbam,
88 tf->lbah);
89 }
90
91 if (tf->flags & ATA_TFLAG_DEVICE) {
92 outb(tf->device, ioaddr->device_addr);
93 VPRINTK("device 0x%X\n", tf->device);
94 }
95
96 ata_wait_idle(ap);
97}
98
99/**
100 * ata_tf_load_mmio - send taskfile registers to host controller
101 * @ap: Port to which output is sent
102 * @tf: ATA taskfile register set
103 *
104 * Outputs ATA taskfile to standard ATA host controller using MMIO.
105 *
106 * LOCKING:
107 * Inherited from caller.
108 */
109
110static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
111{
112 struct ata_ioports *ioaddr = &ap->ioaddr;
113 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
114
115 if (tf->ctl != ap->last_ctl) {
116 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
117 ap->last_ctl = tf->ctl;
118 ata_wait_idle(ap);
119 }
120
121 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
122 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
123 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
124 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
125 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
126 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
127 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
128 tf->hob_feature,
129 tf->hob_nsect,
130 tf->hob_lbal,
131 tf->hob_lbam,
132 tf->hob_lbah);
133 }
134
135 if (is_addr) {
136 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
137 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
138 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
139 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
140 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
141 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
142 tf->feature,
143 tf->nsect,
144 tf->lbal,
145 tf->lbam,
146 tf->lbah);
147 }
148
149 if (tf->flags & ATA_TFLAG_DEVICE) {
150 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
151 VPRINTK("device 0x%X\n", tf->device);
152 }
153
154 ata_wait_idle(ap);
155}
156
157
158/**
159 * ata_tf_load - send taskfile registers to host controller
160 * @ap: Port to which output is sent
161 * @tf: ATA taskfile register set
162 *
163 * Outputs ATA taskfile to standard ATA host controller using MMIO
164 * or PIO as indicated by the ATA_FLAG_MMIO flag.
165 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
166 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
167 * hob_lbal, hob_lbam, and hob_lbah.
168 *
169 * This function waits for idle (!BUSY and !DRQ) after writing
170 * registers. If the control register has a new value, this
171 * function also waits for idle after writing control and before
172 * writing the remaining registers.
173 *
174 * May be used as the tf_load() entry in ata_port_operations.
175 *
176 * LOCKING:
177 * Inherited from caller.
178 */
179void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
180{
181 if (ap->flags & ATA_FLAG_MMIO)
182 ata_tf_load_mmio(ap, tf);
183 else
184 ata_tf_load_pio(ap, tf);
185}
186
187/**
188 * ata_exec_command_pio - issue ATA command to host controller
189 * @ap: port to which command is being issued
190 * @tf: ATA taskfile register set
191 *
192 * Issues PIO write to ATA command register, with proper
193 * synchronization with interrupt handler / other threads.
194 *
195 * LOCKING:
196 * spin_lock_irqsave(host_set lock)
197 */
198
199static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
200{
201 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
202
203 outb(tf->command, ap->ioaddr.command_addr);
204 ata_pause(ap);
205}
206
207
208/**
209 * ata_exec_command_mmio - issue ATA command to host controller
210 * @ap: port to which command is being issued
211 * @tf: ATA taskfile register set
212 *
213 * Issues MMIO write to ATA command register, with proper
214 * synchronization with interrupt handler / other threads.
215 *
216 * FIXME: missing write posting for 400nS delay enforcement
217 *
218 * LOCKING:
219 * spin_lock_irqsave(host_set lock)
220 */
221
222static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
223{
224 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
225
226 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
227 ata_pause(ap);
228}
229
230
231/**
232 * ata_exec_command - issue ATA command to host controller
233 * @ap: port to which command is being issued
234 * @tf: ATA taskfile register set
235 *
236 * Issues PIO/MMIO write to ATA command register, with proper
237 * synchronization with interrupt handler / other threads.
238 *
239 * LOCKING:
240 * spin_lock_irqsave(host_set lock)
241 */
242void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
243{
244 if (ap->flags & ATA_FLAG_MMIO)
245 ata_exec_command_mmio(ap, tf);
246 else
247 ata_exec_command_pio(ap, tf);
248}
249
250/**
251 * ata_tf_read_pio - input device's ATA taskfile shadow registers
252 * @ap: Port from which input is read
253 * @tf: ATA taskfile register set for storing input
254 *
255 * Reads ATA taskfile registers for currently-selected device
256 * into @tf.
257 *
258 * LOCKING:
259 * Inherited from caller.
260 */
261
262static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
263{
264 struct ata_ioports *ioaddr = &ap->ioaddr;
265
266 tf->command = ata_check_status(ap);
267 tf->feature = inb(ioaddr->error_addr);
268 tf->nsect = inb(ioaddr->nsect_addr);
269 tf->lbal = inb(ioaddr->lbal_addr);
270 tf->lbam = inb(ioaddr->lbam_addr);
271 tf->lbah = inb(ioaddr->lbah_addr);
272 tf->device = inb(ioaddr->device_addr);
273
274 if (tf->flags & ATA_TFLAG_LBA48) {
275 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
276 tf->hob_feature = inb(ioaddr->error_addr);
277 tf->hob_nsect = inb(ioaddr->nsect_addr);
278 tf->hob_lbal = inb(ioaddr->lbal_addr);
279 tf->hob_lbam = inb(ioaddr->lbam_addr);
280 tf->hob_lbah = inb(ioaddr->lbah_addr);
281 }
282}
283
284/**
285 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
286 * @ap: Port from which input is read
287 * @tf: ATA taskfile register set for storing input
288 *
289 * Reads ATA taskfile registers for currently-selected device
290 * into @tf via MMIO.
291 *
292 * LOCKING:
293 * Inherited from caller.
294 */
295
296static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
297{
298 struct ata_ioports *ioaddr = &ap->ioaddr;
299
300 tf->command = ata_check_status(ap);
301 tf->feature = readb((void __iomem *)ioaddr->error_addr);
302 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
303 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
304 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
305 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
306 tf->device = readb((void __iomem *)ioaddr->device_addr);
307
308 if (tf->flags & ATA_TFLAG_LBA48) {
309 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
310 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
311 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
312 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
313 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
314 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
315 }
316}
317
318
319/**
320 * ata_tf_read - input device's ATA taskfile shadow registers
321 * @ap: Port from which input is read
322 * @tf: ATA taskfile register set for storing input
323 *
324 * Reads ATA taskfile registers for currently-selected device
325 * into @tf.
326 *
327 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
328 * is set, also reads the hob registers.
329 *
330 * May be used as the tf_read() entry in ata_port_operations.
331 *
332 * LOCKING:
333 * Inherited from caller.
334 */
335void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
336{
337 if (ap->flags & ATA_FLAG_MMIO)
338 ata_tf_read_mmio(ap, tf);
339 else
340 ata_tf_read_pio(ap, tf);
341}
342
343/**
344 * ata_check_status_pio - Read device status reg & clear interrupt
345 * @ap: port where the device is
346 *
347 * Reads ATA taskfile status register for currently-selected device
348 * and returns its value. This also clears pending interrupts
349 * from this device.
350 *
351 * LOCKING:
352 * Inherited from caller.
353 */
354static u8 ata_check_status_pio(struct ata_port *ap)
355{
356 return inb(ap->ioaddr.status_addr);
357}
358
359/**
360 * ata_check_status_mmio - Read device status reg & clear interrupt
361 * @ap: port where the device is
362 *
363 * Reads ATA taskfile status register for currently-selected device
364 * via MMIO and returns its value. This also clears pending interrupts
365 * from this device.
366 *
367 * LOCKING:
368 * Inherited from caller.
369 */
370static u8 ata_check_status_mmio(struct ata_port *ap)
371{
372 return readb((void __iomem *) ap->ioaddr.status_addr);
373}
374
375
376/**
377 * ata_check_status - Read device status reg & clear interrupt
378 * @ap: port where the device is
379 *
380 * Reads ATA taskfile status register for currently-selected device
381 * and returns its value. This also clears pending interrupts
382 * from this device.
383 *
384 * May be used as the check_status() entry in ata_port_operations.
385 *
386 * LOCKING:
387 * Inherited from caller.
388 */
389u8 ata_check_status(struct ata_port *ap)
390{
391 if (ap->flags & ATA_FLAG_MMIO)
392 return ata_check_status_mmio(ap);
393 return ata_check_status_pio(ap);
394}
395
396
397/**
398 * ata_altstatus - Read device alternate status reg
399 * @ap: port where the device is
400 *
401 * Reads ATA taskfile alternate status register for
402 * currently-selected device and returns its value.
403 *
404 * Note: may NOT be used as the check_altstatus() entry in
405 * ata_port_operations.
406 *
407 * LOCKING:
408 * Inherited from caller.
409 */
410u8 ata_altstatus(struct ata_port *ap)
411{
412 if (ap->ops->check_altstatus)
413 return ap->ops->check_altstatus(ap);
414
415 if (ap->flags & ATA_FLAG_MMIO)
416 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
417 return inb(ap->ioaddr.altstatus_addr);
418}
419
420/**
421 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
422 * @qc: Info associated with this ATA transaction.
423 *
424 * LOCKING:
425 * spin_lock_irqsave(host_set lock)
426 */
427
428static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
429{
430 struct ata_port *ap = qc->ap;
431 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
432 u8 dmactl;
433 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
434
435 /* load PRD table addr. */
436 mb(); /* make sure PRD table writes are visible to controller */
437 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
438
439 /* specify data direction, triple-check start bit is clear */
440 dmactl = readb(mmio + ATA_DMA_CMD);
441 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
442 if (!rw)
443 dmactl |= ATA_DMA_WR;
444 writeb(dmactl, mmio + ATA_DMA_CMD);
445
446 /* issue r/w command */
447 ap->ops->exec_command(ap, &qc->tf);
448}
449
450/**
451 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
452 * @qc: Info associated with this ATA transaction.
453 *
454 * LOCKING:
455 * spin_lock_irqsave(host_set lock)
456 */
457
458static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
459{
460 struct ata_port *ap = qc->ap;
461 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
462 u8 dmactl;
463
464 /* start host DMA transaction */
465 dmactl = readb(mmio + ATA_DMA_CMD);
466 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
467
468 /* Strictly, one may wish to issue a readb() here, to
469 * flush the mmio write. However, control also passes
470 * to the hardware at this point, and it will interrupt
471 * us when we are to resume control. So, in effect,
472 * we don't care when the mmio write flushes.
473 * Further, a read of the DMA status register _immediately_
474	 * Further, a read of the DMA status register _immediately_
475	 * following the write may not be what certain flaky hardware
476	 * expects, so it is best not to add a readb() without first
477	 * testing all the MMIO ATA cards/mobos.
478 */
479}
480
481/**
482 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
483 * @qc: Info associated with this ATA transaction.
484 *
485 * LOCKING:
486 * spin_lock_irqsave(host_set lock)
487 */
488
489static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
490{
491 struct ata_port *ap = qc->ap;
492 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
493 u8 dmactl;
494
495 /* load PRD table addr. */
496 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
497
498 /* specify data direction, triple-check start bit is clear */
499 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
500 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
501 if (!rw)
502 dmactl |= ATA_DMA_WR;
503 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
504
505 /* issue r/w command */
506 ap->ops->exec_command(ap, &qc->tf);
507}
508
509/**
510 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
511 * @qc: Info associated with this ATA transaction.
512 *
513 * LOCKING:
514 * spin_lock_irqsave(host_set lock)
515 */
516
517static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
518{
519 struct ata_port *ap = qc->ap;
520 u8 dmactl;
521
522 /* start host DMA transaction */
523 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
524 outb(dmactl | ATA_DMA_START,
525 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
526}
527
528
529/**
530 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
531 * @qc: Info associated with this ATA transaction.
532 *
533 * Writes the ATA_DMA_START flag to the DMA command register.
534 *
535 * May be used as the bmdma_start() entry in ata_port_operations.
536 *
537 * LOCKING:
538 * spin_lock_irqsave(host_set lock)
539 */
540void ata_bmdma_start(struct ata_queued_cmd *qc)
541{
542 if (qc->ap->flags & ATA_FLAG_MMIO)
543 ata_bmdma_start_mmio(qc);
544 else
545 ata_bmdma_start_pio(qc);
546}
547
548
549/**
550 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
551 * @qc: Info associated with this ATA transaction.
552 *
553 * Writes address of PRD table to device's PRD Table Address
554 * register, sets the DMA control register, and calls
555 * ops->exec_command() to start the transfer.
556 *
557 * May be used as the bmdma_setup() entry in ata_port_operations.
558 *
559 * LOCKING:
560 * spin_lock_irqsave(host_set lock)
561 */
562void ata_bmdma_setup(struct ata_queued_cmd *qc)
563{
564 if (qc->ap->flags & ATA_FLAG_MMIO)
565 ata_bmdma_setup_mmio(qc);
566 else
567 ata_bmdma_setup_pio(qc);
568}
569
570
571/**
572 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
573 * @ap: Port associated with this ATA transaction.
574 *
575 * Clear interrupt and error flags in DMA status register.
576 *
577 * May be used as the irq_clear() entry in ata_port_operations.
578 *
579 * LOCKING:
580 * spin_lock_irqsave(host_set lock)
581 */
582
583void ata_bmdma_irq_clear(struct ata_port *ap)
584{
585 if (!ap->ioaddr.bmdma_addr)
586 return;
587
588 if (ap->flags & ATA_FLAG_MMIO) {
589 void __iomem *mmio =
590 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
591 writeb(readb(mmio), mmio);
592 } else {
593 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
594 outb(inb(addr), addr);
595 }
596}
597
598
599/**
600 * ata_bmdma_status - Read PCI IDE BMDMA status
601 * @ap: Port associated with this ATA transaction.
602 *
603 * Read and return BMDMA status register.
604 *
605 * May be used as the bmdma_status() entry in ata_port_operations.
606 *
607 * LOCKING:
608 * spin_lock_irqsave(host_set lock)
609 */
610
611u8 ata_bmdma_status(struct ata_port *ap)
612{
613 u8 host_stat;
614 if (ap->flags & ATA_FLAG_MMIO) {
615 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
616 host_stat = readb(mmio + ATA_DMA_STATUS);
617 } else
618 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
619 return host_stat;
620}
621
622
623/**
624 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
625 * @qc: Command we are ending DMA for
626 *
627 * Clears the ATA_DMA_START flag in the DMA command register.
628 *
629 * May be used as the bmdma_stop() entry in ata_port_operations.
630 *
631 * LOCKING:
632 * spin_lock_irqsave(host_set lock)
633 */
634
635void ata_bmdma_stop(struct ata_queued_cmd *qc)
636{
637 struct ata_port *ap = qc->ap;
638 if (ap->flags & ATA_FLAG_MMIO) {
639 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
640
641 /* clear start/stop bit */
642 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
643 mmio + ATA_DMA_CMD);
644 } else {
645 /* clear start/stop bit */
646 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
647 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
648 }
649
650 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
651 ata_altstatus(ap); /* dummy read */
652}
653
654/**
655 * ata_bmdma_freeze - Freeze BMDMA controller port
656 * @ap: port to freeze
657 *
658 * Freeze BMDMA controller port.
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663void ata_bmdma_freeze(struct ata_port *ap)
664{
665 struct ata_ioports *ioaddr = &ap->ioaddr;
666
667 ap->ctl |= ATA_NIEN;
668 ap->last_ctl = ap->ctl;
669
670 if (ap->flags & ATA_FLAG_MMIO)
671 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
672 else
673 outb(ap->ctl, ioaddr->ctl_addr);
674}
675
676/**
677 * ata_bmdma_thaw - Thaw BMDMA controller port
678 * @ap: port to thaw
679 *
680 * Thaw BMDMA controller port.
681 *
682 * LOCKING:
683 * Inherited from caller.
684 */
685void ata_bmdma_thaw(struct ata_port *ap)
686{
687 /* clear & re-enable interrupts */
688 ata_chk_status(ap);
689 ap->ops->irq_clear(ap);
690 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
691 ata_irq_on(ap);
692}
693
694/**
695 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
696 * @ap: port to handle error for
697 * @prereset: prereset method (can be NULL)
698 * @softreset: softreset method (can be NULL)
699 * @hardreset: hardreset method (can be NULL)
700 * @postreset: postreset method (can be NULL)
701 *
702 * Handle error for ATA BMDMA controller. It can handle both
703 * PATA and SATA controllers. Many controllers should be able to
704 * use this EH as-is or with some added handling before and
705 * after.
706 *
707 * This function is intended to be used for constructing
708 * ->error_handler callback by low level drivers.
709 *
710 * LOCKING:
711 * Kernel thread context (may sleep)
712 */
713void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
714 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
715 ata_postreset_fn_t postreset)
716{
717 struct ata_eh_context *ehc = &ap->eh_context;
718 struct ata_queued_cmd *qc;
719 unsigned long flags;
720 int thaw = 0;
721
722 qc = __ata_qc_from_tag(ap, ap->active_tag);
723 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
724 qc = NULL;
725
726 /* reset PIO HSM and stop DMA engine */
727 spin_lock_irqsave(ap->lock, flags);
728
729 ap->hsm_task_state = HSM_ST_IDLE;
730
731 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
732 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
733 u8 host_stat;
734
735 host_stat = ata_bmdma_status(ap);
736
737 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
738
739 /* BMDMA controllers indicate host bus error by
740 * setting DMA_ERR bit and timing out. As it wasn't
741 * really a timeout event, adjust error mask and
742 * cancel frozen state.
743 */
744 if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
745 qc->err_mask = AC_ERR_HOST_BUS;
746 thaw = 1;
747 }
748
749 ap->ops->bmdma_stop(qc);
750 }
751
752 ata_altstatus(ap);
753 ata_chk_status(ap);
754 ap->ops->irq_clear(ap);
755
756 spin_unlock_irqrestore(ap->lock, flags);
757
758 if (thaw)
759 ata_eh_thaw_port(ap);
760
761 /* PIO and DMA engines have been stopped, perform recovery */
762 ata_do_eh(ap, prereset, softreset, hardreset, postreset);
763}
764
765/**
766 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
767 * @ap: port to handle error for
768 *
769 * Stock error handler for BMDMA controller.
770 *
771 * LOCKING:
772 * Kernel thread context (may sleep)
773 */
774void ata_bmdma_error_handler(struct ata_port *ap)
775{
776 ata_reset_fn_t hardreset;
777
778 hardreset = NULL;
779 if (sata_scr_valid(ap))
780 hardreset = sata_std_hardreset;
781
782 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
783 ata_std_postreset);
784}
785
786/**
787 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
788 * BMDMA controller
789 * @qc: internal command to clean up
790 *
791 * LOCKING:
792 * Kernel thread context (may sleep)
793 */
794void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
795{
796 ata_bmdma_stop(qc);
797}
798
799#ifdef CONFIG_PCI
800static struct ata_probe_ent *
801ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
802{
803 struct ata_probe_ent *probe_ent;
804
805 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
806 if (!probe_ent) {
807 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
808 kobject_name(&(dev->kobj)));
809 return NULL;
810 }
811
812 INIT_LIST_HEAD(&probe_ent->node);
813 probe_ent->dev = dev;
814
815 probe_ent->sht = port->sht;
816 probe_ent->host_flags = port->host_flags;
817 probe_ent->pio_mask = port->pio_mask;
818 probe_ent->mwdma_mask = port->mwdma_mask;
819 probe_ent->udma_mask = port->udma_mask;
820 probe_ent->port_ops = port->port_ops;
821
822 return probe_ent;
823}
824
825
826/**
827 * ata_pci_init_native_mode - Initialize native-mode driver
828 * @pdev: pci device to be initialized
829 * @port: array[2] of pointers to port info structures.
830 * @ports: bitmap of ports present
831 *
832 * Utility function which allocates and initializes an
833 * ata_probe_ent structure for a standard dual-port
834 * PIO-based IDE controller. The returned ata_probe_ent
835 * structure can be passed to ata_device_add() and should
836 * then be freed with kfree().
837 *
838 * The caller need only pass the address of the primary port; the
839 * secondary will be deduced automatically. If the device has
840 * non-standard secondary port mappings, this function can be
841 * called twice, once for each interface.
842 */
843
844struct ata_probe_ent *
845ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
846{
847 struct ata_probe_ent *probe_ent =
848 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
849 int p = 0;
850 unsigned long bmdma;
851
852 if (!probe_ent)
853 return NULL;
854
855 probe_ent->irq = pdev->irq;
856 probe_ent->irq_flags = IRQF_SHARED;
857 probe_ent->private_data = port[0]->private_data;
858
859 if (ports & ATA_PORT_PRIMARY) {
860 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
861 probe_ent->port[p].altstatus_addr =
862 probe_ent->port[p].ctl_addr =
863 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
864 bmdma = pci_resource_start(pdev, 4);
865 if (bmdma) {
866 if (inb(bmdma + 2) & 0x80)
867 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
868 probe_ent->port[p].bmdma_addr = bmdma;
869 }
870 ata_std_ports(&probe_ent->port[p]);
871 p++;
872 }
873
874 if (ports & ATA_PORT_SECONDARY) {
875 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
876 probe_ent->port[p].altstatus_addr =
877 probe_ent->port[p].ctl_addr =
878 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
879 bmdma = pci_resource_start(pdev, 4);
880 if (bmdma) {
881 bmdma += 8;
882			if (inb(bmdma + 2) & 0x80)
883 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
884 probe_ent->port[p].bmdma_addr = bmdma;
885 }
886 ata_std_ports(&probe_ent->port[p]);
887 p++;
888 }
889
890 probe_ent->n_ports = p;
891 return probe_ent;
892}
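/* Usage sketch (illustrative, not from the original source): a typical
 * native-mode low-level driver probe would do roughly
 *
 *	struct ata_port_info *ppi[2] = { &my_port_info, &my_port_info };
 *	struct ata_probe_ent *probe_ent =
 *		ata_pci_init_native_mode(pdev, ppi,
 *				ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 *	if (!probe_ent)
 *		return -ENOMEM;
 *	ata_device_add(probe_ent);
 *	kfree(probe_ent);
 *
 * where my_port_info is a driver-defined ata_port_info (hypothetical name).
 */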
893
894
895static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
896 struct ata_port_info *port, int port_num)
897{
898 struct ata_probe_ent *probe_ent;
899 unsigned long bmdma;
900
901 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
902 if (!probe_ent)
903 return NULL;
904
905 probe_ent->legacy_mode = 1;
906 probe_ent->n_ports = 1;
907 probe_ent->hard_port_no = port_num;
908 probe_ent->private_data = port->private_data;
909
910	switch (port_num)
911 {
912 case 0:
913 probe_ent->irq = 14;
914 probe_ent->port[0].cmd_addr = 0x1f0;
915 probe_ent->port[0].altstatus_addr =
916 probe_ent->port[0].ctl_addr = 0x3f6;
917 break;
918 case 1:
919 probe_ent->irq = 15;
920 probe_ent->port[0].cmd_addr = 0x170;
921 probe_ent->port[0].altstatus_addr =
922 probe_ent->port[0].ctl_addr = 0x376;
923 break;
924 }
925
926 bmdma = pci_resource_start(pdev, 4);
927 if (bmdma != 0) {
928 bmdma += 8 * port_num;
929 probe_ent->port[0].bmdma_addr = bmdma;
930 if (inb(bmdma + 2) & 0x80)
931 probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
932 }
933 ata_std_ports(&probe_ent->port[0]);
934
935 return probe_ent;
936}
937
938
939/**
940 * ata_pci_init_one - Initialize/register PCI IDE host controller
941 * @pdev: Controller to be initialized
942 * @port_info: Information from low-level host driver
943 * @n_ports: Number of ports attached to host controller
944 *
945 * This is a helper function which can be called from a driver's
946 * xxx_init_one() probe function if the hardware uses traditional
947 * IDE taskfile registers.
948 *
949 * This function calls pci_enable_device(), reserves its register
950 * regions, sets the dma mask, enables bus master mode, and calls
951 * ata_device_add()
952 *
953 * LOCKING:
954 * Inherited from PCI layer (may sleep).
955 *
956 * RETURNS:
957 * Zero on success, negative errno-based value on error.
958 */
959
960int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
961 unsigned int n_ports)
962{
963 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
964 struct ata_port_info *port[2];
965 u8 tmp8, mask;
966 unsigned int legacy_mode = 0;
967 int disable_dev_on_err = 1;
968 int rc;
969
970 DPRINTK("ENTER\n");
971
972 port[0] = port_info[0];
973 if (n_ports > 1)
974 port[1] = port_info[1];
975 else
976 port[1] = port[0];
977
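	/* In the programming-interface byte, bit 0 (primary) and bit 2
	 * (secondary) read 1 when a channel is in native PCI mode; if
	 * either bit is clear, at least one channel needs legacy setup.
	 */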
978 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
979 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
980 /* TODO: What if one channel is in native mode ... */
981 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
982 mask = (1 << 2) | (1 << 0);
983 if ((tmp8 & mask) != mask)
984 legacy_mode = (1 << 3);
985 }
986
987 /* FIXME... */
988 if ((!legacy_mode) && (n_ports > 2)) {
989 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
990 n_ports = 2;
991 /* For now */
992 }
993
994 /* FIXME: Really for ATA it isn't safe because the device may be
995 multi-purpose and we want to leave it alone if it was already
996	   enabled. Secondly, for shared use, as Arjan says, we want refcounting.
997
998	   Checking dev->is_enabled is insufficient, as it is not set at
999	   boot for the primary video device, which is BIOS-enabled.
1000 */
1001
1002 rc = pci_enable_device(pdev);
1003 if (rc)
1004 return rc;
1005
1006 rc = pci_request_regions(pdev, DRV_NAME);
1007 if (rc) {
1008 disable_dev_on_err = 0;
1009 goto err_out;
1010 }
1011
1012 /* FIXME: Should use platform specific mappers for legacy port ranges */
1013 if (legacy_mode) {
1014 if (!request_region(0x1f0, 8, "libata")) {
1015 struct resource *conflict, res;
1016 res.start = 0x1f0;
1017 res.end = 0x1f0 + 8 - 1;
1018 conflict = ____request_resource(&ioport_resource, &res);
1019 if (!strcmp(conflict->name, "libata"))
1020 legacy_mode |= (1 << 0);
1021 else {
1022 disable_dev_on_err = 0;
1023 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
1024 }
1025 } else
1026 legacy_mode |= (1 << 0);
1027
1028 if (!request_region(0x170, 8, "libata")) {
1029 struct resource *conflict, res;
1030 res.start = 0x170;
1031 res.end = 0x170 + 8 - 1;
1032 conflict = ____request_resource(&ioport_resource, &res);
1033 if (!strcmp(conflict->name, "libata"))
1034 legacy_mode |= (1 << 1);
1035 else {
1036 disable_dev_on_err = 0;
1037 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
1038 }
1039 } else
1040 legacy_mode |= (1 << 1);
1041 }
1042
1043 /* we have legacy mode, but all ports are unavailable */
1044 if (legacy_mode == (1 << 3)) {
1045 rc = -EBUSY;
1046 goto err_out_regions;
1047 }
1048
1049 /* FIXME: If we get no DMA mask we should fall back to PIO */
1050 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1051 if (rc)
1052 goto err_out_regions;
1053 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1054 if (rc)
1055 goto err_out_regions;
1056
1057 if (legacy_mode) {
1058 if (legacy_mode & (1 << 0))
1059 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
1060 if (legacy_mode & (1 << 1))
1061 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
1062 } else {
1063 if (n_ports == 2)
1064 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1065 else
1066 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
1067 }
1068 if (!probe_ent && !probe_ent2) {
1069 rc = -ENOMEM;
1070 goto err_out_regions;
1071 }
1072
1073 pci_set_master(pdev);
1074
1075 /* FIXME: check ata_device_add return */
1076 if (legacy_mode) {
1077 struct device *dev = &pdev->dev;
1078 struct ata_host_set *host_set = NULL;
1079
1080 if (legacy_mode & (1 << 0)) {
1081 ata_device_add(probe_ent);
1082 host_set = dev_get_drvdata(dev);
1083 }
1084
1085 if (legacy_mode & (1 << 1)) {
1086 ata_device_add(probe_ent2);
1087 if (host_set) {
1088 host_set->next = dev_get_drvdata(dev);
1089 dev_set_drvdata(dev, host_set);
1090 }
1091 }
1092 } else
1093 ata_device_add(probe_ent);
1094
1095 kfree(probe_ent);
1096 kfree(probe_ent2);
1097
1098 return 0;
1099
1100err_out_regions:
1101 if (legacy_mode & (1 << 0))
1102 release_region(0x1f0, 8);
1103 if (legacy_mode & (1 << 1))
1104 release_region(0x170, 8);
1105 pci_release_regions(pdev);
1106err_out:
1107 if (disable_dev_on_err)
1108 pci_disable_device(pdev);
1109 return rc;
1110}
1111
1112/**
1113 * ata_pci_clear_simplex - attempt to kick device out of simplex
1114 * @pdev: PCI device
1115 *
1116 * Some PCI ATA devices report simplex mode but in fact can be told to
1117 * enter non-simplex mode. This implements the necessary logic to
1118 * perform the task on such devices. Calling it on other devices will
1119 * have -undefined- behaviour.
1120 */
1121
1122int ata_pci_clear_simplex(struct pci_dev *pdev)
1123{
1124 unsigned long bmdma = pci_resource_start(pdev, 4);
1125 u8 simplex;
1126
1127 if (bmdma == 0)
1128 return -ENOENT;
1129
1130 simplex = inb(bmdma + 0x02);
1131 outb(simplex & 0x60, bmdma + 0x02);
1132 simplex = inb(bmdma + 0x02);
1133 if (simplex & 0x80)
1134 return -EOPNOTSUPP;
1135 return 0;
1136}
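/*
 * Illustrative sketch (stand-alone user-space C, not part of the original
 * file): the simplex-bit handling above in pure bit arithmetic -- write
 * back only bits 6:5 of the BMDMA status byte, then test whether the
 * simplex bit (bit 7) stuck.
 */
#include <stdio.h>

int main(void)
{
	unsigned char status = 0xe0;	/* simplex bit initially set */

	status &= 0x60;			/* keep bits 6:5, clearing bit 7 */
	printf("still simplex: %d\n", (status & 0x80) != 0);	/* 0 */
	return 0;
}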
1137
1138unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
1139{
1140 /* Filter out DMA modes if the device has been configured by
1141 the BIOS as PIO only */
1142
1143 if (ap->ioaddr.bmdma_addr == 0)
1144 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
1145 return xfer_mask;
1146}
1147
1148#endif /* CONFIG_PCI */
1149
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
deleted file mode 100644
index 427b73a3886a..000000000000
--- a/drivers/scsi/libata-core.c
+++ /dev/null
@@ -1,6020 +0,0 @@
1/*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
50#include <linux/jiffies.h>
51#include <linux/scatterlist.h>
52#include <scsi/scsi.h>
53#include "scsi_priv.h"
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_host.h>
56#include <linux/libata.h>
57#include <asm/io.h>
58#include <asm/semaphore.h>
59#include <asm/byteorder.h>
60
61#include "libata.h"
62
63/* debounce timing parameters in msecs { interval, duration, timeout } */
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
67
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
71static void ata_dev_xfermask(struct ata_device *dev);
72
73static unsigned int ata_unique_id = 1;
74static struct workqueue_struct *ata_wq;
75
76struct workqueue_struct *ata_aux_wq;
77
78int atapi_enabled = 1;
79module_param(atapi_enabled, int, 0444);
80MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
81
82int atapi_dmadir = 0;
83module_param(atapi_dmadir, int, 0444);
84MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
85
86int libata_fua = 0;
87module_param_named(fua, libata_fua, int, 0444);
88MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
89
90static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
91module_param(ata_probe_timeout, int, 0444);
92MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
93
94MODULE_AUTHOR("Jeff Garzik");
95MODULE_DESCRIPTION("Library module for ATA devices");
96MODULE_LICENSE("GPL");
97MODULE_VERSION(DRV_VERSION);
98
99
100/**
101 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
102 * @tf: Taskfile to convert
103 * @fis: Buffer into which data will be output
104 * @pmp: Port multiplier port
105 *
106 * Converts a standard ATA taskfile to a Serial ATA
107 * FIS structure (Register - Host to Device).
108 *
109 * LOCKING:
110 * Inherited from caller.
111 */
112
113void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
114{
115 fis[0] = 0x27; /* Register - Host to Device FIS */
116 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
117 bit 7 indicates Command FIS */
118 fis[2] = tf->command;
119 fis[3] = tf->feature;
120
121 fis[4] = tf->lbal;
122 fis[5] = tf->lbam;
123 fis[6] = tf->lbah;
124 fis[7] = tf->device;
125
126 fis[8] = tf->hob_lbal;
127 fis[9] = tf->hob_lbam;
128 fis[10] = tf->hob_lbah;
129 fis[11] = tf->hob_feature;
130
131 fis[12] = tf->nsect;
132 fis[13] = tf->hob_nsect;
133 fis[14] = 0;
134 fis[15] = tf->ctl;
135
136 fis[16] = 0;
137 fis[17] = 0;
138 fis[18] = 0;
139 fis[19] = 0;
140}
141
142/**
143 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
144 * @fis: Buffer from which data will be input
145 * @tf: Taskfile to output
146 *
147 * Converts a serial ATA FIS structure to a standard ATA taskfile.
148 *
149 * LOCKING:
150 * Inherited from caller.
151 */
152
153void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
154{
155 tf->command = fis[2]; /* status */
156 tf->feature = fis[3]; /* error */
157
158 tf->lbal = fis[4];
159 tf->lbam = fis[5];
160 tf->lbah = fis[6];
161 tf->device = fis[7];
162
163 tf->hob_lbal = fis[8];
164 tf->hob_lbam = fis[9];
165 tf->hob_lbah = fis[10];
166
167 tf->nsect = fis[12];
168 tf->hob_nsect = fis[13];
169}
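/*
 * Illustrative sketch (stand-alone, not from the original file): how
 * byte 1 of the Register - Host to Device FIS packs the port multiplier
 * number (bits 3:0) and the Command bit (bit 7), exactly as
 * ata_tf_to_fis() above computes it.
 */
#include <stdio.h>

int main(void)
{
	unsigned char pmp = 5;				/* PMP port number */
	unsigned char fis1 = (pmp & 0xf) | (1 << 7);	/* Command FIS */

	printf("FIS byte 1 = 0x%02x\n", fis1);		/* prints 0x85 */
	return 0;
}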
170
171static const u8 ata_rw_cmds[] = {
172 /* pio multi */
173 ATA_CMD_READ_MULTI,
174 ATA_CMD_WRITE_MULTI,
175 ATA_CMD_READ_MULTI_EXT,
176 ATA_CMD_WRITE_MULTI_EXT,
177 0,
178 0,
179 0,
180 ATA_CMD_WRITE_MULTI_FUA_EXT,
181 /* pio */
182 ATA_CMD_PIO_READ,
183 ATA_CMD_PIO_WRITE,
184 ATA_CMD_PIO_READ_EXT,
185 ATA_CMD_PIO_WRITE_EXT,
186 0,
187 0,
188 0,
189 0,
190 /* dma */
191 ATA_CMD_READ,
192 ATA_CMD_WRITE,
193 ATA_CMD_READ_EXT,
194 ATA_CMD_WRITE_EXT,
195 0,
196 0,
197 0,
198 ATA_CMD_WRITE_FUA_EXT
199};
200
201/**
202 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
203 * @qc: command to examine and configure
204 *
205 * Examine the device configuration and tf->flags to calculate
206 * the proper read/write commands and protocol to use.
207 *
208 * LOCKING:
209 * caller.
210 */
211int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
212{
213 struct ata_taskfile *tf = &qc->tf;
214 struct ata_device *dev = qc->dev;
215 u8 cmd;
216
217 int index, fua, lba48, write;
218
219 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
220 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
221 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
222
223 if (dev->flags & ATA_DFLAG_PIO) {
224 tf->protocol = ATA_PROT_PIO;
225 index = dev->multi_count ? 0 : 8;
226 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
227 /* Unable to use DMA due to host limitation */
228 tf->protocol = ATA_PROT_PIO;
229 index = dev->multi_count ? 0 : 8;
230 } else {
231 tf->protocol = ATA_PROT_DMA;
232 index = 16;
233 }
234
235 cmd = ata_rw_cmds[index + fua + lba48 + write];
236 if (cmd) {
237 tf->command = cmd;
238 return 0;
239 }
240 return -1;
241}
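/*
 * Illustrative sketch (stand-alone, not from the original file): the
 * fua/lba48/write offsets computed above select one slot inside each
 * 8-entry block of ata_rw_cmds[]. A PIO (base index 8) LBA48 (+2)
 * write (+1) lands in slot 11, i.e. ATA_CMD_PIO_WRITE_EXT.
 */
#include <stdio.h>

int main(void)
{
	int index = 8, fua = 0, lba48 = 2, write = 1;

	printf("ata_rw_cmds slot = %d\n", index + fua + lba48 + write);
	return 0;
}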
242
243/**
244 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
245 * @pio_mask: pio_mask
246 * @mwdma_mask: mwdma_mask
247 * @udma_mask: udma_mask
248 *
249 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
250 * unsigned int xfer_mask.
251 *
252 * LOCKING:
253 * None.
254 *
255 * RETURNS:
256 * Packed xfer_mask.
257 */
258static unsigned int ata_pack_xfermask(unsigned int pio_mask,
259 unsigned int mwdma_mask,
260 unsigned int udma_mask)
261{
262 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
263 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
264 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
265}
266
267/**
268 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
269 * @xfer_mask: xfer_mask to unpack
270 * @pio_mask: resulting pio_mask
271 * @mwdma_mask: resulting mwdma_mask
272 * @udma_mask: resulting udma_mask
273 *
274 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
275 * Any NULL destination masks will be ignored.
276 */
277static void ata_unpack_xfermask(unsigned int xfer_mask,
278 unsigned int *pio_mask,
279 unsigned int *mwdma_mask,
280 unsigned int *udma_mask)
281{
282 if (pio_mask)
283 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
284 if (mwdma_mask)
285 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
286 if (udma_mask)
287 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
288}
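/*
 * Illustrative sketch (stand-alone; the shift values below are
 * assumptions for demonstration -- the real ones come from
 * <linux/ata.h>): packing three per-class masks into one xfer_mask and
 * unpacking one of them again, mirroring the two helpers above.
 */
#include <stdio.h>

#define SHIFT_PIO	0	/* assumed bit layout */
#define SHIFT_MWDMA	5
#define SHIFT_UDMA	8

int main(void)
{
	unsigned int pio = 0x1f, mwdma = 0x07, udma = 0x3f;
	unsigned int xfer_mask = (pio << SHIFT_PIO) |
				 (mwdma << SHIFT_MWDMA) |
				 (udma << SHIFT_UDMA);

	printf("xfer_mask = 0x%04x\n", xfer_mask);
	printf("udma back = 0x%02x\n", (xfer_mask >> SHIFT_UDMA) & 0x3f);
	return 0;
}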
289
290static const struct ata_xfer_ent {
291 int shift, bits;
292 u8 base;
293} ata_xfer_tbl[] = {
294 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
295 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
296 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
297 { -1, },
298};
299
300/**
301 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
302 * @xfer_mask: xfer_mask of interest
303 *
304 * Return matching XFER_* value for @xfer_mask. Only the highest
305 * bit of @xfer_mask is considered.
306 *
307 * LOCKING:
308 * None.
309 *
310 * RETURNS:
311 * Matching XFER_* value, 0 if no match found.
312 */
313static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
314{
315 int highbit = fls(xfer_mask) - 1;
316 const struct ata_xfer_ent *ent;
317
318 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
319 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
320 return ent->base + highbit - ent->shift;
321 return 0;
322}
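/*
 * Illustrative sketch (stand-alone; GCC's __builtin_clz stands in for
 * the kernel's fls()): only the highest set bit of xfer_mask matters in
 * ata_xfer_mask2mode() above.
 */
#include <stdio.h>

static int fls_demo(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int xfer_mask = 0x13;	/* bits 0, 1 and 4 set */

	printf("highbit = %d\n", fls_demo(xfer_mask) - 1);	/* 4 */
	return 0;
}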
323
324/**
325 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
326 * @xfer_mode: XFER_* of interest
327 *
328 * Return matching xfer_mask for @xfer_mode.
329 *
330 * LOCKING:
331 * None.
332 *
333 * RETURNS:
334 * Matching xfer_mask, 0 if no match found.
335 */
336static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
337{
338 const struct ata_xfer_ent *ent;
339
340 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
341 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
342 return 1 << (ent->shift + xfer_mode - ent->base);
343 return 0;
344}
345
346/**
347 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
348 * @xfer_mode: XFER_* of interest
349 *
350 * Return matching xfer_shift for @xfer_mode.
351 *
352 * LOCKING:
353 * None.
354 *
355 * RETURNS:
356 * Matching xfer_shift, -1 if no match found.
357 */
358static int ata_xfer_mode2shift(unsigned int xfer_mode)
359{
360 const struct ata_xfer_ent *ent;
361
362 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
363 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
364 return ent->shift;
365 return -1;
366}
367
368/**
369 * ata_mode_string - convert xfer_mask to string
370 * @xfer_mask: mask of bits supported; only highest bit counts.
371 *
372 * Determine the string which represents the highest speed
373 * (highest bit in @xfer_mask).
374 *
375 * LOCKING:
376 * None.
377 *
378 * RETURNS:
379 * Constant C string representing highest speed listed in
380 * @xfer_mask, or the constant C string "<n/a>".
381 */
382static const char *ata_mode_string(unsigned int xfer_mask)
383{
384 static const char * const xfer_mode_str[] = {
385 "PIO0",
386 "PIO1",
387 "PIO2",
388 "PIO3",
389 "PIO4",
390 "MWDMA0",
391 "MWDMA1",
392 "MWDMA2",
393 "UDMA/16",
394 "UDMA/25",
395 "UDMA/33",
396 "UDMA/44",
397 "UDMA/66",
398 "UDMA/100",
399 "UDMA/133",
400 "UDMA7",
401 };
402 int highbit;
403
404 highbit = fls(xfer_mask) - 1;
405 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
406 return xfer_mode_str[highbit];
407 return "<n/a>";
408}
409
410static const char *sata_spd_string(unsigned int spd)
411{
412 static const char * const spd_str[] = {
413 "1.5 Gbps",
414 "3.0 Gbps",
415 };
416
417 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
418 return "<unknown>";
419 return spd_str[spd - 1];
420}
421
422void ata_dev_disable(struct ata_device *dev)
423{
424 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
425 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
426 dev->class++;
427 }
428}
429
430/**
431 * ata_pio_devchk - PATA device presence detection
432 * @ap: ATA channel to examine
433 * @device: Device to examine (starting at zero)
434 *
435 * This technique was originally described in
436 * Hale Landis's ATADRVR (www.ata-atapi.com), and
437 * later found its way into the ATA/ATAPI spec.
438 *
439 * Write a pattern to the ATA shadow registers,
440 * and if a device is present, it will respond by
441 * correctly storing and echoing back the
442 * ATA shadow register contents.
443 *
444 * LOCKING:
445 * caller.
446 */
447
448static unsigned int ata_pio_devchk(struct ata_port *ap,
449 unsigned int device)
450{
451 struct ata_ioports *ioaddr = &ap->ioaddr;
452 u8 nsect, lbal;
453
454 ap->ops->dev_select(ap, device);
455
456 outb(0x55, ioaddr->nsect_addr);
457 outb(0xaa, ioaddr->lbal_addr);
458
459 outb(0xaa, ioaddr->nsect_addr);
460 outb(0x55, ioaddr->lbal_addr);
461
462 outb(0x55, ioaddr->nsect_addr);
463 outb(0xaa, ioaddr->lbal_addr);
464
465 nsect = inb(ioaddr->nsect_addr);
466 lbal = inb(ioaddr->lbal_addr);
467
468 if ((nsect == 0x55) && (lbal == 0xaa))
469 return 1; /* we found a device */
470
471 return 0; /* nothing found */
472}
473
474/**
475 * ata_mmio_devchk - PATA device presence detection
476 * @ap: ATA channel to examine
477 * @device: Device to examine (starting at zero)
478 *
479 * This technique was originally described in
480 * Hale Landis's ATADRVR (www.ata-atapi.com), and
481 * later found its way into the ATA/ATAPI spec.
482 *
483 * Write a pattern to the ATA shadow registers,
484 * and if a device is present, it will respond by
485 * correctly storing and echoing back the
486 * ATA shadow register contents.
487 *
488 * LOCKING:
489 * caller.
490 */
491
492static unsigned int ata_mmio_devchk(struct ata_port *ap,
493 unsigned int device)
494{
495 struct ata_ioports *ioaddr = &ap->ioaddr;
496 u8 nsect, lbal;
497
498 ap->ops->dev_select(ap, device);
499
500 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
501 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
502
503 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
505
506 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
507 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
508
509 nsect = readb((void __iomem *) ioaddr->nsect_addr);
510 lbal = readb((void __iomem *) ioaddr->lbal_addr);
511
512 if ((nsect == 0x55) && (lbal == 0xaa))
513 return 1; /* we found a device */
514
515 return 0; /* nothing found */
516}
517
518/**
519 * ata_devchk - PATA device presence detection
520 * @ap: ATA channel to examine
521 * @device: Device to examine (starting at zero)
522 *
523 * Dispatch ATA device presence detection, depending
524 * on whether we are using PIO or MMIO to talk to the
525 * ATA shadow registers.
526 *
527 * LOCKING:
528 * caller.
529 */
530
531static unsigned int ata_devchk(struct ata_port *ap,
532 unsigned int device)
533{
534 if (ap->flags & ATA_FLAG_MMIO)
535 return ata_mmio_devchk(ap, device);
536 return ata_pio_devchk(ap, device);
537}
538
539/**
540 * ata_dev_classify - determine device type based on ATA-spec signature
541 * @tf: ATA taskfile register set for device to be identified
542 *
543 * Determine from taskfile register contents whether a device is
544 * ATA or ATAPI, as per "Signature and persistence" section
545 * of ATA/PI spec (volume 1, sect 5.14).
546 *
547 * LOCKING:
548 * None.
549 *
550 * RETURNS:
551 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
552 * in the event of failure.
553 */
554
555unsigned int ata_dev_classify(const struct ata_taskfile *tf)
556{
557 /* Apple's open source Darwin code hints that some devices only
558 * put a proper signature into the LBA mid/high registers,
559 * so we only check those. It's sufficient for uniqueness.
560 */
561
562 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
563 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
564 DPRINTK("found ATA device by sig\n");
565 return ATA_DEV_ATA;
566 }
567
568 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
569 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
570 DPRINTK("found ATAPI device by sig\n");
571 return ATA_DEV_ATAPI;
572 }
573
574 DPRINTK("unknown device\n");
575 return ATA_DEV_UNKNOWN;
576}
577
578/**
579 * ata_dev_try_classify - Parse returned ATA device signature
580 * @ap: ATA channel to examine
581 * @device: Device to examine (starting at zero)
582 * @r_err: Value of error register on completion
583 *
584 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
585 * an ATA/ATAPI-defined set of values is placed in the ATA
586 * shadow registers, indicating the results of device detection
587 * and diagnostics.
588 *
589 * Select the ATA device, and read the values from the ATA shadow
590 * registers. Then parse according to the Error register value,
591 * and the spec-defined values examined by ata_dev_classify().
592 *
593 * LOCKING:
594 * caller.
595 *
596 * RETURNS:
597 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
598 */
599
600static unsigned int
601ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
602{
603 struct ata_taskfile tf;
604 unsigned int class;
605 u8 err;
606
607 ap->ops->dev_select(ap, device);
608
609 memset(&tf, 0, sizeof(tf));
610
611 ap->ops->tf_read(ap, &tf);
612 err = tf.feature;
613 if (r_err)
614 *r_err = err;
615
616 /* see if device passed diags */
617 if (err == 1)
618 /* do nothing */ ;
619 else if ((device == 0) && (err == 0x81))
620 /* do nothing */ ;
621 else
622 return ATA_DEV_NONE;
623
624 /* determine if device is ATA or ATAPI */
625 class = ata_dev_classify(&tf);
626
627 if (class == ATA_DEV_UNKNOWN)
628 return ATA_DEV_NONE;
629 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
630 return ATA_DEV_NONE;
631 return class;
632}
633
634/**
635 * ata_id_string - Convert IDENTIFY DEVICE page into string
636 * @id: IDENTIFY DEVICE results we will examine
637 * @s: string into which data is output
638 * @ofs: offset into identify device page
639 * @len: length of string to return. must be an even number.
640 *
641 * The strings in the IDENTIFY DEVICE page are broken up into
642 * 16-bit chunks. Run through the string, and output each
643 * 8-bit chunk linearly, regardless of platform.
644 *
645 * LOCKING:
646 * caller.
647 */
648
649void ata_id_string(const u16 *id, unsigned char *s,
650 unsigned int ofs, unsigned int len)
651{
652 unsigned int c;
653
654 while (len > 0) {
655 c = id[ofs] >> 8;
656 *s = c;
657 s++;
658
659 c = id[ofs] & 0xff;
660 *s = c;
661 s++;
662
663 ofs++;
664 len -= 2;
665 }
666}
667
668/**
669 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
670 * @id: IDENTIFY DEVICE results we will examine
671 * @s: string into which data is output
672 * @ofs: offset into identify device page
673 * @len: length of string to return. must be an odd number.
674 *
675 * This function is identical to ata_id_string except that it
676 * trims trailing spaces and terminates the resulting string with
677 * null. @len must be actual maximum length (even number) + 1.
678 *
679 * LOCKING:
680 * caller.
681 */
682void ata_id_c_string(const u16 *id, unsigned char *s,
683 unsigned int ofs, unsigned int len)
684{
685 unsigned char *p;
686
687 WARN_ON(!(len & 1));
688
689 ata_id_string(id, s, ofs, len - 1);
690
691 p = s + strnlen(s, len - 1);
692 while (p > s && p[-1] == ' ')
693 p--;
694 *p = '\0';
695}
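/*
 * Illustrative sketch (stand-alone, with a fabricated two-word IDENTIFY
 * fragment): the byte-swap and trailing-blank trim performed by the two
 * helpers above, applied to 16-bit words encoding "AB" and "C ".
 */
#include <stdio.h>

int main(void)
{
	unsigned short id[2] = { ('A' << 8) | 'B', ('C' << 8) | ' ' };
	unsigned char s[5];
	unsigned int i;

	for (i = 0; i < 2; i++) {	/* ata_id_string() inner loop */
		s[i * 2]     = id[i] >> 8;
		s[i * 2 + 1] = id[i] & 0xff;
	}
	s[4] = '\0';
	for (i = 4; i > 0 && s[i - 1] == ' '; i--)	/* trim blanks */
		s[i - 1] = '\0';
	printf("\"%s\"\n", s);		/* prints "ABC" */
	return 0;
}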
696
697static u64 ata_id_n_sectors(const u16 *id)
698{
699 if (ata_id_has_lba(id)) {
700 if (ata_id_has_lba48(id))
701 return ata_id_u64(id, 100);
702 else
703 return ata_id_u32(id, 60);
704 } else {
705 if (ata_id_current_chs_valid(id))
706 return ata_id_u32(id, 57);
707 else
708 return id[1] * id[3] * id[6];
709 }
710}
711
712/**
713 * ata_noop_dev_select - Select device 0/1 on ATA bus
714 * @ap: ATA channel to manipulate
715 * @device: ATA device (numbered from zero) to select
716 *
717 * This function intentionally performs no action.
718 *
719 * May be used as the dev_select() entry in ata_port_operations.
720 *
721 * LOCKING:
722 * caller.
723 */
724void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
725{
726}
727
728
729/**
730 * ata_std_dev_select - Select device 0/1 on ATA bus
731 * @ap: ATA channel to manipulate
732 * @device: ATA device (numbered from zero) to select
733 *
734 * Use the method defined in the ATA specification to
735 * make either device 0, or device 1, active on the
736 * ATA channel. Works with both PIO and MMIO.
737 *
738 * May be used as the dev_select() entry in ata_port_operations.
739 *
740 * LOCKING:
741 * caller.
742 */
743
744void ata_std_dev_select (struct ata_port *ap, unsigned int device)
745{
746 u8 tmp;
747
748 if (device == 0)
749 tmp = ATA_DEVICE_OBS;
750 else
751 tmp = ATA_DEVICE_OBS | ATA_DEV1;
752
753 if (ap->flags & ATA_FLAG_MMIO) {
754 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
755 } else {
756 outb(tmp, ap->ioaddr.device_addr);
757 }
758 ata_pause(ap); /* needed; also flushes, for mmio */
759}
760
761/**
762 * ata_dev_select - Select device 0/1 on ATA bus
763 * @ap: ATA channel to manipulate
764 * @device: ATA device (numbered from zero) to select
765 * @wait: non-zero to wait for Status register BSY bit to clear
766 * @can_sleep: non-zero if context allows sleeping
767 *
768 * Use the method defined in the ATA specification to
769 * make either device 0, or device 1, active on the
770 * ATA channel.
771 *
772 * This is a high-level version of ata_std_dev_select(),
773 * which additionally provides the services of inserting
774 * the proper pauses and status polling, where needed.
775 *
776 * LOCKING:
777 * caller.
778 */
779
780void ata_dev_select(struct ata_port *ap, unsigned int device,
781 unsigned int wait, unsigned int can_sleep)
782{
783 if (ata_msg_probe(ap))
784 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
785 "device %u, wait %u\n", ap->id, device, wait);
786
787 if (wait)
788 ata_wait_idle(ap);
789
790 ap->ops->dev_select(ap, device);
791
792 if (wait) {
793 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
794 msleep(150);
795 ata_wait_idle(ap);
796 }
797}
798
799/**
800 * ata_dump_id - IDENTIFY DEVICE info debugging output
801 * @id: IDENTIFY DEVICE page to dump
802 *
803 * Dump selected 16-bit words from the given IDENTIFY DEVICE
804 * page.
805 *
806 * LOCKING:
807 * caller.
808 */
809
810static inline void ata_dump_id(const u16 *id)
811{
812 DPRINTK("49==0x%04x "
813 "53==0x%04x "
814 "63==0x%04x "
815 "64==0x%04x "
816 "75==0x%04x \n",
817 id[49],
818 id[53],
819 id[63],
820 id[64],
821 id[75]);
822 DPRINTK("80==0x%04x "
823 "81==0x%04x "
824 "82==0x%04x "
825 "83==0x%04x "
826 "84==0x%04x \n",
827 id[80],
828 id[81],
829 id[82],
830 id[83],
831 id[84]);
832 DPRINTK("88==0x%04x "
833 "93==0x%04x\n",
834 id[88],
835 id[93]);
836}
837
838/**
839 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
840 * @id: IDENTIFY data to compute xfer mask from
841 *
842 * Compute the xfermask for this device. This is not as trivial
843 * as it seems if we must consider early devices correctly.
844 *
845 * FIXME: pre-IDE drive timing (do we care?).
846 *
847 * LOCKING:
848 * None.
849 *
850 * RETURNS:
851 * Computed xfermask
852 */
853static unsigned int ata_id_xfermask(const u16 *id)
854{
855 unsigned int pio_mask, mwdma_mask, udma_mask;
856
857 /* Usual case. Word 53 indicates word 64 is valid */
858 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
859 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
860 pio_mask <<= 3;
861 pio_mask |= 0x7;
862 } else {
863 /* If word 64 isn't valid then Word 51 high byte holds
864 * the PIO timing number for the maximum. Turn it into
865 * a mask.
866 */
867 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
868
869 /* But wait... there's more. Design your standards by
870 * committee and you too can get a free iordy field to
871 * process. However, it's the speeds, not the modes, that
872 * are supported... Note that drivers using the timing API
873 * will get this right anyway
874 */
875 }
876
877 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
878
879 udma_mask = 0;
880 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
881 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
882
883 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
884}
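/*
 * Illustrative sketch (stand-alone): the PIO mask construction used in
 * the "usual case" above -- PIO modes 0-2 are always assumed supported,
 * and the low two bits of word 64 add PIO3 and PIO4.
 */
#include <stdio.h>

int main(void)
{
	unsigned int id64 = 0x03;	/* drive advertises PIO3 and PIO4 */
	unsigned int pio_mask = ((id64 & 0x03) << 3) | 0x7;

	printf("pio_mask = 0x%02x\n", pio_mask);	/* 0x1f: PIO0-PIO4 */
	return 0;
}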
885
886/**
887 * ata_port_queue_task - Queue port_task
888 * @ap: The ata_port to queue port_task for
889 * @fn: workqueue function to be scheduled
890 * @data: data value to pass to workqueue function
891 * @delay: delay time for workqueue function
892 *
893 * Schedule @fn(@data) for execution after @delay jiffies using
894 * port_task. There is one port_task per port and it's the
895 * user's (i.e. the low level driver's) responsibility to make sure that only
896 * one task is active at any given time.
897 *
898 * libata core layer takes care of synchronization between
899 * port_task and EH. ata_port_queue_task() may be ignored for EH
900 * synchronization.
901 *
902 * LOCKING:
903 * Inherited from caller.
904 */
905void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
906 unsigned long delay)
907{
908 int rc;
909
910 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
911 return;
912
913 PREPARE_WORK(&ap->port_task, fn, data);
914
915 if (!delay)
916 rc = queue_work(ata_wq, &ap->port_task);
917 else
918 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
919
920 /* rc == 0 means that another user is using port task */
921 WARN_ON(rc == 0);
922}
923
924/**
925 * ata_port_flush_task - Flush port_task
926 * @ap: The ata_port to flush port_task for
927 *
928 * After this function completes, port_task is guaranteed not to
929 * be running or scheduled.
930 *
931 * LOCKING:
932 * Kernel thread context (may sleep)
933 */
934void ata_port_flush_task(struct ata_port *ap)
935{
936 unsigned long flags;
937
938 DPRINTK("ENTER\n");
939
940 spin_lock_irqsave(ap->lock, flags);
941 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
942 spin_unlock_irqrestore(ap->lock, flags);
943
944 DPRINTK("flush #1\n");
945 flush_workqueue(ata_wq);
946
947 /*
948 * At this point, if a task is running, it's guaranteed to see
949 * the FLUSH flag; thus, it will never queue pio tasks again.
950 * Cancel and flush.
951 */
952 if (!cancel_delayed_work(&ap->port_task)) {
953 if (ata_msg_ctl(ap))
954 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
955 __FUNCTION__);
956 flush_workqueue(ata_wq);
957 }
958
959 spin_lock_irqsave(ap->lock, flags);
960 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
961 spin_unlock_irqrestore(ap->lock, flags);
962
963 if (ata_msg_ctl(ap))
964 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
965}
966
967void ata_qc_complete_internal(struct ata_queued_cmd *qc)
968{
969 struct completion *waiting = qc->private_data;
970
971 complete(waiting);
972}
973
974/**
975 * ata_exec_internal - execute libata internal command
976 * @dev: Device to which the command is sent
977 * @tf: Taskfile registers for the command and the result
978 * @cdb: CDB for packet command
979 * @dma_dir: Data transfer direction of the command
980 * @buf: Data buffer of the command
981 * @buflen: Length of data buffer
982 *
983 * Executes a libata internal command with timeout. @tf contains
984 * the command on entry and the result on return. Timeout and error
985 * conditions are reported via the return value. No recovery action
986 * is taken after a command times out. It is the caller's duty to
987 * clean up after a timeout.
988 *
989 * LOCKING:
990 * None. Should be called with kernel context, might sleep.
991 *
992 * RETURNS:
993 * Zero on success, AC_ERR_* mask on failure
994 */
995unsigned ata_exec_internal(struct ata_device *dev,
996 struct ata_taskfile *tf, const u8 *cdb,
997 int dma_dir, void *buf, unsigned int buflen)
998{
999 struct ata_port *ap = dev->ap;
1000 u8 command = tf->command;
1001 struct ata_queued_cmd *qc;
1002 unsigned int tag, preempted_tag;
1003 u32 preempted_sactive, preempted_qc_active;
1004 DECLARE_COMPLETION_ONSTACK(wait);
1005 unsigned long flags;
1006 unsigned int err_mask;
1007 int rc;
1008
1009 spin_lock_irqsave(ap->lock, flags);
1010
1011 /* no internal command while frozen */
1012 if (ap->pflags & ATA_PFLAG_FROZEN) {
1013 spin_unlock_irqrestore(ap->lock, flags);
1014 return AC_ERR_SYSTEM;
1015 }
1016
1017 /* initialize internal qc */
1018
1019 /* XXX: Tag 0 is used for drivers with legacy EH as some
1020 * drivers choke if any other tag is given. This breaks
1021 * ata_tag_internal() test for those drivers. Don't use new
1022 * EH stuff without converting to it.
1023 */
1024 if (ap->ops->error_handler)
1025 tag = ATA_TAG_INTERNAL;
1026 else
1027 tag = 0;
1028
1029 if (test_and_set_bit(tag, &ap->qc_allocated))
1030 BUG();
1031 qc = __ata_qc_from_tag(ap, tag);
1032
1033 qc->tag = tag;
1034 qc->scsicmd = NULL;
1035 qc->ap = ap;
1036 qc->dev = dev;
1037 ata_qc_reinit(qc);
1038
1039 preempted_tag = ap->active_tag;
1040 preempted_sactive = ap->sactive;
1041 preempted_qc_active = ap->qc_active;
1042 ap->active_tag = ATA_TAG_POISON;
1043 ap->sactive = 0;
1044 ap->qc_active = 0;
1045
1046 /* prepare & issue qc */
1047 qc->tf = *tf;
1048 if (cdb)
1049 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1050 qc->flags |= ATA_QCFLAG_RESULT_TF;
1051 qc->dma_dir = dma_dir;
1052 if (dma_dir != DMA_NONE) {
1053 ata_sg_init_one(qc, buf, buflen);
1054 qc->nsect = buflen / ATA_SECT_SIZE;
1055 }
1056
1057 qc->private_data = &wait;
1058 qc->complete_fn = ata_qc_complete_internal;
1059
1060 ata_qc_issue(qc);
1061
1062 spin_unlock_irqrestore(ap->lock, flags);
1063
1064 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1065
1066 ata_port_flush_task(ap);
1067
1068 if (!rc) {
1069 spin_lock_irqsave(ap->lock, flags);
1070
1071 /* We're racing with irq here. If we lose, the
1072 * following test prevents us from completing the qc
1073 * twice. If we win, the port is frozen and will be
1074 * cleaned up by ->post_internal_cmd().
1075 */
1076 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1077 qc->err_mask |= AC_ERR_TIMEOUT;
1078
1079 if (ap->ops->error_handler)
1080 ata_port_freeze(ap);
1081 else
1082 ata_qc_complete(qc);
1083
1084 if (ata_msg_warn(ap))
1085 ata_dev_printk(dev, KERN_WARNING,
1086 "qc timeout (cmd 0x%x)\n", command);
1087 }
1088
1089 spin_unlock_irqrestore(ap->lock, flags);
1090 }
1091
1092 /* do post_internal_cmd */
1093 if (ap->ops->post_internal_cmd)
1094 ap->ops->post_internal_cmd(qc);
1095
1096 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1097 if (ata_msg_warn(ap))
1098 ata_dev_printk(dev, KERN_WARNING,
1099 "zero err_mask for failed "
1100 "internal command, assuming AC_ERR_OTHER\n");
1101 qc->err_mask |= AC_ERR_OTHER;
1102 }
1103
1104 /* finish up */
1105 spin_lock_irqsave(ap->lock, flags);
1106
1107 *tf = qc->result_tf;
1108 err_mask = qc->err_mask;
1109
1110 ata_qc_free(qc);
1111 ap->active_tag = preempted_tag;
1112 ap->sactive = preempted_sactive;
1113 ap->qc_active = preempted_qc_active;
1114
1115 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1116 * Until those drivers are fixed, we detect the condition
1117 * here, fail the command with AC_ERR_SYSTEM and reenable the
1118 * port.
1119 *
1120 * Note that this doesn't change any behavior as internal
1121 * command failure results in disabling the device in the
1122 * higher layer for LLDDs without new reset/EH callbacks.
1123 *
1124 * Kill the following code as soon as those drivers are fixed.
1125 */
1126 if (ap->flags & ATA_FLAG_DISABLED) {
1127 err_mask |= AC_ERR_SYSTEM;
1128 ata_port_probe(ap);
1129 }
1130
1131 spin_unlock_irqrestore(ap->lock, flags);
1132
1133 return err_mask;
1134}
1135
1136/**
1137 * ata_do_simple_cmd - execute simple internal command
1138 * @dev: Device to which the command is sent
1139 * @cmd: Opcode to execute
1140 *
1141 * Execute a 'simple' command, that only consists of the opcode
1142 * 'cmd' itself, without filling any other registers
1143 *
1144 * LOCKING:
1145 * Kernel thread context (may sleep).
1146 *
1147 * RETURNS:
1148 * Zero on success, AC_ERR_* mask on failure
1149 */
1150unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1151{
1152 struct ata_taskfile tf;
1153
1154 ata_tf_init(dev, &tf);
1155
1156 tf.command = cmd;
1157 tf.flags |= ATA_TFLAG_DEVICE;
1158 tf.protocol = ATA_PROT_NODATA;
1159
1160 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1161}
1162
1163/**
1164 * ata_pio_need_iordy - check if iordy needed
1165 * @adev: ATA device
1166 *
1167 * Check if the current speed of the device requires IORDY. Used
1168 * by various controllers for chip configuration.
1169 */
1170
1171unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1172{
1173 int pio;
1174 int speed = adev->pio_mode - XFER_PIO_0;
1175
1176 if (speed < 2)
1177 return 0;
1178 if (speed > 2)
1179 return 1;
1180
1181 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1182
1183 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1184 pio = adev->id[ATA_ID_EIDE_PIO];
1185 /* Is the speed faster than the drive allows without IORDY? */
1186 if (pio) {
1187 /* This is cycle times not frequency - watch the logic! */
1188 if (pio > 240) /* PIO2 is 240nS per cycle */
1189 return 1;
1190 return 0;
1191 }
1192 }
1193 return 0;
1194}
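/*
 * Illustrative sketch (stand-alone, with a sample drive-reported cycle
 * time): the comparison above -- at PIO2 the drive only needs IORDY if
 * its advertised minimum cycle time is slower than the 240 ns PIO2 cycle.
 */
#include <stdio.h>

int main(void)
{
	int pio_cycle_ns = 383;		/* sample drive-reported value */

	printf("need IORDY: %d\n", pio_cycle_ns > 240);	/* prints 1 */
	return 0;
}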
1195
1196/**
1197 * ata_dev_read_id - Read ID data from the specified device
1198 * @dev: target device
1199 * @p_class: pointer to class of the target device (may be changed)
1200 * @post_reset: is this read ID post-reset?
1201 * @id: buffer to read IDENTIFY data into
1202 *
1203 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1204 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1205 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1206 * for pre-ATA4 drives.
1207 *
1208 * LOCKING:
1209 * Kernel thread context (may sleep)
1210 *
1211 * RETURNS:
1212 * 0 on success, -errno otherwise.
1213 */
1214int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1215 int post_reset, u16 *id)
1216{
1217 struct ata_port *ap = dev->ap;
1218 unsigned int class = *p_class;
1219 struct ata_taskfile tf;
1220 unsigned int err_mask = 0;
1221 const char *reason;
1222 int rc;
1223
1224 if (ata_msg_ctl(ap))
1225 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1226 __FUNCTION__, ap->id, dev->devno);
1227
1228 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1229
1230 retry:
1231 ata_tf_init(dev, &tf);
1232
1233 switch (class) {
1234 case ATA_DEV_ATA:
1235 tf.command = ATA_CMD_ID_ATA;
1236 break;
1237 case ATA_DEV_ATAPI:
1238 tf.command = ATA_CMD_ID_ATAPI;
1239 break;
1240 default:
1241 rc = -ENODEV;
1242 reason = "unsupported class";
1243 goto err_out;
1244 }
1245
1246 tf.protocol = ATA_PROT_PIO;
1247
1248 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1249 id, sizeof(id[0]) * ATA_ID_WORDS);
1250 if (err_mask) {
1251 rc = -EIO;
1252 reason = "I/O error";
1253 goto err_out;
1254 }
1255
1256 swap_buf_le16(id, ATA_ID_WORDS);
1257
1258 /* sanity check */
1259 rc = -EINVAL;
1260 reason = "device reports illegal type";
1261
1262 if (class == ATA_DEV_ATA) {
1263 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1264 goto err_out;
1265 } else {
1266 if (ata_id_is_ata(id))
1267 goto err_out;
1268 }
1269
1270 if (post_reset && class == ATA_DEV_ATA) {
1271 /*
1272 * The exact sequence expected by certain pre-ATA4 drives is:
1273 * SRST RESET
1274 * IDENTIFY
1275 * INITIALIZE DEVICE PARAMETERS
1276 * anything else..
1277 * Some drives were very specific about that exact sequence.
1278 */
1279 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1280 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1281 if (err_mask) {
1282 rc = -EIO;
1283 reason = "INIT_DEV_PARAMS failed";
1284 goto err_out;
1285 }
1286
1287 /* current CHS translation info (id[53-58]) might be
1288 * changed. reread the identify device info.
1289 */
1290 post_reset = 0;
1291 goto retry;
1292 }
1293 }
1294
1295 *p_class = class;
1296
1297 return 0;
1298
1299 err_out:
1300 if (ata_msg_warn(ap))
1301 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1302 "(%s, err_mask=0x%x)\n", reason, err_mask);
1303 return rc;
1304}
1305
1306static inline u8 ata_dev_knobble(struct ata_device *dev)
1307{
1308 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1309}
1310
1311static void ata_dev_config_ncq(struct ata_device *dev,
1312 char *desc, size_t desc_sz)
1313{
1314 struct ata_port *ap = dev->ap;
1315 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1316
1317 if (!ata_id_has_ncq(dev->id)) {
1318 desc[0] = '\0';
1319 return;
1320 }
1321
1322 if (ap->flags & ATA_FLAG_NCQ) {
1323 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1324 dev->flags |= ATA_DFLAG_NCQ;
1325 }
1326
1327 if (hdepth >= ddepth)
1328 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1329 else
1330 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1331}
1332
1333static void ata_set_port_max_cmd_len(struct ata_port *ap)
1334{
1335 int i;
1336
1337 if (ap->host) {
1338 ap->host->max_cmd_len = 0;
1339 for (i = 0; i < ATA_MAX_DEVICES; i++)
1340 ap->host->max_cmd_len = max_t(unsigned int,
1341 ap->host->max_cmd_len,
1342 ap->device[i].cdb_len);
1343 }
1344}
1345
1346/**
1347 * ata_dev_configure - Configure the specified ATA/ATAPI device
1348 * @dev: Target device to configure
1349 * @print_info: Enable device info printout
1350 *
1351 * Configure @dev according to @dev->id. Generic and low-level
1352 * driver specific fixups are also applied.
1353 *
1354 * LOCKING:
1355 * Kernel thread context (may sleep)
1356 *
1357 * RETURNS:
1358 * 0 on success, -errno otherwise
1359 */
1360int ata_dev_configure(struct ata_device *dev, int print_info)
1361{
1362 struct ata_port *ap = dev->ap;
1363 const u16 *id = dev->id;
1364 unsigned int xfer_mask;
1365 int rc;
1366
1367 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1368 ata_dev_printk(dev, KERN_INFO,
1369 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1370 __FUNCTION__, ap->id, dev->devno);
1371 return 0;
1372 }
1373
1374 if (ata_msg_probe(ap))
1375 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1376 __FUNCTION__, ap->id, dev->devno);
1377
1378 /* print device capabilities */
1379 if (ata_msg_probe(ap))
1380 ata_dev_printk(dev, KERN_DEBUG,
1381 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1382 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1383 __FUNCTION__,
1384 id[49], id[82], id[83], id[84],
1385 id[85], id[86], id[87], id[88]);
1386
1387 /* initialize to-be-configured parameters */
1388 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1389 dev->max_sectors = 0;
1390 dev->cdb_len = 0;
1391 dev->n_sectors = 0;
1392 dev->cylinders = 0;
1393 dev->heads = 0;
1394 dev->sectors = 0;
1395
1396 /*
1397 * common ATA, ATAPI feature tests
1398 */
1399
1400 /* find max transfer mode; for printk only */
1401 xfer_mask = ata_id_xfermask(id);
1402
1403 if (ata_msg_probe(ap))
1404 ata_dump_id(id);
1405
1406 /* ATA-specific feature tests */
1407 if (dev->class == ATA_DEV_ATA) {
1408 dev->n_sectors = ata_id_n_sectors(id);
1409
1410 if (ata_id_has_lba(id)) {
1411 const char *lba_desc;
1412 char ncq_desc[20];
1413
1414 lba_desc = "LBA";
1415 dev->flags |= ATA_DFLAG_LBA;
1416 if (ata_id_has_lba48(id)) {
1417 dev->flags |= ATA_DFLAG_LBA48;
1418 lba_desc = "LBA48";
1419 }
1420
1421 /* config NCQ */
1422 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1423
1424 /* print device info to dmesg */
1425 if (ata_msg_drv(ap) && print_info)
1426 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1427 "max %s, %Lu sectors: %s %s\n",
1428 ata_id_major_version(id),
1429 ata_mode_string(xfer_mask),
1430 (unsigned long long)dev->n_sectors,
1431 lba_desc, ncq_desc);
1432 } else {
1433 /* CHS */
1434
1435 /* Default translation */
1436 dev->cylinders = id[1];
1437 dev->heads = id[3];
1438 dev->sectors = id[6];
1439
1440 if (ata_id_current_chs_valid(id)) {
1441 /* Current CHS translation is valid. */
1442 dev->cylinders = id[54];
1443 dev->heads = id[55];
1444 dev->sectors = id[56];
1445 }
1446
1447 /* print device info to dmesg */
1448 if (ata_msg_drv(ap) && print_info)
1449 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1450 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1451 ata_id_major_version(id),
1452 ata_mode_string(xfer_mask),
1453 (unsigned long long)dev->n_sectors,
1454 dev->cylinders, dev->heads,
1455 dev->sectors);
1456 }
1457
1458 if (dev->id[59] & 0x100) {
1459 dev->multi_count = dev->id[59] & 0xff;
1460 if (ata_msg_drv(ap) && print_info)
1461 ata_dev_printk(dev, KERN_INFO,
1462 "ata%u: dev %u multi count %u\n",
1463 ap->id, dev->devno, dev->multi_count);
1464 }
1465
1466 dev->cdb_len = 16;
1467 }
1468
1469 /* ATAPI-specific feature tests */
1470 else if (dev->class == ATA_DEV_ATAPI) {
1471 char *cdb_intr_string = "";
1472
1473 rc = atapi_cdb_len(id);
1474 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1475 if (ata_msg_warn(ap))
1476 ata_dev_printk(dev, KERN_WARNING,
1477 "unsupported CDB len\n");
1478 rc = -EINVAL;
1479 goto err_out_nosup;
1480 }
1481 dev->cdb_len = (unsigned int) rc;
1482
1483 if (ata_id_cdb_intr(dev->id)) {
1484 dev->flags |= ATA_DFLAG_CDB_INTR;
1485 cdb_intr_string = ", CDB intr";
1486 }
1487
1488 /* print device info to dmesg */
1489 if (ata_msg_drv(ap) && print_info)
1490 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1491 ata_mode_string(xfer_mask),
1492 cdb_intr_string);
1493 }
1494
1495 ata_set_port_max_cmd_len(ap);
1496
1497 /* limit bridge transfers to udma5, 200 sectors */
1498 if (ata_dev_knobble(dev)) {
1499 if (ata_msg_drv(ap) && print_info)
1500 ata_dev_printk(dev, KERN_INFO,
1501 "applying bridge limits\n");
1502 dev->udma_mask &= ATA_UDMA5;
1503 dev->max_sectors = ATA_MAX_SECTORS;
1504 }
1505
1506 if (ap->ops->dev_config)
1507 ap->ops->dev_config(ap, dev);
1508
1509 if (ata_msg_probe(ap))
1510 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1511 __FUNCTION__, ata_chk_status(ap));
1512 return 0;
1513
1514err_out_nosup:
1515 if (ata_msg_probe(ap))
1516 ata_dev_printk(dev, KERN_DEBUG,
1517 "%s: EXIT, err\n", __FUNCTION__);
1518 return rc;
1519}
1520
1521/**
1522 * ata_bus_probe - Reset and probe ATA bus
1523 * @ap: Bus to probe
1524 *
1525 * Master ATA bus probing function. Initiates a hardware-dependent
1526 * bus reset, then attempts to identify any devices found on
1527 * the bus.
1528 *
1529 * LOCKING:
1530 * PCI/etc. bus probe sem.
1531 *
1532 * RETURNS:
1533 * Zero on success, negative errno otherwise.
1534 */
1535
1536static int ata_bus_probe(struct ata_port *ap)
1537{
1538 unsigned int classes[ATA_MAX_DEVICES];
1539 int tries[ATA_MAX_DEVICES];
1540 int i, rc, down_xfermask;
1541 struct ata_device *dev;
1542
1543 ata_port_probe(ap);
1544
1545 for (i = 0; i < ATA_MAX_DEVICES; i++)
1546 tries[i] = ATA_PROBE_MAX_TRIES;
1547
1548 retry:
1549 down_xfermask = 0;
1550
1551 /* reset and determine device classes */
1552 ap->ops->phy_reset(ap);
1553
1554 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1555 dev = &ap->device[i];
1556
1557 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1558 dev->class != ATA_DEV_UNKNOWN)
1559 classes[dev->devno] = dev->class;
1560 else
1561 classes[dev->devno] = ATA_DEV_NONE;
1562
1563 dev->class = ATA_DEV_UNKNOWN;
1564 }
1565
1566 ata_port_probe(ap);
1567
1568 /* after the reset the device state is PIO 0 and the controller
1569 state is undefined. Record the mode */
1570
1571 for (i = 0; i < ATA_MAX_DEVICES; i++)
1572 ap->device[i].pio_mode = XFER_PIO_0;
1573
1574 /* read IDENTIFY page and configure devices */
1575 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1576 dev = &ap->device[i];
1577
1578 if (tries[i])
1579 dev->class = classes[i];
1580
1581 if (!ata_dev_enabled(dev))
1582 continue;
1583
1584 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1585 if (rc)
1586 goto fail;
1587
1588 rc = ata_dev_configure(dev, 1);
1589 if (rc)
1590 goto fail;
1591 }
1592
1593 /* configure transfer mode */
1594 rc = ata_set_mode(ap, &dev);
1595 if (rc) {
1596 down_xfermask = 1;
1597 goto fail;
1598 }
1599
1600 for (i = 0; i < ATA_MAX_DEVICES; i++)
1601 if (ata_dev_enabled(&ap->device[i]))
1602 return 0;
1603
1604 /* no device present, disable port */
1605 ata_port_disable(ap);
1606 ap->ops->port_disable(ap);
1607 return -ENODEV;
1608
1609 fail:
1610 switch (rc) {
1611 case -EINVAL:
1612 case -ENODEV:
1613 tries[dev->devno] = 0;
1614 break;
1615 case -EIO:
1616 sata_down_spd_limit(ap);
1617 /* fall through */
1618 default:
1619 tries[dev->devno]--;
1620 if (down_xfermask &&
1621 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1622 tries[dev->devno] = 0;
1623 }
1624
1625 if (!tries[dev->devno]) {
1626 ata_down_xfermask_limit(dev, 1);
1627 ata_dev_disable(dev);
1628 }
1629
1630 goto retry;
1631}
1632
1633/**
1634 * ata_port_probe - Mark port as enabled
1635 * @ap: Port for which we indicate enablement
1636 *
1637 * Modify @ap data structure such that the system
1638 * thinks that the entire port is enabled.
1639 *
1640 * LOCKING: host_set lock, or some other form of
1641 * serialization.
1642 */
1643
1644void ata_port_probe(struct ata_port *ap)
1645{
1646 ap->flags &= ~ATA_FLAG_DISABLED;
1647}
1648
1649/**
1650 * sata_print_link_status - Print SATA link status
1651 * @ap: SATA port to printk link status about
1652 *
1653 * This function prints link speed and status of a SATA link.
1654 *
1655 * LOCKING:
1656 * None.
1657 */
1658static void sata_print_link_status(struct ata_port *ap)
1659{
1660 u32 sstatus, scontrol, tmp;
1661
1662 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1663 return;
1664 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1665
1666 if (ata_port_online(ap)) {
1667 tmp = (sstatus >> 4) & 0xf;
1668 ata_port_printk(ap, KERN_INFO,
1669 "SATA link up %s (SStatus %X SControl %X)\n",
1670 sata_spd_string(tmp), sstatus, scontrol);
1671 } else {
1672 ata_port_printk(ap, KERN_INFO,
1673 "SATA link down (SStatus %X SControl %X)\n",
1674 sstatus, scontrol);
1675 }
1676}
1677
1678/**
1679 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1680 * @ap: SATA port associated with target SATA PHY.
1681 *
1682 * This function issues commands to standard SATA Sxxx
1683 * PHY registers, to wake up the phy (and device), and
1684 * clear any reset condition.
1685 *
1686 * LOCKING:
1687 * PCI/etc. bus probe sem.
1688 *
1689 */
1690void __sata_phy_reset(struct ata_port *ap)
1691{
1692 u32 sstatus;
1693 unsigned long timeout = jiffies + (HZ * 5);
1694
1695 if (ap->flags & ATA_FLAG_SATA_RESET) {
1696 /* issue phy wake/reset */
1697 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1698 /* Couldn't find anything in SATA I/II specs, but
1699 * AHCI-1.1 10.4.2 says at least 1 ms. */
1700 mdelay(1);
1701 }
1702 /* phy wake/clear reset */
1703 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1704
1705 /* wait for phy to become ready, if necessary */
1706 do {
1707 msleep(200);
1708 sata_scr_read(ap, SCR_STATUS, &sstatus);
1709 if ((sstatus & 0xf) != 1)
1710 break;
1711 } while (time_before(jiffies, timeout));
1712
1713 /* print link status */
1714 sata_print_link_status(ap);
1715
1716 /* TODO: phy layer with polling, timeouts, etc. */
1717 if (!ata_port_offline(ap))
1718 ata_port_probe(ap);
1719 else
1720 ata_port_disable(ap);
1721
1722 if (ap->flags & ATA_FLAG_DISABLED)
1723 return;
1724
1725 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1726 ata_port_disable(ap);
1727 return;
1728 }
1729
1730 ap->cbl = ATA_CBL_SATA;
1731}
1732
1733/**
1734 * sata_phy_reset - Reset SATA bus.
1735 * @ap: SATA port associated with target SATA PHY.
1736 *
1737 * This function resets the SATA bus, and then probes
1738 * the bus for devices.
1739 *
1740 * LOCKING:
1741 * PCI/etc. bus probe sem.
1742 *
1743 */
1744void sata_phy_reset(struct ata_port *ap)
1745{
1746 __sata_phy_reset(ap);
1747 if (ap->flags & ATA_FLAG_DISABLED)
1748 return;
1749 ata_bus_reset(ap);
1750}
1751
1752/**
1753 * ata_dev_pair - return other device on cable
1754 * @adev: device
1755 *
1756 * Obtain the other device on the same cable; if none is
1757 * present, NULL is returned.
1758 */
1759
1760struct ata_device *ata_dev_pair(struct ata_device *adev)
1761{
1762 struct ata_port *ap = adev->ap;
1763 struct ata_device *pair = &ap->device[1 - adev->devno];
1764 if (!ata_dev_enabled(pair))
1765 return NULL;
1766 return pair;
1767}
1768
1769/**
1770 * ata_port_disable - Disable port.
1771 * @ap: Port to be disabled.
1772 *
1773 * Modify @ap data structure such that the system
1774 * thinks that the entire port is disabled, and should
1775 * never attempt to probe or communicate with devices
1776 * on this port.
1777 *
1778 * LOCKING: host_set lock, or some other form of
1779 * serialization.
1780 */
1781
1782void ata_port_disable(struct ata_port *ap)
1783{
1784 ap->device[0].class = ATA_DEV_NONE;
1785 ap->device[1].class = ATA_DEV_NONE;
1786 ap->flags |= ATA_FLAG_DISABLED;
1787}
1788
1789/**
1790 * sata_down_spd_limit - adjust SATA spd limit downward
1791 * @ap: Port to adjust SATA spd limit for
1792 *
1793 * Adjust SATA spd limit of @ap downward. Note that this
1794 * function only adjusts the limit. The change must be applied
1795 * using sata_set_spd().
1796 *
1797 * LOCKING:
1798 * Inherited from caller.
1799 *
1800 * RETURNS:
1801 * 0 on success, negative errno on failure
1802 */
1803int sata_down_spd_limit(struct ata_port *ap)
1804{
1805 u32 sstatus, spd, mask;
1806 int rc, highbit;
1807
1808 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1809 if (rc)
1810 return rc;
1811
1812 mask = ap->sata_spd_limit;
1813 if (mask <= 1)
1814 return -EINVAL;
1815 highbit = fls(mask) - 1;
1816 mask &= ~(1 << highbit);
1817
1818 spd = (sstatus >> 4) & 0xf;
1819 if (spd <= 1)
1820 return -EINVAL;
1821 spd--;
1822 mask &= (1 << spd) - 1;
1823 if (!mask)
1824 return -EINVAL;
1825
1826 ap->sata_spd_limit = mask;
1827
1828 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1829 sata_spd_string(fls(mask)));
1830
1831 return 0;
1832}
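/*
 * Illustrative sketch (stand-alone; GCC's __builtin_clz stands in for
 * fls()): the two mask reductions above, starting from a limit that
 * allows both 1.5 and 3.0 Gbps (mask 0x3) with the link at 3.0 Gbps
 * (spd 2).
 */
#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x3, spd = 2;
	int highbit = (32 - __builtin_clz(mask)) - 1;	/* fls(mask) - 1 */

	mask &= ~(1u << highbit);	/* drop the highest allowed speed */
	mask &= (1u << (spd - 1)) - 1;	/* keep only speeds below current */
	printf("new limit mask = 0x%x\n", mask);	/* 0x1: 1.5 Gbps only */
	return 0;
}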
1833
1834static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1835{
1836 u32 spd, limit;
1837
1838 if (ap->sata_spd_limit == UINT_MAX)
1839 limit = 0;
1840 else
1841 limit = fls(ap->sata_spd_limit);
1842
1843 spd = (*scontrol >> 4) & 0xf;
1844 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1845
1846 return spd != limit;
1847}
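/*
 * Illustrative sketch (stand-alone): rewriting the SPD field (bits 7:4)
 * of SControl as __sata_set_spd_needed() does above, limiting the link
 * to 1.5 Gbps.
 */
#include <stdio.h>

int main(void)
{
	unsigned int scontrol = 0x300, limit = 1;

	scontrol = (scontrol & ~0xf0u) | ((limit & 0xf) << 4);
	printf("SControl = 0x%03x\n", scontrol);	/* 0x310 */
	return 0;
}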
1848
1849/**
1850 * sata_set_spd_needed - is SATA spd configuration needed
1851 * @ap: Port in question
1852 *
1853 * Test whether the spd limit in SControl matches
1854 * @ap->sata_spd_limit. This function is used to determine
1855 * whether hardreset is necessary to apply SATA spd
1856 * configuration.
1857 *
1858 * LOCKING:
1859 * Inherited from caller.
1860 *
1861 * RETURNS:
1862 * 1 if SATA spd configuration is needed, 0 otherwise.
1863 */
1864int sata_set_spd_needed(struct ata_port *ap)
1865{
1866 u32 scontrol;
1867
1868 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1869 return 0;
1870
1871 return __sata_set_spd_needed(ap, &scontrol);
1872}
1873
1874/**
1875 * sata_set_spd - set SATA spd according to spd limit
1876 * @ap: Port to set SATA spd for
1877 *
1878 * Set SATA spd of @ap according to sata_spd_limit.
1879 *
1880 * LOCKING:
1881 * Inherited from caller.
1882 *
1883 * RETURNS:
1884 * 0 if spd doesn't need to be changed, 1 if spd has been
1885 * changed. Negative errno if SCR registers are inaccessible.
1886 */
1887int sata_set_spd(struct ata_port *ap)
1888{
1889 u32 scontrol;
1890 int rc;
1891
1892 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1893 return rc;
1894
1895 if (!__sata_set_spd_needed(ap, &scontrol))
1896 return 0;
1897
1898 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1899 return rc;
1900
1901 return 1;
1902}
1903
1904/*
1905 * This mode timing computation functionality is ported over from
1906 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1907 */
1908/*
1909 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1910 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1911 * for PIO 5, which is a nonstandard extension and UDMA6, which
1912 * is currently supported only by Maxtor drives.
1913 */
1914
1915static const struct ata_timing ata_timing[] = {
1916
1917 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1918 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1919 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1920 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1921
1922 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1923 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1924 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1925
1926/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1927
1928 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1929 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1930 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1931
1932 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1933 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1934 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1935
1936/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1937 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1938 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1939
1940 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1941 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1942 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1943
1944/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1945
1946 { 0xFF }
1947};
1948
1949#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1950#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1951
1952static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1953{
1954 q->setup = EZ(t->setup * 1000, T);
1955 q->act8b = EZ(t->act8b * 1000, T);
1956 q->rec8b = EZ(t->rec8b * 1000, T);
1957 q->cyc8b = EZ(t->cyc8b * 1000, T);
1958 q->active = EZ(t->active * 1000, T);
1959 q->recover = EZ(t->recover * 1000, T);
1960 q->cycle = EZ(t->cycle * 1000, T);
1961 q->udma = EZ(t->udma * 1000, UT);
1962}
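/*
 * Illustrative sketch (stand-alone): the round-up performed by the
 * ENOUGH()/EZ() macros above -- a 70 ns timing quantized to a 30 ns bus
 * clock needs 3 cycles, while a zero (unused) field stays zero.
 */
#include <stdio.h>

#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)
#define EZ(v, unit)	((v) ? ENOUGH(v, unit) : 0)

int main(void)
{
	printf("%d %d\n", EZ(70, 30), EZ(0, 30));	/* prints "3 0" */
	return 0;
}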
1963
1964void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1965 struct ata_timing *m, unsigned int what)
1966{
1967 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1968 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1969 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1970 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1971 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1972 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1973 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1974 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1975}
1976
1977static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1978{
1979 const struct ata_timing *t;
1980
1981 for (t = ata_timing; t->mode != speed; t++)
1982 if (t->mode == 0xFF)
1983 return NULL;
1984 return t;
1985}
1986
1987int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1988 struct ata_timing *t, int T, int UT)
1989{
1990 const struct ata_timing *s;
1991 struct ata_timing p;
1992
1993 /*
1994 * Find the mode.
1995 */
1996
1997 if (!(s = ata_timing_find_mode(speed)))
1998 return -EINVAL;
1999
2000 memcpy(t, s, sizeof(*s));
2001
2002 /*
2003 * If the drive is an EIDE drive, it can tell us it needs extended
2004 * PIO/MW_DMA cycle timing.
2005 */
2006
2007 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2008 memset(&p, 0, sizeof(p));
2009 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2010 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2011 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2012		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2013 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2014 }
2015 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2016 }
2017
2018 /*
2019 * Convert the timing to bus clock counts.
2020 */
2021
2022 ata_timing_quantize(t, t, T, UT);
2023
2024 /*
2025 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2026	 * S.M.A.R.T. and some other commands. We have to ensure that the
2027	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2028 */
2029
2030 if (speed > XFER_PIO_4) {
2031 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2032 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2033 }
2034
2035 /*
2036 * Lengthen active & recovery time so that cycle time is correct.
2037 */
2038
2039 if (t->act8b + t->rec8b < t->cyc8b) {
2040 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2041 t->rec8b = t->cyc8b - t->act8b;
2042 }
2043
2044 if (t->active + t->recover < t->cycle) {
2045 t->active += (t->cycle - (t->active + t->recover)) / 2;
2046 t->recover = t->cycle - t->active;
2047 }
2048
2049 return 0;
2050}
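
/* Illustrative usage sketch (hypothetical driver code, not from this
 * file): a PATA LLDD's ->set_piomode() hook could turn the quantized
 * result into chipset register values. program_timings() is made up,
 * and T/UT are assumed to be bus clock periods in picoseconds, so
 * 1000000000 / 33333 is about 30000 ps for a 33.333 MHz clock:
 *
 *	static void foo_set_piomode(struct ata_port *ap,
 *				    struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *		int T = 1000000000 / 33333;
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0))
 *			return;
 *		program_timings(ap, adev, t.setup, t.active, t.recover);
 *	}
 */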
2051
2052/**
2053 * ata_down_xfermask_limit - adjust dev xfer masks downward
2054 * @dev: Device to adjust xfer masks
2055 * @force_pio0: Force PIO0
2056 *
2057 * Adjust xfer masks of @dev downward. Note that this function
2058 * does not apply the change. Invoking ata_set_mode() afterwards
2059 * will apply the limit.
2060 *
2061 * LOCKING:
2062 * Inherited from caller.
2063 *
2064 * RETURNS:
2065 * 0 on success, negative errno on failure
2066 */
2067int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2068{
2069 unsigned long xfer_mask;
2070 int highbit;
2071
2072 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2073 dev->udma_mask);
2074
2075 if (!xfer_mask)
2076 goto fail;
2077 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2078 if (xfer_mask & ATA_MASK_UDMA)
2079 xfer_mask &= ~ATA_MASK_MWDMA;
2080
2081 highbit = fls(xfer_mask) - 1;
2082 xfer_mask &= ~(1 << highbit);
2083 if (force_pio0)
2084 xfer_mask &= 1 << ATA_SHIFT_PIO;
2085 if (!xfer_mask)
2086 goto fail;
2087
2088 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2089 &dev->udma_mask);
2090
2091 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2092 ata_mode_string(xfer_mask));
2093
2094 return 0;
2095
2096 fail:
2097 return -EINVAL;
2098}
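
/* Worked example (illustrative figures): a device at UDMA/100 might
 * have pio_mask 0x1f, mwdma_mask 0x07 and udma_mask 0x3f. The packed
 * mask has UDMA bits set, so the MWDMA bits are cleared first, and
 * dropping the highest remaining bit turns UDMA5 into UDMA4. Each
 * further call steps down one more UDMA mode and, once udma_mask is
 * exhausted, continues down through the PIO modes.
 */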
2099
2100static int ata_dev_set_mode(struct ata_device *dev)
2101{
2102 unsigned int err_mask;
2103 int rc;
2104
2105 dev->flags &= ~ATA_DFLAG_PIO;
2106 if (dev->xfer_shift == ATA_SHIFT_PIO)
2107 dev->flags |= ATA_DFLAG_PIO;
2108
2109 err_mask = ata_dev_set_xfermode(dev);
2110 if (err_mask) {
2111 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2112 "(err_mask=0x%x)\n", err_mask);
2113 return -EIO;
2114 }
2115
2116 rc = ata_dev_revalidate(dev, 0);
2117 if (rc)
2118 return rc;
2119
2120 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2121 dev->xfer_shift, (int)dev->xfer_mode);
2122
2123 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2124 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2125 return 0;
2126}
2127
2128/**
2129 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2130 * @ap: port on which timings will be programmed
2131 * @r_failed_dev: out parameter for failed device
2132 *
2133 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2134 * ata_set_mode() fails, pointer to the failing device is
2135 * returned in @r_failed_dev.
2136 *
2137 * LOCKING:
2138 * PCI/etc. bus probe sem.
2139 *
2140 * RETURNS:
2141 * 0 on success, negative errno otherwise
2142 */
2143int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2144{
2145 struct ata_device *dev;
2146 int i, rc = 0, used_dma = 0, found = 0;
2147
2148 /* has private set_mode? */
2149 if (ap->ops->set_mode) {
2150 /* FIXME: make ->set_mode handle no device case and
2151 * return error code and failing device on failure.
2152 */
2153 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2154 if (ata_dev_ready(&ap->device[i])) {
2155 ap->ops->set_mode(ap);
2156 break;
2157 }
2158 }
2159 return 0;
2160 }
2161
2162 /* step 1: calculate xfer_mask */
2163 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2164 unsigned int pio_mask, dma_mask;
2165
2166 dev = &ap->device[i];
2167
2168 if (!ata_dev_enabled(dev))
2169 continue;
2170
2171 ata_dev_xfermask(dev);
2172
2173 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2174 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2175 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2176 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2177
2178 found = 1;
2179 if (dev->dma_mode)
2180 used_dma = 1;
2181 }
2182 if (!found)
2183 goto out;
2184
2185 /* step 2: always set host PIO timings */
2186 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2187 dev = &ap->device[i];
2188 if (!ata_dev_enabled(dev))
2189 continue;
2190
2191 if (!dev->pio_mode) {
2192 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2193 rc = -EINVAL;
2194 goto out;
2195 }
2196
2197 dev->xfer_mode = dev->pio_mode;
2198 dev->xfer_shift = ATA_SHIFT_PIO;
2199 if (ap->ops->set_piomode)
2200 ap->ops->set_piomode(ap, dev);
2201 }
2202
2203 /* step 3: set host DMA timings */
2204 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2205 dev = &ap->device[i];
2206
2207 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2208 continue;
2209
2210 dev->xfer_mode = dev->dma_mode;
2211 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2212 if (ap->ops->set_dmamode)
2213 ap->ops->set_dmamode(ap, dev);
2214 }
2215
2216 /* step 4: update devices' xfer mode */
2217 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2218 dev = &ap->device[i];
2219
2220		/* don't update suspended devices' xfer mode */
2221 if (!ata_dev_ready(dev))
2222 continue;
2223
2224 rc = ata_dev_set_mode(dev);
2225 if (rc)
2226 goto out;
2227 }
2228
2229 /* Record simplex status. If we selected DMA then the other
2230 * host channels are not permitted to do so.
2231 */
2232 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2233 ap->host_set->simplex_claimed = 1;
2234
2235	/* step 5: chip-specific finalisation */
2236 if (ap->ops->post_set_mode)
2237 ap->ops->post_set_mode(ap);
2238
2239 out:
2240 if (rc)
2241 *r_failed_dev = dev;
2242 return rc;
2243}
2244
2245/**
2246 * ata_tf_to_host - issue ATA taskfile to host controller
2247 * @ap: port to which command is being issued
2248 * @tf: ATA taskfile register set
2249 *
2250 * Issues ATA taskfile register set to ATA host controller,
2251 * with proper synchronization with interrupt handler and
2252 * other threads.
2253 *
2254 * LOCKING:
2255 * spin_lock_irqsave(host_set lock)
2256 */
2257
2258static inline void ata_tf_to_host(struct ata_port *ap,
2259 const struct ata_taskfile *tf)
2260{
2261 ap->ops->tf_load(ap, tf);
2262 ap->ops->exec_command(ap, tf);
2263}
2264
2265/**
2266 * ata_busy_sleep - sleep until BSY clears, or timeout
2267 * @ap: port containing status register to be polled
2268 * @tmout_pat: impatience timeout
2269 * @tmout: overall timeout
2270 *
2271 * Sleep until ATA Status register bit BSY clears,
2272 * or a timeout occurs.
2273 *
2274 * LOCKING: None.
2275 */
2276
2277unsigned int ata_busy_sleep(struct ata_port *ap,
2278 unsigned long tmout_pat, unsigned long tmout)
2279{
2280 unsigned long timer_start, timeout;
2281 u8 status;
2282
2283 status = ata_busy_wait(ap, ATA_BUSY, 300);
2284 timer_start = jiffies;
2285 timeout = timer_start + tmout_pat;
2286 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2287 msleep(50);
2288 status = ata_busy_wait(ap, ATA_BUSY, 3);
2289 }
2290
2291 if (status & ATA_BUSY)
2292 ata_port_printk(ap, KERN_WARNING,
2293 "port is slow to respond, please be patient\n");
2294
2295 timeout = timer_start + tmout;
2296 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2297 msleep(50);
2298 status = ata_chk_status(ap);
2299 }
2300
2301 if (status & ATA_BUSY) {
2302 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2303 "(%lu secs)\n", tmout / HZ);
2304 return 1;
2305 }
2306
2307 return 0;
2308}
2309
2310static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2311{
2312 struct ata_ioports *ioaddr = &ap->ioaddr;
2313 unsigned int dev0 = devmask & (1 << 0);
2314 unsigned int dev1 = devmask & (1 << 1);
2315 unsigned long timeout;
2316
2317 /* if device 0 was found in ata_devchk, wait for its
2318 * BSY bit to clear
2319 */
2320 if (dev0)
2321 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2322
2323 /* if device 1 was found in ata_devchk, wait for
2324 * register access, then wait for BSY to clear
2325 */
2326 timeout = jiffies + ATA_TMOUT_BOOT;
2327 while (dev1) {
2328 u8 nsect, lbal;
2329
2330 ap->ops->dev_select(ap, 1);
2331 if (ap->flags & ATA_FLAG_MMIO) {
2332 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2333 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2334 } else {
2335 nsect = inb(ioaddr->nsect_addr);
2336 lbal = inb(ioaddr->lbal_addr);
2337 }
2338 if ((nsect == 1) && (lbal == 1))
2339 break;
2340 if (time_after(jiffies, timeout)) {
2341 dev1 = 0;
2342 break;
2343 }
2344 msleep(50); /* give drive a breather */
2345 }
2346 if (dev1)
2347 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2348
2349 /* is all this really necessary? */
2350 ap->ops->dev_select(ap, 0);
2351 if (dev1)
2352 ap->ops->dev_select(ap, 1);
2353 if (dev0)
2354 ap->ops->dev_select(ap, 0);
2355}
2356
2357static unsigned int ata_bus_softreset(struct ata_port *ap,
2358 unsigned int devmask)
2359{
2360 struct ata_ioports *ioaddr = &ap->ioaddr;
2361
2362 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2363
2364 /* software reset. causes dev0 to be selected */
2365 if (ap->flags & ATA_FLAG_MMIO) {
2366 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2367 udelay(20); /* FIXME: flush */
2368 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2369 udelay(20); /* FIXME: flush */
2370 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2371 } else {
2372 outb(ap->ctl, ioaddr->ctl_addr);
2373 udelay(10);
2374 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2375 udelay(10);
2376 outb(ap->ctl, ioaddr->ctl_addr);
2377 }
2378
2379 /* spec mandates ">= 2ms" before checking status.
2380 * We wait 150ms, because that was the magic delay used for
2381 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2382 * between when the ATA command register is written, and then
2383	 * between when the ATA command register is written and when
2384	 * status is checked. Because waiting for "a while" before
2385 * delay here as well.
2386 *
2387	 * Old drivers/ide uses the 2 ms rule and then waits for ready.
2388 */
2389 msleep(150);
2390
2391 /* Before we perform post reset processing we want to see if
2392 * the bus shows 0xFF because the odd clown forgets the D7
2393 * pulldown resistor.
2394 */
2395 if (ata_check_status(ap) == 0xFF) {
2396 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2397 return AC_ERR_OTHER;
2398 }
2399
2400 ata_bus_post_reset(ap, devmask);
2401
2402 return 0;
2403}
2404
2405/**
2406 * ata_bus_reset - reset host port and associated ATA channel
2407 * @ap: port to reset
2408 *
2409 * This is typically the first time we actually start issuing
2410 * commands to the ATA channel. We wait for BSY to clear, then
2411 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2412 * result. Determine what devices, if any, are on the channel
2413 * by looking at the device 0/1 error register. Look at the signature
2414 * stored in each device's taskfile registers, to determine if
2415 * the device is ATA or ATAPI.
2416 *
2417 * LOCKING:
2418 * PCI/etc. bus probe sem.
2419 * Obtains host_set lock.
2420 *
2421 * SIDE EFFECTS:
2422 * Sets ATA_FLAG_DISABLED if bus reset fails.
2423 */
2424
2425void ata_bus_reset(struct ata_port *ap)
2426{
2427 struct ata_ioports *ioaddr = &ap->ioaddr;
2428 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2429 u8 err;
2430 unsigned int dev0, dev1 = 0, devmask = 0;
2431
2432 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2433
2434 /* determine if device 0/1 are present */
2435 if (ap->flags & ATA_FLAG_SATA_RESET)
2436 dev0 = 1;
2437 else {
2438 dev0 = ata_devchk(ap, 0);
2439 if (slave_possible)
2440 dev1 = ata_devchk(ap, 1);
2441 }
2442
2443 if (dev0)
2444 devmask |= (1 << 0);
2445 if (dev1)
2446 devmask |= (1 << 1);
2447
2448 /* select device 0 again */
2449 ap->ops->dev_select(ap, 0);
2450
2451 /* issue bus reset */
2452 if (ap->flags & ATA_FLAG_SRST)
2453 if (ata_bus_softreset(ap, devmask))
2454 goto err_out;
2455
2456 /*
2457 * determine by signature whether we have ATA or ATAPI devices
2458 */
2459 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2460 if ((slave_possible) && (err != 0x81))
2461 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2462
2463 /* re-enable interrupts */
2464 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2465 ata_irq_on(ap);
2466
2467 /* is double-select really necessary? */
2468 if (ap->device[1].class != ATA_DEV_NONE)
2469 ap->ops->dev_select(ap, 1);
2470 if (ap->device[0].class != ATA_DEV_NONE)
2471 ap->ops->dev_select(ap, 0);
2472
2473 /* if no devices were detected, disable this port */
2474 if ((ap->device[0].class == ATA_DEV_NONE) &&
2475 (ap->device[1].class == ATA_DEV_NONE))
2476 goto err_out;
2477
2478 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2479 /* set up device control for ATA_FLAG_SATA_RESET */
2480 if (ap->flags & ATA_FLAG_MMIO)
2481 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2482 else
2483 outb(ap->ctl, ioaddr->ctl_addr);
2484 }
2485
2486 DPRINTK("EXIT\n");
2487 return;
2488
2489err_out:
2490 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2491 ap->ops->port_disable(ap);
2492
2493 DPRINTK("EXIT\n");
2494}
2495
2496/**
2497 * sata_phy_debounce - debounce SATA phy status
2498 * @ap: ATA port to debounce SATA phy status for
2499 * @params: timing parameters { interval, duration, timeout } in msec
2500 *
2501 * Make sure SStatus of @ap reaches a stable state: the same value,
2502 * with DET other than 1, held for @duration while being polled
2503 * every @interval, before @timeout expires. The timeout constrains
2504 * the beginning of the stable state. Because DET can get stuck at
2505 * 1 on some controllers after hot unplugging, this function waits
2506 * until the timeout and then returns 0 if DET is stable at 1.
2507 *
2508 * LOCKING:
2509 * Kernel thread context (may sleep)
2510 *
2511 * RETURNS:
2512 * 0 on success, -errno on failure.
2513 */
2514int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2515{
2516 unsigned long interval_msec = params[0];
2517 unsigned long duration = params[1] * HZ / 1000;
2518 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2519 unsigned long last_jiffies;
2520 u32 last, cur;
2521 int rc;
2522
2523 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2524 return rc;
2525 cur &= 0xf;
2526
2527 last = cur;
2528 last_jiffies = jiffies;
2529
2530 while (1) {
2531 msleep(interval_msec);
2532 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2533 return rc;
2534 cur &= 0xf;
2535
2536 /* DET stable? */
2537 if (cur == last) {
2538 if (cur == 1 && time_before(jiffies, timeout))
2539 continue;
2540 if (time_after(jiffies, last_jiffies + duration))
2541 return 0;
2542 continue;
2543 }
2544
2545 /* unstable, start over */
2546 last = cur;
2547 last_jiffies = jiffies;
2548
2549 /* check timeout */
2550 if (time_after(jiffies, timeout))
2551 return -EBUSY;
2552 }
2553}
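
/* Illustrative parameter set, following the { interval, duration,
 * timeout } convention documented above (values assumed):
 *
 *	static const unsigned long deb_timing[] = { 25, 500, 2000 };
 *
 * i.e. poll SStatus every 25 ms, require the value to hold for
 * 500 ms, and give up 2000 ms after entry.
 */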
2554
2555/**
2556 * sata_phy_resume - resume SATA phy
2557 * @ap: ATA port to resume SATA phy for
2558 * @params: timing parameters { interval, duration, timeout } in msec
2559 *
2560 * Resume SATA phy of @ap and debounce it.
2561 *
2562 * LOCKING:
2563 * Kernel thread context (may sleep)
2564 *
2565 * RETURNS:
2566 * 0 on success, -errno on failure.
2567 */
2568int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2569{
2570 u32 scontrol;
2571 int rc;
2572
2573 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2574 return rc;
2575
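	/* Keep the current SPD limit (bits 7:4); DET = 0 requests no
	 * reset action and IPM = 3 disables partial/slumber power
	 * states (SControl layout: DET[3:0], SPD[7:4], IPM[11:8]).
	 */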
2576 scontrol = (scontrol & 0x0f0) | 0x300;
2577
2578 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2579 return rc;
2580
2581 /* Some PHYs react badly if SStatus is pounded immediately
2582 * after resuming. Delay 200ms before debouncing.
2583 */
2584 msleep(200);
2585
2586 return sata_phy_debounce(ap, params);
2587}
2588
2589static void ata_wait_spinup(struct ata_port *ap)
2590{
2591 struct ata_eh_context *ehc = &ap->eh_context;
2592 unsigned long end, secs;
2593 int rc;
2594
2595 /* first, debounce phy if SATA */
2596 if (ap->cbl == ATA_CBL_SATA) {
2597 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2598
2599 /* if debounced successfully and offline, no need to wait */
2600 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2601 return;
2602 }
2603
2604 /* okay, let's give the drive time to spin up */
2605 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2606 secs = ((end - jiffies) + HZ - 1) / HZ;
2607
2608 if (time_after(jiffies, end))
2609 return;
2610
2611 if (secs > 5)
2612 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2613 "(%lu secs)\n", secs);
2614
2615 schedule_timeout_uninterruptible(end - jiffies);
2616}
2617
2618/**
2619 * ata_std_prereset - prepare for reset
2620 * @ap: ATA port to be reset
2621 *
2622 * @ap is about to be reset. Initialize it.
2623 *
2624 * LOCKING:
2625 * Kernel thread context (may sleep)
2626 *
2627 * RETURNS:
2628 * 0 on success, -errno otherwise.
2629 */
2630int ata_std_prereset(struct ata_port *ap)
2631{
2632 struct ata_eh_context *ehc = &ap->eh_context;
2633 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2634 int rc;
2635
2636 /* handle link resume & hotplug spinup */
2637 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2638 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2639 ehc->i.action |= ATA_EH_HARDRESET;
2640
2641 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2642 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2643 ata_wait_spinup(ap);
2644
2645 /* if we're about to do hardreset, nothing more to do */
2646 if (ehc->i.action & ATA_EH_HARDRESET)
2647 return 0;
2648
2649 /* if SATA, resume phy */
2650 if (ap->cbl == ATA_CBL_SATA) {
2651 rc = sata_phy_resume(ap, timing);
2652 if (rc && rc != -EOPNOTSUPP) {
2653 /* phy resume failed */
2654 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2655 "link for reset (errno=%d)\n", rc);
2656 return rc;
2657 }
2658 }
2659
2660 /* Wait for !BSY if the controller can wait for the first D2H
2661 * Reg FIS and we don't know that no device is attached.
2662 */
2663 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2664 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2665
2666 return 0;
2667}
2668
2669/**
2670 * ata_std_softreset - reset host port via ATA SRST
2671 * @ap: port to reset
2672 * @classes: resulting classes of attached devices
2673 *
2674 * Reset host port using ATA SRST.
2675 *
2676 * LOCKING:
2677 * Kernel thread context (may sleep)
2678 *
2679 * RETURNS:
2680 * 0 on success, -errno otherwise.
2681 */
2682int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2683{
2684 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2685 unsigned int devmask = 0, err_mask;
2686 u8 err;
2687
2688 DPRINTK("ENTER\n");
2689
2690 if (ata_port_offline(ap)) {
2691 classes[0] = ATA_DEV_NONE;
2692 goto out;
2693 }
2694
2695 /* determine if device 0/1 are present */
2696 if (ata_devchk(ap, 0))
2697 devmask |= (1 << 0);
2698 if (slave_possible && ata_devchk(ap, 1))
2699 devmask |= (1 << 1);
2700
2701 /* select device 0 again */
2702 ap->ops->dev_select(ap, 0);
2703
2704 /* issue bus reset */
2705 DPRINTK("about to softreset, devmask=%x\n", devmask);
2706 err_mask = ata_bus_softreset(ap, devmask);
2707 if (err_mask) {
2708 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2709 err_mask);
2710 return -EIO;
2711 }
2712
2713 /* determine by signature whether we have ATA or ATAPI devices */
2714 classes[0] = ata_dev_try_classify(ap, 0, &err);
2715 if (slave_possible && err != 0x81)
2716 classes[1] = ata_dev_try_classify(ap, 1, &err);
2717
2718 out:
2719 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2720 return 0;
2721}
2722
2723/**
2724 * sata_std_hardreset - reset host port via SATA phy reset
2725 * @ap: port to reset
2726 * @class: resulting class of attached device
2727 *
2728 * SATA phy-reset host port using DET bits of SControl register.
2729 *
2730 * LOCKING:
2731 * Kernel thread context (may sleep)
2732 *
2733 * RETURNS:
2734 * 0 on success, -errno otherwise.
2735 */
2736int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2737{
2738 struct ata_eh_context *ehc = &ap->eh_context;
2739 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2740 u32 scontrol;
2741 int rc;
2742
2743 DPRINTK("ENTER\n");
2744
2745 if (sata_set_spd_needed(ap)) {
2746 /* SATA spec says nothing about how to reconfigure
2747 * spd. To be on the safe side, turn off phy during
2748 * reconfiguration. This works for at least ICH7 AHCI
2749 * and Sil3124.
2750 */
2751 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2752 return rc;
2753
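		/* DET = 4 takes the phy offline while the speed limit
		 * is being rewritten; IPM = 3 keeps power management
		 * transitions disabled.
		 */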
2754 scontrol = (scontrol & 0x0f0) | 0x304;
2755
2756 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2757 return rc;
2758
2759 sata_set_spd(ap);
2760 }
2761
2762 /* issue phy wake/reset */
2763 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2764 return rc;
2765
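	/* DET = 1 requests interface initialization, i.e. transmit
	 * COMRESET to the device; IPM = 3 again disables power saving.
	 */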
2766 scontrol = (scontrol & 0x0f0) | 0x301;
2767
2768 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2769 return rc;
2770
2771 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2772 * 10.4.2 says at least 1 ms.
2773 */
2774 msleep(1);
2775
2776 /* bring phy back */
2777 sata_phy_resume(ap, timing);
2778
2779 /* TODO: phy layer with polling, timeouts, etc. */
2780 if (ata_port_offline(ap)) {
2781 *class = ATA_DEV_NONE;
2782 DPRINTK("EXIT, link offline\n");
2783 return 0;
2784 }
2785
2786 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2787 ata_port_printk(ap, KERN_ERR,
2788 "COMRESET failed (device not ready)\n");
2789 return -EIO;
2790 }
2791
2792 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2793
2794 *class = ata_dev_try_classify(ap, 0, NULL);
2795
2796 DPRINTK("EXIT, class=%u\n", *class);
2797 return 0;
2798}
2799
2800/**
2801 * ata_std_postreset - standard postreset callback
2802 * @ap: the target ata_port
2803 * @classes: classes of attached devices
2804 *
2805 * This function is invoked after a successful reset. Note that
2806 * the device might have been reset more than once using
2807 * different reset methods before postreset is invoked.
2808 *
2809 * LOCKING:
2810 * Kernel thread context (may sleep)
2811 */
2812void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2813{
2814 u32 serror;
2815
2816 DPRINTK("ENTER\n");
2817
2818 /* print link status */
2819 sata_print_link_status(ap);
2820
2821 /* clear SError */
2822 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2823 sata_scr_write(ap, SCR_ERROR, serror);
2824
2825 /* re-enable interrupts */
2826 if (!ap->ops->error_handler) {
2827 /* FIXME: hack. create a hook instead */
2828 if (ap->ioaddr.ctl_addr)
2829 ata_irq_on(ap);
2830 }
2831
2832 /* is double-select really necessary? */
2833 if (classes[0] != ATA_DEV_NONE)
2834 ap->ops->dev_select(ap, 1);
2835 if (classes[1] != ATA_DEV_NONE)
2836 ap->ops->dev_select(ap, 0);
2837
2838 /* bail out if no device is present */
2839 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2840 DPRINTK("EXIT, no device\n");
2841 return;
2842 }
2843
2844 /* set up device control */
2845 if (ap->ioaddr.ctl_addr) {
2846 if (ap->flags & ATA_FLAG_MMIO)
2847 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2848 else
2849 outb(ap->ctl, ap->ioaddr.ctl_addr);
2850 }
2851
2852 DPRINTK("EXIT\n");
2853}
2854
2855/**
2856 * ata_dev_same_device - Determine whether new ID matches configured device
2857 * @dev: device to compare against
2858 * @new_class: class of the new device
2859 * @new_id: IDENTIFY page of the new device
2860 *
2861 * Compare @new_class and @new_id against @dev and determine
2862 * whether @dev is the device indicated by @new_class and
2863 * @new_id.
2864 *
2865 * LOCKING:
2866 * None.
2867 *
2868 * RETURNS:
2869 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2870 */
2871static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2872 const u16 *new_id)
2873{
2874 const u16 *old_id = dev->id;
2875 unsigned char model[2][41], serial[2][21];
2876 u64 new_n_sectors;
2877
2878 if (dev->class != new_class) {
2879 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2880 dev->class, new_class);
2881 return 0;
2882 }
2883
2884 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2885 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2886 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2887 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2888 new_n_sectors = ata_id_n_sectors(new_id);
2889
2890 if (strcmp(model[0], model[1])) {
2891 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2892 "'%s' != '%s'\n", model[0], model[1]);
2893 return 0;
2894 }
2895
2896 if (strcmp(serial[0], serial[1])) {
2897 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2898 "'%s' != '%s'\n", serial[0], serial[1]);
2899 return 0;
2900 }
2901
2902 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2903 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2904 "%llu != %llu\n",
2905 (unsigned long long)dev->n_sectors,
2906 (unsigned long long)new_n_sectors);
2907 return 0;
2908 }
2909
2910 return 1;
2911}
2912
2913/**
2914 * ata_dev_revalidate - Revalidate ATA device
2915 * @dev: device to revalidate
2916 * @post_reset: is this revalidation after reset?
2917 *
2918 * Re-read IDENTIFY page and make sure @dev is still attached to
2919 * the port.
2920 *
2921 * LOCKING:
2922 * Kernel thread context (may sleep)
2923 *
2924 * RETURNS:
2925 * 0 on success, negative errno otherwise
2926 */
2927int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2928{
2929 unsigned int class = dev->class;
2930 u16 *id = (void *)dev->ap->sector_buf;
2931 int rc;
2932
2933 if (!ata_dev_enabled(dev)) {
2934 rc = -ENODEV;
2935 goto fail;
2936 }
2937
2938 /* read ID data */
2939 rc = ata_dev_read_id(dev, &class, post_reset, id);
2940 if (rc)
2941 goto fail;
2942
2943 /* is the device still there? */
2944 if (!ata_dev_same_device(dev, class, id)) {
2945 rc = -ENODEV;
2946 goto fail;
2947 }
2948
2949 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2950
2951 /* configure device according to the new ID */
2952 rc = ata_dev_configure(dev, 0);
2953 if (rc == 0)
2954 return 0;
2955
2956 fail:
2957 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2958 return rc;
2959}
2960
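/* Entries are (model, firmware revision) string pairs; a NULL
 * revision blacklists every revision of that model.
 */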
2961static const char * const ata_dma_blacklist [] = {
2962 "WDC AC11000H", NULL,
2963 "WDC AC22100H", NULL,
2964 "WDC AC32500H", NULL,
2965 "WDC AC33100H", NULL,
2966 "WDC AC31600H", NULL,
2967 "WDC AC32100H", "24.09P07",
2968 "WDC AC23200L", "21.10N21",
2969 "Compaq CRD-8241B", NULL,
2970 "CRD-8400B", NULL,
2971 "CRD-8480B", NULL,
2972 "CRD-8482B", NULL,
2973 "CRD-84", NULL,
2974 "SanDisk SDP3B", NULL,
2975 "SanDisk SDP3B-64", NULL,
2976 "SANYO CD-ROM CRD", NULL,
2977 "HITACHI CDR-8", NULL,
2978 "HITACHI CDR-8335", NULL,
2979 "HITACHI CDR-8435", NULL,
2980 "Toshiba CD-ROM XM-6202B", NULL,
2981 "TOSHIBA CD-ROM XM-1702BC", NULL,
2982 "CD-532E-A", NULL,
2983 "E-IDE CD-ROM CR-840", NULL,
2984 "CD-ROM Drive/F5A", NULL,
2985 "WPI CDD-820", NULL,
2986 "SAMSUNG CD-ROM SC-148C", NULL,
2987 "SAMSUNG CD-ROM SC", NULL,
2988 "SanDisk SDP3B-64", NULL,
2989	"ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL,
2990 "_NEC DV5800A", NULL,
2991 "SAMSUNG CD-ROM SN-124", "N001"
2992};
2993
2994static int ata_strim(char *s, size_t len)
2995{
2996 len = strnlen(s, len);
2997
2998 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2999 while ((len > 0) && (s[len - 1] == ' ')) {
3000 len--;
3001 s[len] = 0;
3002 }
3003 return len;
3004}
3005
3006static int ata_dma_blacklisted(const struct ata_device *dev)
3007{
3008 unsigned char model_num[40];
3009 unsigned char model_rev[16];
3010 unsigned int nlen, rlen;
3011 int i;
3012
3013 /* We don't support polling DMA.
3014 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3015 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3016 */
3017 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3018 (dev->flags & ATA_DFLAG_CDB_INTR))
3019 return 1;
3020
3021 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3022 sizeof(model_num));
3023 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3024 sizeof(model_rev));
3025 nlen = ata_strim(model_num, sizeof(model_num));
3026 rlen = ata_strim(model_rev, sizeof(model_rev));
3027
3028 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3029 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3030 if (ata_dma_blacklist[i+1] == NULL)
3031 return 1;
3032			if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
3033 return 1;
3034 }
3035 }
3036 return 0;
3037}
3038
3039/**
3040 * ata_dev_xfermask - Compute supported xfermask of the given device
3041 * @dev: Device to compute xfermask for
3042 *
3043 * Compute supported xfermask of @dev and store it in
3044 * dev->*_mask. This function is responsible for applying all
3045 * known limits including host controller limits, device
3046 * blacklist, etc...
3047 *
3048 * FIXME: The current implementation limits all transfer modes to
3049 * the fastest mode of the slowest device on the port. This is not
3050 * required on most controllers.
3051 *
3052 * LOCKING:
3053 * None.
3054 */
3055static void ata_dev_xfermask(struct ata_device *dev)
3056{
3057 struct ata_port *ap = dev->ap;
3058 struct ata_host_set *hs = ap->host_set;
3059 unsigned long xfer_mask;
3060 int i;
3061
3062 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3063 ap->mwdma_mask, ap->udma_mask);
3064
3065 /* Apply cable rule here. Don't apply it early because when
3066 * we handle hot plug the cable type can itself change.
3067 */
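	/* 0xF8 << ATA_SHIFT_UDMA clears UDMA3 and above, which require
	 * an 80-wire cable.
	 */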
3068 if (ap->cbl == ATA_CBL_PATA40)
3069 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3070
3071 /* FIXME: Use port-wide xfermask for now */
3072 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3073 struct ata_device *d = &ap->device[i];
3074
3075 if (ata_dev_absent(d))
3076 continue;
3077
3078 if (ata_dev_disabled(d)) {
3079 /* to avoid violating device selection timing */
3080 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3081 UINT_MAX, UINT_MAX);
3082 continue;
3083 }
3084
3085 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3086 d->mwdma_mask, d->udma_mask);
3087 xfer_mask &= ata_id_xfermask(d->id);
3088 if (ata_dma_blacklisted(d))
3089 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3090 }
3091
3092 if (ata_dma_blacklisted(dev))
3093 ata_dev_printk(dev, KERN_WARNING,
3094 "device is on DMA blacklist, disabling DMA\n");
3095
3096 if (hs->flags & ATA_HOST_SIMPLEX) {
3097 if (hs->simplex_claimed)
3098 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3099 }
3100
3101 if (ap->ops->mode_filter)
3102 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3103
3104 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3105 &dev->mwdma_mask, &dev->udma_mask);
3106}
3107
3108/**
3109 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3110 * @dev: Device to which command will be sent
3111 *
3112 * Issue SET FEATURES - XFER MODE command to device @dev
3113 * on its port.
3114 *
3115 * LOCKING:
3116 * PCI/etc. bus probe sem.
3117 *
3118 * RETURNS:
3119 * 0 on success, AC_ERR_* mask otherwise.
3120 */
3121
3122static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3123{
3124 struct ata_taskfile tf;
3125 unsigned int err_mask;
3126
3127 /* set up set-features taskfile */
3128 DPRINTK("set features - xfer mode\n");
3129
3130 ata_tf_init(dev, &tf);
3131 tf.command = ATA_CMD_SET_FEATURES;
3132 tf.feature = SETFEATURES_XFER;
3133 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3134 tf.protocol = ATA_PROT_NODATA;
3135 tf.nsect = dev->xfer_mode;
3136
3137 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3138
3139 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3140 return err_mask;
3141}
3142
3143/**
3144 * ata_dev_init_params - Issue INIT DEV PARAMS command
3145 * @dev: Device to which command will be sent
3146 * @heads: Number of heads (taskfile parameter)
3147 * @sectors: Number of sectors (taskfile parameter)
3148 *
3149 * LOCKING:
3150 * Kernel thread context (may sleep)
3151 *
3152 * RETURNS:
3153 * 0 on success, AC_ERR_* mask otherwise.
3154 */
3155static unsigned int ata_dev_init_params(struct ata_device *dev,
3156 u16 heads, u16 sectors)
3157{
3158 struct ata_taskfile tf;
3159 unsigned int err_mask;
3160
3161 /* Number of sectors per track 1-255. Number of heads 1-16 */
3162 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3163 return AC_ERR_INVALID;
3164
3165 /* set up init dev params taskfile */
3166	DPRINTK("init dev params\n");
3167
3168 ata_tf_init(dev, &tf);
3169 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3170 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3171 tf.protocol = ATA_PROT_NODATA;
3172 tf.nsect = sectors;
3173 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3174
3175 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3176
3177 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3178 return err_mask;
3179}
3180
3181/**
3182 * ata_sg_clean - Unmap DMA memory associated with command
3183 * @qc: Command containing DMA memory to be released
3184 *
3185 * Unmap all mapped DMA memory associated with this command.
3186 *
3187 * LOCKING:
3188 * spin_lock_irqsave(host_set lock)
3189 */
3190
3191static void ata_sg_clean(struct ata_queued_cmd *qc)
3192{
3193 struct ata_port *ap = qc->ap;
3194 struct scatterlist *sg = qc->__sg;
3195 int dir = qc->dma_dir;
3196 void *pad_buf = NULL;
3197
3198 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3199 WARN_ON(sg == NULL);
3200
3201 if (qc->flags & ATA_QCFLAG_SINGLE)
3202 WARN_ON(qc->n_elem > 1);
3203
3204 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3205
3206 /* if we padded the buffer out to 32-bit bound, and data
3207 * xfer direction is from-device, we must copy from the
3208 * pad buffer back into the supplied buffer
3209 */
3210 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3211 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3212
3213 if (qc->flags & ATA_QCFLAG_SG) {
3214 if (qc->n_elem)
3215 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3216 /* restore last sg */
3217 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3218 if (pad_buf) {
3219 struct scatterlist *psg = &qc->pad_sgent;
3220 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3221 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3222 kunmap_atomic(addr, KM_IRQ0);
3223 }
3224 } else {
3225 if (qc->n_elem)
3226 dma_unmap_single(ap->dev,
3227 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3228 dir);
3229 /* restore sg */
3230 sg->length += qc->pad_len;
3231 if (pad_buf)
3232 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3233 pad_buf, qc->pad_len);
3234 }
3235
3236 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3237 qc->__sg = NULL;
3238}
3239
3240/**
3241 * ata_fill_sg - Fill PCI IDE PRD table
3242 * @qc: Metadata associated with taskfile to be transferred
3243 *
3244 * Fill PCI IDE PRD (scatter-gather) table with segments
3245 * associated with the current disk command.
3246 *
3247 * LOCKING:
3248 * spin_lock_irqsave(host_set lock)
3249 *
3250 */
3251static void ata_fill_sg(struct ata_queued_cmd *qc)
3252{
3253 struct ata_port *ap = qc->ap;
3254 struct scatterlist *sg;
3255 unsigned int idx;
3256
3257 WARN_ON(qc->__sg == NULL);
3258 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3259
3260 idx = 0;
3261 ata_for_each_sg(sg, qc) {
3262 u32 addr, offset;
3263 u32 sg_len, len;
3264
3265 /* determine if physical DMA addr spans 64K boundary.
3266 * Note h/w doesn't support 64-bit, so we unconditionally
3267 * truncate dma_addr_t to u32.
3268 */
3269 addr = (u32) sg_dma_address(sg);
3270 sg_len = sg_dma_len(sg);
3271
3272 while (sg_len) {
3273 offset = addr & 0xffff;
3274 len = sg_len;
3275 if ((offset + sg_len) > 0x10000)
3276 len = 0x10000 - offset;
3277
3278 ap->prd[idx].addr = cpu_to_le32(addr);
3279 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3280 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3281
3282 idx++;
3283 sg_len -= len;
3284 addr += len;
3285 }
3286 }
3287
3288 if (idx)
3289 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3290}
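
/* Worked example: an sg element at bus address 0xFFF0 with length
 * 0x40 crosses a 64K boundary, so it is emitted as two PRD entries,
 * (0xFFF0, 0x10) and (0x10000, 0x30). Whichever entry comes last gets
 * ATA_PRD_EOT set.
 */
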
3291/**
3292 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3293 * @qc: Metadata associated with taskfile to check
3294 *
3295 * Allow low-level driver to filter ATA PACKET commands, returning
3296 * a status indicating whether or not it is OK to use DMA for the
3297 * supplied PACKET command.
3298 *
3299 * LOCKING:
3300 * spin_lock_irqsave(host_set lock)
3301 *
3302 * RETURNS: 0 when ATAPI DMA can be used
3303 * nonzero otherwise
3304 */
3305int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3306{
3307 struct ata_port *ap = qc->ap;
3308 int rc = 0; /* Assume ATAPI DMA is OK by default */
3309
3310 if (ap->ops->check_atapi_dma)
3311 rc = ap->ops->check_atapi_dma(qc);
3312
3313 return rc;
3314}
3315/**
3316 * ata_qc_prep - Prepare taskfile for submission
3317 * @qc: Metadata associated with taskfile to be prepared
3318 *
3319 * Prepare ATA taskfile for submission.
3320 *
3321 * LOCKING:
3322 * spin_lock_irqsave(host_set lock)
3323 */
3324void ata_qc_prep(struct ata_queued_cmd *qc)
3325{
3326 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3327 return;
3328
3329 ata_fill_sg(qc);
3330}
3331
3332void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3333
3334/**
3335 * ata_sg_init_one - Associate command with memory buffer
3336 * @qc: Command to be associated
3337 * @buf: Memory buffer
3338 * @buflen: Length of memory buffer, in bytes.
3339 *
3340 * Initialize the data-related elements of queued_cmd @qc
3341 * to point to a single memory buffer, @buf of byte length @buflen.
3342 *
3343 * LOCKING:
3344 * spin_lock_irqsave(host_set lock)
3345 */
3346
3347void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3348{
3349 struct scatterlist *sg;
3350
3351 qc->flags |= ATA_QCFLAG_SINGLE;
3352
3353 memset(&qc->sgent, 0, sizeof(qc->sgent));
3354 qc->__sg = &qc->sgent;
3355 qc->n_elem = 1;
3356 qc->orig_n_elem = 1;
3357 qc->buf_virt = buf;
3358 qc->nbytes = buflen;
3359
3360 sg = qc->__sg;
3361 sg_init_one(sg, buf, buflen);
3362}
3363
3364/**
3365 * ata_sg_init - Associate command with scatter-gather table.
3366 * @qc: Command to be associated
3367 * @sg: Scatter-gather table.
3368 * @n_elem: Number of elements in s/g table.
3369 *
3370 * Initialize the data-related elements of queued_cmd @qc
3371 * to point to a scatter-gather table @sg, containing @n_elem
3372 * elements.
3373 *
3374 * LOCKING:
3375 * spin_lock_irqsave(host_set lock)
3376 */
3377
3378void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3379 unsigned int n_elem)
3380{
3381 qc->flags |= ATA_QCFLAG_SG;
3382 qc->__sg = sg;
3383 qc->n_elem = n_elem;
3384 qc->orig_n_elem = n_elem;
3385}
3386
3387/**
3388 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3389 * @qc: Command with memory buffer to be mapped.
3390 *
3391 * DMA-map the memory buffer associated with queued_cmd @qc.
3392 *
3393 * LOCKING:
3394 * spin_lock_irqsave(host_set lock)
3395 *
3396 * RETURNS:
3397 * Zero on success, negative on error.
3398 */
3399
3400static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3401{
3402 struct ata_port *ap = qc->ap;
3403 int dir = qc->dma_dir;
3404 struct scatterlist *sg = qc->__sg;
3405 dma_addr_t dma_address;
3406 int trim_sg = 0;
3407
3408 /* we must lengthen transfers to end on a 32-bit boundary */
3409 qc->pad_len = sg->length & 3;
3410 if (qc->pad_len) {
3411 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3412 struct scatterlist *psg = &qc->pad_sgent;
3413
3414 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3415
3416 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3417
3418 if (qc->tf.flags & ATA_TFLAG_WRITE)
3419 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3420 qc->pad_len);
3421
3422 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3423 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3424 /* trim sg */
3425 sg->length -= qc->pad_len;
3426 if (sg->length == 0)
3427 trim_sg = 1;
3428
3429 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3430 sg->length, qc->pad_len);
3431 }
3432
3433 if (trim_sg) {
3434 qc->n_elem--;
3435 goto skip_map;
3436 }
3437
3438 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3439 sg->length, dir);
3440 if (dma_mapping_error(dma_address)) {
3441 /* restore sg */
3442 sg->length += qc->pad_len;
3443 return -1;
3444 }
3445
3446 sg_dma_address(sg) = dma_address;
3447 sg_dma_len(sg) = sg->length;
3448
3449skip_map:
3450 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3451 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3452
3453 return 0;
3454}
3455
3456/**
3457 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3458 * @qc: Command with scatter-gather table to be mapped.
3459 *
3460 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3461 *
3462 * LOCKING:
3463 * spin_lock_irqsave(host_set lock)
3464 *
3465 * RETURNS:
3466 * Zero on success, negative on error.
3467 *
3468 */
3469
3470static int ata_sg_setup(struct ata_queued_cmd *qc)
3471{
3472 struct ata_port *ap = qc->ap;
3473 struct scatterlist *sg = qc->__sg;
3474 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3475 int n_elem, pre_n_elem, dir, trim_sg = 0;
3476
3477 VPRINTK("ENTER, ata%u\n", ap->id);
3478 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3479
3480 /* we must lengthen transfers to end on a 32-bit boundary */
3481 qc->pad_len = lsg->length & 3;
3482 if (qc->pad_len) {
3483 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3484 struct scatterlist *psg = &qc->pad_sgent;
3485 unsigned int offset;
3486
3487 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3488
3489 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3490
3491 /*
3492 * psg->page/offset are used to copy to-be-written
3493 * data in this function or read data in ata_sg_clean.
3494 */
3495 offset = lsg->offset + lsg->length - qc->pad_len;
3496 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3497 psg->offset = offset_in_page(offset);
3498
3499 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3500 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3501 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3502 kunmap_atomic(addr, KM_IRQ0);
3503 }
3504
3505 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3506 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3507 /* trim last sg */
3508 lsg->length -= qc->pad_len;
3509 if (lsg->length == 0)
3510 trim_sg = 1;
3511
3512 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3513 qc->n_elem - 1, lsg->length, qc->pad_len);
3514 }
3515
3516 pre_n_elem = qc->n_elem;
3517 if (trim_sg && pre_n_elem)
3518 pre_n_elem--;
3519
3520 if (!pre_n_elem) {
3521 n_elem = 0;
3522 goto skip_map;
3523 }
3524
3525 dir = qc->dma_dir;
3526 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3527 if (n_elem < 1) {
3528 /* restore last sg */
3529 lsg->length += qc->pad_len;
3530 return -1;
3531 }
3532
3533 DPRINTK("%d sg elements mapped\n", n_elem);
3534
3535skip_map:
3536 qc->n_elem = n_elem;
3537
3538 return 0;
3539}
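
/* Worked example: an ATAPI read whose last sg element is 510 bytes
 * long gets pad_len = 510 & 3 = 2. The element is trimmed to 508
 * bytes and the 4-byte pad_sgent is appended, so the controller sees
 * a transfer that ends on a 32-bit boundary; for reads, ata_sg_clean()
 * later copies the two valid pad bytes back into the caller's buffer.
 */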
3540
3541/**
3542 * swap_buf_le16 - swap halves of 16-bit words in place
3543 * @buf: Buffer to swap
3544 * @buf_words: Number of 16-bit words in buffer.
3545 *
3546 * Swap halves of 16-bit words if needed to convert from
3547 * little-endian byte order to native cpu byte order, or
3548 * vice-versa.
3549 *
3550 * LOCKING:
3551 * Inherited from caller.
3552 */
3553void swap_buf_le16(u16 *buf, unsigned int buf_words)
3554{
3555#ifdef __BIG_ENDIAN
3556 unsigned int i;
3557
3558 for (i = 0; i < buf_words; i++)
3559 buf[i] = le16_to_cpu(buf[i]);
3560#endif /* __BIG_ENDIAN */
3561}
3562
3563/**
3564 * ata_mmio_data_xfer - Transfer data by MMIO
3565 * @adev: device for this I/O
3566 * @buf: data buffer
3567 * @buflen: buffer length
3568 * @write_data: read/write
3569 *
3570 * Transfer data from/to the device data register by MMIO.
3571 *
3572 * LOCKING:
3573 * Inherited from caller.
3574 */
3575
3576void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3577 unsigned int buflen, int write_data)
3578{
3579 struct ata_port *ap = adev->ap;
3580 unsigned int i;
3581 unsigned int words = buflen >> 1;
3582 u16 *buf16 = (u16 *) buf;
3583 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3584
3585 /* Transfer multiple of 2 bytes */
3586 if (write_data) {
3587 for (i = 0; i < words; i++)
3588 writew(le16_to_cpu(buf16[i]), mmio);
3589 } else {
3590 for (i = 0; i < words; i++)
3591 buf16[i] = cpu_to_le16(readw(mmio));
3592 }
3593
3594 /* Transfer trailing 1 byte, if any. */
3595 if (unlikely(buflen & 0x01)) {
3596 u16 align_buf[1] = { 0 };
3597 unsigned char *trailing_buf = buf + buflen - 1;
3598
3599 if (write_data) {
3600 memcpy(align_buf, trailing_buf, 1);
3601 writew(le16_to_cpu(align_buf[0]), mmio);
3602 } else {
3603 align_buf[0] = cpu_to_le16(readw(mmio));
3604 memcpy(trailing_buf, align_buf, 1);
3605 }
3606 }
3607}
3608
3609/**
3610 * ata_pio_data_xfer - Transfer data by PIO
3611 * @adev: device to target
3612 * @buf: data buffer
3613 * @buflen: buffer length
3614 * @write_data: read/write
3615 *
3616 * Transfer data from/to the device data register by PIO.
3617 *
3618 * LOCKING:
3619 * Inherited from caller.
3620 */
3621
3622void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3623 unsigned int buflen, int write_data)
3624{
3625 struct ata_port *ap = adev->ap;
3626 unsigned int words = buflen >> 1;
3627
3628 /* Transfer multiple of 2 bytes */
3629 if (write_data)
3630 outsw(ap->ioaddr.data_addr, buf, words);
3631 else
3632 insw(ap->ioaddr.data_addr, buf, words);
3633
3634 /* Transfer trailing 1 byte, if any. */
3635 if (unlikely(buflen & 0x01)) {
3636 u16 align_buf[1] = { 0 };
3637 unsigned char *trailing_buf = buf + buflen - 1;
3638
3639 if (write_data) {
3640 memcpy(align_buf, trailing_buf, 1);
3641 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3642 } else {
3643 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3644 memcpy(trailing_buf, align_buf, 1);
3645 }
3646 }
3647}
3648
3649/**
3650 * ata_pio_data_xfer_noirq - Transfer data by PIO
3651 * @adev: device to target
3652 * @buf: data buffer
3653 * @buflen: buffer length
3654 * @write_data: read/write
3655 *
3656 * Transfer data from/to the device data register by PIO. Do the
3657 * transfer with interrupts disabled.
3658 *
3659 * LOCKING:
3660 * Inherited from caller.
3661 */
3662
3663void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3664 unsigned int buflen, int write_data)
3665{
3666 unsigned long flags;
3667 local_irq_save(flags);
3668 ata_pio_data_xfer(adev, buf, buflen, write_data);
3669 local_irq_restore(flags);
3670}
3671
3672
3673/**
3674 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3675 * @qc: Command on going
3676 *
3677 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3678 *
3679 * LOCKING:
3680 * Inherited from caller.
3681 */
3682
3683static void ata_pio_sector(struct ata_queued_cmd *qc)
3684{
3685 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3686 struct scatterlist *sg = qc->__sg;
3687 struct ata_port *ap = qc->ap;
3688 struct page *page;
3689 unsigned int offset;
3690 unsigned char *buf;
3691
3692 if (qc->cursect == (qc->nsect - 1))
3693 ap->hsm_task_state = HSM_ST_LAST;
3694
3695 page = sg[qc->cursg].page;
3696 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3697
3698 /* get the current page and offset */
3699 page = nth_page(page, (offset >> PAGE_SHIFT));
3700 offset %= PAGE_SIZE;
3701
3702 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3703
3704 if (PageHighMem(page)) {
3705 unsigned long flags;
3706
3707 /* FIXME: use a bounce buffer */
3708 local_irq_save(flags);
3709 buf = kmap_atomic(page, KM_IRQ0);
3710
3711 /* do the actual data transfer */
3712 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3713
3714 kunmap_atomic(buf, KM_IRQ0);
3715 local_irq_restore(flags);
3716 } else {
3717 buf = page_address(page);
3718 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3719 }
3720
3721 qc->cursect++;
3722 qc->cursg_ofs++;
3723
3724 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3725 qc->cursg++;
3726 qc->cursg_ofs = 0;
3727 }
3728}
3729
3730/**
3731 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3732 * @qc: Command on going
3733 *
3734 * Transfer one or many ATA_SECT_SIZE of data from/to the
3735 * ATA device for the DRQ request.
3736 *
3737 * LOCKING:
3738 * Inherited from caller.
3739 */
3740
3741static void ata_pio_sectors(struct ata_queued_cmd *qc)
3742{
3743 if (is_multi_taskfile(&qc->tf)) {
3744 /* READ/WRITE MULTIPLE */
3745 unsigned int nsect;
3746
3747 WARN_ON(qc->dev->multi_count == 0);
3748
3749 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3750 while (nsect--)
3751 ata_pio_sector(qc);
3752 } else
3753 ata_pio_sector(qc);
3754}
3755
3756/**
3757 * atapi_send_cdb - Write CDB bytes to hardware
3758 * @ap: Port to which ATAPI device is attached.
3759 * @qc: Taskfile currently active
3760 *
3761 * When device has indicated its readiness to accept
3762 * a CDB, this function is called. Send the CDB.
3763 *
3764 * LOCKING:
3765 * caller.
3766 */
3767
3768static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3769{
3770 /* send SCSI cdb */
3771 DPRINTK("send cdb\n");
3772 WARN_ON(qc->dev->cdb_len < 12);
3773
3774 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3775 ata_altstatus(ap); /* flush */
3776
3777 switch (qc->tf.protocol) {
3778 case ATA_PROT_ATAPI:
3779 ap->hsm_task_state = HSM_ST;
3780 break;
3781 case ATA_PROT_ATAPI_NODATA:
3782 ap->hsm_task_state = HSM_ST_LAST;
3783 break;
3784 case ATA_PROT_ATAPI_DMA:
3785 ap->hsm_task_state = HSM_ST_LAST;
3786 /* initiate bmdma */
3787 ap->ops->bmdma_start(qc);
3788 break;
3789 }
3790}
3791
3792/**
3793 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3794 * @qc: Command on going
3795 * @bytes: number of bytes
3796 *
3797 * Transfer data from/to the ATAPI device.
3798 *
3799 * LOCKING:
3800 * Inherited from caller.
3801 *
3802 */
3803
3804static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3805{
3806 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3807 struct scatterlist *sg = qc->__sg;
3808 struct ata_port *ap = qc->ap;
3809 struct page *page;
3810 unsigned char *buf;
3811 unsigned int offset, count;
3812
3813 if (qc->curbytes + bytes >= qc->nbytes)
3814 ap->hsm_task_state = HSM_ST_LAST;
3815
3816next_sg:
3817 if (unlikely(qc->cursg >= qc->n_elem)) {
3818 /*
3819 * The end of qc->sg is reached and the device expects
3820 * more data to transfer. In order not to overrun qc->sg
3821		 * and still fulfill the length specified in the byte count register,
3822		 * - for the read case, discard trailing data from the device
3823		 * - for the write case, pad zero data out to the device
3824 */
3825 u16 pad_buf[1] = { 0 };
3826 unsigned int words = bytes >> 1;
3827 unsigned int i;
3828
3829 if (words) /* warning if bytes > 1 */
3830 ata_dev_printk(qc->dev, KERN_WARNING,
3831 "%u bytes trailing data\n", bytes);
3832
3833 for (i = 0; i < words; i++)
3834 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3835
3836 ap->hsm_task_state = HSM_ST_LAST;
3837 return;
3838 }
3839
3840 sg = &qc->__sg[qc->cursg];
3841
3842 page = sg->page;
3843 offset = sg->offset + qc->cursg_ofs;
3844
3845 /* get the current page and offset */
3846 page = nth_page(page, (offset >> PAGE_SHIFT));
3847 offset %= PAGE_SIZE;
3848
3849 /* don't overrun current sg */
3850 count = min(sg->length - qc->cursg_ofs, bytes);
3851
3852 /* don't cross page boundaries */
3853 count = min(count, (unsigned int)PAGE_SIZE - offset);
3854
3855 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3856
3857 if (PageHighMem(page)) {
3858 unsigned long flags;
3859
3860 /* FIXME: use bounce buffer */
3861 local_irq_save(flags);
3862 buf = kmap_atomic(page, KM_IRQ0);
3863
3864 /* do the actual data transfer */
3865 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3866
3867 kunmap_atomic(buf, KM_IRQ0);
3868 local_irq_restore(flags);
3869 } else {
3870 buf = page_address(page);
3871 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3872 }
3873
3874 bytes -= count;
3875 qc->curbytes += count;
3876 qc->cursg_ofs += count;
3877
3878 if (qc->cursg_ofs == sg->length) {
3879 qc->cursg++;
3880 qc->cursg_ofs = 0;
3881 }
3882
3883 if (bytes)
3884 goto next_sg;
3885}
3886
3887/**
3888 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3889 * @qc: Command on going
3890 *
3891 * Transfer data from/to the ATAPI device.
3892 *
3893 * LOCKING:
3894 * Inherited from caller.
3895 */
3896
3897static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3898{
3899 struct ata_port *ap = qc->ap;
3900 struct ata_device *dev = qc->dev;
3901 unsigned int ireason, bc_lo, bc_hi, bytes;
3902 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3903
3904 /* Abuse qc->result_tf for temp storage of intermediate TF
3905 * here to save some kernel stack usage.
3906 * For normal completion, qc->result_tf is not relevant. For
3907 * error, qc->result_tf is later overwritten by ata_qc_complete().
3908 * So, the correctness of qc->result_tf is not affected.
3909 */
3910 ap->ops->tf_read(ap, &qc->result_tf);
3911 ireason = qc->result_tf.nsect;
3912 bc_lo = qc->result_tf.lbam;
3913 bc_hi = qc->result_tf.lbah;
3914 bytes = (bc_hi << 8) | bc_lo;
3915
3916 /* shall be cleared to zero, indicating xfer of data */
3917 if (ireason & (1 << 0))
3918 goto err_out;
3919
3920 /* make sure transfer direction matches expected */
3921 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3922 if (do_write != i_write)
3923 goto err_out;
3924
3925	VPRINTK("ata%u: xferring %d bytes\n", ap->id, bytes);
3926
3927 __atapi_pio_bytes(qc, bytes);
3928
3929 return;
3930
3931err_out:
3932 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3933 qc->err_mask |= AC_ERR_HSM;
3934 ap->hsm_task_state = HSM_ST_ERR;
3935}
3936
3937/**
3938 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3939 * @ap: the target ata_port
3940 * @qc: qc on going
3941 *
3942 * RETURNS:
3943 * 1 if ok in workqueue, 0 otherwise.
3944 */
3945
3946static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3947{
3948 if (qc->tf.flags & ATA_TFLAG_POLLING)
3949 return 1;
3950
3951 if (ap->hsm_task_state == HSM_ST_FIRST) {
3952 if (qc->tf.protocol == ATA_PROT_PIO &&
3953 (qc->tf.flags & ATA_TFLAG_WRITE))
3954 return 1;
3955
3956 if (is_atapi_taskfile(&qc->tf) &&
3957 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3958 return 1;
3959 }
3960
3961 return 0;
3962}
3963
3964/**
3965 * ata_hsm_qc_complete - finish a qc running on standard HSM
3966 * @qc: Command to complete
3967 * @in_wq: 1 if called from workqueue, 0 otherwise
3968 *
3969 * Finish @qc which is running on standard HSM.
3970 *
3971 * LOCKING:
3972 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3973 * Otherwise, none on entry and grabs host lock.
3974 */
3975static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3976{
3977 struct ata_port *ap = qc->ap;
3978 unsigned long flags;
3979
3980 if (ap->ops->error_handler) {
3981 if (in_wq) {
3982 spin_lock_irqsave(ap->lock, flags);
3983
3984 /* EH might have kicked in while host_set lock
3985 * is released.
3986 */
3987 qc = ata_qc_from_tag(ap, qc->tag);
3988 if (qc) {
3989 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3990 ata_irq_on(ap);
3991 ata_qc_complete(qc);
3992 } else
3993 ata_port_freeze(ap);
3994 }
3995
3996 spin_unlock_irqrestore(ap->lock, flags);
3997 } else {
3998 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3999 ata_qc_complete(qc);
4000 else
4001 ata_port_freeze(ap);
4002 }
4003 } else {
4004 if (in_wq) {
4005 spin_lock_irqsave(ap->lock, flags);
4006 ata_irq_on(ap);
4007 ata_qc_complete(qc);
4008 spin_unlock_irqrestore(ap->lock, flags);
4009 } else
4010 ata_qc_complete(qc);
4011 }
4012
4013 ata_altstatus(ap); /* flush */
4014}
4015
4016/**
4017 * ata_hsm_move - move the HSM to the next state.
4018 * @ap: the target ata_port
4019 * @qc: qc on going
4020 * @status: current device status
4021 * @in_wq: 1 if called from workqueue, 0 otherwise
4022 *
4023 * RETURNS:
4024 * 1 when poll next status needed, 0 otherwise.
4025 */
4026int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4027 u8 status, int in_wq)
4028{
4029 unsigned long flags = 0;
4030 int poll_next;
4031
4032 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4033
4034 /* Make sure ata_qc_issue_prot() does not throw things
4035 * like DMA polling into the workqueue. Notice that
4036 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4037 */
4038 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4039
4040fsm_start:
4041 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4042 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4043
4044 switch (ap->hsm_task_state) {
4045 case HSM_ST_FIRST:
4046 /* Send first data block or PACKET CDB */
4047
4048 /* If polling, we will stay in the work queue after
4049 * sending the data. Otherwise, interrupt handler
4050 * takes over after sending the data.
4051 */
4052 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4053
4054 /* check device status */
4055 if (unlikely((status & ATA_DRQ) == 0)) {
4056 /* handle BSY=0, DRQ=0 as error */
4057 if (likely(status & (ATA_ERR | ATA_DF)))
4058 /* device stops HSM for abort/error */
4059 qc->err_mask |= AC_ERR_DEV;
4060 else
4061 /* HSM violation. Let EH handle this */
4062 qc->err_mask |= AC_ERR_HSM;
4063
4064 ap->hsm_task_state = HSM_ST_ERR;
4065 goto fsm_start;
4066 }
4067
4068 /* Device should not ask for data transfer (DRQ=1)
4069 * when it finds something wrong.
4070 * We ignore DRQ here and stop the HSM by
4071 * changing hsm_task_state to HSM_ST_ERR and
4072 * let the EH abort the command or reset the device.
4073 */
4074 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4075 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4076 ap->id, status);
4077 qc->err_mask |= AC_ERR_HSM;
4078 ap->hsm_task_state = HSM_ST_ERR;
4079 goto fsm_start;
4080 }
4081
4082 /* Send the CDB (atapi) or the first data block (ata pio out).
4083 * During the state transition, interrupt handler shouldn't
4084 * be invoked before the data transfer is complete and
4085 * hsm_task_state is changed. Hence, the following locking.
4086 */
4087 if (in_wq)
4088 spin_lock_irqsave(ap->lock, flags);
4089
4090 if (qc->tf.protocol == ATA_PROT_PIO) {
4091 /* PIO data out protocol.
4092 * send first data block.
4093 */
4094
4095 /* ata_pio_sectors() might change the state
4096			 * to HSM_ST_LAST, so the state is changed here
4097			 * before calling ata_pio_sectors().
4098 */
4099 ap->hsm_task_state = HSM_ST;
4100 ata_pio_sectors(qc);
4101 ata_altstatus(ap); /* flush */
4102 } else
4103 /* send CDB */
4104 atapi_send_cdb(ap, qc);
4105
4106 if (in_wq)
4107 spin_unlock_irqrestore(ap->lock, flags);
4108
4109 /* if polling, ata_pio_task() handles the rest.
4110 * otherwise, interrupt handler takes over from here.
4111 */
4112 break;
4113
4114 case HSM_ST:
4115 /* complete command or read/write the data register */
4116 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4117 /* ATAPI PIO protocol */
4118 if ((status & ATA_DRQ) == 0) {
4119 /* No more data to transfer or device error.
4120 * Device error will be tagged in HSM_ST_LAST.
4121 */
4122 ap->hsm_task_state = HSM_ST_LAST;
4123 goto fsm_start;
4124 }
4125
4126 /* Device should not ask for data transfer (DRQ=1)
4127 * when it finds something wrong.
4128 * We ignore DRQ here and stop the HSM by
4129 * changing hsm_task_state to HSM_ST_ERR and
4130 * let the EH abort the command or reset the device.
4131 */
4132 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4133 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4134 ap->id, status);
4135 qc->err_mask |= AC_ERR_HSM;
4136 ap->hsm_task_state = HSM_ST_ERR;
4137 goto fsm_start;
4138 }
4139
4140 atapi_pio_bytes(qc);
4141
4142 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4143 /* bad ireason reported by device */
4144 goto fsm_start;
4145
4146 } else {
4147 /* ATA PIO protocol */
4148 if (unlikely((status & ATA_DRQ) == 0)) {
4149 /* handle BSY=0, DRQ=0 as error */
4150 if (likely(status & (ATA_ERR | ATA_DF)))
4151 /* device stops HSM for abort/error */
4152 qc->err_mask |= AC_ERR_DEV;
4153 else
4154 /* HSM violation. Let EH handle this */
4155 qc->err_mask |= AC_ERR_HSM;
4156
4157 ap->hsm_task_state = HSM_ST_ERR;
4158 goto fsm_start;
4159 }
4160
4161 /* For PIO reads, some devices may ask for
4162			 * data transfer (DRQ=1) along with ERR=1.
4163 * We respect DRQ here and transfer one
4164 * block of junk data before changing the
4165 * hsm_task_state to HSM_ST_ERR.
4166 *
4167 * For PIO writes, ERR=1 DRQ=1 doesn't make
4168 * sense since the data block has been
4169 * transferred to the device.
4170 */
4171 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4172				/* data might be corrupted */
4173 qc->err_mask |= AC_ERR_DEV;
4174
4175 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4176 ata_pio_sectors(qc);
4177 ata_altstatus(ap);
4178 status = ata_wait_idle(ap);
4179 }
4180
4181 if (status & (ATA_BUSY | ATA_DRQ))
4182 qc->err_mask |= AC_ERR_HSM;
4183
4184 /* ata_pio_sectors() might change the
4185				 * state to HSM_ST_LAST, so the state
4186 * is changed after ata_pio_sectors().
4187 */
4188 ap->hsm_task_state = HSM_ST_ERR;
4189 goto fsm_start;
4190 }
4191
4192 ata_pio_sectors(qc);
4193
4194 if (ap->hsm_task_state == HSM_ST_LAST &&
4195 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4196 /* all data read */
4197 ata_altstatus(ap);
4198 status = ata_wait_idle(ap);
4199 goto fsm_start;
4200 }
4201 }
4202
4203 ata_altstatus(ap); /* flush */
4204 poll_next = 1;
4205 break;
4206
4207 case HSM_ST_LAST:
4208 if (unlikely(!ata_ok(status))) {
4209 qc->err_mask |= __ac_err_mask(status);
4210 ap->hsm_task_state = HSM_ST_ERR;
4211 goto fsm_start;
4212 }
4213
4214 /* no more data to transfer */
4215 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4216 ap->id, qc->dev->devno, status);
4217
4218 WARN_ON(qc->err_mask);
4219
4220 ap->hsm_task_state = HSM_ST_IDLE;
4221
4222 /* complete taskfile transaction */
4223 ata_hsm_qc_complete(qc, in_wq);
4224
4225 poll_next = 0;
4226 break;
4227
4228 case HSM_ST_ERR:
4229		/* make sure qc->err_mask is available so EH knows
4230		 * what went wrong and can recover
4231 */
4232 WARN_ON(qc->err_mask == 0);
4233
4234 ap->hsm_task_state = HSM_ST_IDLE;
4235
4236 /* complete taskfile transaction */
4237 ata_hsm_qc_complete(qc, in_wq);
4238
4239 poll_next = 0;
4240 break;
4241 default:
4242 poll_next = 0;
4243 BUG();
4244 }
4245
4246 return poll_next;
4247}
4248
4249static void ata_pio_task(void *_data)
4250{
4251 struct ata_queued_cmd *qc = _data;
4252 struct ata_port *ap = qc->ap;
4253 u8 status;
4254 int poll_next;
4255
4256fsm_start:
4257 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4258
4259 /*
4260 * This is purely heuristic. This is a fast path.
4261 * Sometimes when we enter, BSY will be cleared in
4262 * a chk-status or two. If not, the drive is probably seeking
4263 * or something. Snooze for a couple msecs, then
4264 * chk-status again. If still busy, queue delayed work.
4265 */
4266 status = ata_busy_wait(ap, ATA_BUSY, 5);
4267 if (status & ATA_BUSY) {
4268 msleep(2);
4269 status = ata_busy_wait(ap, ATA_BUSY, 10);
4270 if (status & ATA_BUSY) {
4271 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4272 return;
4273 }
4274 }
4275
4276 /* move the HSM */
4277 poll_next = ata_hsm_move(ap, qc, status, 1);
4278
4279 /* another command or interrupt handler
4280 * may be running at this point.
4281 */
4282 if (poll_next)
4283 goto fsm_start;
4284}
4285
4286/**
4287 * ata_qc_new - Request an available ATA command, for queueing
4288 * @ap: Port associated with device @dev
4289 * @dev: Device from whom we request an available command structure
4290 *
4291 * LOCKING:
4292 * None.
4293 */
4294
4295static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4296{
4297 struct ata_queued_cmd *qc = NULL;
4298 unsigned int i;
4299
4300 /* no command while frozen */
4301 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4302 return NULL;
4303
4304	/* the last tag is reserved for the internal command. */
4305 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4306 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4307 qc = __ata_qc_from_tag(ap, i);
4308 break;
4309 }
4310
4311 if (qc)
4312 qc->tag = i;
4313
4314 return qc;
4315}
4316
4317/**
4318 * ata_qc_new_init - Request an available ATA command, and initialize it
4319 * @dev: Device from whom we request an available command structure
4320 *
4321 * LOCKING:
4322 * None.
4323 */
4324
4325struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4326{
4327 struct ata_port *ap = dev->ap;
4328 struct ata_queued_cmd *qc;
4329
4330 qc = ata_qc_new(ap);
4331 if (qc) {
4332 qc->scsicmd = NULL;
4333 qc->ap = ap;
4334 qc->dev = dev;
4335
4336 ata_qc_reinit(qc);
4337 }
4338
4339 return qc;
4340}
4341
4342/**
4343 * ata_qc_free - free unused ata_queued_cmd
4344 * @qc: Command to complete
4345 *
4346 * Designed to free unused ata_queued_cmd object
4347 * in case something prevents using it.
4348 *
4349 * LOCKING:
4350 * spin_lock_irqsave(host_set lock)
4351 */
4352void ata_qc_free(struct ata_queued_cmd *qc)
4353{
4354 struct ata_port *ap = qc->ap;
4355 unsigned int tag;
4356
4357 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4358
4359 qc->flags = 0;
4360 tag = qc->tag;
4361 if (likely(ata_tag_valid(tag))) {
4362 qc->tag = ATA_TAG_POISON;
4363 clear_bit(tag, &ap->qc_allocated);
4364 }
4365}
4366
4367void __ata_qc_complete(struct ata_queued_cmd *qc)
4368{
4369 struct ata_port *ap = qc->ap;
4370
4371 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4372 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4373
4374 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4375 ata_sg_clean(qc);
4376
4377 /* command should be marked inactive atomically with qc completion */
4378 if (qc->tf.protocol == ATA_PROT_NCQ)
4379 ap->sactive &= ~(1 << qc->tag);
4380 else
4381 ap->active_tag = ATA_TAG_POISON;
4382
4383 /* atapi: mark qc as inactive to prevent the interrupt handler
4384 * from completing the command twice later, before the error handler
4385	 * is called (when rc != 0 and ATAPI request sense is needed).
4386 */
4387 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4388 ap->qc_active &= ~(1 << qc->tag);
4389
4390 /* call completion callback */
4391 qc->complete_fn(qc);
4392}
4393
4394/**
4395 * ata_qc_complete - Complete an active ATA command
4396 * @qc: Command to complete
4398 *
4399 * Indicate to the mid and upper layers that an ATA
4400 * command has completed, with either an ok or not-ok status.
4401 *
4402 * LOCKING:
4403 * spin_lock_irqsave(host_set lock)
4404 */
4405void ata_qc_complete(struct ata_queued_cmd *qc)
4406{
4407 struct ata_port *ap = qc->ap;
4408
4409 /* XXX: New EH and old EH use different mechanisms to
4410 * synchronize EH with regular execution path.
4411 *
4412 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4413 * Normal execution path is responsible for not accessing a
4414 * failed qc. libata core enforces the rule by returning NULL
4415 * from ata_qc_from_tag() for failed qcs.
4416 *
4417 * Old EH depends on ata_qc_complete() nullifying completion
4418 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4419 * not synchronize with interrupt handler. Only PIO task is
4420 * taken care of.
4421 */
4422 if (ap->ops->error_handler) {
4423 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4424
4425 if (unlikely(qc->err_mask))
4426 qc->flags |= ATA_QCFLAG_FAILED;
4427
4428 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4429 if (!ata_tag_internal(qc->tag)) {
4430 /* always fill result TF for failed qc */
4431 ap->ops->tf_read(ap, &qc->result_tf);
4432 ata_qc_schedule_eh(qc);
4433 return;
4434 }
4435 }
4436
4437 /* read result TF if requested */
4438 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4439 ap->ops->tf_read(ap, &qc->result_tf);
4440
4441 __ata_qc_complete(qc);
4442 } else {
4443 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4444 return;
4445
4446 /* read result TF if failed or requested */
4447 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4448 ap->ops->tf_read(ap, &qc->result_tf);
4449
4450 __ata_qc_complete(qc);
4451 }
4452}
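
/* Editor's note -- illustrative sketch, not part of the original file.
 * A non-NCQ LLDD interrupt path typically fetches the active qc and
 * completes it through this function; ac_err_mask() converts the
 * device status byte into an err_mask:
 *
 *	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
 *
 *	if (qc) {
 *		qc->err_mask |= ac_err_mask(status);
 *		ata_qc_complete(qc);
 *	}
 */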
4453
4454/**
4455 * ata_qc_complete_multiple - Complete multiple qcs successfully
4456 * @ap: port in question
4457 * @qc_active: new qc_active mask
4458 * @finish_qc: LLDD callback invoked before completing a qc
4459 *
4460 *	Complete in-flight commands.  This function is meant to be
4461 *	called from a low-level driver's interrupt routine to
4462 *	complete requests normally.  ap->qc_active and @qc_active
4463 *	are compared and commands are completed accordingly.
4464 *
4465 * LOCKING:
4466 * spin_lock_irqsave(host_set lock)
4467 *
4468 * RETURNS:
4469 * Number of completed commands on success, -errno otherwise.
4470 */
4471int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4472 void (*finish_qc)(struct ata_queued_cmd *))
4473{
4474 int nr_done = 0;
4475 u32 done_mask;
4476 int i;
4477
4478 done_mask = ap->qc_active ^ qc_active;
4479
4480 if (unlikely(done_mask & qc_active)) {
4481 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4482 "(%08x->%08x)\n", ap->qc_active, qc_active);
4483 return -EINVAL;
4484 }
4485
4486 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4487 struct ata_queued_cmd *qc;
4488
4489 if (!(done_mask & (1 << i)))
4490 continue;
4491
4492 if ((qc = ata_qc_from_tag(ap, i))) {
4493 if (finish_qc)
4494 finish_qc(qc);
4495 ata_qc_complete(qc);
4496 nr_done++;
4497 }
4498 }
4499
4500 return nr_done;
4501}
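
/* Editor's note -- illustrative sketch, not part of the original file.
 * An NCQ-capable LLDD's interrupt handler might use this as below;
 * MY_SACTIVE_REG is a made-up register, real drivers derive the new
 * qc_active mask from controller-specific status:
 *
 *	u32 qc_active = readl(port_mmio + MY_SACTIVE_REG);
 *	int nr_done = ata_qc_complete_multiple(ap, qc_active, NULL);
 *
 *	if (nr_done < 0)
 *		ata_port_freeze(ap);	(one plausible error reaction)
 */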
4502
4503static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4504{
4505 struct ata_port *ap = qc->ap;
4506
4507 switch (qc->tf.protocol) {
4508 case ATA_PROT_NCQ:
4509 case ATA_PROT_DMA:
4510 case ATA_PROT_ATAPI_DMA:
4511 return 1;
4512
4513 case ATA_PROT_ATAPI:
4514 case ATA_PROT_PIO:
4515 if (ap->flags & ATA_FLAG_PIO_DMA)
4516 return 1;
4517
4518 /* fall through */
4519
4520 default:
4521 return 0;
4522 }
4523
4524 /* never reached */
4525}
4526
4527/**
4528 * ata_qc_issue - issue taskfile to device
4529 * @qc: command to issue to device
4530 *
4531 *	Prepare an ATA command for submission to the device.
4532 * This includes mapping the data into a DMA-able
4533 * area, filling in the S/G table, and finally
4534 * writing the taskfile to hardware, starting the command.
4535 *
4536 * LOCKING:
4537 * spin_lock_irqsave(host_set lock)
4538 */
4539void ata_qc_issue(struct ata_queued_cmd *qc)
4540{
4541 struct ata_port *ap = qc->ap;
4542
4543 /* Make sure only one non-NCQ command is outstanding. The
4544 * check is skipped for old EH because it reuses active qc to
4545 * request ATAPI sense.
4546 */
4547 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4548
4549 if (qc->tf.protocol == ATA_PROT_NCQ) {
4550 WARN_ON(ap->sactive & (1 << qc->tag));
4551 ap->sactive |= 1 << qc->tag;
4552 } else {
4553 WARN_ON(ap->sactive);
4554 ap->active_tag = qc->tag;
4555 }
4556
4557 qc->flags |= ATA_QCFLAG_ACTIVE;
4558 ap->qc_active |= 1 << qc->tag;
4559
4560 if (ata_should_dma_map(qc)) {
4561 if (qc->flags & ATA_QCFLAG_SG) {
4562 if (ata_sg_setup(qc))
4563 goto sg_err;
4564 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4565 if (ata_sg_setup_one(qc))
4566 goto sg_err;
4567 }
4568 } else {
4569 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4570 }
4571
4572 ap->ops->qc_prep(qc);
4573
4574 qc->err_mask |= ap->ops->qc_issue(qc);
4575 if (unlikely(qc->err_mask))
4576 goto err;
4577 return;
4578
4579sg_err:
4580 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4581 qc->err_mask |= AC_ERR_SYSTEM;
4582err:
4583 ata_qc_complete(qc);
4584}
4585
4586/**
4587 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4588 * @qc: command to issue to device
4589 *
4590 * Using various libata functions and hooks, this function
4591 * starts an ATA command. ATA commands are grouped into
4592 * classes called "protocols", and issuing each type of protocol
4593 * is slightly different.
4594 *
4595 * May be used as the qc_issue() entry in ata_port_operations.
4596 *
4597 * LOCKING:
4598 * spin_lock_irqsave(host_set lock)
4599 *
4600 * RETURNS:
4601 * Zero on success, AC_ERR_* mask on failure
4602 */
4603
4604unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4605{
4606 struct ata_port *ap = qc->ap;
4607
4608 /* Use polling pio if the LLD doesn't handle
4609	 * interrupt-driven PIO and the ATAPI CDB interrupt.
4610 */
4611 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4612 switch (qc->tf.protocol) {
4613 case ATA_PROT_PIO:
4614 case ATA_PROT_ATAPI:
4615 case ATA_PROT_ATAPI_NODATA:
4616 qc->tf.flags |= ATA_TFLAG_POLLING;
4617 break;
4618 case ATA_PROT_ATAPI_DMA:
4619 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4620 /* see ata_dma_blacklisted() */
4621 BUG();
4622 break;
4623 default:
4624 break;
4625 }
4626 }
4627
4628 /* select the device */
4629 ata_dev_select(ap, qc->dev->devno, 1, 0);
4630
4631 /* start the command */
4632 switch (qc->tf.protocol) {
4633 case ATA_PROT_NODATA:
4634 if (qc->tf.flags & ATA_TFLAG_POLLING)
4635 ata_qc_set_polling(qc);
4636
4637 ata_tf_to_host(ap, &qc->tf);
4638 ap->hsm_task_state = HSM_ST_LAST;
4639
4640 if (qc->tf.flags & ATA_TFLAG_POLLING)
4641 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4642
4643 break;
4644
4645 case ATA_PROT_DMA:
4646 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4647
4648 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4649 ap->ops->bmdma_setup(qc); /* set up bmdma */
4650 ap->ops->bmdma_start(qc); /* initiate bmdma */
4651 ap->hsm_task_state = HSM_ST_LAST;
4652 break;
4653
4654 case ATA_PROT_PIO:
4655 if (qc->tf.flags & ATA_TFLAG_POLLING)
4656 ata_qc_set_polling(qc);
4657
4658 ata_tf_to_host(ap, &qc->tf);
4659
4660 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4661 /* PIO data out protocol */
4662 ap->hsm_task_state = HSM_ST_FIRST;
4663 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4664
4665 /* always send first data block using
4666 * the ata_pio_task() codepath.
4667 */
4668 } else {
4669 /* PIO data in protocol */
4670 ap->hsm_task_state = HSM_ST;
4671
4672 if (qc->tf.flags & ATA_TFLAG_POLLING)
4673 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4674
4675 /* if polling, ata_pio_task() handles the rest.
4676 * otherwise, interrupt handler takes over from here.
4677 */
4678 }
4679
4680 break;
4681
4682 case ATA_PROT_ATAPI:
4683 case ATA_PROT_ATAPI_NODATA:
4684 if (qc->tf.flags & ATA_TFLAG_POLLING)
4685 ata_qc_set_polling(qc);
4686
4687 ata_tf_to_host(ap, &qc->tf);
4688
4689 ap->hsm_task_state = HSM_ST_FIRST;
4690
4691 /* send cdb by polling if no cdb interrupt */
4692 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4693 (qc->tf.flags & ATA_TFLAG_POLLING))
4694 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4695 break;
4696
4697 case ATA_PROT_ATAPI_DMA:
4698 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4699
4700 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4701 ap->ops->bmdma_setup(qc); /* set up bmdma */
4702 ap->hsm_task_state = HSM_ST_FIRST;
4703
4704 /* send cdb by polling if no cdb interrupt */
4705 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4706 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4707 break;
4708
4709 default:
4710 WARN_ON(1);
4711 return AC_ERR_SYSTEM;
4712 }
4713
4714 return 0;
4715}
4716
4717/**
4718 * ata_host_intr - Handle host interrupt for given (port, task)
4719 * @ap: Port on which interrupt arrived (possibly...)
4720 * @qc: Taskfile currently active in engine
4721 *
4722 * Handle host interrupt for given queued command. Currently,
4723 * only DMA interrupts are handled. All other commands are
4724 * handled via polling with interrupts disabled (nIEN bit).
4725 *
4726 * LOCKING:
4727 * spin_lock_irqsave(host_set lock)
4728 *
4729 * RETURNS:
4730 * One if interrupt was handled, zero if not (shared irq).
4731 */
4732
4733inline unsigned int ata_host_intr (struct ata_port *ap,
4734 struct ata_queued_cmd *qc)
4735{
4736 u8 status, host_stat = 0;
4737
4738 VPRINTK("ata%u: protocol %d task_state %d\n",
4739 ap->id, qc->tf.protocol, ap->hsm_task_state);
4740
4741	/* Check whether we are expecting an interrupt in this state */
4742 switch (ap->hsm_task_state) {
4743 case HSM_ST_FIRST:
4744 /* Some pre-ATAPI-4 devices assert INTRQ
4745 * at this state when ready to receive CDB.
4746 */
4747
4748		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4749		 * The flag is set only for ATAPI devices, so there is
4750		 * no need to check is_atapi_taskfile(&qc->tf) again.
4751 */
4752 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4753 goto idle_irq;
4754 break;
4755 case HSM_ST_LAST:
4756 if (qc->tf.protocol == ATA_PROT_DMA ||
4757 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4758 /* check status of DMA engine */
4759 host_stat = ap->ops->bmdma_status(ap);
4760 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4761
4762 /* if it's not our irq... */
4763 if (!(host_stat & ATA_DMA_INTR))
4764 goto idle_irq;
4765
4766 /* before we do anything else, clear DMA-Start bit */
4767 ap->ops->bmdma_stop(qc);
4768
4769 if (unlikely(host_stat & ATA_DMA_ERR)) {
4770			/* error when transferring data to/from memory */
4771 qc->err_mask |= AC_ERR_HOST_BUS;
4772 ap->hsm_task_state = HSM_ST_ERR;
4773 }
4774 }
4775 break;
4776 case HSM_ST:
4777 break;
4778 default:
4779 goto idle_irq;
4780 }
4781
4782 /* check altstatus */
4783 status = ata_altstatus(ap);
4784 if (status & ATA_BUSY)
4785 goto idle_irq;
4786
4787 /* check main status, clearing INTRQ */
4788 status = ata_chk_status(ap);
4789 if (unlikely(status & ATA_BUSY))
4790 goto idle_irq;
4791
4792 /* ack bmdma irq events */
4793 ap->ops->irq_clear(ap);
4794
4795 ata_hsm_move(ap, qc, status, 0);
4796 return 1; /* irq handled */
4797
4798idle_irq:
4799 ap->stats.idle_irq++;
4800
4801#ifdef ATA_IRQ_TRAP
4802 if ((ap->stats.idle_irq % 1000) == 0) {
4803 ata_irq_ack(ap, 0); /* debug trap */
4804 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4805 return 1;
4806 }
4807#endif
4808 return 0; /* irq not handled */
4809}
4810
4811/**
4812 * ata_interrupt - Default ATA host interrupt handler
4813 * @irq: irq line (unused)
4814 * @dev_instance: pointer to our ata_host_set information structure
4815 * @regs: unused
4816 *
4817 * Default interrupt handler for PCI IDE devices. Calls
4818 * ata_host_intr() for each port that is not disabled.
4819 *
4820 * LOCKING:
4821 * Obtains host_set lock during operation.
4822 *
4823 * RETURNS:
4824 * IRQ_NONE or IRQ_HANDLED.
4825 */
4826
4827irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4828{
4829 struct ata_host_set *host_set = dev_instance;
4830 unsigned int i;
4831 unsigned int handled = 0;
4832 unsigned long flags;
4833
4834 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4835 spin_lock_irqsave(&host_set->lock, flags);
4836
4837 for (i = 0; i < host_set->n_ports; i++) {
4838 struct ata_port *ap;
4839
4840 ap = host_set->ports[i];
4841		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
4843 struct ata_queued_cmd *qc;
4844
4845 qc = ata_qc_from_tag(ap, ap->active_tag);
4846 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4847 (qc->flags & ATA_QCFLAG_ACTIVE))
4848 handled |= ata_host_intr(ap, qc);
4849 }
4850 }
4851
4852 spin_unlock_irqrestore(&host_set->lock, flags);
4853
4854 return IRQ_RETVAL(handled);
4855}
4856
4857/**
4858 * sata_scr_valid - test whether SCRs are accessible
4859 * @ap: ATA port to test SCR accessibility for
4860 *
4861 * Test whether SCRs are accessible for @ap.
4862 *
4863 * LOCKING:
4864 * None.
4865 *
4866 * RETURNS:
4867 * 1 if SCRs are accessible, 0 otherwise.
4868 */
4869int sata_scr_valid(struct ata_port *ap)
4870{
4871 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4872}
4873
4874/**
4875 * sata_scr_read - read SCR register of the specified port
4876 * @ap: ATA port to read SCR for
4877 * @reg: SCR to read
4878 * @val: Place to store read value
4879 *
4880 * Read SCR register @reg of @ap into *@val. This function is
4881 * guaranteed to succeed if the cable type of the port is SATA
4882 * and the port implements ->scr_read.
4883 *
4884 * LOCKING:
4885 * None.
4886 *
4887 * RETURNS:
4888 * 0 on success, negative errno on failure.
4889 */
4890int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4891{
4892 if (sata_scr_valid(ap)) {
4893 *val = ap->ops->scr_read(ap, reg);
4894 return 0;
4895 }
4896 return -EOPNOTSUPP;
4897}
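
/* Editor's note -- illustrative sketch, not part of the original file.
 * Decoding the negotiated link speed from SStatus via sata_scr_read();
 * the helper name is hypothetical, the SPD field layout follows the
 * SATA spec:
 *
 *	static void my_show_link_spd(struct ata_port *ap)
 *	{
 *		u32 sstatus;
 *
 *		if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
 *			ata_port_printk(ap, KERN_INFO, "link gen%d\n",
 *					(sstatus >> 4) & 0xf);
 *	}
 */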
4898
4899/**
4900 * sata_scr_write - write SCR register of the specified port
4901 * @ap: ATA port to write SCR for
4902 * @reg: SCR to write
4903 * @val: value to write
4904 *
4905 * Write @val to SCR register @reg of @ap. This function is
4906 * guaranteed to succeed if the cable type of the port is SATA
4907 *	and the port implements ->scr_write.
4908 *
4909 * LOCKING:
4910 * None.
4911 *
4912 * RETURNS:
4913 * 0 on success, negative errno on failure.
4914 */
4915int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4916{
4917 if (sata_scr_valid(ap)) {
4918 ap->ops->scr_write(ap, reg, val);
4919 return 0;
4920 }
4921 return -EOPNOTSUPP;
4922}
4923
4924/**
4925 * sata_scr_write_flush - write SCR register of the specified port and flush
4926 * @ap: ATA port to write SCR for
4927 * @reg: SCR to write
4928 * @val: value to write
4929 *
4930 * This function is identical to sata_scr_write() except that this
4931 * function performs flush after writing to the register.
4932 *
4933 * LOCKING:
4934 * None.
4935 *
4936 * RETURNS:
4937 * 0 on success, negative errno on failure.
4938 */
4939int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4940{
4941 if (sata_scr_valid(ap)) {
4942 ap->ops->scr_write(ap, reg, val);
4943 ap->ops->scr_read(ap, reg);
4944 return 0;
4945 }
4946 return -EOPNOTSUPP;
4947}
4948
4949/**
4950 * ata_port_online - test whether the given port is online
4951 * @ap: ATA port to test
4952 *
4953 * Test whether @ap is online. Note that this function returns 0
4954 * if online status of @ap cannot be obtained, so
4955 * ata_port_online(ap) != !ata_port_offline(ap).
4956 *
4957 * LOCKING:
4958 * None.
4959 *
4960 * RETURNS:
4961 * 1 if the port online status is available and online.
4962 */
4963int ata_port_online(struct ata_port *ap)
4964{
4965 u32 sstatus;
4966
4967 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4968 return 1;
4969 return 0;
4970}
4971
4972/**
4973 * ata_port_offline - test whether the given port is offline
4974 * @ap: ATA port to test
4975 *
4976 * Test whether @ap is offline. Note that this function returns
4977 * 0 if offline status of @ap cannot be obtained, so
4978 * ata_port_online(ap) != !ata_port_offline(ap).
4979 *
4980 * LOCKING:
4981 * None.
4982 *
4983 * RETURNS:
4984 * 1 if the port offline status is available and offline.
4985 */
4986int ata_port_offline(struct ata_port *ap)
4987{
4988 u32 sstatus;
4989
4990 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4991 return 1;
4992 return 0;
4993}
4994
4995int ata_flush_cache(struct ata_device *dev)
4996{
4997 unsigned int err_mask;
4998 u8 cmd;
4999
5000 if (!ata_try_flush_cache(dev))
5001 return 0;
5002
5003 if (ata_id_has_flush_ext(dev->id))
5004 cmd = ATA_CMD_FLUSH_EXT;
5005 else
5006 cmd = ATA_CMD_FLUSH;
5007
5008 err_mask = ata_do_simple_cmd(dev, cmd);
5009 if (err_mask) {
5010 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5011 return -EIO;
5012 }
5013
5014 return 0;
5015}
5016
5017static int ata_host_set_request_pm(struct ata_host_set *host_set,
5018 pm_message_t mesg, unsigned int action,
5019 unsigned int ehi_flags, int wait)
5020{
5021 unsigned long flags;
5022 int i, rc;
5023
5024 for (i = 0; i < host_set->n_ports; i++) {
5025 struct ata_port *ap = host_set->ports[i];
5026
5027 /* Previous resume operation might still be in
5028 * progress. Wait for PM_PENDING to clear.
5029 */
5030 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5031 ata_port_wait_eh(ap);
5032 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5033 }
5034
5035 /* request PM ops to EH */
5036 spin_lock_irqsave(ap->lock, flags);
5037
5038 ap->pm_mesg = mesg;
5039 if (wait) {
5040 rc = 0;
5041 ap->pm_result = &rc;
5042 }
5043
5044 ap->pflags |= ATA_PFLAG_PM_PENDING;
5045 ap->eh_info.action |= action;
5046 ap->eh_info.flags |= ehi_flags;
5047
5048 ata_port_schedule_eh(ap);
5049
5050 spin_unlock_irqrestore(ap->lock, flags);
5051
5052 /* wait and check result */
5053 if (wait) {
5054 ata_port_wait_eh(ap);
5055 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5056 if (rc)
5057 return rc;
5058 }
5059 }
5060
5061 return 0;
5062}
5063
5064/**
5065 * ata_host_set_suspend - suspend host_set
5066 * @host_set: host_set to suspend
5067 * @mesg: PM message
5068 *
5069 * Suspend @host_set. Actual operation is performed by EH. This
5070 * function requests EH to perform PM operations and waits for EH
5071 * to finish.
5072 *
5073 * LOCKING:
5074 * Kernel thread context (may sleep).
5075 *
5076 * RETURNS:
5077 * 0 on success, -errno on failure.
5078 */
5079int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
5080{
5081 int i, j, rc;
5082
5083 rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
5084 if (rc)
5085 goto fail;
5086
5087 /* EH is quiescent now. Fail if we have any ready device.
5088 * This happens if hotplug occurs between completion of device
5089 * suspension and here.
5090 */
5091 for (i = 0; i < host_set->n_ports; i++) {
5092 struct ata_port *ap = host_set->ports[i];
5093
5094 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5095 struct ata_device *dev = &ap->device[j];
5096
5097 if (ata_dev_ready(dev)) {
5098 ata_port_printk(ap, KERN_WARNING,
5099 "suspend failed, device %d "
5100 "still active\n", dev->devno);
5101 rc = -EBUSY;
5102 goto fail;
5103 }
5104 }
5105 }
5106
5107 host_set->dev->power.power_state = mesg;
5108 return 0;
5109
5110 fail:
5111 ata_host_set_resume(host_set);
5112 return rc;
5113}
5114
5115/**
5116 * ata_host_set_resume - resume host_set
5117 * @host_set: host_set to resume
5118 *
5119 * Resume @host_set. Actual operation is performed by EH. This
5120 * function requests EH to perform PM operations and returns.
5121 *	Note that all resume operations are performed in parallel.
5122 *
5123 * LOCKING:
5124 * Kernel thread context (may sleep).
5125 */
5126void ata_host_set_resume(struct ata_host_set *host_set)
5127{
5128 ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
5129 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5130 host_set->dev->power.power_state = PMSG_ON;
5131}
5132
5133/**
5134 * ata_port_start - Set port up for dma.
5135 * @ap: Port to initialize
5136 *
5137 * Called just after data structures for each port are
5138 * initialized. Allocates space for PRD table.
5139 *
5140 * May be used as the port_start() entry in ata_port_operations.
5141 *
5142 * LOCKING:
5143 * Inherited from caller.
5144 */
5145
5146int ata_port_start (struct ata_port *ap)
5147{
5148 struct device *dev = ap->dev;
5149 int rc;
5150
5151 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5152 if (!ap->prd)
5153 return -ENOMEM;
5154
5155 rc = ata_pad_alloc(ap, dev);
5156 if (rc) {
5157 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5158 return rc;
5159 }
5160
5161 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5162
5163 return 0;
5164}
5165
5166
5167/**
5168 * ata_port_stop - Undo ata_port_start()
5169 * @ap: Port to shut down
5170 *
5171 * Frees the PRD table.
5172 *
5173 * May be used as the port_stop() entry in ata_port_operations.
5174 *
5175 * LOCKING:
5176 * Inherited from caller.
5177 */
5178
5179void ata_port_stop (struct ata_port *ap)
5180{
5181 struct device *dev = ap->dev;
5182
5183 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5184 ata_pad_free(ap, dev);
5185}
5186
5187void ata_host_stop (struct ata_host_set *host_set)
5188{
5189 if (host_set->mmio_base)
5190 iounmap(host_set->mmio_base);
5191}
5192
5193/**
5194 * ata_dev_init - Initialize an ata_device structure
5195 * @dev: Device structure to initialize
5196 *
5197 * Initialize @dev in preparation for probing.
5198 *
5199 * LOCKING:
5200 * Inherited from caller.
5201 */
5202void ata_dev_init(struct ata_device *dev)
5203{
5204 struct ata_port *ap = dev->ap;
5205 unsigned long flags;
5206
5207 /* SATA spd limit is bound to the first device */
5208 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5209
5210 /* High bits of dev->flags are used to record warm plug
5211 * requests which occur asynchronously. Synchronize using
5212 * host_set lock.
5213 */
5214 spin_lock_irqsave(ap->lock, flags);
5215 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5216 spin_unlock_irqrestore(ap->lock, flags);
5217
5218 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5219 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5220 dev->pio_mask = UINT_MAX;
5221 dev->mwdma_mask = UINT_MAX;
5222 dev->udma_mask = UINT_MAX;
5223}
5224
5225/**
5226 * ata_host_init - Initialize an ata_port structure
5227 * @ap: Structure to initialize
5228 * @host: associated SCSI mid-layer structure
5229 * @host_set: Collection of hosts to which @ap belongs
5230 * @ent: Probe information provided by low-level driver
5231 * @port_no: Port number associated with this ata_port
5232 *
5233 * Initialize a new ata_port structure, and its associated
5234 * scsi_host.
5235 *
5236 * LOCKING:
5237 * Inherited from caller.
5238 */
5239static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5240 struct ata_host_set *host_set,
5241 const struct ata_probe_ent *ent, unsigned int port_no)
5242{
5243 unsigned int i;
5244
5245 host->max_id = 16;
5246 host->max_lun = 1;
5247 host->max_channel = 1;
5248 host->unique_id = ata_unique_id++;
5249 host->max_cmd_len = 12;
5250
5251 ap->lock = &host_set->lock;
5252 ap->flags = ATA_FLAG_DISABLED;
5253 ap->id = host->unique_id;
5254 ap->host = host;
5255 ap->ctl = ATA_DEVCTL_OBS;
5256 ap->host_set = host_set;
5257 ap->dev = ent->dev;
5258 ap->port_no = port_no;
5259 ap->hard_port_no =
5260 ent->legacy_mode ? ent->hard_port_no : port_no;
5261 ap->pio_mask = ent->pio_mask;
5262 ap->mwdma_mask = ent->mwdma_mask;
5263 ap->udma_mask = ent->udma_mask;
5264 ap->flags |= ent->host_flags;
5265 ap->ops = ent->port_ops;
5266 ap->hw_sata_spd_limit = UINT_MAX;
5267 ap->active_tag = ATA_TAG_POISON;
5268 ap->last_ctl = 0xFF;
5269
5270#if defined(ATA_VERBOSE_DEBUG)
5271 /* turn on all debugging levels */
5272 ap->msg_enable = 0x00FF;
5273#elif defined(ATA_DEBUG)
5274 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5275#else
5276 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5277#endif
5278
5279 INIT_WORK(&ap->port_task, NULL, NULL);
5280 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5281 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
5282 INIT_LIST_HEAD(&ap->eh_done_q);
5283 init_waitqueue_head(&ap->eh_wait_q);
5284
5285 /* set cable type */
5286 ap->cbl = ATA_CBL_NONE;
5287 if (ap->flags & ATA_FLAG_SATA)
5288 ap->cbl = ATA_CBL_SATA;
5289
5290 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5291 struct ata_device *dev = &ap->device[i];
5292 dev->ap = ap;
5293 dev->devno = i;
5294 ata_dev_init(dev);
5295 }
5296
5297#ifdef ATA_IRQ_TRAP
5298 ap->stats.unhandled_irq = 1;
5299 ap->stats.idle_irq = 1;
5300#endif
5301
5302 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5303}
5304
5305/**
5306 * ata_host_add - Attach low-level ATA driver to system
5307 * @ent: Information provided by low-level driver
5308 * @host_set: Collections of ports to which we add
5309 * @port_no: Port number associated with this host
5310 *
5311 * Attach low-level ATA driver to system.
5312 *
5313 * LOCKING:
5314 * PCI/etc. bus probe sem.
5315 *
5316 * RETURNS:
5317 *	New ata_port on success, NULL on error.
5318 */
5319
5320static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
5321 struct ata_host_set *host_set,
5322 unsigned int port_no)
5323{
5324 struct Scsi_Host *host;
5325 struct ata_port *ap;
5326 int rc;
5327
5328 DPRINTK("ENTER\n");
5329
5330 if (!ent->port_ops->error_handler &&
5331 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5332 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5333 port_no);
5334 return NULL;
5335 }
5336
5337 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5338 if (!host)
5339 return NULL;
5340
5341 host->transportt = &ata_scsi_transport_template;
5342
5343 ap = ata_shost_to_port(host);
5344
5345 ata_host_init(ap, host, host_set, ent, port_no);
5346
5347 rc = ap->ops->port_start(ap);
5348 if (rc)
5349 goto err_out;
5350
5351 return ap;
5352
5353err_out:
5354 scsi_host_put(host);
5355 return NULL;
5356}
5357
5358/**
5359 * ata_device_add - Register hardware device with ATA and SCSI layers
5360 * @ent: Probe information describing hardware device to be registered
5361 *
5362 * This function processes the information provided in the probe
5363 * information struct @ent, allocates the necessary ATA and SCSI
5364 * host information structures, initializes them, and registers
5365 * everything with requisite kernel subsystems.
5366 *
5367 * This function requests irqs, probes the ATA bus, and probes
5368 * the SCSI bus.
5369 *
5370 * LOCKING:
5371 * PCI/etc. bus probe sem.
5372 *
5373 * RETURNS:
5374 * Number of ports registered. Zero on error (no ports registered).
5375 */
5376int ata_device_add(const struct ata_probe_ent *ent)
5377{
5378 unsigned int count = 0, i;
5379 struct device *dev = ent->dev;
5380 struct ata_host_set *host_set;
5381 int rc;
5382
5383 DPRINTK("ENTER\n");
5384 /* alloc a container for our list of ATA ports (buses) */
5385 host_set = kzalloc(sizeof(struct ata_host_set) +
5386 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5387 if (!host_set)
5388 return 0;
5389 spin_lock_init(&host_set->lock);
5390
5391 host_set->dev = dev;
5392 host_set->n_ports = ent->n_ports;
5393 host_set->irq = ent->irq;
5394 host_set->mmio_base = ent->mmio_base;
5395 host_set->private_data = ent->private_data;
5396 host_set->ops = ent->port_ops;
5397 host_set->flags = ent->host_set_flags;
5398
5399 /* register each port bound to this device */
5400 for (i = 0; i < ent->n_ports; i++) {
5401 struct ata_port *ap;
5402 unsigned long xfer_mode_mask;
5403
5404 ap = ata_host_add(ent, host_set, i);
5405 if (!ap)
5406 goto err_out;
5407
5408 host_set->ports[i] = ap;
5409		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5410 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5411 (ap->pio_mask << ATA_SHIFT_PIO);
5412
5413 /* print per-port info to dmesg */
5414 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5415 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
5416 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5417 ata_mode_string(xfer_mode_mask),
5418 ap->ioaddr.cmd_addr,
5419 ap->ioaddr.ctl_addr,
5420 ap->ioaddr.bmdma_addr,
5421 ent->irq);
5422
5423 ata_chk_status(ap);
5424 host_set->ops->irq_clear(ap);
5425 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5426 count++;
5427 }
5428
5429 if (!count)
5430 goto err_free_ret;
5431
5432	/* obtain the IRQ, which is shared between channels */
5433 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5434 DRV_NAME, host_set);
5435 if (rc) {
5436 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5437 ent->irq, rc);
5438 goto err_out;
5439 }
5440
5441 /* perform each probe synchronously */
5442 DPRINTK("probe begin\n");
5443 for (i = 0; i < count; i++) {
5444 struct ata_port *ap;
5445 u32 scontrol;
5446 int rc;
5447
5448 ap = host_set->ports[i];
5449
5450 /* init sata_spd_limit to the current value */
5451 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5452 int spd = (scontrol >> 4) & 0xf;
5453 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5454 }
5455 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5456
5457 rc = scsi_add_host(ap->host, dev);
5458 if (rc) {
5459 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5460 /* FIXME: do something useful here */
5461 /* FIXME: handle unconditional calls to
5462 * scsi_scan_host and ata_host_remove, below,
5463 * at the very least
5464 */
5465 }
5466
5467 if (ap->ops->error_handler) {
5468 struct ata_eh_info *ehi = &ap->eh_info;
5469 unsigned long flags;
5470
5471 ata_port_probe(ap);
5472
5473 /* kick EH for boot probing */
5474 spin_lock_irqsave(ap->lock, flags);
5475
5476 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5477 ehi->action |= ATA_EH_SOFTRESET;
5478 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5479
5480 ap->pflags |= ATA_PFLAG_LOADING;
5481 ata_port_schedule_eh(ap);
5482
5483 spin_unlock_irqrestore(ap->lock, flags);
5484
5485 /* wait for EH to finish */
5486 ata_port_wait_eh(ap);
5487 } else {
5488 DPRINTK("ata%u: bus probe begin\n", ap->id);
5489 rc = ata_bus_probe(ap);
5490 DPRINTK("ata%u: bus probe end\n", ap->id);
5491
5492 if (rc) {
5493 /* FIXME: do something useful here?
5494 * Current libata behavior will
5495 * tear down everything when
5496 * the module is removed
5497 * or the h/w is unplugged.
5498 */
5499 }
5500 }
5501 }
5502
5503 /* probes are done, now scan each port's disk(s) */
5504 DPRINTK("host probe begin\n");
5505 for (i = 0; i < count; i++) {
5506 struct ata_port *ap = host_set->ports[i];
5507
5508 ata_scsi_scan_host(ap);
5509 }
5510
5511 dev_set_drvdata(dev, host_set);
5512
5513 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5514 return ent->n_ports; /* success */
5515
5516err_out:
5517 for (i = 0; i < count; i++) {
5518 struct ata_port *ap = host_set->ports[i];
5519 if (ap) {
5520 ap->ops->port_stop(ap);
5521 scsi_host_put(ap->host);
5522 }
5523 }
5524err_free_ret:
5525 kfree(host_set);
5526 VPRINTK("EXIT, returning 0\n");
5527 return 0;
5528}
5529
5530/**
5531 *	ata_port_detach - Detach ATA port in preparation for device removal
5532 * @ap: ATA port to be detached
5533 *
5534 * Detach all ATA devices and the associated SCSI devices of @ap;
5535 * then, remove the associated SCSI host. @ap is guaranteed to
5536 * be quiescent on return from this function.
5537 *
5538 * LOCKING:
5539 * Kernel thread context (may sleep).
5540 */
5541void ata_port_detach(struct ata_port *ap)
5542{
5543 unsigned long flags;
5544 int i;
5545
5546 if (!ap->ops->error_handler)
5547 goto skip_eh;
5548
5549 /* tell EH we're leaving & flush EH */
5550 spin_lock_irqsave(ap->lock, flags);
5551 ap->pflags |= ATA_PFLAG_UNLOADING;
5552 spin_unlock_irqrestore(ap->lock, flags);
5553
5554 ata_port_wait_eh(ap);
5555
5556 /* EH is now guaranteed to see UNLOADING, so no new device
5557 * will be attached. Disable all existing devices.
5558 */
5559 spin_lock_irqsave(ap->lock, flags);
5560
5561 for (i = 0; i < ATA_MAX_DEVICES; i++)
5562 ata_dev_disable(&ap->device[i]);
5563
5564 spin_unlock_irqrestore(ap->lock, flags);
5565
5566 /* Final freeze & EH. All in-flight commands are aborted. EH
5567	 * will be skipped and retries will be terminated with bad
5568 * target.
5569 */
5570 spin_lock_irqsave(ap->lock, flags);
5571 ata_port_freeze(ap); /* won't be thawed */
5572 spin_unlock_irqrestore(ap->lock, flags);
5573
5574 ata_port_wait_eh(ap);
5575
5576 /* Flush hotplug task. The sequence is similar to
5577 * ata_port_flush_task().
5578 */
5579 flush_workqueue(ata_aux_wq);
5580 cancel_delayed_work(&ap->hotplug_task);
5581 flush_workqueue(ata_aux_wq);
5582
5583 skip_eh:
5584 /* remove the associated SCSI host */
5585 scsi_remove_host(ap->host);
5586}
5587
5588/**
5589 * ata_host_set_remove - PCI layer callback for device removal
5590 * @host_set: ATA host set that was removed
5591 *
5592 * Unregister all objects associated with this host set. Free those
5593 * objects.
5594 *
5595 * LOCKING:
5596 * Inherited from calling layer (may sleep).
5597 */
5598
5599void ata_host_set_remove(struct ata_host_set *host_set)
5600{
5601 unsigned int i;
5602
5603 for (i = 0; i < host_set->n_ports; i++)
5604 ata_port_detach(host_set->ports[i]);
5605
5606 free_irq(host_set->irq, host_set);
5607
5608 for (i = 0; i < host_set->n_ports; i++) {
5609 struct ata_port *ap = host_set->ports[i];
5610
5611 ata_scsi_release(ap->host);
5612
5613 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5614 struct ata_ioports *ioaddr = &ap->ioaddr;
5615
5616 if (ioaddr->cmd_addr == 0x1f0)
5617 release_region(0x1f0, 8);
5618 else if (ioaddr->cmd_addr == 0x170)
5619 release_region(0x170, 8);
5620 }
5621
5622 scsi_host_put(ap->host);
5623 }
5624
5625 if (host_set->ops->host_stop)
5626 host_set->ops->host_stop(host_set);
5627
5628 kfree(host_set);
5629}
5630
5631/**
5632 * ata_scsi_release - SCSI layer callback hook for host unload
5633 * @host: libata host to be unloaded
5634 *
5635 * Performs all duties necessary to shut down a libata port...
5636 * Kill port kthread, disable port, and release resources.
5637 *
5638 * LOCKING:
5639 * Inherited from SCSI layer.
5640 *
5641 * RETURNS:
5642 * One.
5643 */
5644
5645int ata_scsi_release(struct Scsi_Host *host)
5646{
5647 struct ata_port *ap = ata_shost_to_port(host);
5648
5649 DPRINTK("ENTER\n");
5650
5651 ap->ops->port_disable(ap);
5652 ap->ops->port_stop(ap);
5653
5654 DPRINTK("EXIT\n");
5655 return 1;
5656}
5657
5658/**
5659 * ata_std_ports - initialize ioaddr with standard port offsets.
5660 * @ioaddr: IO address structure to be initialized
5661 *
5662 * Utility function which initializes data_addr, error_addr,
5663 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5664 * device_addr, status_addr, and command_addr to standard offsets
5665 * relative to cmd_addr.
5666 *
5667 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5668 */
5669
5670void ata_std_ports(struct ata_ioports *ioaddr)
5671{
5672 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5673 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5674 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5675 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5676 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5677 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5678 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5679 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5680 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5681 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5682}
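
/* Editor's note -- illustrative sketch, not part of the original file.
 * Typical legacy-port setup in an LLDD probe path: only cmd_addr and
 * ctl_addr are filled in by hand, the taskfile register addresses are
 * then derived by ata_std_ports():
 *
 *	probe_ent->port[0].cmd_addr = 0x1f0;
 *	probe_ent->port[0].altstatus_addr =
 *	probe_ent->port[0].ctl_addr = 0x3f6;
 *	ata_std_ports(&probe_ent->port[0]);
 */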
5683
5684
5685#ifdef CONFIG_PCI
5686
5687void ata_pci_host_stop (struct ata_host_set *host_set)
5688{
5689 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5690
5691 pci_iounmap(pdev, host_set->mmio_base);
5692}
5693
5694/**
5695 * ata_pci_remove_one - PCI layer callback for device removal
5696 * @pdev: PCI device that was removed
5697 *
5698 * PCI layer indicates to libata via this hook that
5699 * a hot-unplug or module unload event has occurred.
5700 * Handle this by unregistering all objects associated
5701 * with this PCI device. Free those objects. Then finally
5702 * release PCI resources and disable device.
5703 *
5704 * LOCKING:
5705 * Inherited from PCI layer (may sleep).
5706 */
5707
5708void ata_pci_remove_one (struct pci_dev *pdev)
5709{
5710 struct device *dev = pci_dev_to_dev(pdev);
5711 struct ata_host_set *host_set = dev_get_drvdata(dev);
5712 struct ata_host_set *host_set2 = host_set->next;
5713
5714 ata_host_set_remove(host_set);
5715 if (host_set2)
5716 ata_host_set_remove(host_set2);
5717
5718 pci_release_regions(pdev);
5719 pci_disable_device(pdev);
5720 dev_set_drvdata(dev, NULL);
5721}
5722
5723/* move to PCI subsystem */
5724int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5725{
5726 unsigned long tmp = 0;
5727
5728 switch (bits->width) {
5729 case 1: {
5730 u8 tmp8 = 0;
5731 pci_read_config_byte(pdev, bits->reg, &tmp8);
5732 tmp = tmp8;
5733 break;
5734 }
5735 case 2: {
5736 u16 tmp16 = 0;
5737 pci_read_config_word(pdev, bits->reg, &tmp16);
5738 tmp = tmp16;
5739 break;
5740 }
5741 case 4: {
5742 u32 tmp32 = 0;
5743 pci_read_config_dword(pdev, bits->reg, &tmp32);
5744 tmp = tmp32;
5745 break;
5746 }
5747
5748 default:
5749 return -EINVAL;
5750 }
5751
5752 tmp &= bits->mask;
5753
5754 return (tmp == bits->val) ? 1 : 0;
5755}
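
/* Editor's note -- illustrative sketch, not part of the original file.
 * Checking a port-enable bit in PCI config space, PIIX-style.  The
 * register offset, mask, and error code below are placeholders:
 *
 *	static const struct pci_bits my_port_enable = {
 *		0x41, 1, 0x80, 0x80	(reg, width, mask, val)
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_port_enable))
 *		return -ENODEV;		(port is disabled)
 */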
5756
5757void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state)
5758{
5759 pci_save_state(pdev);
5760
5761 if (state.event == PM_EVENT_SUSPEND) {
5762 pci_disable_device(pdev);
5763 pci_set_power_state(pdev, PCI_D3hot);
5764 }
5765}
5766
5767void ata_pci_device_do_resume(struct pci_dev *pdev)
5768{
5769 pci_set_power_state(pdev, PCI_D0);
5770 pci_restore_state(pdev);
5771 pci_enable_device(pdev);
5772 pci_set_master(pdev);
5773}
5774
5775int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5776{
5777 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
5778 int rc = 0;
5779
5780 rc = ata_host_set_suspend(host_set, state);
5781 if (rc)
5782 return rc;
5783
5784 if (host_set->next) {
5785 rc = ata_host_set_suspend(host_set->next, state);
5786 if (rc) {
5787 ata_host_set_resume(host_set);
5788 return rc;
5789 }
5790 }
5791
5792 ata_pci_device_do_suspend(pdev, state);
5793
5794 return 0;
5795}
5796
5797int ata_pci_device_resume(struct pci_dev *pdev)
5798{
5799 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
5800
5801 ata_pci_device_do_resume(pdev);
5802 ata_host_set_resume(host_set);
5803 if (host_set->next)
5804 ata_host_set_resume(host_set->next);
5805
5806 return 0;
5807}
5808#endif /* CONFIG_PCI */
5809
5810
5811static int __init ata_init(void)
5812{
5813 ata_probe_timeout *= HZ;
5814 ata_wq = create_workqueue("ata");
5815 if (!ata_wq)
5816 return -ENOMEM;
5817
5818 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5819 if (!ata_aux_wq) {
5820 destroy_workqueue(ata_wq);
5821 return -ENOMEM;
5822 }
5823
5824 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5825 return 0;
5826}
5827
5828static void __exit ata_exit(void)
5829{
5830 destroy_workqueue(ata_wq);
5831 destroy_workqueue(ata_aux_wq);
5832}
5833
5834module_init(ata_init);
5835module_exit(ata_exit);
5836
5837static unsigned long ratelimit_time;
5838static DEFINE_SPINLOCK(ata_ratelimit_lock);
5839
5840int ata_ratelimit(void)
5841{
5842 int rc;
5843 unsigned long flags;
5844
5845 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5846
5847 if (time_after(jiffies, ratelimit_time)) {
5848 rc = 1;
5849 ratelimit_time = jiffies + (HZ/5);
5850 } else
5851 rc = 0;
5852
5853 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5854
5855 return rc;
5856}
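
/* Editor's note -- illustrative sketch, not part of the original file.
 * ata_ratelimit() is typically used to throttle log output from hot
 * paths such as interrupt handlers:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */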
5857
5858/**
5859 * ata_wait_register - wait until register value changes
5860 * @reg: IO-mapped register
5861 * @mask: Mask to apply to read register value
5862 * @val: Wait condition
5863 * @interval_msec: polling interval in milliseconds
5864 * @timeout_msec: timeout in milliseconds
5865 *
5866 * Waiting for some bits of register to change is a common
5867 * operation for ATA controllers. This function reads 32bit LE
5868 * IO-mapped register @reg and tests for the following condition.
5869 *
5870 * (*@reg & mask) != val
5871 *
5872 * If the condition is met, it returns; otherwise, the process is
5873 * repeated after @interval_msec until timeout.
5874 *
5875 * LOCKING:
5876 * Kernel thread context (may sleep)
5877 *
5878 * RETURNS:
5879 * The final register value.
5880 */
5881u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5882 unsigned long interval_msec,
5883 unsigned long timeout_msec)
5884{
5885 unsigned long timeout;
5886 u32 tmp;
5887
5888 tmp = ioread32(reg);
5889
5890 /* Calculate timeout _after_ the first read to make sure
5891 * preceding writes reach the controller before starting to
5892 * eat away the timeout.
5893 */
5894 timeout = jiffies + (timeout_msec * HZ) / 1000;
5895
5896 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5897 msleep(interval_msec);
5898 tmp = ioread32(reg);
5899 }
5900
5901 return tmp;
5902}
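
/* Editor's note -- illustrative sketch, not part of the original file.
 * Waiting for a (made-up) busy bit to clear, polling every 10ms with a
 * 100ms timeout; the loop above runs while (reg & mask) == val, so
 * passing the bit as both mask and val waits for it to go to zero:
 *
 *	tmp = ata_wait_register(mmio + MY_STAT_REG,
 *				MY_STAT_BUSY, MY_STAT_BUSY, 10, 100);
 *	if (tmp & MY_STAT_BUSY)
 *		return -EBUSY;
 */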
5903
5904/*
5905 * libata is essentially a library of internal helper functions for
5906 * low-level ATA host controller drivers. As such, the API/ABI is
5907 * likely to change as new drivers are added and updated.
5908 * Do not depend on ABI/API stability.
5909 */
5910
5911EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
5912EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
5913EXPORT_SYMBOL_GPL(sata_deb_timing_long);
5914EXPORT_SYMBOL_GPL(ata_std_bios_param);
5915EXPORT_SYMBOL_GPL(ata_std_ports);
5916EXPORT_SYMBOL_GPL(ata_device_add);
5917EXPORT_SYMBOL_GPL(ata_port_detach);
5918EXPORT_SYMBOL_GPL(ata_host_set_remove);
5919EXPORT_SYMBOL_GPL(ata_sg_init);
5920EXPORT_SYMBOL_GPL(ata_sg_init_one);
5921EXPORT_SYMBOL_GPL(ata_hsm_move);
5922EXPORT_SYMBOL_GPL(ata_qc_complete);
5923EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
5924EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5925EXPORT_SYMBOL_GPL(ata_tf_load);
5926EXPORT_SYMBOL_GPL(ata_tf_read);
5927EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5928EXPORT_SYMBOL_GPL(ata_std_dev_select);
5929EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5930EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5931EXPORT_SYMBOL_GPL(ata_check_status);
5932EXPORT_SYMBOL_GPL(ata_altstatus);
5933EXPORT_SYMBOL_GPL(ata_exec_command);
5934EXPORT_SYMBOL_GPL(ata_port_start);
5935EXPORT_SYMBOL_GPL(ata_port_stop);
5936EXPORT_SYMBOL_GPL(ata_host_stop);
5937EXPORT_SYMBOL_GPL(ata_interrupt);
5938EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
5939EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
5940EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
5941EXPORT_SYMBOL_GPL(ata_qc_prep);
5942EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5943EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5944EXPORT_SYMBOL_GPL(ata_bmdma_start);
5945EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5946EXPORT_SYMBOL_GPL(ata_bmdma_status);
5947EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5948EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5949EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5950EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5951EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5952EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
5953EXPORT_SYMBOL_GPL(ata_port_probe);
5954EXPORT_SYMBOL_GPL(sata_set_spd);
5955EXPORT_SYMBOL_GPL(sata_phy_debounce);
5956EXPORT_SYMBOL_GPL(sata_phy_resume);
5957EXPORT_SYMBOL_GPL(sata_phy_reset);
5958EXPORT_SYMBOL_GPL(__sata_phy_reset);
5959EXPORT_SYMBOL_GPL(ata_bus_reset);
5960EXPORT_SYMBOL_GPL(ata_std_prereset);
5961EXPORT_SYMBOL_GPL(ata_std_softreset);
5962EXPORT_SYMBOL_GPL(sata_std_hardreset);
5963EXPORT_SYMBOL_GPL(ata_std_postreset);
5964EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5965EXPORT_SYMBOL_GPL(ata_dev_classify);
5966EXPORT_SYMBOL_GPL(ata_dev_pair);
5967EXPORT_SYMBOL_GPL(ata_port_disable);
5968EXPORT_SYMBOL_GPL(ata_ratelimit);
5969EXPORT_SYMBOL_GPL(ata_wait_register);
5970EXPORT_SYMBOL_GPL(ata_busy_sleep);
5971EXPORT_SYMBOL_GPL(ata_port_queue_task);
5972EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5973EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5974EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5975EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
5976EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
5977EXPORT_SYMBOL_GPL(ata_scsi_release);
5978EXPORT_SYMBOL_GPL(ata_host_intr);
5979EXPORT_SYMBOL_GPL(sata_scr_valid);
5980EXPORT_SYMBOL_GPL(sata_scr_read);
5981EXPORT_SYMBOL_GPL(sata_scr_write);
5982EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5983EXPORT_SYMBOL_GPL(ata_port_online);
5984EXPORT_SYMBOL_GPL(ata_port_offline);
5985EXPORT_SYMBOL_GPL(ata_host_set_suspend);
5986EXPORT_SYMBOL_GPL(ata_host_set_resume);
5987EXPORT_SYMBOL_GPL(ata_id_string);
5988EXPORT_SYMBOL_GPL(ata_id_c_string);
5989EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5990
5991EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5992EXPORT_SYMBOL_GPL(ata_timing_compute);
5993EXPORT_SYMBOL_GPL(ata_timing_merge);
5994
5995#ifdef CONFIG_PCI
5996EXPORT_SYMBOL_GPL(pci_test_config_bits);
5997EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5998EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5999EXPORT_SYMBOL_GPL(ata_pci_init_one);
6000EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6001EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6002EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6003EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6004EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6005EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6006EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6007#endif /* CONFIG_PCI */
6008
6009EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6010EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6011
6012EXPORT_SYMBOL_GPL(ata_eng_timeout);
6013EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6014EXPORT_SYMBOL_GPL(ata_port_abort);
6015EXPORT_SYMBOL_GPL(ata_port_freeze);
6016EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6017EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6018EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6019EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6020EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
deleted file mode 100644
index 2c34af99627d..000000000000
--- a/drivers/scsi/libata-eh.c
+++ /dev/null
@@ -1,2245 +0,0 @@
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi_eh.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include "scsi_transport_api.h"
42
43#include <linux/libata.h>
44
45#include "libata.h"
46
47static void __ata_port_freeze(struct ata_port *ap);
48static void ata_eh_finish(struct ata_port *ap);
49static void ata_eh_handle_port_suspend(struct ata_port *ap);
50static void ata_eh_handle_port_resume(struct ata_port *ap);
51
52static void ata_ering_record(struct ata_ering *ering, int is_io,
53 unsigned int err_mask)
54{
55 struct ata_ering_entry *ent;
56
57 WARN_ON(!err_mask);
58
59 ering->cursor++;
60 ering->cursor %= ATA_ERING_SIZE;
61
62 ent = &ering->ring[ering->cursor];
63 ent->is_io = is_io;
64 ent->err_mask = err_mask;
65 ent->timestamp = get_jiffies_64();
66}
67
68static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
69{
70 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
71 if (!ent->err_mask)
72 return NULL;
73 return ent;
74}
75
76static int ata_ering_map(struct ata_ering *ering,
77 int (*map_fn)(struct ata_ering_entry *, void *),
78 void *arg)
79{
80 int idx, rc = 0;
81 struct ata_ering_entry *ent;
82
83 idx = ering->cursor;
84 do {
85 ent = &ering->ring[idx];
86 if (!ent->err_mask)
87 break;
88 rc = map_fn(ent, arg);
89 if (rc)
90 break;
91 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
92 } while (idx != ering->cursor);
93
94 return rc;
95}
96
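/* For a concrete map_fn, see speed_down_needed_cb() further down in
 * this file: ata_ering_map() walks the ring newest-first, and that
 * callback uses it to count recent errors per category.
 */
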
97static unsigned int ata_eh_dev_action(struct ata_device *dev)
98{
99 struct ata_eh_context *ehc = &dev->ap->eh_context;
100
101 return ehc->i.action | ehc->i.dev_action[dev->devno];
102}
103
104static void ata_eh_clear_action(struct ata_device *dev,
105 struct ata_eh_info *ehi, unsigned int action)
106{
107 int i;
108
109 if (!dev) {
110 ehi->action &= ~action;
111 for (i = 0; i < ATA_MAX_DEVICES; i++)
112 ehi->dev_action[i] &= ~action;
113 } else {
114 /* doesn't make sense for port-wide EH actions */
115 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
116
117 /* break ehi->action into ehi->dev_action */
118 if (ehi->action & action) {
119 for (i = 0; i < ATA_MAX_DEVICES; i++)
120 ehi->dev_action[i] |= ehi->action & action;
121 ehi->action &= ~action;
122 }
123
124 /* turn off the specified per-dev action */
125 ehi->dev_action[dev->devno] &= ~action;
126 }
127}
128
129/**
130 * ata_scsi_timed_out - SCSI layer time out callback
131 * @cmd: timed out SCSI command
132 *
133 * Handles SCSI layer timeout. We race with normal completion of
134 * the qc for @cmd. If the qc is already gone, we lose and let
135 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
136 * timed out and EH should be invoked. Prevent ata_qc_complete()
137 * from finishing it by setting EH_SCHEDULED and return
138 * EH_NOT_HANDLED.
139 *
140 * TODO: kill this function once old EH is gone.
141 *
142 * LOCKING:
143 * Called from timer context
144 *
145 * RETURNS:
146 * EH_HANDLED or EH_NOT_HANDLED
147 */
148enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
149{
150 struct Scsi_Host *host = cmd->device->host;
151 struct ata_port *ap = ata_shost_to_port(host);
152 unsigned long flags;
153 struct ata_queued_cmd *qc;
154 enum scsi_eh_timer_return ret;
155
156 DPRINTK("ENTER\n");
157
158 if (ap->ops->error_handler) {
159 ret = EH_NOT_HANDLED;
160 goto out;
161 }
162
163 ret = EH_HANDLED;
164 spin_lock_irqsave(ap->lock, flags);
165 qc = ata_qc_from_tag(ap, ap->active_tag);
166 if (qc) {
167 WARN_ON(qc->scsicmd != cmd);
168 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
169 qc->err_mask |= AC_ERR_TIMEOUT;
170 ret = EH_NOT_HANDLED;
171 }
172 spin_unlock_irqrestore(ap->lock, flags);
173
174 out:
175 DPRINTK("EXIT, ret=%d\n", ret);
176 return ret;
177}
178
179/**
180 * ata_scsi_error - SCSI layer error handler callback
181 * @host: SCSI host on which error occurred
182 *
183 * Handles SCSI-layer-thrown error events.
184 *
185 * LOCKING:
186 * Inherited from SCSI layer (none, can sleep)
187 *
188 * RETURNS:
189 * Zero.
190 */
191void ata_scsi_error(struct Scsi_Host *host)
192{
193 struct ata_port *ap = ata_shost_to_port(host);
194 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
195 unsigned long flags;
196
197 DPRINTK("ENTER\n");
198
199 /* synchronize with port task */
200 ata_port_flush_task(ap);
201
202 /* synchronize with host_set lock and sort out timeouts */
203
204 /* For new EH, all qcs are finished in one of three ways -
205 * normal completion, error completion, and SCSI timeout.
206	 * Both completions can race against SCSI timeout. When normal
207 * completion wins, the qc never reaches EH. When error
208 * completion wins, the qc has ATA_QCFLAG_FAILED set.
209 *
210 * When SCSI timeout wins, things are a bit more complex.
211 * Normal or error completion can occur after the timeout but
212 * before this point. In such cases, both types of
213 * completions are honored. A scmd is determined to have
214 * timed out iff its associated qc is active and not failed.
215 */
216 if (ap->ops->error_handler) {
217 struct scsi_cmnd *scmd, *tmp;
218 int nr_timedout = 0;
219
220 spin_lock_irqsave(ap->lock, flags);
221
222 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
223 struct ata_queued_cmd *qc;
224
225 for (i = 0; i < ATA_MAX_QUEUE; i++) {
226 qc = __ata_qc_from_tag(ap, i);
227 if (qc->flags & ATA_QCFLAG_ACTIVE &&
228 qc->scsicmd == scmd)
229 break;
230 }
231
232 if (i < ATA_MAX_QUEUE) {
233 /* the scmd has an associated qc */
234 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
235 /* which hasn't failed yet, timeout */
236 qc->err_mask |= AC_ERR_TIMEOUT;
237 qc->flags |= ATA_QCFLAG_FAILED;
238 nr_timedout++;
239 }
240 } else {
241 /* Normal completion occurred after
242 * SCSI timeout but before this point.
243 * Successfully complete it.
244 */
245 scmd->retries = scmd->allowed;
246 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
247 }
248 }
249
250		/* If we have timed out qcs, they belong to EH from
251		 * this point on, but the state of the controller is
252 * unknown. Freeze the port to make sure the IRQ
253 * handler doesn't diddle with those qcs. This must
254 * be done atomically w.r.t. setting QCFLAG_FAILED.
255 */
256 if (nr_timedout)
257 __ata_port_freeze(ap);
258
259 spin_unlock_irqrestore(ap->lock, flags);
260 } else
261 spin_unlock_wait(ap->lock);
262
263 repeat:
264 /* invoke error handler */
265 if (ap->ops->error_handler) {
266 /* process port resume request */
267 ata_eh_handle_port_resume(ap);
268
269 /* fetch & clear EH info */
270 spin_lock_irqsave(ap->lock, flags);
271
272 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
273 ap->eh_context.i = ap->eh_info;
274 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
275
276 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
277 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
278
279 spin_unlock_irqrestore(ap->lock, flags);
280
281 /* invoke EH, skip if unloading or suspended */
282 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
283 ap->ops->error_handler(ap);
284 else
285 ata_eh_finish(ap);
286
287 /* process port suspend request */
288 ata_eh_handle_port_suspend(ap);
289
290		/* An exception might have happened after ->error_handler
291		 * recovered the port but before this point. Repeat
292		 * EH in such a case.
293 */
294 spin_lock_irqsave(ap->lock, flags);
295
296 if (ap->pflags & ATA_PFLAG_EH_PENDING) {
297 if (--repeat_cnt) {
298 ata_port_printk(ap, KERN_INFO,
299 "EH pending after completion, "
300 "repeating EH (cnt=%d)\n", repeat_cnt);
301 spin_unlock_irqrestore(ap->lock, flags);
302 goto repeat;
303 }
304 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
305 "tries, giving up\n", ATA_EH_MAX_REPEAT);
306 }
307
308 /* this run is complete, make sure EH info is clear */
309 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
310
311 /* Clear host_eh_scheduled while holding ap->lock such
312 * that if exception occurs after this point but
313 * before EH completion, SCSI midlayer will
314 * re-initiate EH.
315 */
316 host->host_eh_scheduled = 0;
317
318 spin_unlock_irqrestore(ap->lock, flags);
319 } else {
320 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
321 ap->ops->eng_timeout(ap);
322 }
323
324 /* finish or retry handled scmd's and clean up */
325 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
326
327 scsi_eh_flush_done_q(&ap->eh_done_q);
328
329 /* clean up */
330 spin_lock_irqsave(ap->lock, flags);
331
332 if (ap->pflags & ATA_PFLAG_LOADING)
333 ap->pflags &= ~ATA_PFLAG_LOADING;
334 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
335 queue_work(ata_aux_wq, &ap->hotplug_task);
336
337 if (ap->pflags & ATA_PFLAG_RECOVERED)
338 ata_port_printk(ap, KERN_INFO, "EH complete\n");
339
340 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
341
342 /* tell wait_eh that we're done */
343 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
344 wake_up_all(&ap->eh_wait_q);
345
346 spin_unlock_irqrestore(ap->lock, flags);
347
348 DPRINTK("EXIT\n");
349}
350
351/**
352 * ata_port_wait_eh - Wait for the currently pending EH to complete
353 * @ap: Port to wait EH for
354 *
355 * Wait until the currently pending EH is complete.
356 *
357 * LOCKING:
358 * Kernel thread context (may sleep).
359 */
360void ata_port_wait_eh(struct ata_port *ap)
361{
362 unsigned long flags;
363 DEFINE_WAIT(wait);
364
365 retry:
366 spin_lock_irqsave(ap->lock, flags);
367
368 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
369 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
370 spin_unlock_irqrestore(ap->lock, flags);
371 schedule();
372 spin_lock_irqsave(ap->lock, flags);
373 }
374 finish_wait(&ap->eh_wait_q, &wait);
375
376 spin_unlock_irqrestore(ap->lock, flags);
377
378 /* make sure SCSI EH is complete */
379 if (scsi_host_in_recovery(ap->host)) {
380 msleep(10);
381 goto retry;
382 }
383}
384
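/* A minimal sketch (hypothetical, not part of this file) of a typical
 * caller of ata_port_wait_eh(): a driver-unload path marks the port
 * UNLOADING, kicks EH, then waits for it to drain before tearing the
 * port down.
 */
static void foo_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;	/* makes EH finish immediately */
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
}
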
385/**
386 * ata_qc_timeout - Handle timeout of queued command
387 * @qc: Command that timed out
388 *
389 * Some part of the kernel (currently, only the SCSI layer)
390 * has noticed that the active command on port @ap has not
391 * completed after a specified length of time. Handle this
392 * condition by disabling DMA (if necessary) and completing
393 * transactions, with error if necessary.
394 *
395 * This also handles the case of the "lost interrupt", where
396 * for some reason (possibly hardware bug, possibly driver bug)
397 * an interrupt was not delivered to the driver, even though the
398 * transaction completed successfully.
399 *
400 * TODO: kill this function once old EH is gone.
401 *
402 * LOCKING:
403 * Inherited from SCSI layer (none, can sleep)
404 */
405static void ata_qc_timeout(struct ata_queued_cmd *qc)
406{
407 struct ata_port *ap = qc->ap;
408 u8 host_stat = 0, drv_stat;
409 unsigned long flags;
410
411 DPRINTK("ENTER\n");
412
413 ap->hsm_task_state = HSM_ST_IDLE;
414
415 spin_lock_irqsave(ap->lock, flags);
416
417 switch (qc->tf.protocol) {
418
419 case ATA_PROT_DMA:
420 case ATA_PROT_ATAPI_DMA:
421 host_stat = ap->ops->bmdma_status(ap);
422
423 /* before we do anything else, clear DMA-Start bit */
424 ap->ops->bmdma_stop(qc);
425
426 /* fall through */
427
428 default:
429 ata_altstatus(ap);
430 drv_stat = ata_chk_status(ap);
431
432 /* ack bmdma irq events */
433 ap->ops->irq_clear(ap);
434
435 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
436 "stat 0x%x host_stat 0x%x\n",
437 qc->tf.command, drv_stat, host_stat);
438
439 /* complete taskfile transaction */
440 qc->err_mask |= AC_ERR_TIMEOUT;
441 break;
442 }
443
444 spin_unlock_irqrestore(ap->lock, flags);
445
446 ata_eh_qc_complete(qc);
447
448 DPRINTK("EXIT\n");
449}
450
451/**
452 * ata_eng_timeout - Handle timeout of queued command
453 * @ap: Port on which timed-out command is active
454 *
455 * Some part of the kernel (currently, only the SCSI layer)
456 * has noticed that the active command on port @ap has not
457 * completed after a specified length of time. Handle this
458 * condition by disabling DMA (if necessary) and completing
459 * transactions, with error if necessary.
460 *
461 * This also handles the case of the "lost interrupt", where
462 * for some reason (possibly hardware bug, possibly driver bug)
463 * an interrupt was not delivered to the driver, even though the
464 * transaction completed successfully.
465 *
466 * TODO: kill this function once old EH is gone.
467 *
468 * LOCKING:
469 * Inherited from SCSI layer (none, can sleep)
470 */
471void ata_eng_timeout(struct ata_port *ap)
472{
473 DPRINTK("ENTER\n");
474
475 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
476
477 DPRINTK("EXIT\n");
478}
479
480/**
481 * ata_qc_schedule_eh - schedule qc for error handling
482 * @qc: command to schedule error handling for
483 *
484 * Schedule error handling for @qc. EH will kick in as soon as
485 * other commands are drained.
486 *
487 * LOCKING:
488 * spin_lock_irqsave(host_set lock)
489 */
490void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
491{
492 struct ata_port *ap = qc->ap;
493
494 WARN_ON(!ap->ops->error_handler);
495
496 qc->flags |= ATA_QCFLAG_FAILED;
497 qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
498
499	/* The following will fail if the timeout has already expired.
500 * ata_scsi_error() takes care of such scmds on EH entry.
501 * Note that ATA_QCFLAG_FAILED is unconditionally set after
502 * this function completes.
503 */
504 scsi_req_abort_cmd(qc->scsicmd);
505}
506
507/**
508 * ata_port_schedule_eh - schedule error handling without a qc
509 * @ap: ATA port to schedule EH for
510 *
511 * Schedule error handling for @ap. EH will kick in as soon as
512 * all commands are drained.
513 *
514 * LOCKING:
515 * spin_lock_irqsave(host_set lock)
516 */
517void ata_port_schedule_eh(struct ata_port *ap)
518{
519 WARN_ON(!ap->ops->error_handler);
520
521 ap->pflags |= ATA_PFLAG_EH_PENDING;
522 scsi_schedule_eh(ap->host);
523
524 DPRINTK("port EH scheduled\n");
525}
526
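/* A hedged sketch (hypothetical driver) of an interrupt path using the
 * helper above: record what happened in ap->eh_info, then schedule
 * port-wide EH. ap->lock is assumed held, per the LOCKING rules above.
 */
static void foo_note_fatal_irq(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->eh_info;

	ehi->err_mask |= AC_ERR_HSM;		/* what went wrong */
	ehi->action |= ATA_EH_SOFTRESET;	/* requested recovery */
	ata_port_schedule_eh(ap);
}
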
527/**
528 * ata_port_abort - abort all qc's on the port
529 * @ap: ATA port to abort qc's for
530 *
531 * Abort all active qc's of @ap and schedule EH.
532 *
533 * LOCKING:
534 * spin_lock_irqsave(host_set lock)
535 *
536 * RETURNS:
537 * Number of aborted qc's.
538 */
539int ata_port_abort(struct ata_port *ap)
540{
541 int tag, nr_aborted = 0;
542
543 WARN_ON(!ap->ops->error_handler);
544
545 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
546 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
547
548 if (qc) {
549 qc->flags |= ATA_QCFLAG_FAILED;
550 ata_qc_complete(qc);
551 nr_aborted++;
552 }
553 }
554
555 if (!nr_aborted)
556 ata_port_schedule_eh(ap);
557
558 return nr_aborted;
559}
560
561/**
562 * __ata_port_freeze - freeze port
563 * @ap: ATA port to freeze
564 *
565 * This function is called when an HSM violation or some other
566 * condition disrupts normal operation of the port. A frozen
567 * port is not allowed to perform any operation until it is
568 * thawed, which usually follows a successful reset.
569 *
570 * The ap->ops->freeze() callback can be used for freezing the port
571 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
572 * port cannot be frozen hardware-wise, the interrupt handler
573 * must ack and clear interrupts unconditionally while the port
574 * is frozen.
575 *
576 * LOCKING:
577 * spin_lock_irqsave(host_set lock)
578 */
579static void __ata_port_freeze(struct ata_port *ap)
580{
581 WARN_ON(!ap->ops->error_handler);
582
583 if (ap->ops->freeze)
584 ap->ops->freeze(ap);
585
586 ap->pflags |= ATA_PFLAG_FROZEN;
587
588 DPRINTK("ata%u port frozen\n", ap->id);
589}
590
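/* What a hardware ->freeze() callback might look like for a controller
 * with a per-port interrupt-mask register. FOO_PORT_IRQ_MASK and the
 * register layout are made up for illustration; the point is only that
 * freeze() silences the port so the EH thread owns the hardware.
 */
static void foo_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host_set->mmio_base;

	/* mask every interrupt source on this port (hypothetical register) */
	writel(0, mmio + FOO_PORT_IRQ_MASK(ap->port_no));
}
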
591/**
592 * ata_port_freeze - abort & freeze port
593 * @ap: ATA port to freeze
594 *
595 * Abort and freeze @ap.
596 *
597 * LOCKING:
598 * spin_lock_irqsave(host_set lock)
599 *
600 * RETURNS:
601 * Number of aborted commands.
602 */
603int ata_port_freeze(struct ata_port *ap)
604{
605 int nr_aborted;
606
607 WARN_ON(!ap->ops->error_handler);
608
609 nr_aborted = ata_port_abort(ap);
610 __ata_port_freeze(ap);
611
612 return nr_aborted;
613}
614
615/**
616 * ata_eh_freeze_port - EH helper to freeze port
617 * @ap: ATA port to freeze
618 *
619 * Freeze @ap.
620 *
621 * LOCKING:
622 * None.
623 */
624void ata_eh_freeze_port(struct ata_port *ap)
625{
626 unsigned long flags;
627
628 if (!ap->ops->error_handler)
629 return;
630
631 spin_lock_irqsave(ap->lock, flags);
632 __ata_port_freeze(ap);
633 spin_unlock_irqrestore(ap->lock, flags);
634}
635
636/**
637 * ata_eh_thaw_port - EH helper to thaw port
638 * @ap: ATA port to thaw
639 *
640 * Thaw frozen port @ap.
641 *
642 * LOCKING:
643 * None.
644 */
645void ata_eh_thaw_port(struct ata_port *ap)
646{
647 unsigned long flags;
648
649 if (!ap->ops->error_handler)
650 return;
651
652 spin_lock_irqsave(ap->lock, flags);
653
654 ap->pflags &= ~ATA_PFLAG_FROZEN;
655
656 if (ap->ops->thaw)
657 ap->ops->thaw(ap);
658
659 spin_unlock_irqrestore(ap->lock, flags);
660
661 DPRINTK("ata%u port thawed\n", ap->id);
662}
663
664static void ata_eh_scsidone(struct scsi_cmnd *scmd)
665{
666 /* nada */
667}
668
669static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
670{
671 struct ata_port *ap = qc->ap;
672 struct scsi_cmnd *scmd = qc->scsicmd;
673 unsigned long flags;
674
675 spin_lock_irqsave(ap->lock, flags);
676 qc->scsidone = ata_eh_scsidone;
677 __ata_qc_complete(qc);
678 WARN_ON(ata_tag_valid(qc->tag));
679 spin_unlock_irqrestore(ap->lock, flags);
680
681 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
682}
683
684/**
685 * ata_eh_qc_complete - Complete an active ATA command from EH
686 * @qc: Command to complete
687 *
688 * Indicate to the mid and upper layers that an ATA command has
689 * completed. To be used from EH.
690 */
691void ata_eh_qc_complete(struct ata_queued_cmd *qc)
692{
693 struct scsi_cmnd *scmd = qc->scsicmd;
694 scmd->retries = scmd->allowed;
695 __ata_eh_qc_complete(qc);
696}
697
698/**
699 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
700 * @qc: Command to retry
701 *
702 * Indicate to the mid and upper layers that an ATA command
703 * should be retried. To be used from EH.
704 *
705 * SCSI midlayer limits the number of retries to scmd->allowed.
706 * scmd->retries is decremented for commands which get retried
707 * due to unrelated failures (qc->err_mask is zero).
708 */
709void ata_eh_qc_retry(struct ata_queued_cmd *qc)
710{
711 struct scsi_cmnd *scmd = qc->scsicmd;
712 if (!qc->err_mask && scmd->retries)
713 scmd->retries--;
714 __ata_eh_qc_complete(qc);
715}
716
717/**
718 * ata_eh_detach_dev - detach ATA device
719 * @dev: ATA device to detach
720 *
721 * Detach @dev.
722 *
723 * LOCKING:
724 * None.
725 */
726static void ata_eh_detach_dev(struct ata_device *dev)
727{
728 struct ata_port *ap = dev->ap;
729 unsigned long flags;
730
731 ata_dev_disable(dev);
732
733 spin_lock_irqsave(ap->lock, flags);
734
735 dev->flags &= ~ATA_DFLAG_DETACH;
736
737 if (ata_scsi_offline_dev(dev)) {
738 dev->flags |= ATA_DFLAG_DETACHED;
739 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
740 }
741
742 /* clear per-dev EH actions */
743 ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
744 ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
745
746 spin_unlock_irqrestore(ap->lock, flags);
747}
748
749/**
750 * ata_eh_about_to_do - about to perform eh_action
751 * @ap: target ATA port
752 * @dev: target ATA dev for per-dev action (can be NULL)
753 * @action: action about to be performed
754 *
755 * Called just before performing EH actions to clear related bits
756 * in @ap->eh_info such that eh actions are not unnecessarily
757 * repeated.
758 *
759 * LOCKING:
760 * None.
761 */
762static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
763 unsigned int action)
764{
765 unsigned long flags;
766 struct ata_eh_info *ehi = &ap->eh_info;
767 struct ata_eh_context *ehc = &ap->eh_context;
768
769 spin_lock_irqsave(ap->lock, flags);
770
771 /* Reset is represented by combination of actions and EHI
772 * flags. Suck in all related bits before clearing eh_info to
773 * avoid losing requested action.
774 */
775 if (action & ATA_EH_RESET_MASK) {
776 ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
777 ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;
778
779 /* make sure all reset actions are cleared & clear EHI flags */
780 action |= ATA_EH_RESET_MASK;
781 ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
782 }
783
784 ata_eh_clear_action(dev, ehi, action);
785
786 if (!(ehc->i.flags & ATA_EHI_QUIET))
787 ap->pflags |= ATA_PFLAG_RECOVERED;
788
789 spin_unlock_irqrestore(ap->lock, flags);
790}
791
792/**
793 * ata_eh_done - EH action complete
794 * @ap: target ATA port
795 * @dev: target ATA dev for per-dev action (can be NULL)
796 * @action: action just completed
797 *
798 * Called right after performing EH actions to clear related bits
799 * in @ap->eh_context.
800 *
801 * LOCKING:
802 * None.
803 */
804static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
805 unsigned int action)
806{
807 /* if reset is complete, clear all reset actions & reset modifier */
808 if (action & ATA_EH_RESET_MASK) {
809 action |= ATA_EH_RESET_MASK;
810 ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
811 }
812
813 ata_eh_clear_action(dev, &ap->eh_context.i, action);
814}
815
816/**
817 * ata_err_string - convert err_mask to descriptive string
818 * @err_mask: error mask to convert to string
819 *
820 * Convert @err_mask to descriptive string. Errors are
821 * prioritized according to severity and only the most severe
822 * error is reported.
823 *
824 * LOCKING:
825 * None.
826 *
827 * RETURNS:
828 * Descriptive string for @err_mask
829 */
830static const char * ata_err_string(unsigned int err_mask)
831{
832 if (err_mask & AC_ERR_HOST_BUS)
833 return "host bus error";
834 if (err_mask & AC_ERR_ATA_BUS)
835 return "ATA bus error";
836 if (err_mask & AC_ERR_TIMEOUT)
837 return "timeout";
838 if (err_mask & AC_ERR_HSM)
839 return "HSM violation";
840 if (err_mask & AC_ERR_SYSTEM)
841 return "internal error";
842 if (err_mask & AC_ERR_MEDIA)
843 return "media error";
844 if (err_mask & AC_ERR_INVALID)
845 return "invalid argument";
846 if (err_mask & AC_ERR_DEV)
847 return "device error";
848 return "unknown error";
849}
850
851/**
852 * ata_read_log_page - read a specific log page
853 * @dev: target device
854 * @page: page to read
855 * @buf: buffer to store read page
856 * @sectors: number of sectors to read
857 *
858 * Read log page using READ_LOG_EXT command.
859 *
860 * LOCKING:
861 * Kernel thread context (may sleep).
862 *
863 * RETURNS:
864 * 0 on success, AC_ERR_* mask otherwise.
865 */
866static unsigned int ata_read_log_page(struct ata_device *dev,
867 u8 page, void *buf, unsigned int sectors)
868{
869 struct ata_taskfile tf;
870 unsigned int err_mask;
871
872 DPRINTK("read log page - page %d\n", page);
873
874 ata_tf_init(dev, &tf);
875 tf.command = ATA_CMD_READ_LOG_EXT;
876 tf.lbal = page;
877 tf.nsect = sectors;
878 tf.hob_nsect = sectors >> 8;
879 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
880 tf.protocol = ATA_PROT_PIO;
881
882 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
883 buf, sectors * ATA_SECT_SIZE);
884
885 DPRINTK("EXIT, err_mask=%x\n", err_mask);
886 return err_mask;
887}
888
889/**
890 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
891 * @dev: Device to read log page 10h from
892 * @tag: Resulting tag of the failed command
893 * @tf: Resulting taskfile registers of the failed command
894 *
895 * Read log page 10h to obtain NCQ error details and clear error
896 * condition.
897 *
898 * LOCKING:
899 * Kernel thread context (may sleep).
900 *
901 * RETURNS:
902 * 0 on success, -errno otherwise.
903 */
904static int ata_eh_read_log_10h(struct ata_device *dev,
905 int *tag, struct ata_taskfile *tf)
906{
907 u8 *buf = dev->ap->sector_buf;
908 unsigned int err_mask;
909 u8 csum;
910 int i;
911
912 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
913 if (err_mask)
914 return -EIO;
915
916 csum = 0;
917 for (i = 0; i < ATA_SECT_SIZE; i++)
918 csum += buf[i];
919 if (csum)
920 ata_dev_printk(dev, KERN_WARNING,
921 "invalid checksum 0x%x on log page 10h\n", csum);
922
923 if (buf[0] & 0x80)
924 return -ENOENT;
925
926 *tag = buf[0] & 0x1f;
927
928 tf->command = buf[2];
929 tf->feature = buf[3];
930 tf->lbal = buf[4];
931 tf->lbam = buf[5];
932 tf->lbah = buf[6];
933 tf->device = buf[7];
934 tf->hob_lbal = buf[8];
935 tf->hob_lbam = buf[9];
936 tf->hob_lbah = buf[10];
937 tf->nsect = buf[12];
938 tf->hob_nsect = buf[13];
939
940 return 0;
941}
942
943/**
944 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
945 * @dev: device to perform REQUEST_SENSE to
946 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
947 *
948 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
949 * SENSE. This function is EH helper.
950 *
951 * LOCKING:
952 * Kernel thread context (may sleep).
953 *
954 * RETURNS:
955 * 0 on success, AC_ERR_* mask on failure
956 */
957static unsigned int atapi_eh_request_sense(struct ata_device *dev,
958 unsigned char *sense_buf)
959{
960 struct ata_port *ap = dev->ap;
961 struct ata_taskfile tf;
962 u8 cdb[ATAPI_CDB_LEN];
963
964 DPRINTK("ATAPI request sense\n");
965
966 ata_tf_init(dev, &tf);
967
968 /* FIXME: is this needed? */
969 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
970
971 /* XXX: why tf_read here? */
972 ap->ops->tf_read(ap, &tf);
973
974 /* fill these in, for the case where they are -not- overwritten */
975 sense_buf[0] = 0x70;
976 sense_buf[2] = tf.feature >> 4;
977
978 memset(cdb, 0, ATAPI_CDB_LEN);
979 cdb[0] = REQUEST_SENSE;
980 cdb[4] = SCSI_SENSE_BUFFERSIZE;
981
982 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
983 tf.command = ATA_CMD_PACKET;
984
985 /* is it pointless to prefer PIO for "safety reasons"? */
986 if (ap->flags & ATA_FLAG_PIO_DMA) {
987 tf.protocol = ATA_PROT_ATAPI_DMA;
988 tf.feature |= ATAPI_PKT_DMA;
989 } else {
990 tf.protocol = ATA_PROT_ATAPI;
991 tf.lbam = (8 * 1024) & 0xff;
992 tf.lbah = (8 * 1024) >> 8;
993 }
994
995 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
996 sense_buf, SCSI_SENSE_BUFFERSIZE);
997}
998
999/**
1000 * ata_eh_analyze_serror - analyze SError for a failed port
1001 * @ap: ATA port to analyze SError for
1002 *
1003 * Analyze SError if available and further determine cause of
1004 * failure.
1005 *
1006 * LOCKING:
1007 * None.
1008 */
1009static void ata_eh_analyze_serror(struct ata_port *ap)
1010{
1011 struct ata_eh_context *ehc = &ap->eh_context;
1012 u32 serror = ehc->i.serror;
1013 unsigned int err_mask = 0, action = 0;
1014
1015 if (serror & SERR_PERSISTENT) {
1016 err_mask |= AC_ERR_ATA_BUS;
1017 action |= ATA_EH_HARDRESET;
1018 }
1019 if (serror &
1020 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
1021 err_mask |= AC_ERR_ATA_BUS;
1022 action |= ATA_EH_SOFTRESET;
1023 }
1024 if (serror & SERR_PROTOCOL) {
1025 err_mask |= AC_ERR_HSM;
1026 action |= ATA_EH_SOFTRESET;
1027 }
1028 if (serror & SERR_INTERNAL) {
1029 err_mask |= AC_ERR_SYSTEM;
1030 action |= ATA_EH_SOFTRESET;
1031 }
1032 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
1033 ata_ehi_hotplugged(&ehc->i);
1034
1035 ehc->i.err_mask |= err_mask;
1036 ehc->i.action |= action;
1037}
1038
1039/**
1040 * ata_eh_analyze_ncq_error - analyze NCQ error
1041 * @ap: ATA port to analyze NCQ error for
1042 *
1043 * Read log page 10h, determine the offending qc and acquire
1044 * error status TF. For NCQ device errors, all an LLDD has to do
1045 * is set AC_ERR_DEV in ehi->err_mask. This function takes
1046 * care of the rest.
1047 *
1048 * LOCKING:
1049 * Kernel thread context (may sleep).
1050 */
1051static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1052{
1053 struct ata_eh_context *ehc = &ap->eh_context;
1054 struct ata_device *dev = ap->device;
1055 struct ata_queued_cmd *qc;
1056 struct ata_taskfile tf;
1057 int tag, rc;
1058
1059 /* if frozen, we can't do much */
1060 if (ap->pflags & ATA_PFLAG_FROZEN)
1061 return;
1062
1063 /* is it NCQ device error? */
1064 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1065 return;
1066
1067 /* has LLDD analyzed already? */
1068 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1069 qc = __ata_qc_from_tag(ap, tag);
1070
1071 if (!(qc->flags & ATA_QCFLAG_FAILED))
1072 continue;
1073
1074 if (qc->err_mask)
1075 return;
1076 }
1077
1078 /* okay, this error is ours */
1079 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1080 if (rc) {
1081 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
1082 "(errno=%d)\n", rc);
1083 return;
1084 }
1085
1086 if (!(ap->sactive & (1 << tag))) {
1087 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
1088 "inactive tag %d\n", tag);
1089 return;
1090 }
1091
1092 /* we've got the perpetrator, condemn it */
1093 qc = __ata_qc_from_tag(ap, tag);
1094 memcpy(&qc->result_tf, &tf, sizeof(tf));
1095 qc->err_mask |= AC_ERR_DEV;
1096 ehc->i.err_mask &= ~AC_ERR_DEV;
1097}
1098
1099/**
1100 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1101 * @qc: qc to analyze
1102 * @tf: Taskfile registers to analyze
1103 *
1104 * Analyze taskfile of @qc and further determine cause of
1105 * failure. This function also requests ATAPI sense data if
1106 * available.
1107 *
1108 * LOCKING:
1109 * Kernel thread context (may sleep).
1110 *
1111 * RETURNS:
1112 * Determined recovery action
1113 */
1114static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1115 const struct ata_taskfile *tf)
1116{
1117 unsigned int tmp, action = 0;
1118 u8 stat = tf->command, err = tf->feature;
1119
1120 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1121 qc->err_mask |= AC_ERR_HSM;
1122 return ATA_EH_SOFTRESET;
1123 }
1124
1125 if (!(qc->err_mask & AC_ERR_DEV))
1126 return 0;
1127
1128 switch (qc->dev->class) {
1129 case ATA_DEV_ATA:
1130 if (err & ATA_ICRC)
1131 qc->err_mask |= AC_ERR_ATA_BUS;
1132 if (err & ATA_UNC)
1133 qc->err_mask |= AC_ERR_MEDIA;
1134 if (err & ATA_IDNF)
1135 qc->err_mask |= AC_ERR_INVALID;
1136 break;
1137
1138 case ATA_DEV_ATAPI:
1139 tmp = atapi_eh_request_sense(qc->dev,
1140 qc->scsicmd->sense_buffer);
1141 if (!tmp) {
1142 /* ATA_QCFLAG_SENSE_VALID is used to tell
1143 * atapi_qc_complete() that sense data is
1144 * already valid.
1145 *
1146 * TODO: interpret sense data and set
1147 * appropriate err_mask.
1148 */
1149 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1150 } else
1151 qc->err_mask |= tmp;
1152 }
1153
1154 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1155 action |= ATA_EH_SOFTRESET;
1156
1157 return action;
1158}
1159
1160static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1161{
1162 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1163 return 1;
1164
1165 if (ent->is_io) {
1166 if (ent->err_mask & AC_ERR_HSM)
1167 return 1;
1168 if ((ent->err_mask &
1169 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1170 return 2;
1171 }
1172
1173 return 0;
1174}
1175
1176struct speed_down_needed_arg {
1177 u64 since;
1178 int nr_errors[3];
1179};
1180
1181static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1182{
1183 struct speed_down_needed_arg *arg = void_arg;
1184
1185 if (ent->timestamp < arg->since)
1186 return -1;
1187
1188 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1189 return 0;
1190}
1191
1192/**
1193 * ata_eh_speed_down_needed - Determine whether speed down is necessary
1194 * @dev: Device of interest
1195 *
1196 * This function examines the error ring of @dev and determines
1197 * whether speed down is necessary. Speed down is necessary if
1198 * there have been more than 3 Cat-1 errors or more than 10
1199 * Cat-2 errors during the last 15 minutes.
1200 *
1201 * Cat-1 errors are ATA_BUS or TIMEOUT for any command, or an
1202 * HSM violation for a known supported command.
1203 *
1204 * Cat-2 errors are unclassified DEV errors for known supported
1205 * commands.
1206 *
1207 * LOCKING:
1208 * Inherited from caller.
1209 *
1210 * RETURNS:
1211 * 1 if speed down is necessary, 0 otherwise
1212 */
1213static int ata_eh_speed_down_needed(struct ata_device *dev)
1214{
1215 const u64 interval = 15LLU * 60 * HZ;
1216 static const int err_limits[3] = { -1, 3, 10 };
1217 struct speed_down_needed_arg arg;
1218 struct ata_ering_entry *ent;
1219 int err_cat;
1220 u64 j64;
1221
1222 ent = ata_ering_top(&dev->ering);
1223 if (!ent)
1224 return 0;
1225
1226 err_cat = ata_eh_categorize_ering_entry(ent);
1227 if (err_cat == 0)
1228 return 0;
1229
1230 memset(&arg, 0, sizeof(arg));
1231
1232 j64 = get_jiffies_64();
1233 if (j64 >= interval)
1234 arg.since = j64 - interval;
1235 else
1236 arg.since = 0;
1237
1238 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1239
1240 return arg.nr_errors[err_cat] > err_limits[err_cat];
1241}
1242
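/* Worked example (illustrative): four AC_ERR_ATA_BUS errors logged in
 * the last 15 minutes each categorize as Cat-1, so nr_errors[1] == 4
 * exceeds err_limits[1] == 3 and the function above returns 1,
 * requesting a speed down.
 */
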
1243/**
1244 * ata_eh_speed_down - record error and speed down if necessary
1245 * @dev: Failed device
1246 * @is_io: Did the device fail during normal IO?
1247 * @err_mask: err_mask of the error
1248 *
1249 * Record error and examine error history to determine whether
1250 * adjusting transmission speed is necessary. It also sets
1251 * transmission limits appropriately if such adjustment is
1252 * necessary.
1253 *
1254 * LOCKING:
1255 * Kernel thread context (may sleep).
1256 *
1257 * RETURNS:
1258 * 0 on success, -errno otherwise
1259 */
1260static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1261 unsigned int err_mask)
1262{
1263 if (!err_mask)
1264 return 0;
1265
1266 /* record error and determine whether speed down is necessary */
1267 ata_ering_record(&dev->ering, is_io, err_mask);
1268
1269 if (!ata_eh_speed_down_needed(dev))
1270 return 0;
1271
1272 /* speed down SATA link speed if possible */
1273 if (sata_down_spd_limit(dev->ap) == 0)
1274 return ATA_EH_HARDRESET;
1275
1276 /* lower transfer mode */
1277 if (ata_down_xfermask_limit(dev, 0) == 0)
1278 return ATA_EH_SOFTRESET;
1279
1280 ata_dev_printk(dev, KERN_ERR,
1281 "speed down requested but no transfer mode left\n");
1282 return 0;
1283}
1284
1285/**
1286 * ata_eh_autopsy - analyze error and determine recovery action
1287 * @ap: ATA port to perform autopsy on
1288 *
1289 * Analyze why @ap failed and determine which recovery action is
1290 * needed. This function also sets more detailed AC_ERR_* values
1291 * and fills sense data for ATAPI CHECK SENSE.
1292 *
1293 * LOCKING:
1294 * Kernel thread context (may sleep).
1295 */
1296static void ata_eh_autopsy(struct ata_port *ap)
1297{
1298 struct ata_eh_context *ehc = &ap->eh_context;
1299 unsigned int all_err_mask = 0;
1300 int tag, is_io = 0;
1301 u32 serror;
1302 int rc;
1303
1304 DPRINTK("ENTER\n");
1305
1306 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1307 return;
1308
1309 /* obtain and analyze SError */
1310 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1311 if (rc == 0) {
1312 ehc->i.serror |= serror;
1313 ata_eh_analyze_serror(ap);
1314 } else if (rc != -EOPNOTSUPP)
1315 ehc->i.action |= ATA_EH_HARDRESET;
1316
1317 /* analyze NCQ failure */
1318 ata_eh_analyze_ncq_error(ap);
1319
1320 /* any real error trumps AC_ERR_OTHER */
1321 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1322 ehc->i.err_mask &= ~AC_ERR_OTHER;
1323
1324 all_err_mask |= ehc->i.err_mask;
1325
1326 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1327 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1328
1329 if (!(qc->flags & ATA_QCFLAG_FAILED))
1330 continue;
1331
1332 /* inherit upper level err_mask */
1333 qc->err_mask |= ehc->i.err_mask;
1334
1335 /* analyze TF */
1336 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1337
1338 /* DEV errors are probably spurious in case of ATA_BUS error */
1339 if (qc->err_mask & AC_ERR_ATA_BUS)
1340 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1341 AC_ERR_INVALID);
1342
1343 /* any real error trumps unknown error */
1344 if (qc->err_mask & ~AC_ERR_OTHER)
1345 qc->err_mask &= ~AC_ERR_OTHER;
1346
1347 /* SENSE_VALID trumps dev/unknown error and revalidation */
1348 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1349 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1350 ehc->i.action &= ~ATA_EH_REVALIDATE;
1351 }
1352
1353 /* accumulate error info */
1354 ehc->i.dev = qc->dev;
1355 all_err_mask |= qc->err_mask;
1356 if (qc->flags & ATA_QCFLAG_IO)
1357 is_io = 1;
1358 }
1359
1360 /* enforce default EH actions */
1361 if (ap->pflags & ATA_PFLAG_FROZEN ||
1362 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1363 ehc->i.action |= ATA_EH_SOFTRESET;
1364 else if (all_err_mask)
1365 ehc->i.action |= ATA_EH_REVALIDATE;
1366
1367 /* if we have offending qcs and the associated failed device */
1368 if (ehc->i.dev) {
1369 /* speed down */
1370 ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
1371 all_err_mask);
1372
1373 /* perform per-dev EH action only on the offending device */
1374 ehc->i.dev_action[ehc->i.dev->devno] |=
1375 ehc->i.action & ATA_EH_PERDEV_MASK;
1376 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
1377 }
1378
1379 DPRINTK("EXIT\n");
1380}
1381
1382/**
1383 * ata_eh_report - report error handling to user
1384 * @ap: ATA port EH is going on
1385 *
1386 * Report EH to user.
1387 *
1388 * LOCKING:
1389 * None.
1390 */
1391static void ata_eh_report(struct ata_port *ap)
1392{
1393 struct ata_eh_context *ehc = &ap->eh_context;
1394 const char *frozen, *desc;
1395 int tag, nr_failed = 0;
1396
1397 desc = NULL;
1398 if (ehc->i.desc[0] != '\0')
1399 desc = ehc->i.desc;
1400
1401 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1402 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1403
1404 if (!(qc->flags & ATA_QCFLAG_FAILED))
1405 continue;
1406 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1407 continue;
1408
1409 nr_failed++;
1410 }
1411
1412 if (!nr_failed && !ehc->i.err_mask)
1413 return;
1414
1415 frozen = "";
1416 if (ap->pflags & ATA_PFLAG_FROZEN)
1417 frozen = " frozen";
1418
1419 if (ehc->i.dev) {
1420 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1421 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1422 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1423 ehc->i.action, frozen);
1424 if (desc)
1425 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1426 } else {
1427 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1428 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1429 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1430 ehc->i.action, frozen);
1431 if (desc)
1432 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1433 }
1434
1435 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1436 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1437
1438 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1439 continue;
1440
1441 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1442 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1443 qc->tag, qc->tf.command, qc->err_mask,
1444 qc->result_tf.command, qc->result_tf.feature,
1445 ata_err_string(qc->err_mask));
1446 }
1447}
1448
1449static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1450 unsigned int *classes)
1451{
1452 int i, rc;
1453
1454 for (i = 0; i < ATA_MAX_DEVICES; i++)
1455 classes[i] = ATA_DEV_UNKNOWN;
1456
1457 rc = reset(ap, classes);
1458 if (rc)
1459 return rc;
1460
1461	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
1462	 * complete and convert all ATA_DEV_UNKNOWN to
1463 * ATA_DEV_NONE.
1464 */
1465 for (i = 0; i < ATA_MAX_DEVICES; i++)
1466 if (classes[i] != ATA_DEV_UNKNOWN)
1467 break;
1468
1469 if (i < ATA_MAX_DEVICES)
1470 for (i = 0; i < ATA_MAX_DEVICES; i++)
1471 if (classes[i] == ATA_DEV_UNKNOWN)
1472 classes[i] = ATA_DEV_NONE;
1473
1474 return 0;
1475}
1476
1477static int ata_eh_followup_srst_needed(int rc, int classify,
1478 const unsigned int *classes)
1479{
1480 if (rc == -EAGAIN)
1481 return 1;
1482 if (rc != 0)
1483 return 0;
1484 if (classify && classes[0] == ATA_DEV_UNKNOWN)
1485 return 1;
1486 return 0;
1487}
1488
1489static int ata_eh_reset(struct ata_port *ap, int classify,
1490 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1491 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1492{
1493 struct ata_eh_context *ehc = &ap->eh_context;
1494 unsigned int *classes = ehc->classes;
1495 int tries = ATA_EH_RESET_TRIES;
1496 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
1497 unsigned int action;
1498 ata_reset_fn_t reset;
1499 int i, did_followup_srst, rc;
1500
1501 /* about to reset */
1502 ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1503
1504 /* Determine which reset to use and record in ehc->i.action.
1505 * prereset() may examine and modify it.
1506 */
1507 action = ehc->i.action;
1508 ehc->i.action &= ~ATA_EH_RESET_MASK;
1509 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1510 !(action & ATA_EH_HARDRESET))))
1511 ehc->i.action |= ATA_EH_SOFTRESET;
1512 else
1513 ehc->i.action |= ATA_EH_HARDRESET;
1514
1515 if (prereset) {
1516 rc = prereset(ap);
1517 if (rc) {
1518 ata_port_printk(ap, KERN_ERR,
1519 "prereset failed (errno=%d)\n", rc);
1520 return rc;
1521 }
1522 }
1523
1524 /* prereset() might have modified ehc->i.action */
1525 if (ehc->i.action & ATA_EH_HARDRESET)
1526 reset = hardreset;
1527 else if (ehc->i.action & ATA_EH_SOFTRESET)
1528 reset = softreset;
1529 else {
1530 /* prereset told us not to reset, bang classes and return */
1531 for (i = 0; i < ATA_MAX_DEVICES; i++)
1532 classes[i] = ATA_DEV_NONE;
1533 return 0;
1534 }
1535
1536 /* did prereset() screw up? if so, fix up to avoid oopsing */
1537 if (!reset) {
1538 ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1539 "invalid reset type\n");
1540 if (softreset)
1541 reset = softreset;
1542 else
1543 reset = hardreset;
1544 }
1545
1546 retry:
1547 /* shut up during boot probing */
1548 if (verbose)
1549 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1550 reset == softreset ? "soft" : "hard");
1551
1552 /* mark that this EH session started with reset */
1553 ehc->i.flags |= ATA_EHI_DID_RESET;
1554
1555 rc = ata_do_reset(ap, reset, classes);
1556
1557 did_followup_srst = 0;
1558 if (reset == hardreset &&
1559 ata_eh_followup_srst_needed(rc, classify, classes)) {
1560 /* okay, let's do follow-up softreset */
1561 did_followup_srst = 1;
1562 reset = softreset;
1563
1564 if (!reset) {
1565 ata_port_printk(ap, KERN_ERR,
1566 "follow-up softreset required "
1567				"but no softreset available\n");
1568 return -EINVAL;
1569 }
1570
1571 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1572 rc = ata_do_reset(ap, reset, classes);
1573
1574 if (rc == 0 && classify &&
1575 classes[0] == ATA_DEV_UNKNOWN) {
1576 ata_port_printk(ap, KERN_ERR,
1577 "classification failed\n");
1578 return -EINVAL;
1579 }
1580 }
1581
1582 if (rc && --tries) {
1583 const char *type;
1584
1585 if (reset == softreset) {
1586 if (did_followup_srst)
1587 type = "follow-up soft";
1588 else
1589 type = "soft";
1590 } else
1591 type = "hard";
1592
1593 ata_port_printk(ap, KERN_WARNING,
1594 "%sreset failed, retrying in 5 secs\n", type);
1595 ssleep(5);
1596
1597 if (reset == hardreset)
1598 sata_down_spd_limit(ap);
1599 if (hardreset)
1600 reset = hardreset;
1601 goto retry;
1602 }
1603
1604 if (rc == 0) {
1605 /* After the reset, the device state is PIO 0 and the
1606 * controller state is undefined. Record the mode.
1607 */
1608 for (i = 0; i < ATA_MAX_DEVICES; i++)
1609 ap->device[i].pio_mode = XFER_PIO_0;
1610
1611 if (postreset)
1612 postreset(ap, classes);
1613
1614 /* reset successful, schedule revalidation */
1615 ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1616 ehc->i.action |= ATA_EH_REVALIDATE;
1617 }
1618
1619 return rc;
1620}
1621
1622static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1623 struct ata_device **r_failed_dev)
1624{
1625 struct ata_eh_context *ehc = &ap->eh_context;
1626 struct ata_device *dev;
1627 unsigned long flags;
1628 int i, rc = 0;
1629
1630 DPRINTK("ENTER\n");
1631
1632 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1633 unsigned int action;
1634
1635 dev = &ap->device[i];
1636 action = ata_eh_dev_action(dev);
1637
1638 if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
1639 if (ata_port_offline(ap)) {
1640 rc = -EIO;
1641 break;
1642 }
1643
1644 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1645 rc = ata_dev_revalidate(dev,
1646 ehc->i.flags & ATA_EHI_DID_RESET);
1647 if (rc)
1648 break;
1649
1650 ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1651
1652 /* schedule the scsi_rescan_device() here */
1653 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1654 } else if (dev->class == ATA_DEV_UNKNOWN &&
1655 ehc->tries[dev->devno] &&
1656 ata_class_enabled(ehc->classes[dev->devno])) {
1657 dev->class = ehc->classes[dev->devno];
1658
1659 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1660 if (rc == 0)
1661 rc = ata_dev_configure(dev, 1);
1662
1663 if (rc) {
1664 dev->class = ATA_DEV_UNKNOWN;
1665 break;
1666 }
1667
1668 spin_lock_irqsave(ap->lock, flags);
1669 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1670 spin_unlock_irqrestore(ap->lock, flags);
1671 }
1672 }
1673
1674 if (rc)
1675 *r_failed_dev = dev;
1676
1677 DPRINTK("EXIT\n");
1678 return rc;
1679}
1680
1681/**
1682 * ata_eh_suspend - handle suspend EH action
1683 * @ap: target host port
1684 * @r_failed_dev: result parameter to indicate failing device
1685 *
1686 * Handle suspend EH action. Disk devices are spun down and
1687 * other types of devices are just marked suspended. Once
1688 * suspended, no EH action to the device is allowed until it is
1689 * resumed.
1690 *
1691 * LOCKING:
1692 * Kernel thread context (may sleep).
1693 *
1694 * RETURNS:
1695 * 0 on success, -errno otherwise
1696 */
1697static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
1698{
1699 struct ata_device *dev;
1700 int i, rc = 0;
1701
1702 DPRINTK("ENTER\n");
1703
1704 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1705 unsigned long flags;
1706 unsigned int action, err_mask;
1707
1708 dev = &ap->device[i];
1709 action = ata_eh_dev_action(dev);
1710
1711 if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
1712 continue;
1713
1714 WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
1715
1716 ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
1717
1718 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1719 /* flush cache */
1720 rc = ata_flush_cache(dev);
1721 if (rc)
1722 break;
1723
1724 /* spin down */
1725 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
1726 if (err_mask) {
1727 ata_dev_printk(dev, KERN_ERR, "failed to "
1728 "spin down (err_mask=0x%x)\n",
1729 err_mask);
1730 rc = -EIO;
1731 break;
1732 }
1733 }
1734
1735 spin_lock_irqsave(ap->lock, flags);
1736 dev->flags |= ATA_DFLAG_SUSPENDED;
1737 spin_unlock_irqrestore(ap->lock, flags);
1738
1739 ata_eh_done(ap, dev, ATA_EH_SUSPEND);
1740 }
1741
1742 if (rc)
1743 *r_failed_dev = dev;
1744
1745 DPRINTK("EXIT\n");
1746	return rc;
1747}
1748
1749/**
1750 * ata_eh_prep_resume - prep for resume EH action
1751 * @ap: target host port
1752 *
1753 * Clear SUSPENDED in preparation for scheduled resume actions.
1754 * This allows other parts of EH to access the devices being
1755 * resumed.
1756 *
1757 * LOCKING:
1758 * Kernel thread context (may sleep).
1759 */
1760static void ata_eh_prep_resume(struct ata_port *ap)
1761{
1762 struct ata_device *dev;
1763 unsigned long flags;
1764 int i;
1765
1766 DPRINTK("ENTER\n");
1767
1768 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1769 unsigned int action;
1770
1771 dev = &ap->device[i];
1772 action = ata_eh_dev_action(dev);
1773
1774 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1775 continue;
1776
1777 spin_lock_irqsave(ap->lock, flags);
1778 dev->flags &= ~ATA_DFLAG_SUSPENDED;
1779 spin_unlock_irqrestore(ap->lock, flags);
1780 }
1781
1782 DPRINTK("EXIT\n");
1783}
1784
1785/**
1786 * ata_eh_resume - handle resume EH action
1787 * @ap: target host port
1788 * @r_failed_dev: result parameter to indicate failing device
1789 *
1790 * Handle resume EH action. Target devices are already reset and
1791 * revalidated. Spinning up is the only operation left.
1792 *
1793 * LOCKING:
1794 * Kernel thread context (may sleep).
1795 *
1796 * RETURNS:
1797 * 0 on success, -errno otherwise
1798 */
1799static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
1800{
1801 struct ata_device *dev;
1802 int i, rc = 0;
1803
1804 DPRINTK("ENTER\n");
1805
1806 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1807 unsigned int action, err_mask;
1808
1809 dev = &ap->device[i];
1810 action = ata_eh_dev_action(dev);
1811
1812 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1813 continue;
1814
1815 ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
1816
1817 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1818 err_mask = ata_do_simple_cmd(dev,
1819 ATA_CMD_IDLEIMMEDIATE);
1820 if (err_mask) {
1821 ata_dev_printk(dev, KERN_ERR, "failed to "
1822 "spin up (err_mask=0x%x)\n",
1823 err_mask);
1824 rc = -EIO;
1825 break;
1826 }
1827 }
1828
1829 ata_eh_done(ap, dev, ATA_EH_RESUME);
1830 }
1831
1832 if (rc)
1833 *r_failed_dev = dev;
1834
1835 DPRINTK("EXIT\n");
1836	return rc;
1837}
1838
1839static int ata_port_nr_enabled(struct ata_port *ap)
1840{
1841 int i, cnt = 0;
1842
1843 for (i = 0; i < ATA_MAX_DEVICES; i++)
1844 if (ata_dev_enabled(&ap->device[i]))
1845 cnt++;
1846 return cnt;
1847}
1848
1849static int ata_port_nr_vacant(struct ata_port *ap)
1850{
1851 int i, cnt = 0;
1852
1853 for (i = 0; i < ATA_MAX_DEVICES; i++)
1854 if (ap->device[i].class == ATA_DEV_UNKNOWN)
1855 cnt++;
1856 return cnt;
1857}
1858
1859static int ata_eh_skip_recovery(struct ata_port *ap)
1860{
1861 struct ata_eh_context *ehc = &ap->eh_context;
1862 int i;
1863
1864 /* skip if all possible devices are suspended */
1865 for (i = 0; i < ata_port_max_devices(ap); i++) {
1866 struct ata_device *dev = &ap->device[i];
1867
1868 if (!(dev->flags & ATA_DFLAG_SUSPENDED))
1869 break;
1870 }
1871
1872 if (i == ata_port_max_devices(ap))
1873 return 1;
1874
1875 /* thaw frozen port, resume link and recover failed devices */
1876 if ((ap->pflags & ATA_PFLAG_FROZEN) ||
1877 (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
1878 return 0;
1879
1880 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1881 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1882 struct ata_device *dev = &ap->device[i];
1883
1884 if (dev->class == ATA_DEV_UNKNOWN &&
1885 ehc->classes[dev->devno] != ATA_DEV_NONE)
1886 return 0;
1887 }
1888
1889 return 1;
1890}
1891
1892/**
1893 * ata_eh_recover - recover host port after error
1894 * @ap: host port to recover
1895 * @prereset: prereset method (can be NULL)
1896 * @softreset: softreset method (can be NULL)
1897 * @hardreset: hardreset method (can be NULL)
1898 * @postreset: postreset method (can be NULL)
1899 *
1900 * This is the alpha and omega, yin and yang, heart and soul of
1901 * libata exception handling. On entry, actions required to
1902 * recover the port and hotplug requests are recorded in
1903 * eh_context. This function executes all the operations with
1904 * appropriate retries and fallbacks to resurrect failed
1905 * devices, detach goners and greet newcomers.
1906 *
1907 * LOCKING:
1908 * Kernel thread context (may sleep).
1909 *
1910 * RETURNS:
1911 * 0 on success, -errno on failure.
1912 */
1913static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1914 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1915 ata_postreset_fn_t postreset)
1916{
1917 struct ata_eh_context *ehc = &ap->eh_context;
1918 struct ata_device *dev;
1919 int down_xfermask, i, rc;
1920
1921 DPRINTK("ENTER\n");
1922
1923 /* prep for recovery */
1924 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1925 dev = &ap->device[i];
1926
1927 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1928
1929 /* process hotplug request */
1930 if (dev->flags & ATA_DFLAG_DETACH)
1931 ata_eh_detach_dev(dev);
1932
1933 if (!ata_dev_enabled(dev) &&
1934 ((ehc->i.probe_mask & (1 << dev->devno)) &&
1935 !(ehc->did_probe_mask & (1 << dev->devno)))) {
1936 ata_eh_detach_dev(dev);
1937 ata_dev_init(dev);
1938 ehc->did_probe_mask |= (1 << dev->devno);
1939 ehc->i.action |= ATA_EH_SOFTRESET;
1940 }
1941 }
1942
1943 retry:
1944 down_xfermask = 0;
1945 rc = 0;
1946
1947 /* if UNLOADING, finish immediately */
1948 if (ap->pflags & ATA_PFLAG_UNLOADING)
1949 goto out;
1950
1951 /* prep for resume */
1952 ata_eh_prep_resume(ap);
1953
1954 /* skip EH if possible. */
1955 if (ata_eh_skip_recovery(ap))
1956 ehc->i.action = 0;
1957
1958 for (i = 0; i < ATA_MAX_DEVICES; i++)
1959 ehc->classes[i] = ATA_DEV_UNKNOWN;
1960
1961 /* reset */
1962 if (ehc->i.action & ATA_EH_RESET_MASK) {
1963 ata_eh_freeze_port(ap);
1964
1965 rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
1966 softreset, hardreset, postreset);
1967 if (rc) {
1968 ata_port_printk(ap, KERN_ERR,
1969 "reset failed, giving up\n");
1970 goto out;
1971 }
1972
1973 ata_eh_thaw_port(ap);
1974 }
1975
1976 /* revalidate existing devices and attach new ones */
1977 rc = ata_eh_revalidate_and_attach(ap, &dev);
1978 if (rc)
1979 goto dev_fail;
1980
1981 /* resume devices */
1982 rc = ata_eh_resume(ap, &dev);
1983 if (rc)
1984 goto dev_fail;
1985
1986 /* configure transfer mode if the port has been reset */
1987 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1988 rc = ata_set_mode(ap, &dev);
1989 if (rc) {
1990 down_xfermask = 1;
1991 goto dev_fail;
1992 }
1993 }
1994
1995 /* suspend devices */
1996 rc = ata_eh_suspend(ap, &dev);
1997 if (rc)
1998 goto dev_fail;
1999
2000 goto out;
2001
2002 dev_fail:
2003 switch (rc) {
2004 case -ENODEV:
2005 /* device missing, schedule probing */
2006 ehc->i.probe_mask |= (1 << dev->devno);
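		/* fall through */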
2007 case -EINVAL:
2008 ehc->tries[dev->devno] = 0;
2009 break;
2010 case -EIO:
2011 sata_down_spd_limit(ap);
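		/* fall through */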
2012 default:
2013 ehc->tries[dev->devno]--;
2014 if (down_xfermask &&
2015 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
2016 ehc->tries[dev->devno] = 0;
2017 }
2018
2019 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
2020 /* disable device if it has used up all its chances */
2021 ata_dev_disable(dev);
2022
2023 /* detach if offline */
2024 if (ata_port_offline(ap))
2025 ata_eh_detach_dev(dev);
2026
2027 /* probe if requested */
2028 if ((ehc->i.probe_mask & (1 << dev->devno)) &&
2029 !(ehc->did_probe_mask & (1 << dev->devno))) {
2030 ata_eh_detach_dev(dev);
2031 ata_dev_init(dev);
2032
2033 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
2034 ehc->did_probe_mask |= (1 << dev->devno);
2035 ehc->i.action |= ATA_EH_SOFTRESET;
2036 }
2037 } else {
2038 /* soft didn't work? be haaaaard */
2039 if (ehc->i.flags & ATA_EHI_DID_RESET)
2040 ehc->i.action |= ATA_EH_HARDRESET;
2041 else
2042 ehc->i.action |= ATA_EH_SOFTRESET;
2043 }
2044
2045 if (ata_port_nr_enabled(ap)) {
2046 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
2047 "devices, retrying in 5 secs\n");
2048 ssleep(5);
2049 } else {
2050 /* no device left, repeat fast */
2051 msleep(500);
2052 }
2053
2054 goto retry;
2055
2056 out:
2057 if (rc) {
2058 for (i = 0; i < ATA_MAX_DEVICES; i++)
2059 ata_dev_disable(&ap->device[i]);
2060 }
2061
2062 DPRINTK("EXIT, rc=%d\n", rc);
2063 return rc;
2064}
2065
2066/**
2067 * ata_eh_finish - finish up EH
2068 * @ap: host port to finish EH for
2069 *
2070 * Recovery is complete. Clean up EH states and retry or finish
2071 * failed qcs.
2072 *
2073 * LOCKING:
2074 * None.
2075 */
2076static void ata_eh_finish(struct ata_port *ap)
2077{
2078 int tag;
2079
2080 /* retry or finish qcs */
2081 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2082 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2083
2084 if (!(qc->flags & ATA_QCFLAG_FAILED))
2085 continue;
2086
2087 if (qc->err_mask) {
2088 /* FIXME: Once EH migration is complete,
2089 * generate sense data in this function,
2090 * considering both err_mask and tf.
2091 */
2092 if (qc->err_mask & AC_ERR_INVALID)
2093 ata_eh_qc_complete(qc);
2094 else
2095 ata_eh_qc_retry(qc);
2096 } else {
2097 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
2098 ata_eh_qc_complete(qc);
2099 } else {
2100 /* feed zero TF to sense generation */
2101 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
2102 ata_eh_qc_retry(qc);
2103 }
2104 }
2105 }
2106}
2107
2108/**
2109 * ata_do_eh - do standard error handling
2110 * @ap: host port to handle error for
2111 * @prereset: prereset method (can be NULL)
2112 * @softreset: softreset method (can be NULL)
2113 * @hardreset: hardreset method (can be NULL)
2114 * @postreset: postreset method (can be NULL)
2115 *
2116 * Perform standard error handling sequence.
2117 *
2118 * LOCKING:
2119 * Kernel thread context (may sleep).
2120 */
2121void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
2122 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2123 ata_postreset_fn_t postreset)
2124{
2125 ata_eh_autopsy(ap);
2126 ata_eh_report(ap);
2127 ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
2128 ata_eh_finish(ap);
2129}
2130
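/* A minimal sketch (hypothetical LLDD, not part of this file) of the
 * usual way ata_do_eh() is consumed: an ->error_handler that plugs in
 * the standard reset methods exported by libata-core.
 */
static void foo_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}
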
2131/**
2132 * ata_eh_handle_port_suspend - perform port suspend operation
2133 * @ap: port to suspend
2134 *
2135 * Suspend @ap.
2136 *
2137 * LOCKING:
2138 * Kernel thread context (may sleep).
2139 */
2140static void ata_eh_handle_port_suspend(struct ata_port *ap)
2141{
2142 unsigned long flags;
2143 int rc = 0;
2144
2145 /* are we suspending? */
2146 spin_lock_irqsave(ap->lock, flags);
2147 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2148 ap->pm_mesg.event == PM_EVENT_ON) {
2149 spin_unlock_irqrestore(ap->lock, flags);
2150 return;
2151 }
2152 spin_unlock_irqrestore(ap->lock, flags);
2153
2154 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
2155
2156 /* suspend */
2157 ata_eh_freeze_port(ap);
2158
2159 if (ap->ops->port_suspend)
2160 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
2161
2162 /* report result */
2163 spin_lock_irqsave(ap->lock, flags);
2164
2165 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
2166 if (rc == 0)
2167 ap->pflags |= ATA_PFLAG_SUSPENDED;
2168 else
2169 ata_port_schedule_eh(ap);
2170
2171 if (ap->pm_result) {
2172 *ap->pm_result = rc;
2173 ap->pm_result = NULL;
2174 }
2175
2176 spin_unlock_irqrestore(ap->lock, flags);
2177
2178 return;
2179}
2180
2181/**
2182 * ata_eh_handle_port_resume - perform port resume operation
2183 * @ap: port to resume
2184 *
2185 * Resume @ap.
2186 *
2187 * This function also waits up to one second until all devices
2188 * hanging off this port request the resume EH action. This is to
2189 * prevent invoking EH, and thus reset, multiple times on resume.
2190 *
2191 * On DPM resume, where some of the devices might not be resumed
2192 * together, this may delay port resume by up to one second, but such
2193 * DPM resumes are rare and a 1 sec delay isn't too bad.
2194 *
2195 * LOCKING:
2196 * Kernel thread context (may sleep).
2197 */
2198static void ata_eh_handle_port_resume(struct ata_port *ap)
2199{
2200 unsigned long timeout;
2201 unsigned long flags;
2202 int i, rc = 0;
2203
2204 /* are we resuming? */
2205 spin_lock_irqsave(ap->lock, flags);
2206 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2207 ap->pm_mesg.event != PM_EVENT_ON) {
2208 spin_unlock_irqrestore(ap->lock, flags);
2209 return;
2210 }
2211 spin_unlock_irqrestore(ap->lock, flags);
2212
2213 /* spurious? */
2214 if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
2215 goto done;
2216
2217 if (ap->ops->port_resume)
2218 rc = ap->ops->port_resume(ap);
2219
2220 /* give devices time to request EH */
2221 timeout = jiffies + HZ; /* 1s max */
2222 while (1) {
2223 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2224 struct ata_device *dev = &ap->device[i];
2225 unsigned int action = ata_eh_dev_action(dev);
2226
2227 if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
2228 !(action & ATA_EH_RESUME))
2229 break;
2230 }
2231
2232 if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
2233 break;
2234 msleep(10);
2235 }
2236
2237 done:
2238 spin_lock_irqsave(ap->lock, flags);
2239 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
2240 if (ap->pm_result) {
2241 *ap->pm_result = rc;
2242 ap->pm_result = NULL;
2243 }
2244 spin_unlock_irqrestore(ap->lock, flags);
2245}
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
deleted file mode 100644
index e92c31d698ff..000000000000
--- a/drivers/scsi/libata-scsi.c
+++ /dev/null
@@ -1,3173 +0,0 @@
1/*
2 * libata-scsi.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from
31 * - http://www.t10.org/
32 * - http://www.t13.org/
33 *
34 */
35
36#include <linux/kernel.h>
37#include <linux/blkdev.h>
38#include <linux/spinlock.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_eh.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_tcq.h>
45#include <scsi/scsi_transport.h>
46#include <linux/libata.h>
47#include <linux/hdreg.h>
48#include <asm/uaccess.h>
49
50#include "libata.h"
51
52#define SECTOR_SIZE 512
53
54typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
55
56static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
57 const struct scsi_device *scsidev);
58static struct ata_device * ata_scsi_find_dev(struct ata_port *ap,
59 const struct scsi_device *scsidev);
60static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
61 unsigned int id, unsigned int lun);
62
63
64#define RW_RECOVERY_MPAGE 0x1
65#define RW_RECOVERY_MPAGE_LEN 12
66#define CACHE_MPAGE 0x8
67#define CACHE_MPAGE_LEN 20
68#define CONTROL_MPAGE 0xa
69#define CONTROL_MPAGE_LEN 12
70#define ALL_MPAGES 0x3f
71#define ALL_SUB_MPAGES 0xff
72
73
74static const u8 def_rw_recovery_mpage[] = {
75 RW_RECOVERY_MPAGE,
76 RW_RECOVERY_MPAGE_LEN - 2,
77 (1 << 7) | /* AWRE, sat-r06 says it shall be 0 */
78 (1 << 6), /* ARRE (auto read reallocation) */
79 0, /* read retry count */
80 0, 0, 0, 0,
81 0, /* write retry count */
82 0, 0, 0
83};
84
85static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
86 CACHE_MPAGE,
87 CACHE_MPAGE_LEN - 2,
88 0, /* contains WCE, needs to be 0 for logic */
89 0, 0, 0, 0, 0, 0, 0, 0, 0,
90 0, /* contains DRA, needs to be 0 for logic */
91 0, 0, 0, 0, 0, 0, 0
92};
93
94static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
95 CONTROL_MPAGE,
96 CONTROL_MPAGE_LEN - 2,
97 2, /* DSENSE=0, GLTSD=1 */
98 0, /* [QAM+QERR may be 1, see 05-359r1] */
99 0, 0, 0, 0, 0xff, 0xff,
100 0, 30 /* extended self test time, see 05-359r1 */
101};
102
103/*
104 * libata transport template. libata doesn't do real transport stuff.
105 * It just needs the eh_timed_out hook.
106 */
107struct scsi_transport_template ata_scsi_transport_template = {
108 .eh_strategy_handler = ata_scsi_error,
109 .eh_timed_out = ata_scsi_timed_out,
110 .user_scan = ata_scsi_user_scan,
111};
112
113
114static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
115 void (*done)(struct scsi_cmnd *))
116{
117 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
118 /* "Invalid field in cdb" */
119 done(cmd);
120}
121
122/**
123 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
124 * @sdev: SCSI device for which BIOS geometry is to be determined
125 * @bdev: block device associated with @sdev
126 * @capacity: capacity of SCSI device
127 * @geom: location to which geometry will be output
128 *
129 * Generic bios head/sector/cylinder calculator
130 * used by sd. Most BIOSes nowadays expect a XXX/255/63 (CHS)
131 * mapping. Some situations may arise where the disk is not
132 * bootable if this is not used.
133 *
134 * LOCKING:
135 * Defined by the SCSI layer. We don't really care.
136 *
137 * RETURNS:
138 * Zero.
139 */
140int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
141 sector_t capacity, int geom[])
142{
143 geom[0] = 255;
144 geom[1] = 63;
145 sector_div(capacity, 255*63);
146 geom[2] = capacity;
147
148 return 0;
149}
150
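As a concrete check of the arithmetic above, here is a minimal standalone sketch (not from the driver; the ~80 GB capacity is an arbitrary example) that applies the same XXX/255/63 mapping:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t capacity = 156301488ULL;	/* example disk, 512-byte sectors */
	int heads = 255, sectors = 63;

	/* cylinders = capacity / (255 * 63), as sector_div() computes above */
	uint64_t cylinders = capacity / (uint64_t)(heads * sectors);

	printf("C/H/S = %llu/%d/%d\n",
	       (unsigned long long)cylinders, heads, sectors);
	/* prints: C/H/S = 9729/255/63 */
	return 0;
}
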
151/**
152 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
153 * @scsidev: Device to which we are issuing command
154 * @arg: User provided data for issuing command
155 *
156 * LOCKING:
157 * Defined by the SCSI layer. We don't really care.
158 *
159 * RETURNS:
160 * Zero on success, negative errno on error.
161 */
162
163int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
164{
165 int rc = 0;
166 u8 scsi_cmd[MAX_COMMAND_SIZE];
167 u8 args[4], *argbuf = NULL;
168 int argsize = 0;
169 struct scsi_sense_hdr sshdr;
170 enum dma_data_direction data_dir;
171
172 if (arg == NULL)
173 return -EINVAL;
174
175 if (copy_from_user(args, arg, sizeof(args)))
176 return -EFAULT;
177
178 memset(scsi_cmd, 0, sizeof(scsi_cmd));
179
180 if (args[3]) {
181 argsize = SECTOR_SIZE * args[3];
182 argbuf = kmalloc(argsize, GFP_KERNEL);
183 if (argbuf == NULL) {
184 rc = -ENOMEM;
185 goto error;
186 }
187
188 scsi_cmd[1] = (4 << 1); /* PIO Data-in */
189 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
190 block count in sector count field */
191 data_dir = DMA_FROM_DEVICE;
192 } else {
193 scsi_cmd[1] = (3 << 1); /* Non-data */
194 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
195 data_dir = DMA_NONE;
196 }
197
198 scsi_cmd[0] = ATA_16;
199
200 scsi_cmd[4] = args[2];
201 if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */
202 scsi_cmd[6] = args[3];
203 scsi_cmd[8] = args[1];
204 scsi_cmd[10] = 0x4f;
205 scsi_cmd[12] = 0xc2;
206 } else {
207 scsi_cmd[6] = args[1];
208 }
209 scsi_cmd[14] = args[0];
210
211 /* Good values for timeout and retries? Values below
212 from scsi_ioctl_send_command() for default case... */
213 if (scsi_execute_req(scsidev, scsi_cmd, data_dir, argbuf, argsize,
214 &sshdr, (10*HZ), 5)) {
215 rc = -EIO;
216 goto error;
217 }
218
219 /* Need code to retrieve data from check condition? */
220
221 if ((argbuf)
222 && copy_to_user(arg + sizeof(args), argbuf, argsize))
223 rc = -EFAULT;
224error:
225 kfree(argbuf);
226 return rc;
227}
228
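The four header bytes copied in above follow the classic HDIO_DRIVE_CMD layout: args[0] is the ATA command, args[1] the sector count register value (which, with the 0x0e flags byte, also drives the transfer length on the non-SMART path), args[2] the feature register, and args[3] the number of 512-byte sectors of data returned after the header. A hedged userspace sketch issuing IDENTIFY DEVICE (0xec); /dev/sda is a placeholder path, and CAP_SYS_ADMIN plus CAP_SYS_RAWIO are required:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	/* args[0..3] = command, count, feature, sectors of returned data */
	unsigned char buf[4 + 512];
	int fd;

	memset(buf, 0, sizeof(buf));
	buf[0] = 0xec;	/* IDENTIFY DEVICE */
	buf[1] = 1;	/* count register / transfer length */
	buf[3] = 1;	/* one 512-byte sector of response data */

	fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);	/* placeholder */
	if (fd < 0 || ioctl(fd, HDIO_DRIVE_CMD, buf) < 0) {
		perror("HDIO_DRIVE_CMD");
		return 1;
	}
	/* response data starts right after the 4-byte header */
	printf("IDENTIFY word 0 = 0x%02x%02x\n", buf[5], buf[4]);
	close(fd);
	return 0;
}
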
229/**
230 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
231 * @scsidev: Device to which we are issuing command
232 * @arg: User provided data for issuing command
233 *
234 * LOCKING:
235 * Defined by the SCSI layer. We don't really care.
236 *
237 * RETURNS:
238 * Zero on success, negative errno on error.
239 */
240int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
241{
242 int rc = 0;
243 u8 scsi_cmd[MAX_COMMAND_SIZE];
244 u8 args[7];
245 struct scsi_sense_hdr sshdr;
246
247 if (arg == NULL)
248 return -EINVAL;
249
250 if (copy_from_user(args, arg, sizeof(args)))
251 return -EFAULT;
252
253 memset(scsi_cmd, 0, sizeof(scsi_cmd));
254 scsi_cmd[0] = ATA_16;
255 scsi_cmd[1] = (3 << 1); /* Non-data */
256 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
257 scsi_cmd[4] = args[1];
258 scsi_cmd[6] = args[2];
259 scsi_cmd[8] = args[3];
260 scsi_cmd[10] = args[4];
261 scsi_cmd[12] = args[5];
262 scsi_cmd[14] = args[0];
263
264 /* Good values for timeout and retries? Values below
265 from scsi_ioctl_send_command() for default case... */
266 if (scsi_execute_req(scsidev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
267 (10*HZ), 5))
268 rc = -EIO;
269
270 /* Need code to retrieve data from check condition? */
271 return rc;
272}
273
274int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
275{
276 int val = -EINVAL, rc = -EINVAL;
277
278 switch (cmd) {
279 case ATA_IOC_GET_IO32:
280 val = 0;
281 if (copy_to_user(arg, &val, 1))
282 return -EFAULT;
283 return 0;
284
285 case ATA_IOC_SET_IO32:
286 val = (unsigned long) arg;
287 if (val != 0)
288 return -EINVAL;
289 return 0;
290
291 case HDIO_DRIVE_CMD:
292 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
293 return -EACCES;
294 return ata_cmd_ioctl(scsidev, arg);
295
296 case HDIO_DRIVE_TASK:
297 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
298 return -EACCES;
299 return ata_task_ioctl(scsidev, arg);
300
301 default:
302 rc = -ENOTTY;
303 break;
304 }
305
306 return rc;
307}
308
309/**
310 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
311 * @dev: ATA device to which the new command is attached
312 * @cmd: SCSI command that originated this ATA command
313 * @done: SCSI command completion function
314 *
315 * Obtain a reference to an unused ata_queued_cmd structure,
316 * which is the basic libata structure representing a single
317 * ATA command sent to the hardware.
318 *
319 * If a command was available, fill in the SCSI-specific
320 * portions of the structure with information on the
321 * current command.
322 *
323 * LOCKING:
324 * spin_lock_irqsave(host_set lock)
325 *
326 * RETURNS:
327 * Command allocated, or %NULL if none available.
328 */
329struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
330 struct scsi_cmnd *cmd,
331 void (*done)(struct scsi_cmnd *))
332{
333 struct ata_queued_cmd *qc;
334
335 qc = ata_qc_new_init(dev);
336 if (qc) {
337 qc->scsicmd = cmd;
338 qc->scsidone = done;
339
340 if (cmd->use_sg) {
341 qc->__sg = (struct scatterlist *) cmd->request_buffer;
342 qc->n_elem = cmd->use_sg;
343 } else {
344 qc->__sg = &qc->sgent;
345 qc->n_elem = 1;
346 }
347 } else {
348 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
349 done(cmd);
350 }
351
352 return qc;
353}
354
355/**
356 * ata_dump_status - user friendly display of error info
357 * @id: id of the port in question
358 * @tf: ptr to filled out taskfile
359 *
360 * Decode and dump the ATA error/status registers for the user so
361 * that they have some idea what really happened at the non
362 * make-believe layer.
363 *
364 * LOCKING:
365 * inherited from caller
366 */
367void ata_dump_status(unsigned id, struct ata_taskfile *tf)
368{
369 u8 stat = tf->command, err = tf->feature;
370
371 printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
372 if (stat & ATA_BUSY) {
373 printk("Busy }\n"); /* Data is not valid in this case */
374 } else {
375 if (stat & 0x40) printk("DriveReady ");
376 if (stat & 0x20) printk("DeviceFault ");
377 if (stat & 0x10) printk("SeekComplete ");
378 if (stat & 0x08) printk("DataRequest ");
379 if (stat & 0x04) printk("CorrectedError ");
380 if (stat & 0x02) printk("Index ");
381 if (stat & 0x01) printk("Error ");
382 printk("}\n");
383
384 if (err) {
385 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
386 if (err & 0x04) printk("DriveStatusError ");
387 if (err & 0x80) {
388 if (err & 0x04) printk("BadCRC ");
389 else printk("Sector ");
390 }
391 if (err & 0x40) printk("UncorrectableError ");
392 if (err & 0x10) printk("SectorIdNotFound ");
393 if (err & 0x02) printk("TrackZeroNotFound ");
394 if (err & 0x01) printk("AddrMarkNotFound ");
395 printk("}\n");
396 }
397 }
398}
399
400/**
401 * ata_scsi_device_suspend - suspend ATA device associated with sdev
402 * @sdev: the SCSI device to suspend
403 * @state: target power management state
404 *
405 * Request suspend EH action on the ATA device associated with
406 * @sdev and wait for the operation to complete.
407 *
408 * LOCKING:
409 * Kernel thread context (may sleep).
410 *
411 * RETURNS:
412 * 0 on success, -errno otherwise.
413 */
414int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
415{
416 struct ata_port *ap = ata_shost_to_port(sdev->host);
417 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
418 unsigned long flags;
419 unsigned int action;
420 int rc = 0;
421
422 if (!dev)
423 goto out;
424
425 spin_lock_irqsave(ap->lock, flags);
426
427 /* wait for the previous resume to complete */
428 while (dev->flags & ATA_DFLAG_SUSPENDED) {
429 spin_unlock_irqrestore(ap->lock, flags);
430 ata_port_wait_eh(ap);
431 spin_lock_irqsave(ap->lock, flags);
432 }
433
434 /* if @sdev is already detached, nothing to do */
435 if (sdev->sdev_state == SDEV_OFFLINE ||
436 sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
437 goto out_unlock;
438
439 /* request suspend */
440 action = ATA_EH_SUSPEND;
441 if (state.event != PM_EVENT_SUSPEND)
442 action |= ATA_EH_PM_FREEZE;
443 ap->eh_info.dev_action[dev->devno] |= action;
444 ap->eh_info.flags |= ATA_EHI_QUIET;
445 ata_port_schedule_eh(ap);
446
447 spin_unlock_irqrestore(ap->lock, flags);
448
449 /* wait for EH to do the job */
450 ata_port_wait_eh(ap);
451
452 spin_lock_irqsave(ap->lock, flags);
453
454 /* If @sdev is still attached but the associated ATA device
455 * isn't suspended, the operation failed.
456 */
457 if (sdev->sdev_state != SDEV_OFFLINE &&
458 sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL &&
459 !(dev->flags & ATA_DFLAG_SUSPENDED))
460 rc = -EIO;
461
462 out_unlock:
463 spin_unlock_irqrestore(ap->lock, flags);
464 out:
465 if (rc == 0)
466 sdev->sdev_gendev.power.power_state = state;
467 return rc;
468}
469
470/**
471 * ata_scsi_device_resume - resume ATA device associated with sdev
472 * @sdev: the SCSI device to resume
473 *
474 * Request resume EH action on the ATA device associated with
475 * @sdev and return immediately. This enables parallel
476 * wakeup/spinup of devices.
477 *
478 * LOCKING:
479 * Kernel thread context (may sleep).
480 *
481 * RETURNS:
482 * 0.
483 */
484int ata_scsi_device_resume(struct scsi_device *sdev)
485{
486 struct ata_port *ap = ata_shost_to_port(sdev->host);
487 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
488 struct ata_eh_info *ehi = &ap->eh_info;
489 unsigned long flags;
490 unsigned int action;
491
492 if (!dev)
493 goto out;
494
495 spin_lock_irqsave(ap->lock, flags);
496
497 /* if @sdev is already detached, nothing to do */
498 if (sdev->sdev_state == SDEV_OFFLINE ||
499 sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
500 goto out_unlock;
501
502 /* request resume */
503 action = ATA_EH_RESUME;
504 if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND)
505 __ata_ehi_hotplugged(ehi);
506 else
507 action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET;
508 ehi->dev_action[dev->devno] |= action;
509
510 /* We don't want autopsy and verbose EH messages. Disable
511 * those if we're the only device on this link.
512 */
513 if (ata_port_max_devices(ap) == 1)
514 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
515
516 ata_port_schedule_eh(ap);
517
518 out_unlock:
519 spin_unlock_irqrestore(ap->lock, flags);
520 out:
521 sdev->sdev_gendev.power.power_state = PMSG_ON;
522 return 0;
523}
524
525/**
526 * ata_to_sense_error - convert ATA error to SCSI error
527 * @id: ATA device number
528 * @drv_stat: value contained in ATA status register
529 * @drv_err: value contained in ATA error register
530 * @sk: the sense key we'll fill out
531 * @asc: the additional sense code we'll fill out
532 * @ascq: the additional sense code qualifier we'll fill out
533 * @verbose: be verbose
534 *
535 * Converts an ATA error into a SCSI error. Fill out pointers to
536 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
537 * format sense blocks.
538 *
539 * LOCKING:
540 * spin_lock_irqsave(host_set lock)
541 */
542void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
543 u8 *ascq, int verbose)
544{
545 int i;
546
547 /* Based on the 3ware driver translation table */
548 static const unsigned char sense_table[][4] = {
549 /* BBD|ECC|ID|MAR */
550 {0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
551 /* BBD|ECC|ID */
552 {0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
553 /* ECC|MC|MARK */
554 {0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error
555 /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */
556 {0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error
557 /* MC|ID|ABRT|TRK0|MARK */
558 {0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready
559 /* MCR|MARK */
560 {0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready
561 /* Bad address mark */
562 {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field
563 /* TRK0 */
564 {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error
565 /* Abort & !ICRC */
566 {0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command
567 /* Media change request */
568 {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline
569 /* SRV */
570 {0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found
571 /* Media change */
572 {0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline
573 /* ECC */
574 {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error
575 /* BBD - block marked bad */
576 {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error
577 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
578 };
579 static const unsigned char stat_table[][4] = {
580 /* Must be first because BUSY means no other bits valid */
581 {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now
582 {0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault
583 {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now
584 {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered
585 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
586 };
587
588 /*
589 * Is this an error we can process/parse?
590 */
591 if (drv_stat & ATA_BUSY) {
592 drv_err = 0; /* Ignore the err bits, they're invalid */
593 }
594
595 if (drv_err) {
596 /* Look for drv_err */
597 for (i = 0; sense_table[i][0] != 0xFF; i++) {
598 /* Look for best matches first */
599 if ((sense_table[i][0] & drv_err) ==
600 sense_table[i][0]) {
601 *sk = sense_table[i][1];
602 *asc = sense_table[i][2];
603 *ascq = sense_table[i][3];
604 goto translate_done;
605 }
606 }
607 /* No immediate match */
608 if (verbose)
609 printk(KERN_WARNING "ata%u: no sense translation for "
610 "error 0x%02x\n", id, drv_err);
611 }
612
613 /* Fall back to interpreting status bits */
614 for (i = 0; stat_table[i][0] != 0xFF; i++) {
615 if (stat_table[i][0] & drv_stat) {
616 *sk = stat_table[i][1];
617 *asc = stat_table[i][2];
618 *ascq = stat_table[i][3];
619 goto translate_done;
620 }
621 }
622 /* No error? Undecoded? */
623 if (verbose)
624 printk(KERN_WARNING "ata%u: no sense translation for "
625 "status: 0x%02x\n", id, drv_stat);
626
627 /* We need a sensible error return here, which is tricky, and one
628 that won't cause people to do things like wrongly returning a disk */
629 *sk = ABORTED_COMMAND;
630 *asc = 0x00;
631 *ascq = 0x00;
632
633 translate_done:
634 if (verbose)
635 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
636 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
637 id, drv_stat, drv_err, *sk, *asc, *ascq);
638 return;
639}
640
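Tracing one case through the tables above: an uncorrectable read sets UNC (0x40) in the error register, the first sense_table row whose mask is wholly contained in drv_err is the ECC row, and the command is reported as MEDIUM_ERROR with ASC/ASCQ 0x11/0x04. A standalone sketch of the same first-match scan over a two-row subset of the table:

#include <stdio.h>

int main(void)
{
	/* subset of sense_table above: { error mask, SK, ASC, ASCQ } */
	static const unsigned char tbl[][4] = {
		{0x84, 0x0b, 0x47, 0x00},	/* ICRC|ABRT -> ABORTED COMMAND */
		{0x40, 0x03, 0x11, 0x04},	/* ECC       -> MEDIUM ERROR    */
		{0xFF, 0xFF, 0xFF, 0xFF},	/* END mark */
	};
	unsigned char drv_err = 0x40;		/* UNC set in the error register */
	int i;

	for (i = 0; tbl[i][0] != 0xFF; i++) {
		/* match only if every bit of the mask is present in drv_err */
		if ((tbl[i][0] & drv_err) == tbl[i][0]) {
			printf("SK/ASC/ASCQ = 0x%x/0x%02x/0x%02x\n",
			       tbl[i][1], tbl[i][2], tbl[i][3]);
			break;
		}
	}
	return 0;
}
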
641/**
642 * ata_gen_ata_desc_sense - Generate check condition sense block.
643 * @qc: Command that completed.
644 *
645 * This function is specific to the ATA descriptor format sense
646 * block specified for the ATA pass through commands. Regardless
647 * of whether the command errored or not, return a sense
648 * block. Copy all controller registers into the sense
649 * block. Clear sense key, ASC & ASCQ if there is no error.
650 *
651 * LOCKING:
652 * spin_lock_irqsave(host_set lock)
653 */
654void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
655{
656 struct scsi_cmnd *cmd = qc->scsicmd;
657 struct ata_taskfile *tf = &qc->result_tf;
658 unsigned char *sb = cmd->sense_buffer;
659 unsigned char *desc = sb + 8;
660 int verbose = qc->ap->ops->error_handler == NULL;
661
662 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
663
664 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
665
666 /*
667 * Use ata_to_sense_error() to map status register bits
668 * onto sense key, asc & ascq.
669 */
670 if (qc->err_mask ||
671 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
672 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
673 &sb[1], &sb[2], &sb[3], verbose);
674 sb[1] &= 0x0f;
675 }
676
677 /*
678 * Sense data is current and format is descriptor.
679 */
680 sb[0] = 0x72;
681
682 desc[0] = 0x09;
683
684 /*
685 * Set length of additional sense data.
686 * Since we only populate descriptor 0, the total
687 * length is the same (fixed) length as descriptor 0.
688 */
689 desc[1] = sb[7] = 14;
690
691 /*
692 * Copy registers into sense buffer.
693 */
694 desc[2] = 0x00;
695 desc[3] = tf->feature; /* == error reg */
696 desc[5] = tf->nsect;
697 desc[7] = tf->lbal;
698 desc[9] = tf->lbam;
699 desc[11] = tf->lbah;
700 desc[12] = tf->device;
701 desc[13] = tf->command; /* == status reg */
702
703 /*
704 * Fill in Extend bit, and the high order bytes
705 * if applicable.
706 */
707 if (tf->flags & ATA_TFLAG_LBA48) {
708 desc[2] |= 0x01;
709 desc[4] = tf->hob_nsect;
710 desc[6] = tf->hob_lbal;
711 desc[8] = tf->hob_lbam;
712 desc[10] = tf->hob_lbah;
713 }
714}
715
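The resulting block has a fixed shape: response code 0x72, SK/ASC/ASCQ in bytes 1-3, additional length 14 in byte 7, then a single type-0x09 ATA Status Return descriptor holding the shadow registers. A sketch laying out those bytes for a hypothetical unrecovered read (error register 0x40, status DRDY|ERR):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char sb[8 + 14];	/* header + ATA Status Return descriptor */
	unsigned char *desc = sb + 8;
	size_t i;

	memset(sb, 0, sizeof(sb));
	sb[0] = 0x72;			/* current sense, descriptor format */
	sb[1] = 0x03;			/* SK:  MEDIUM ERROR */
	sb[2] = 0x11;			/* ASC: unrecovered read error */
	sb[3] = 0x04;			/* ASCQ */
	sb[7] = desc[1] = 14;		/* additional length == descriptor length */
	desc[0] = 0x09;			/* ATA Status Return (SAT) */
	desc[3] = 0x40;			/* error register: UNC */
	desc[13] = 0x41;		/* status register: DRDY | ERR */

	for (i = 0; i < sizeof(sb); i++)
		printf("%02x%c", sb[i], (i % 8 == 7) ? '\n' : ' ');
	printf("\n");
	return 0;
}
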
716/**
717 * ata_gen_fixed_sense - generate a SCSI fixed sense block
718 * @qc: Command that we are erroring out
719 *
720 * Leverage ata_to_sense_error() to give us the codes. Fit our
721 * LBA in here if there's room.
722 *
723 * LOCKING:
724 * inherited from caller
725 */
726void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
727{
728 struct scsi_cmnd *cmd = qc->scsicmd;
729 struct ata_taskfile *tf = &qc->result_tf;
730 unsigned char *sb = cmd->sense_buffer;
731 int verbose = qc->ap->ops->error_handler == NULL;
732
733 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
734
735 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
736
737 /*
738 * Use ata_to_sense_error() to map status register bits
739 * onto sense key, asc & ascq.
740 */
741 if (qc->err_mask ||
742 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
743 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
744 &sb[2], &sb[12], &sb[13], verbose);
745 sb[2] &= 0x0f;
746 }
747
748 sb[0] = 0x70;
749 sb[7] = 0x0a;
750
751 if (tf->flags & ATA_TFLAG_LBA48) {
752 /* TODO: find solution for LBA48 descriptors */
753 }
754
755 else if (tf->flags & ATA_TFLAG_LBA) {
756 /* A small (28b) LBA will fit in the 32b info field */
757 sb[0] |= 0x80; /* set valid bit */
758 sb[3] = tf->device & 0x0f;
759 sb[4] = tf->lbah;
760 sb[5] = tf->lbam;
761 sb[6] = tf->lbal;
762 }
763
764 else {
765 /* TODO: C/H/S */
766 }
767}
768
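In the fixed-format path a 28-bit LBA is folded into the four information bytes: the low nibble of the device register provides bits 27:24 and lbah/lbam/lbal provide the rest. A sketch reassembling, from sense bytes 3-6, the LBA a consumer would read back (the register values are an arbitrary example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* example taskfile registers for LBA28 address 0x0abcdef0 */
	uint8_t device = 0x40 | 0x0a;	/* ATA_LBA bit + LBA bits 27:24 */
	uint8_t lbah = 0xbc, lbam = 0xde, lbal = 0xf0;

	/* sense bytes 3..6, filled exactly as in the function above */
	uint8_t sb3 = device & 0x0f, sb4 = lbah, sb5 = lbam, sb6 = lbal;

	uint32_t lba = ((uint32_t)sb3 << 24) | ((uint32_t)sb4 << 16) |
		       ((uint32_t)sb5 << 8) | sb6;
	printf("lba = 0x%08x\n", lba);	/* prints: lba = 0x0abcdef0 */
	return 0;
}
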
769static void ata_scsi_sdev_config(struct scsi_device *sdev)
770{
771 sdev->use_10_for_rw = 1;
772 sdev->use_10_for_ms = 1;
773}
774
775static void ata_scsi_dev_config(struct scsi_device *sdev,
776 struct ata_device *dev)
777{
778 unsigned int max_sectors;
779
780 /* TODO: 2048 is an arbitrary number, not the
781 * hardware maximum. This should be increased to
782 * 65534 when Jens Axboe's patch for dynamically
783 * determining max_sectors is merged.
784 */
785 max_sectors = ATA_MAX_SECTORS;
786 if (dev->flags & ATA_DFLAG_LBA48)
787 max_sectors = ATA_MAX_SECTORS_LBA48;
788 if (dev->max_sectors)
789 max_sectors = dev->max_sectors;
790
791 blk_queue_max_sectors(sdev->request_queue, max_sectors);
792
793 /*
794 * SATA DMA transfers must be multiples of 4 bytes, so
795 * we need to pad ATAPI transfers using an extra sg.
796 * Decrement max hw segments accordingly.
797 */
798 if (dev->class == ATA_DEV_ATAPI) {
799 request_queue_t *q = sdev->request_queue;
800 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
801 }
802
803 if (dev->flags & ATA_DFLAG_NCQ) {
804 int depth;
805
806 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
807 depth = min(ATA_MAX_QUEUE - 1, depth);
808 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
809 }
810}
811
812/**
813 * ata_scsi_slave_config - Set SCSI device attributes
814 * @sdev: SCSI device to examine
815 *
816 * This is called before we actually start reading
817 * and writing to the device, to configure certain
818 * SCSI mid-layer behaviors.
819 *
820 * LOCKING:
821 * Defined by SCSI layer. We don't really care.
822 */
823
824int ata_scsi_slave_config(struct scsi_device *sdev)
825{
826 struct ata_port *ap = ata_shost_to_port(sdev->host);
827 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
828
829 ata_scsi_sdev_config(sdev);
830
831 blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
832
833 if (dev)
834 ata_scsi_dev_config(sdev, dev);
835
836 return 0; /* scsi layer doesn't check return value, sigh */
837}
838
839/**
840 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
841 * @sdev: SCSI device to be destroyed
842 *
843 * @sdev is about to be destroyed for hot/warm unplugging. If
844 * this unplugging was initiated by libata as indicated by NULL
845 * dev->sdev, this function doesn't have to do anything.
846 * Otherwise, SCSI layer initiated warm-unplug is in progress.
847 * Clear dev->sdev, schedule the device for ATA detach and invoke
848 * EH.
849 *
850 * LOCKING:
851 * Defined by SCSI layer. We don't really care.
852 */
853void ata_scsi_slave_destroy(struct scsi_device *sdev)
854{
855 struct ata_port *ap = ata_shost_to_port(sdev->host);
856 unsigned long flags;
857 struct ata_device *dev;
858
859 if (!ap->ops->error_handler)
860 return;
861
862 spin_lock_irqsave(ap->lock, flags);
863 dev = __ata_scsi_find_dev(ap, sdev);
864 if (dev && dev->sdev) {
865 /* SCSI device already in CANCEL state, no need to offline it */
866 dev->sdev = NULL;
867 dev->flags |= ATA_DFLAG_DETACH;
868 ata_port_schedule_eh(ap);
869 }
870 spin_unlock_irqrestore(ap->lock, flags);
871}
872
873/**
874 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
875 * @sdev: SCSI device to configure queue depth for
876 * @queue_depth: new queue depth
877 *
878 * This is the libata standard hostt->change_queue_depth callback.
879 * SCSI will call into this callback when the user tries to set queue
880 * depth via sysfs.
881 *
882 * LOCKING:
883 * SCSI layer (we don't care)
884 *
885 * RETURNS:
886 * Newly configured queue depth.
887 */
888int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
889{
890 struct ata_port *ap = ata_shost_to_port(sdev->host);
891 struct ata_device *dev;
892 int max_depth;
893
894 if (queue_depth < 1)
895 return sdev->queue_depth;
896
897 dev = ata_scsi_find_dev(ap, sdev);
898 if (!dev || !ata_dev_enabled(dev))
899 return sdev->queue_depth;
900
901 max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
902 max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
903 if (queue_depth > max_depth)
904 queue_depth = max_depth;
905
906 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
907 return queue_depth;
908}
909
910/**
911 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
912 * @qc: Storage for translated ATA taskfile
913 * @scsicmd: SCSI command to translate
914 *
915 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
916 * (to start). Perhaps these commands should be preceded by
917 * CHECK POWER MODE to see what power mode the device is already in.
918 * [See SAT revision 5 at www.t10.org]
919 *
920 * LOCKING:
921 * spin_lock_irqsave(host_set lock)
922 *
923 * RETURNS:
924 * Zero on success, non-zero on error.
925 */
926
927static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
928 const u8 *scsicmd)
929{
930 struct ata_taskfile *tf = &qc->tf;
931
932 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
933 tf->protocol = ATA_PROT_NODATA;
934 if (scsicmd[1] & 0x1) {
935 ; /* ignore IMMED bit, violates sat-r05 */
936 }
937 if (scsicmd[4] & 0x2)
938 goto invalid_fld; /* LOEJ bit set not supported */
939 if (((scsicmd[4] >> 4) & 0xf) != 0)
940 goto invalid_fld; /* power conditions not supported */
941 if (scsicmd[4] & 0x1) {
942 tf->nsect = 1; /* 1 sector, lba=0 */
943
944 if (qc->dev->flags & ATA_DFLAG_LBA) {
945 tf->flags |= ATA_TFLAG_LBA;
946
947 tf->lbah = 0x0;
948 tf->lbam = 0x0;
949 tf->lbal = 0x0;
950 tf->device |= ATA_LBA;
951 } else {
952 /* CHS */
953 tf->lbal = 0x1; /* sect */
954 tf->lbam = 0x0; /* cyl low */
955 tf->lbah = 0x0; /* cyl high */
956 }
957
958 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
959 } else {
960 tf->nsect = 0; /* time period value (0 implies now) */
961 tf->command = ATA_CMD_STANDBY;
962 /* Consider: ATA STANDBY IMMEDIATE command */
963 }
964 /*
965 * Standby and Idle condition timers could be implemented but that
966 * would require libata to implement the Power condition mode page
967 * and allow the user to change it. Changing mode pages requires
968 * MODE SELECT to be implemented.
969 */
970
971 return 0;
972
973invalid_fld:
974 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
975 /* "Invalid field in cdb" */
976 return 1;
977}
978
979
980/**
981 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
982 * @qc: Storage for translated ATA taskfile
983 * @scsicmd: SCSI command to translate (ignored)
984 *
985 * Sets up an ATA taskfile to issue FLUSH CACHE or
986 * FLUSH CACHE EXT.
987 *
988 * LOCKING:
989 * spin_lock_irqsave(host_set lock)
990 *
991 * RETURNS:
992 * Zero on success, non-zero on error.
993 */
994
995static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
996{
997 struct ata_taskfile *tf = &qc->tf;
998
999 tf->flags |= ATA_TFLAG_DEVICE;
1000 tf->protocol = ATA_PROT_NODATA;
1001
1002 if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
1003 (ata_id_has_flush_ext(qc->dev->id)))
1004 tf->command = ATA_CMD_FLUSH_EXT;
1005 else
1006 tf->command = ATA_CMD_FLUSH;
1007
1008 return 0;
1009}
1010
1011/**
1012 * scsi_6_lba_len - Get LBA and transfer length
1013 * @scsicmd: SCSI command to translate
1014 *
1015 * Calculate LBA and transfer length for 6-byte commands.
1016 *
1017 * RETURNS:
1018 * @plba: the LBA
1019 * @plen: the transfer length
1020 */
1021
1022static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1023{
1024 u64 lba = 0;
1025 u32 len = 0;
1026
1027 VPRINTK("six-byte command\n");
1028
1029 lba |= ((u64)scsicmd[2]) << 8;
1030 lba |= ((u64)scsicmd[3]);
1031
1032 len |= ((u32)scsicmd[4]);
1033
1034 *plba = lba;
1035 *plen = len;
1036}
1037
1038/**
1039 * scsi_10_lba_len - Get LBA and transfer length
1040 * @scsicmd: SCSI command to translate
1041 *
1042 * Calculate LBA and transfer length for 10-byte commands.
1043 *
1044 * RETURNS:
1045 * @plba: the LBA
1046 * @plen: the transfer length
1047 */
1048
1049static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1050{
1051 u64 lba = 0;
1052 u32 len = 0;
1053
1054 VPRINTK("ten-byte command\n");
1055
1056 lba |= ((u64)scsicmd[2]) << 24;
1057 lba |= ((u64)scsicmd[3]) << 16;
1058 lba |= ((u64)scsicmd[4]) << 8;
1059 lba |= ((u64)scsicmd[5]);
1060
1061 len |= ((u32)scsicmd[7]) << 8;
1062 len |= ((u32)scsicmd[8]);
1063
1064 *plba = lba;
1065 *plen = len;
1066}
1067
1068/**
1069 * scsi_16_lba_len - Get LBA and transfer length
1070 * @scsicmd: SCSI command to translate
1071 *
1072 * Calculate LBA and transfer length for 16-byte commands.
1073 *
1074 * RETURNS:
1075 * @plba: the LBA
1076 * @plen: the transfer length
1077 */
1078
1079static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1080{
1081 u64 lba = 0;
1082 u32 len = 0;
1083
1084 VPRINTK("sixteen-byte command\n");
1085
1086 lba |= ((u64)scsicmd[2]) << 56;
1087 lba |= ((u64)scsicmd[3]) << 48;
1088 lba |= ((u64)scsicmd[4]) << 40;
1089 lba |= ((u64)scsicmd[5]) << 32;
1090 lba |= ((u64)scsicmd[6]) << 24;
1091 lba |= ((u64)scsicmd[7]) << 16;
1092 lba |= ((u64)scsicmd[8]) << 8;
1093 lba |= ((u64)scsicmd[9]);
1094
1095 len |= ((u32)scsicmd[10]) << 24;
1096 len |= ((u32)scsicmd[11]) << 16;
1097 len |= ((u32)scsicmd[12]) << 8;
1098 len |= ((u32)scsicmd[13]);
1099
1100 *plba = lba;
1101 *plen = len;
1102}
1103
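All three helpers do the same big-endian extraction at different offsets and widths. For example, a READ(10) CDB of 28 00 00 12 d6 87 00 00 08 00 decodes to LBA 1234567 (0x12d687) and a transfer length of 8 blocks. A standalone sketch of the 10-byte case:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* READ(10): opcode 0x28, LBA 0x0012d687, transfer length 8 */
	const uint8_t cdb[10] = { 0x28, 0, 0x00, 0x12, 0xd6, 0x87,
				  0, 0x00, 0x08, 0 };
	uint64_t lba;
	uint32_t len;

	/* same shifts as scsi_10_lba_len() above */
	lba = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
	      ((uint64_t)cdb[4] << 8)  |  (uint64_t)cdb[5];
	len = ((uint32_t)cdb[7] << 8)  |  (uint32_t)cdb[8];

	printf("lba=%llu len=%u\n", (unsigned long long)lba, len);
	/* prints: lba=1234567 len=8 */
	return 0;
}
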
1104/**
1105 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
1106 * @qc: Storage for translated ATA taskfile
1107 * @scsicmd: SCSI command to translate
1108 *
1109 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
1110 *
1111 * LOCKING:
1112 * spin_lock_irqsave(host_set lock)
1113 *
1114 * RETURNS:
1115 * Zero on success, non-zero on error.
1116 */
1117
1118static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1119{
1120 struct ata_taskfile *tf = &qc->tf;
1121 struct ata_device *dev = qc->dev;
1122 u64 dev_sectors = qc->dev->n_sectors;
1123 u64 block;
1124 u32 n_block;
1125
1126 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1127 tf->protocol = ATA_PROT_NODATA;
1128
1129 if (scsicmd[0] == VERIFY)
1130 scsi_10_lba_len(scsicmd, &block, &n_block);
1131 else if (scsicmd[0] == VERIFY_16)
1132 scsi_16_lba_len(scsicmd, &block, &n_block);
1133 else
1134 goto invalid_fld;
1135
1136 if (!n_block)
1137 goto nothing_to_do;
1138 if (block >= dev_sectors)
1139 goto out_of_range;
1140 if ((block + n_block) > dev_sectors)
1141 goto out_of_range;
1142
1143 if (dev->flags & ATA_DFLAG_LBA) {
1144 tf->flags |= ATA_TFLAG_LBA;
1145
1146 if (lba_28_ok(block, n_block)) {
1147 /* use LBA28 */
1148 tf->command = ATA_CMD_VERIFY;
1149 tf->device |= (block >> 24) & 0xf;
1150 } else if (lba_48_ok(block, n_block)) {
1151 if (!(dev->flags & ATA_DFLAG_LBA48))
1152 goto out_of_range;
1153
1154 /* use LBA48 */
1155 tf->flags |= ATA_TFLAG_LBA48;
1156 tf->command = ATA_CMD_VERIFY_EXT;
1157
1158 tf->hob_nsect = (n_block >> 8) & 0xff;
1159
1160 tf->hob_lbah = (block >> 40) & 0xff;
1161 tf->hob_lbam = (block >> 32) & 0xff;
1162 tf->hob_lbal = (block >> 24) & 0xff;
1163 } else
1164 /* request too large even for LBA48 */
1165 goto out_of_range;
1166
1167 tf->nsect = n_block & 0xff;
1168
1169 tf->lbah = (block >> 16) & 0xff;
1170 tf->lbam = (block >> 8) & 0xff;
1171 tf->lbal = block & 0xff;
1172
1173 tf->device |= ATA_LBA;
1174 } else {
1175 /* CHS */
1176 u32 sect, head, cyl, track;
1177
1178 if (!lba_28_ok(block, n_block))
1179 goto out_of_range;
1180
1181 /* Convert LBA to CHS */
1182 track = (u32)block / dev->sectors;
1183 cyl = track / dev->heads;
1184 head = track % dev->heads;
1185 sect = (u32)block % dev->sectors + 1;
1186
1187 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1188 (u32)block, track, cyl, head, sect);
1189
1190 /* Check whether the converted CHS can fit.
1191 Cylinder: 0-65535
1192 Head: 0-15
1193 Sector: 1-255 */
1194 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1195 goto out_of_range;
1196
1197 tf->command = ATA_CMD_VERIFY;
1198 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1199 tf->lbal = sect;
1200 tf->lbam = cyl;
1201 tf->lbah = cyl >> 8;
1202 tf->device |= head;
1203 }
1204
1205 return 0;
1206
1207invalid_fld:
1208 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1209 /* "Invalid field in cdb" */
1210 return 1;
1211
1212out_of_range:
1213 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1214 /* "Logical Block Address out of range" */
1215 return 1;
1216
1217nothing_to_do:
1218 qc->scsicmd->result = SAM_STAT_GOOD;
1219 return 1;
1220}
1221
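The CHS fallback above is plain division on the drive's logical geometry. A hedged standalone sketch, assuming a hypothetical drive with 16 heads and 63 sectors per track, converting LBA 1234567:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t heads = 16, sectors = 63;	/* hypothetical geometry */
	uint32_t block = 1234567;
	uint32_t track, cyl, head, sect;

	/* identical conversion to the CHS branch above */
	track = block / sectors;
	cyl   = track / heads;
	head  = track % heads;
	sect  = block % sectors + 1;		/* sectors count from 1 */

	printf("cyl=%u head=%u sect=%u\n", cyl, head, sect);
	/* prints: cyl=1224 head=12 sect=20 */

	/* same range check as the driver: C 0-65535, H 0-15, S 1-255 */
	if ((cyl >> 16) || (head >> 4) || (sect >> 8) || !sect)
		printf("out of range\n");
	return 0;
}
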
1222/**
1223 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1224 * @qc: Storage for translated ATA taskfile
1225 * @scsicmd: SCSI command to translate
1226 *
1227 * Converts any of six SCSI read/write commands into the
1228 * ATA counterpart, including starting sector (LBA),
1229 * sector count, and taking into account the device's LBA48
1230 * support.
1231 *
1232 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
1233 * %WRITE_16 are currently supported.
1234 *
1235 * LOCKING:
1236 * spin_lock_irqsave(host_set lock)
1237 *
1238 * RETURNS:
1239 * Zero on success, non-zero on error.
1240 */
1241
1242static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1243{
1244 struct ata_taskfile *tf = &qc->tf;
1245 struct ata_device *dev = qc->dev;
1246 u64 block;
1247 u32 n_block;
1248
1249 qc->flags |= ATA_QCFLAG_IO;
1250 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1251
1252 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
1253 scsicmd[0] == WRITE_16)
1254 tf->flags |= ATA_TFLAG_WRITE;
1255
1256 /* Calculate the SCSI LBA, transfer length and FUA. */
1257 switch (scsicmd[0]) {
1258 case READ_10:
1259 case WRITE_10:
1260 scsi_10_lba_len(scsicmd, &block, &n_block);
1261 if (unlikely(scsicmd[1] & (1 << 3)))
1262 tf->flags |= ATA_TFLAG_FUA;
1263 break;
1264 case READ_6:
1265 case WRITE_6:
1266 scsi_6_lba_len(scsicmd, &block, &n_block);
1267
1268 /* for 6-byte r/w commands, transfer length 0
1269 * means 256 blocks of data, not 0 blocks.
1270 */
1271 if (!n_block)
1272 n_block = 256;
1273 break;
1274 case READ_16:
1275 case WRITE_16:
1276 scsi_16_lba_len(scsicmd, &block, &n_block);
1277 if (unlikely(scsicmd[1] & (1 << 3)))
1278 tf->flags |= ATA_TFLAG_FUA;
1279 break;
1280 default:
1281 DPRINTK("no-byte command\n");
1282 goto invalid_fld;
1283 }
1284
1285 /* Check and compose ATA command */
1286 if (!n_block)
1287 /* For 10-byte and 16-byte SCSI R/W commands, transfer
1288 * length 0 means transfer 0 blocks of data.
1289 * However, for ATA R/W commands, sector count 0 means
1290 * 256 or 65536 sectors, not 0 sectors as in SCSI.
1291 *
1292 * WARNING: one or two older ATA drives treat 0 as 0...
1293 */
1294 goto nothing_to_do;
1295
1296 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
1297 /* yay, NCQ */
1298 if (!lba_48_ok(block, n_block))
1299 goto out_of_range;
1300
1301 tf->protocol = ATA_PROT_NCQ;
1302 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1303
1304 if (tf->flags & ATA_TFLAG_WRITE)
1305 tf->command = ATA_CMD_FPDMA_WRITE;
1306 else
1307 tf->command = ATA_CMD_FPDMA_READ;
1308
1309 qc->nsect = n_block;
1310
1311 tf->nsect = qc->tag << 3;
1312 tf->hob_feature = (n_block >> 8) & 0xff;
1313 tf->feature = n_block & 0xff;
1314
1315 tf->hob_lbah = (block >> 40) & 0xff;
1316 tf->hob_lbam = (block >> 32) & 0xff;
1317 tf->hob_lbal = (block >> 24) & 0xff;
1318 tf->lbah = (block >> 16) & 0xff;
1319 tf->lbam = (block >> 8) & 0xff;
1320 tf->lbal = block & 0xff;
1321
1322 tf->device = 1 << 6;
1323 if (tf->flags & ATA_TFLAG_FUA)
1324 tf->device |= 1 << 7;
1325 } else if (dev->flags & ATA_DFLAG_LBA) {
1326 tf->flags |= ATA_TFLAG_LBA;
1327
1328 if (lba_28_ok(block, n_block)) {
1329 /* use LBA28 */
1330 tf->device |= (block >> 24) & 0xf;
1331 } else if (lba_48_ok(block, n_block)) {
1332 if (!(dev->flags & ATA_DFLAG_LBA48))
1333 goto out_of_range;
1334
1335 /* use LBA48 */
1336 tf->flags |= ATA_TFLAG_LBA48;
1337
1338 tf->hob_nsect = (n_block >> 8) & 0xff;
1339
1340 tf->hob_lbah = (block >> 40) & 0xff;
1341 tf->hob_lbam = (block >> 32) & 0xff;
1342 tf->hob_lbal = (block >> 24) & 0xff;
1343 } else
1344 /* request too large even for LBA48 */
1345 goto out_of_range;
1346
1347 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1348 goto invalid_fld;
1349
1350 qc->nsect = n_block;
1351 tf->nsect = n_block & 0xff;
1352
1353 tf->lbah = (block >> 16) & 0xff;
1354 tf->lbam = (block >> 8) & 0xff;
1355 tf->lbal = block & 0xff;
1356
1357 tf->device |= ATA_LBA;
1358 } else {
1359 /* CHS */
1360 u32 sect, head, cyl, track;
1361
1362 /* The request -may- be too large for CHS addressing. */
1363 if (!lba_28_ok(block, n_block))
1364 goto out_of_range;
1365
1366 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1367 goto invalid_fld;
1368
1369 /* Convert LBA to CHS */
1370 track = (u32)block / dev->sectors;
1371 cyl = track / dev->heads;
1372 head = track % dev->heads;
1373 sect = (u32)block % dev->sectors + 1;
1374
1375 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1376 (u32)block, track, cyl, head, sect);
1377
1378 /* Check whether the converted CHS can fit.
1379 Cylinder: 0-65535
1380 Head: 0-15
1381 Sector: 1-255 */
1382 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1383 goto out_of_range;
1384
1385 qc->nsect = n_block;
1386 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1387 tf->lbal = sect;
1388 tf->lbam = cyl;
1389 tf->lbah = cyl >> 8;
1390 tf->device |= head;
1391 }
1392
1393 return 0;
1394
1395invalid_fld:
1396 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1397 /* "Invalid field in cdb" */
1398 return 1;
1399
1400out_of_range:
1401 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1402 /* "Logical Block Address out of range" */
1403 return 1;
1404
1405nothing_to_do:
1406 qc->scsicmd->result = SAM_STAT_GOOD;
1407 return 1;
1408}
1409
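In the NCQ branch the sector count moves into the FEATURES register pair, the tag occupies bits 7:3 of the sector count register, and the 48-bit LBA is split byte-wise across the low and hob registers. A sketch of that packing for tag 5, 8 sectors at an example LBA:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t block = 0x123456789aULL;	/* example 48-bit LBA */
	uint32_t n_block = 8;			/* sectors to transfer */
	unsigned int tag = 5;			/* NCQ tag, 0-31 */

	/* same field packing as the NCQ branch above */
	uint8_t nsect       = (tag << 3) & 0xff;
	uint8_t hob_feature = (n_block >> 8) & 0xff;
	uint8_t feature     = n_block & 0xff;
	uint8_t hob_lbah = (block >> 40) & 0xff, hob_lbam = (block >> 32) & 0xff;
	uint8_t hob_lbal = (block >> 24) & 0xff;
	uint8_t lbah = (block >> 16) & 0xff, lbam = (block >> 8) & 0xff;
	uint8_t lbal = block & 0xff;

	printf("nsect=%02x feature=%02x:%02x lba=%02x:%02x:%02x:%02x:%02x:%02x\n",
	       nsect, hob_feature, feature,
	       hob_lbah, hob_lbam, hob_lbal, lbah, lbam, lbal);
	/* prints: nsect=28 feature=00:08 lba=00:12:34:56:78:9a */
	return 0;
}
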
1410static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1411{
1412 struct scsi_cmnd *cmd = qc->scsicmd;
1413 u8 *cdb = cmd->cmnd;
1414 int need_sense = (qc->err_mask != 0);
1415
1416 /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
1417 * schedule an EH_REVALIDATE operation to update the IDENTIFY DEVICE
1418 * cache.
1419 */
1420 if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
1421 ((qc->tf.feature == SETFEATURES_WC_ON) ||
1422 (qc->tf.feature == SETFEATURES_WC_OFF))) {
1423 qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
1424 ata_port_schedule_eh(qc->ap);
1425 }
1426
1427 /* For ATA pass thru (SAT) commands, generate a sense block if
1428 * the user mandated it or if there's an error. Note that if we
1429 * generate one because the user forced us to, a check condition
1430 * is generated and the ATA register values are returned
1431 * whether the command completed successfully or not. If there
1432 * was no error, SK, ASC and ASCQ will all be zero.
1433 */
1434 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1435 ((cdb[2] & 0x20) || need_sense)) {
1436 ata_gen_ata_desc_sense(qc);
1437 } else {
1438 if (!need_sense) {
1439 cmd->result = SAM_STAT_GOOD;
1440 } else {
1441 /* TODO: decide which descriptor format to use
1442 * for 48b LBA devices and call that here
1443 * instead of the fixed desc, which is only
1444 * good for smaller LBA (and maybe CHS?)
1445 * devices.
1446 */
1447 ata_gen_fixed_sense(qc);
1448 }
1449 }
1450
1451 if (need_sense && !qc->ap->ops->error_handler)
1452 ata_dump_status(qc->ap->id, &qc->result_tf);
1453
1454 qc->scsidone(cmd);
1455
1456 ata_qc_free(qc);
1457}
1458
1459/**
1460 * ata_scmd_need_defer - Check whether we need to defer scmd
1461 * @dev: ATA device to which the command is addressed
1462 * @is_io: Is the command IO (and thus possibly NCQ)?
1463 *
1464 * NCQ and non-NCQ commands cannot run together. As the upper layer
1465 * only knows the queue depth, we are responsible for maintaining
1466 * exclusion. This function checks whether a new command can be
1467 * issued to @dev.
1468 *
1469 * LOCKING:
1470 * spin_lock_irqsave(host_set lock)
1471 *
1472 * RETURNS:
1473 * 1 if deferring is needed, 0 otherwise.
1474 */
1475static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1476{
1477 struct ata_port *ap = dev->ap;
1478
1479 if (!(dev->flags & ATA_DFLAG_NCQ))
1480 return 0;
1481
1482 if (is_io) {
1483 if (!ata_tag_valid(ap->active_tag))
1484 return 0;
1485 } else {
1486 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1487 return 0;
1488 }
1489 return 1;
1490}
1491
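The rule above reduces to: on an NCQ-capable device, defer an NCQ (I/O) command while a non-NCQ command owns active_tag, and defer a non-NCQ command while anything at all is in flight. A standalone sketch of the same decision (tag_valid() is a simplified stand-in for the driver's tag test):

#include <stdio.h>

static int tag_valid(unsigned int tag)
{
	return tag < 32;	/* simplified: any real tag is 0-31 */
}

static int need_defer(int ncq_dev, int is_io,
		      unsigned int active_tag, unsigned int sactive)
{
	if (!ncq_dev)
		return 0;
	if (is_io)		/* NCQ command: only a non-NCQ owner blocks it */
		return tag_valid(active_tag);
	/* non-NCQ command: any in-flight command blocks it */
	return tag_valid(active_tag) || sactive != 0;
}

int main(void)
{
	unsigned int no_tag = ~0u;	/* stands in for the invalid tag */

	/* NCQ read while three NCQ commands are in flight: no defer */
	printf("%d\n", need_defer(1, 1, no_tag, 0x7));	/* prints 0 */
	/* non-NCQ command while NCQ commands are in flight: defer */
	printf("%d\n", need_defer(1, 0, no_tag, 0x7));	/* prints 1 */
	return 0;
}
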
1492/**
1493 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1494 * @dev: ATA device to which the command is addressed
1495 * @cmd: SCSI command to execute
1496 * @done: SCSI command completion function
1497 * @xlat_func: Actor which translates @cmd to an ATA taskfile
1498 *
1499 * Our ->queuecommand() function has decided that the SCSI
1500 * command issued can be directly translated into an ATA
1501 * command, rather than handled internally.
1502 *
1503 * This function sets up an ata_queued_cmd structure for the
1504 * SCSI command, and sends that ata_queued_cmd to the hardware.
1505 *
1506 * The xlat_func argument (actor) returns 0 if ready to execute the
1507 * ATA command, or 1 to finish translation. If 1 is returned
1508 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1509 * to be set reflecting an error condition or clean (early)
1510 * termination.
1511 *
1512 * LOCKING:
1513 * spin_lock_irqsave(host_set lock)
1514 *
1515 * RETURNS:
1516 * 0 on success, SCSI_MLQUEUE_DEVICE_BUSY if the command
1517 * needs to be deferred.
1518 */
1519static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1520 void (*done)(struct scsi_cmnd *),
1521 ata_xlat_func_t xlat_func)
1522{
1523 struct ata_queued_cmd *qc;
1524 u8 *scsicmd = cmd->cmnd;
1525 int is_io = xlat_func == ata_scsi_rw_xlat;
1526
1527 VPRINTK("ENTER\n");
1528
1529 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1530 goto defer;
1531
1532 qc = ata_scsi_qc_new(dev, cmd, done);
1533 if (!qc)
1534 goto err_mem;
1535
1536 /* data is present; dma-map it */
1537 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1538 cmd->sc_data_direction == DMA_TO_DEVICE) {
1539 if (unlikely(cmd->request_bufflen < 1)) {
1540 ata_dev_printk(dev, KERN_WARNING,
1541 "WARNING: zero len r/w req\n");
1542 goto err_did;
1543 }
1544
1545 if (cmd->use_sg)
1546 ata_sg_init(qc, cmd->request_buffer, cmd->use_sg);
1547 else
1548 ata_sg_init_one(qc, cmd->request_buffer,
1549 cmd->request_bufflen);
1550
1551 qc->dma_dir = cmd->sc_data_direction;
1552 }
1553
1554 qc->complete_fn = ata_scsi_qc_complete;
1555
1556 if (xlat_func(qc, scsicmd))
1557 goto early_finish;
1558
1559 /* select device, send command to hardware */
1560 ata_qc_issue(qc);
1561
1562 VPRINTK("EXIT\n");
1563 return 0;
1564
1565early_finish:
1566 ata_qc_free(qc);
1567 done(cmd);
1568 DPRINTK("EXIT - early finish (good or error)\n");
1569 return 0;
1570
1571err_did:
1572 ata_qc_free(qc);
1573err_mem:
1574 cmd->result = (DID_ERROR << 16);
1575 done(cmd);
1576 DPRINTK("EXIT - internal\n");
1577 return 0;
1578
1579defer:
1580 DPRINTK("EXIT - defer\n");
1581 return SCSI_MLQUEUE_DEVICE_BUSY;
1582}
1583
1584/**
1585 * ata_scsi_rbuf_get - Map response buffer.
1586 * @cmd: SCSI command containing buffer to be mapped.
1587 * @buf_out: Pointer to mapped area.
1588 *
1589 * Maps buffer contained within SCSI command @cmd.
1590 *
1591 * LOCKING:
1592 * spin_lock_irqsave(host_set lock)
1593 *
1594 * RETURNS:
1595 * Length of response buffer.
1596 */
1597
1598static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
1599{
1600 u8 *buf;
1601 unsigned int buflen;
1602
1603 if (cmd->use_sg) {
1604 struct scatterlist *sg;
1605
1606 sg = (struct scatterlist *) cmd->request_buffer;
1607 buf = kmap_atomic(sg->page, KM_USER0) + sg->offset;
1608 buflen = sg->length;
1609 } else {
1610 buf = cmd->request_buffer;
1611 buflen = cmd->request_bufflen;
1612 }
1613
1614 *buf_out = buf;
1615 return buflen;
1616}
1617
1618/**
1619 * ata_scsi_rbuf_put - Unmap response buffer.
1620 * @cmd: SCSI command containing buffer to be unmapped.
1621 * @buf: buffer to unmap
1622 *
1623 * Unmaps response buffer contained within @cmd.
1624 *
1625 * LOCKING:
1626 * spin_lock_irqsave(host_set lock)
1627 */
1628
1629static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
1630{
1631 if (cmd->use_sg) {
1632 struct scatterlist *sg;
1633
1634 sg = (struct scatterlist *) cmd->request_buffer;
1635 kunmap_atomic(buf - sg->offset, KM_USER0);
1636 }
1637}
1638
1639/**
1640 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
1641 * @args: device IDENTIFY data / SCSI command of interest.
1642 * @actor: Callback hook for desired SCSI command simulator
1643 *
1644 * Takes care of the hard work of simulating a SCSI command...
1645 * Mapping the response buffer, calling the command's handler,
1646 * and handling the handler's return value. This return value
1647 * indicates whether the handler wishes the SCSI command to be
1648 * completed successfully (0), or not (in which case cmd->result
1649 * and sense buffer are assumed to be set).
1650 *
1651 * LOCKING:
1652 * spin_lock_irqsave(host_set lock)
1653 */
1654
1655void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1656 unsigned int (*actor) (struct ata_scsi_args *args,
1657 u8 *rbuf, unsigned int buflen))
1658{
1659 u8 *rbuf;
1660 unsigned int buflen, rc;
1661 struct scsi_cmnd *cmd = args->cmd;
1662
1663 buflen = ata_scsi_rbuf_get(cmd, &rbuf);
1664 memset(rbuf, 0, buflen);
1665 rc = actor(args, rbuf, buflen);
1666 ata_scsi_rbuf_put(cmd, rbuf);
1667
1668 if (rc == 0)
1669 cmd->result = SAM_STAT_GOOD;
1670 args->done(cmd);
1671}
1672
1673/**
1674 * ata_scsiop_inq_std - Simulate INQUIRY command
1675 * @args: device IDENTIFY data / SCSI command of interest.
1676 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1677 * @buflen: Response buffer length.
1678 *
1679 * Returns standard device identification data associated
1680 * with non-VPD INQUIRY command output.
1681 *
1682 * LOCKING:
1683 * spin_lock_irqsave(host_set lock)
1684 */
1685
1686unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1687 unsigned int buflen)
1688{
1689 u8 hdr[] = {
1690 TYPE_DISK,
1691 0,
1692 0x5, /* claim SPC-3 version compatibility */
1693 2,
1694 95 - 4
1695 };
1696
1697 /* set SCSI removable (RMB) bit per ATA bit */
1698 if (ata_id_removeable(args->id))
1699 hdr[1] |= (1 << 7);
1700
1701 VPRINTK("ENTER\n");
1702
1703 memcpy(rbuf, hdr, sizeof(hdr));
1704
1705 if (buflen > 35) {
1706 memcpy(&rbuf[8], "ATA ", 8);
1707 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1708 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1709 if (rbuf[32] == 0 || rbuf[32] == ' ')
1710 memcpy(&rbuf[32], "n/a ", 4);
1711 }
1712
1713 if (buflen > 63) {
1714 const u8 versions[] = {
1715 0x60, /* SAM-3 (no version claimed) */
1716
1717 0x03,
1718 0x20, /* SBC-2 (no version claimed) */
1719
1720 0x02,
1721 0x60 /* SPC-3 (no version claimed) */
1722 };
1723
1724 memcpy(rbuf + 59, versions, sizeof(versions));
1725 }
1726
1727 return 0;
1728}
1729
1730/**
1731 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1732 * @args: device IDENTIFY data / SCSI command of interest.
1733 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1734 * @buflen: Response buffer length.
1735 *
1736 * Returns list of inquiry VPD pages available.
1737 *
1738 * LOCKING:
1739 * spin_lock_irqsave(host_set lock)
1740 */
1741
1742unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
1743 unsigned int buflen)
1744{
1745 const u8 pages[] = {
1746 0x00, /* page 0x00, this page */
1747 0x80, /* page 0x80, unit serial no page */
1748 0x83 /* page 0x83, device ident page */
1749 };
1750 rbuf[3] = sizeof(pages); /* number of supported VPD pages */
1751
1752 if (buflen > 6)
1753 memcpy(rbuf + 4, pages, sizeof(pages));
1754
1755 return 0;
1756}
1757
1758/**
1759 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1760 * @args: device IDENTIFY data / SCSI command of interest.
1761 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1762 * @buflen: Response buffer length.
1763 *
1764 * Returns ATA device serial number.
1765 *
1766 * LOCKING:
1767 * spin_lock_irqsave(host_set lock)
1768 */
1769
1770unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1771 unsigned int buflen)
1772{
1773 const u8 hdr[] = {
1774 0,
1775 0x80, /* this page code */
1776 0,
1777 ATA_SERNO_LEN, /* page len */
1778 };
1779 memcpy(rbuf, hdr, sizeof(hdr));
1780
1781 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1782 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1783 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1784
1785 return 0;
1786}
1787
1788/**
1789 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1790 * @args: device IDENTIFY data / SCSI command of interest.
1791 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1792 * @buflen: Response buffer length.
1793 *
1794 * Yields two logical unit device identification designators:
1795 * - vendor specific ASCII containing the ATA serial number
1796 * - SAT defined "t10 vendor id based" containing ASCII vendor
1797 * name ("ATA "), model and serial numbers.
1798 *
1799 * LOCKING:
1800 * spin_lock_irqsave(host_set lock)
1801 */
1802
1803unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1804 unsigned int buflen)
1805{
1806 int num;
1807 const int sat_model_serial_desc_len = 68;
1808 const int ata_model_byte_len = 40;
1809
1810 rbuf[1] = 0x83; /* this page code */
1811 num = 4;
1812
1813 if (buflen > (ATA_SERNO_LEN + num + 3)) {
1814 /* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
1815 rbuf[num + 0] = 2;
1816 rbuf[num + 3] = ATA_SERNO_LEN;
1817 num += 4;
1818 ata_id_string(args->id, (unsigned char *) rbuf + num,
1819 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1820 num += ATA_SERNO_LEN;
1821 }
1822 if (buflen > (sat_model_serial_desc_len + num + 3)) {
1823 /* SAT defined lu model and serial numbers descriptor */
1824 /* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
1825 rbuf[num + 0] = 2;
1826 rbuf[num + 1] = 1;
1827 rbuf[num + 3] = sat_model_serial_desc_len;
1828 num += 4;
1829 memcpy(rbuf + num, "ATA ", 8);
1830 num += 8;
1831 ata_id_string(args->id, (unsigned char *) rbuf + num,
1832 ATA_ID_PROD_OFS, ata_model_byte_len);
1833 num += ata_model_byte_len;
1834 ata_id_string(args->id, (unsigned char *) rbuf + num,
1835 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1836 num += ATA_SERNO_LEN;
1837 }
1838 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
1839 return 0;
1840}
1841
1842/**
1843 * ata_scsiop_noop - Command handler that simply returns success.
1844 * @args: device IDENTIFY data / SCSI command of interest.
1845 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1846 * @buflen: Response buffer length.
1847 *
1848 * No operation. Simply returns success to the caller, to indicate
1849 * that the caller should successfully complete this SCSI command.
1850 *
1851 * LOCKING:
1852 * spin_lock_irqsave(host_set lock)
1853 */
1854
1855unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
1856 unsigned int buflen)
1857{
1858 VPRINTK("ENTER\n");
1859 return 0;
1860}
1861
1862/**
1863 * ata_msense_push - Push data onto MODE SENSE data output buffer
1864 * @ptr_io: (input/output) Location to store more output data
1865 * @last: End of output data buffer
1866 * @buf: Pointer to BLOB being added to output buffer
1867 * @buflen: Length of BLOB
1868 *
1869 * Store MODE SENSE data on an output buffer.
1870 *
1871 * LOCKING:
1872 * None.
1873 */
1874
1875static void ata_msense_push(u8 **ptr_io, const u8 *last,
1876 const u8 *buf, unsigned int buflen)
1877{
1878 u8 *ptr = *ptr_io;
1879
1880 if ((ptr + buflen - 1) > last)
1881 return;
1882
1883 memcpy(ptr, buf, buflen);
1884
1885 ptr += buflen;
1886
1887 *ptr_io = ptr;
1888}
1889
1890/**
1891 * ata_msense_caching - Simulate MODE SENSE caching info page
1892 * @id: device IDENTIFY data
1893 * @ptr_io: (input/output) Location to store more output data
1894 * @last: End of output data buffer
1895 *
1896 * Generate a caching info page, which conditionally indicates
1897 * write caching to the SCSI layer, depending on device
1898 * capabilities.
1899 *
1900 * LOCKING:
1901 * None.
1902 */
1903
1904static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
1905 const u8 *last)
1906{
1907 u8 page[CACHE_MPAGE_LEN];
1908
1909 memcpy(page, def_cache_mpage, sizeof(page));
1910 if (ata_id_wcache_enabled(id))
1911 page[2] |= (1 << 2); /* write cache enable */
1912 if (!ata_id_rahead_enabled(id))
1913 page[12] |= (1 << 5); /* disable read ahead */
1914
1915 ata_msense_push(ptr_io, last, page, sizeof(page));
1916 return sizeof(page);
1917}
1918
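In SPC terms, the two bits toggled here are WCE (caching mode page byte 2, bit 2: write cache enable) and DRA (byte 12, bit 5: disable read-ahead), so the SCSI layer sees the cache state the drive actually reported in its IDENTIFY data.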
1919/**
1920 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
1922 * @ptr_io: (input/output) Location to store more output data
1923 * @last: End of output data buffer
1924 *
1925 * Generate a generic MODE SENSE control mode page.
1926 *
1927 * LOCKING:
1928 * None.
1929 */
1930
1931static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
1932{
1933 ata_msense_push(ptr_io, last, def_control_mpage,
1934 sizeof(def_control_mpage));
1935 return sizeof(def_control_mpage);
1936}
1937
1938/**
1939 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
1940 * @dev: Device associated with this MODE SENSE command
1941 * @ptr_io: (input/output) Location to store more output data
1942 * @last: End of output data buffer
1943 *
1944 * Generate a generic MODE SENSE r/w error recovery page.
1945 *
1946 * LOCKING:
1947 * None.
1948 */
1949
1950static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1951{
1953 ata_msense_push(ptr_io, last, def_rw_recovery_mpage,
1954 sizeof(def_rw_recovery_mpage));
1955 return sizeof(def_rw_recovery_mpage);
1956}
1957
1958/*
1959 * We can turn this into a real blacklist if it's needed; for now, just
1960 * blacklist any Maxtor drive with BANC1G10 revision firmware
1961 */
1962static int ata_dev_supports_fua(u16 *id)
1963{
1964 unsigned char model[41], fw[9];
1965
1966 if (!libata_fua)
1967 return 0;
1968 if (!ata_id_has_fua(id))
1969 return 0;
1970
1971 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1972 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1973
1974 if (strcmp(model, "Maxtor"))
1975 return 1;
1976 if (strcmp(fw, "BANC1G10"))
1977 return 1;
1978
1979 return 0; /* blacklisted */
1980}
1981
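The comment above invites a table-driven version; a minimal sketch of what that could look like (hypothetical, not what this file ships) keeps the model/firmware pairs in one array:

	static const struct {
		const char *model;
		const char *fw;		/* NULL matches any firmware revision */
	} fua_blacklist[] = {
		{ "Maxtor", "BANC1G10" },
	};

	static int ata_fua_blacklisted(const char *model, const char *fw)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(fua_blacklist); i++)
			if (!strcmp(model, fua_blacklist[i].model) &&
			    (!fua_blacklist[i].fw ||
			     !strcmp(fw, fua_blacklist[i].fw)))
				return 1;	/* blacklisted */
		return 0;
	}
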
1982/**
1983 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
1984 * @args: device IDENTIFY data / SCSI command of interest.
1985 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1986 * @buflen: Response buffer length.
1987 *
1988 * Simulate MODE SENSE commands. Assume this is invoked for direct
1989 * access devices (e.g. disks) only. There should be no block
1990 * descriptor for other device types.
1991 *
1992 * LOCKING:
1993 * spin_lock_irqsave(host_set lock)
1994 */
1995
1996unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1997 unsigned int buflen)
1998{
1999 struct ata_device *dev = args->dev;
2000 u8 *scsicmd = args->cmd->cmnd, *p, *last;
2001 const u8 sat_blk_desc[] = {
2002 0, 0, 0, 0, /* number of blocks: sat unspecified */
2003 0,
2004 0, 0x2, 0x0 /* block length: 512 bytes */
2005 };
2006 u8 pg, spg;
2007 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
2008 u8 dpofua;
2009
2010 VPRINTK("ENTER\n");
2011
2012 six_byte = (scsicmd[0] == MODE_SENSE);
2013 	ebd = !(scsicmd[1] & 0x8);      /* dbd bit inverted == ebd */
2014 /*
2015 * LLBA bit in msense(10) ignored (compliant)
2016 */
2017
2018 page_control = scsicmd[2] >> 6;
2019 switch (page_control) {
2020 case 0: /* current */
2021 break; /* supported */
2022 case 3: /* saved */
2023 goto saving_not_supp;
2024 case 1: /* changeable */
2025 case 2: /* defaults */
2026 default:
2027 goto invalid_fld;
2028 }
2029
2030 if (six_byte) {
2031 output_len = 4 + (ebd ? 8 : 0);
2032 alloc_len = scsicmd[4];
2033 } else {
2034 output_len = 8 + (ebd ? 8 : 0);
2035 alloc_len = (scsicmd[7] << 8) + scsicmd[8];
2036 }
2037 minlen = (alloc_len < buflen) ? alloc_len : buflen;
2038
2039 p = rbuf + output_len;
2040 last = rbuf + minlen - 1;
2041
2042 pg = scsicmd[2] & 0x3f;
2043 spg = scsicmd[3];
2044 /*
2045 * No mode subpages supported (yet) but asking for _all_
2046 * subpages may be valid
2047 */
2048 if (spg && (spg != ALL_SUB_MPAGES))
2049 goto invalid_fld;
2050
2051 	switch (pg) {
2052 case RW_RECOVERY_MPAGE:
2053 output_len += ata_msense_rw_recovery(&p, last);
2054 break;
2055
2056 case CACHE_MPAGE:
2057 output_len += ata_msense_caching(args->id, &p, last);
2058 break;
2059
2060 case CONTROL_MPAGE: {
2061 output_len += ata_msense_ctl_mode(&p, last);
2062 break;
2063 }
2064
2065 case ALL_MPAGES:
2066 output_len += ata_msense_rw_recovery(&p, last);
2067 output_len += ata_msense_caching(args->id, &p, last);
2068 output_len += ata_msense_ctl_mode(&p, last);
2069 break;
2070
2071 default: /* invalid page code */
2072 goto invalid_fld;
2073 }
2074
2075 if (minlen < 1)
2076 return 0;
2077
2078 dpofua = 0;
2079 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
2080 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
2081 dpofua = 1 << 4;
2082
2083 if (six_byte) {
2084 output_len--;
2085 rbuf[0] = output_len;
2086 if (minlen > 2)
2087 rbuf[2] |= dpofua;
2088 if (ebd) {
2089 if (minlen > 3)
2090 rbuf[3] = sizeof(sat_blk_desc);
2091 if (minlen > 11)
2092 memcpy(rbuf + 4, sat_blk_desc,
2093 sizeof(sat_blk_desc));
2094 }
2095 } else {
2096 output_len -= 2;
2097 rbuf[0] = output_len >> 8;
2098 if (minlen > 1)
2099 rbuf[1] = output_len;
2100 if (minlen > 3)
2101 rbuf[3] |= dpofua;
2102 if (ebd) {
2103 if (minlen > 7)
2104 rbuf[7] = sizeof(sat_blk_desc);
2105 if (minlen > 15)
2106 memcpy(rbuf + 8, sat_blk_desc,
2107 sizeof(sat_blk_desc));
2108 }
2109 }
2110 return 0;
2111
2112invalid_fld:
2113 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
2114 	/* "Invalid field in cdb" */
2115 return 1;
2116
2117saving_not_supp:
2118 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
2119 /* "Saving parameters not supported" */
2120 return 1;
2121}
2122
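Putting the header arithmetic above into one picture, a successful MODE SENSE(6) reply from this routine is laid out as:

	byte 0       mode data length (everything that follows byte 0)
	byte 1       medium type (0)
	byte 2       device-specific parameter; DPOFUA in bit 4
	byte 3       block descriptor length (8, or 0 when DBD was set)
	bytes 4-11   the SAT block descriptor above (512-byte logical blocks)
	bytes 12-    the requested mode page(s)

The ten-byte variant is the same picture with a two-byte mode data length, the DPOFUA bit in byte 3, the block descriptor length in byte 7, and the descriptor starting at byte 8.
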
2123/**
2124 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2125 * @args: device IDENTIFY data / SCSI command of interest.
2126 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2127 * @buflen: Response buffer length.
2128 *
2129 * Simulate READ CAPACITY commands.
2130 *
2131 * LOCKING:
2132 * spin_lock_irqsave(host_set lock)
2133 */
2134
2135unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2136 unsigned int buflen)
2137{
2138 u64 n_sectors;
2139 u32 tmp;
2140
2141 VPRINTK("ENTER\n");
2142
2143 if (ata_id_has_lba(args->id)) {
2144 if (ata_id_has_lba48(args->id))
2145 n_sectors = ata_id_u64(args->id, 100);
2146 else
2147 n_sectors = ata_id_u32(args->id, 60);
2148 } else {
2149 /* CHS default translation */
2150 n_sectors = args->id[1] * args->id[3] * args->id[6];
2151
2152 if (ata_id_current_chs_valid(args->id))
2153 /* CHS current translation */
2154 n_sectors = ata_id_u32(args->id, 57);
2155 }
2156
2157 n_sectors--; /* ATA TotalUserSectors - 1 */
2158
2159 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2160 		if (n_sectors >= 0xffffffffULL)
2161 			tmp = 0xffffffff;	/* Return max count on overflow */
2162 		else
2163 			tmp = n_sectors;
2164
2165 /* sector count, 32-bit */
2166 rbuf[0] = tmp >> (8 * 3);
2167 rbuf[1] = tmp >> (8 * 2);
2168 rbuf[2] = tmp >> (8 * 1);
2169 rbuf[3] = tmp;
2170
2171 /* sector size */
2172 tmp = ATA_SECT_SIZE;
2173 rbuf[6] = tmp >> 8;
2174 rbuf[7] = tmp;
2175
2176 } else {
2177 /* sector count, 64-bit */
2178 tmp = n_sectors >> (8 * 4);
2179 rbuf[2] = tmp >> (8 * 3);
2180 rbuf[3] = tmp >> (8 * 2);
2181 rbuf[4] = tmp >> (8 * 1);
2182 rbuf[5] = tmp;
2183 tmp = n_sectors;
2184 rbuf[6] = tmp >> (8 * 3);
2185 rbuf[7] = tmp >> (8 * 2);
2186 rbuf[8] = tmp >> (8 * 1);
2187 rbuf[9] = tmp;
2188
2189 /* sector size */
2190 tmp = ATA_SECT_SIZE;
2191 rbuf[12] = tmp >> 8;
2192 rbuf[13] = tmp;
2193 }
2194
2195 return 0;
2196}
2197
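A quick worked example of the packing above: a hypothetical LBA48 drive with 488,397,168 user sectors reports a last LBA of 488397167 = 0x1d1c596f, so the READ CAPACITY(10) reply is 1d 1c 59 6f 00 00 02 00: last LBA in bytes 0-3, and the 512-byte (0x200) sector size in bytes 4-7 (only bytes 6-7 need writing, since rbuf arrives zeroed).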
2198/**
2199 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2200 * @args: device IDENTIFY data / SCSI command of interest.
2201 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2202 * @buflen: Response buffer length.
2203 *
2204 * Simulate REPORT LUNS command.
2205 *
2206 * LOCKING:
2207 * spin_lock_irqsave(host_set lock)
2208 */
2209
2210unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
2211 unsigned int buflen)
2212{
2213 VPRINTK("ENTER\n");
2214 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
2215
2216 return 0;
2217}
2218
2219/**
2220 * ata_scsi_set_sense - Set SCSI sense data and status
2221 * @cmd: SCSI request to be handled
2222 * @sk: SCSI-defined sense key
2223 * @asc: SCSI-defined additional sense code
2224 * @ascq: SCSI-defined additional sense code qualifier
2225 *
2226 * Helper function that builds a valid fixed format, current
2227 * response code and the given sense key (sk), additional sense
2228 * code (asc) and additional sense code qualifier (ascq) with
2229 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
2230 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
2231 *
2232 * LOCKING:
2233 * Not required
2234 */
2235
2236void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
2237{
2238 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
2239
2240 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
2241 cmd->sense_buffer[2] = sk;
2242 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
2243 cmd->sense_buffer[12] = asc;
2244 cmd->sense_buffer[13] = ascq;
2245}
2246
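For example, the invalid_fld exit in ata_scsiop_mode_sense() above calls this with sk ILLEGAL_REQUEST (0x05), asc 0x24, ascq 0x00; assuming the sense buffer starts out zeroed, the resulting bytes are 70 00 05 00 00 00 00 0a 00 00 00 00 24 00, which sense decoders render as "Illegal Request: Invalid field in CDB".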
2247/**
2248 * ata_scsi_badcmd - End a SCSI request with an error
2249 * @cmd: SCSI request to be handled
2250 * @done: SCSI command completion function
2251 * @asc: SCSI-defined additional sense code
2252 * @ascq: SCSI-defined additional sense code qualifier
2253 *
2254 * Helper function that completes a SCSI command with
2255 * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST
2256 * and the specified additional sense codes.
2257 *
2258 * LOCKING:
2259 * spin_lock_irqsave(host_set lock)
2260 */
2261
2262void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
2263{
2264 DPRINTK("ENTER\n");
2265 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
2266
2267 done(cmd);
2268}
2269
2270static void atapi_sense_complete(struct ata_queued_cmd *qc)
2271{
2272 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2273 /* FIXME: not quite right; we don't want the
2274 * translation of taskfile registers into
2275 		 * sense descriptors, since that's only
2276 * correct for ATA, not ATAPI
2277 */
2278 ata_gen_ata_desc_sense(qc);
2279 }
2280
2281 qc->scsidone(qc->scsicmd);
2282 ata_qc_free(qc);
2283}
2284
2285/* is it pointless to prefer PIO for "safety reasons"? */
2286static inline int ata_pio_use_silly(struct ata_port *ap)
2287{
2288 return (ap->flags & ATA_FLAG_PIO_DMA);
2289}
2290
2291static void atapi_request_sense(struct ata_queued_cmd *qc)
2292{
2293 struct ata_port *ap = qc->ap;
2294 struct scsi_cmnd *cmd = qc->scsicmd;
2295
2296 DPRINTK("ATAPI request sense\n");
2297
2298 /* FIXME: is this needed? */
2299 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2300
2301 ap->ops->tf_read(ap, &qc->tf);
2302
2303 /* fill these in, for the case where they are -not- overwritten */
2304 cmd->sense_buffer[0] = 0x70;
2305 cmd->sense_buffer[2] = qc->tf.feature >> 4;
2306
2307 ata_qc_reinit(qc);
2308
2309 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2310 qc->dma_dir = DMA_FROM_DEVICE;
2311
2312 memset(&qc->cdb, 0, qc->dev->cdb_len);
2313 qc->cdb[0] = REQUEST_SENSE;
2314 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2315
2316 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2317 qc->tf.command = ATA_CMD_PACKET;
2318
2319 if (ata_pio_use_silly(ap)) {
2320 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2321 qc->tf.feature |= ATAPI_PKT_DMA;
2322 } else {
2323 qc->tf.protocol = ATA_PROT_ATAPI;
2324 		qc->tf.lbam = (8 * 1024) & 0xff;	/* ATAPI byte count limit */
2325 		qc->tf.lbah = (8 * 1024) >> 8;
2326 }
2327 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2328
2329 qc->complete_fn = atapi_sense_complete;
2330
2331 ata_qc_issue(qc);
2332
2333 DPRINTK("EXIT\n");
2334}
2335
2336static void atapi_qc_complete(struct ata_queued_cmd *qc)
2337{
2338 struct scsi_cmnd *cmd = qc->scsicmd;
2339 unsigned int err_mask = qc->err_mask;
2340
2341 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2342
2343 /* handle completion from new EH */
2344 if (unlikely(qc->ap->ops->error_handler &&
2345 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2346
2347 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2348 /* FIXME: not quite right; we don't want the
2349 		 * translation of taskfile registers into
2350 * sense descriptors, since that's only
2351 * correct for ATA, not ATAPI
2352 */
2353 ata_gen_ata_desc_sense(qc);
2354 }
2355
2356 /* SCSI EH automatically locks door if sdev->locked is
2357 * set. Sometimes door lock request continues to
2358 * fail, for example, when no media is present. This
2359 * creates a loop - SCSI EH issues door lock which
2360 * fails and gets invoked again to acquire sense data
2361 * for the failed command.
2362 *
2363 * If door lock fails, always clear sdev->locked to
2364 * avoid this infinite loop.
2365 */
2366 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
2367 qc->dev->sdev->locked = 0;
2368
2369 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2370 qc->scsidone(cmd);
2371 ata_qc_free(qc);
2372 return;
2373 }
2374
2375 /* successful completion or old EH failure path */
2376 if (unlikely(err_mask & AC_ERR_DEV)) {
2377 cmd->result = SAM_STAT_CHECK_CONDITION;
2378 atapi_request_sense(qc);
2379 return;
2380 } else if (unlikely(err_mask)) {
2381 /* FIXME: not quite right; we don't want the
2382 * translation of taskfile registers into
2383 		 * sense descriptors, since that's only
2384 * correct for ATA, not ATAPI
2385 */
2386 ata_gen_ata_desc_sense(qc);
2387 } else {
2388 u8 *scsicmd = cmd->cmnd;
2389
2390 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
2391 u8 *buf = NULL;
2392 unsigned int buflen;
2393
2394 buflen = ata_scsi_rbuf_get(cmd, &buf);
2395
2396 /* ATAPI devices typically report zero for their SCSI version,
2397 * and sometimes deviate from the spec WRT response data
2398 			 * format. If the SCSI version is reported as zero, as is typical,
2399 * then we make the following fixups: 1) Fake MMC-5 version,
2400 * to indicate to the Linux scsi midlayer this is a modern
2401 * device. 2) Ensure response data format / ATAPI information
2402 * are always correct.
2403 */
2404 if (buf[2] == 0) {
2405 buf[2] = 0x5;
2406 buf[3] = 0x32;
2407 }
2408
2409 ata_scsi_rbuf_put(cmd, buf);
2410 }
2411
2412 cmd->result = SAM_STAT_GOOD;
2413 }
2414
2415 qc->scsidone(cmd);
2416 ata_qc_free(qc);
2417}
2418/**
2419 * atapi_xlat - Initialize PACKET taskfile
2420 * @qc: command structure to be initialized
2421 * @scsicmd: SCSI CDB associated with this PACKET command
2422 *
2423 * LOCKING:
2424 * spin_lock_irqsave(host_set lock)
2425 *
2426 * RETURNS:
2427 * Zero on success, non-zero on failure.
2428 */
2429
2430static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2431{
2432 struct scsi_cmnd *cmd = qc->scsicmd;
2433 struct ata_device *dev = qc->dev;
2434 int using_pio = (dev->flags & ATA_DFLAG_PIO);
2435 int nodata = (cmd->sc_data_direction == DMA_NONE);
2436
2437 if (!using_pio)
2438 /* Check whether ATAPI DMA is safe */
2439 if (ata_check_atapi_dma(qc))
2440 using_pio = 1;
2441
2442 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2443
2444 qc->complete_fn = atapi_qc_complete;
2445
2446 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2447 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2448 qc->tf.flags |= ATA_TFLAG_WRITE;
2449 DPRINTK("direction: write\n");
2450 }
2451
2452 qc->tf.command = ATA_CMD_PACKET;
2453
2454 /* no data, or PIO data xfer */
2455 if (using_pio || nodata) {
2456 if (nodata)
2457 qc->tf.protocol = ATA_PROT_ATAPI_NODATA;
2458 else
2459 qc->tf.protocol = ATA_PROT_ATAPI;
2460 		qc->tf.lbam = (8 * 1024) & 0xff;	/* ATAPI byte count limit */
2461 		qc->tf.lbah = (8 * 1024) >> 8;
2462 }
2463
2464 /* DMA data xfer */
2465 else {
2466 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2467 qc->tf.feature |= ATAPI_PKT_DMA;
2468
2469 if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE))
2470 /* some SATA bridges need us to indicate data xfer direction */
2471 qc->tf.feature |= ATAPI_DMADIR;
2472 }
2473
2474 qc->nbytes = cmd->request_bufflen;
2475
2476 return 0;
2477}
2478
2479 static struct ata_device *ata_find_dev(struct ata_port *ap, int id)
2480{
2481 if (likely(id < ATA_MAX_DEVICES))
2482 return &ap->device[id];
2483 return NULL;
2484}
2485
2486 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
2487 const struct scsi_device *scsidev)
2488{
2489 /* skip commands not addressed to targets we simulate */
2490 if (unlikely(scsidev->channel || scsidev->lun))
2491 return NULL;
2492
2493 return ata_find_dev(ap, scsidev->id);
2494}
2495
2496/**
2497 * ata_scsi_dev_enabled - determine if device is enabled
2498 * @dev: ATA device
2499 *
2500 * Determine if commands should be sent to the specified device.
2501 *
2502 * LOCKING:
2503 * spin_lock_irqsave(host_set lock)
2504 *
2505 * RETURNS:
2506 * 0 if commands are not allowed / 1 if commands are allowed
2507 */
2508
2509static int ata_scsi_dev_enabled(struct ata_device *dev)
2510{
2511 if (unlikely(!ata_dev_enabled(dev)))
2512 return 0;
2513
2514 if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) {
2515 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2516 ata_dev_printk(dev, KERN_WARNING,
2517 "WARNING: ATAPI is %s, device ignored.\n",
2518 atapi_enabled ? "not supported with this driver" : "disabled");
2519 return 0;
2520 }
2521 }
2522
2523 return 1;
2524}
2525
2526/**
2527 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2528 * @ap: ATA port to which the device is attached
2529 * @scsidev: SCSI device from which we derive the ATA device
2530 *
2531 * Given various information provided in struct scsi_cmnd,
2532 * map that onto an ATA bus, and using that mapping
2533 * determine which ata_device is associated with the
2534 * SCSI command to be sent.
2535 *
2536 * LOCKING:
2537 * spin_lock_irqsave(host_set lock)
2538 *
2539 * RETURNS:
2540 * Associated ATA device, or %NULL if not found.
2541 */
2542static struct ata_device *
2543ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2544{
2545 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2546
2547 if (unlikely(!dev || !ata_scsi_dev_enabled(dev)))
2548 return NULL;
2549
2550 return dev;
2551}
2552
2553/*
2554 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2555 * @byte1: Byte 1 from pass-thru CDB.
2556 *
2557 * RETURNS:
2558 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
2559 */
2560static u8
2561ata_scsi_map_proto(u8 byte1)
2562{
2563 	switch ((byte1 & 0x1e) >> 1) {
2564 case 3: /* Non-data */
2565 return ATA_PROT_NODATA;
2566
2567 case 6: /* DMA */
2568 return ATA_PROT_DMA;
2569
2570 case 4: /* PIO Data-in */
2571 case 5: /* PIO Data-out */
2572 return ATA_PROT_PIO;
2573
2574 case 10: /* Device Reset */
2575 case 0: /* Hard Reset */
2576 case 1: /* SRST */
2577 case 2: /* Bus Idle */
2578 case 7: /* Packet */
2579 case 8: /* DMA Queued */
2580 case 9: /* Device Diagnostic */
2581 case 11: /* UDMA Data-in */
2582 case 12: /* UDMA Data-Out */
2583 case 13: /* FPDMA */
2584 default: /* Reserved */
2585 break;
2586 }
2587
2588 return ATA_PROT_UNKNOWN;
2589}
2590
2591/**
2592 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
2593 * @qc: command structure to be initialized
2594 * @scsicmd: SCSI command to convert
2595 *
2596 * Handles either 12 or 16-byte versions of the CDB.
2597 *
2598 * RETURNS:
2599 * Zero on success, non-zero on failure.
2600 */
2601static unsigned int
2602ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2603{
2604 struct ata_taskfile *tf = &(qc->tf);
2605 struct scsi_cmnd *cmd = qc->scsicmd;
2606 struct ata_device *dev = qc->dev;
2607
2608 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
2609 goto invalid_fld;
2610
2611 /* We may not issue DMA commands if no DMA mode is set */
2612 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2613 goto invalid_fld;
2614
2615 if (scsicmd[1] & 0xe0)
2616 /* PIO multi not supported yet */
2617 goto invalid_fld;
2618
2619 /*
2620 * 12 and 16 byte CDBs use different offsets to
2621 * provide the various register values.
2622 */
2623 if (scsicmd[0] == ATA_16) {
2624 /*
2625 * 16-byte CDB - may contain extended commands.
2626 *
2627 * If that is the case, copy the upper byte register values.
2628 */
2629 if (scsicmd[1] & 0x01) {
2630 tf->hob_feature = scsicmd[3];
2631 tf->hob_nsect = scsicmd[5];
2632 tf->hob_lbal = scsicmd[7];
2633 tf->hob_lbam = scsicmd[9];
2634 tf->hob_lbah = scsicmd[11];
2635 tf->flags |= ATA_TFLAG_LBA48;
2636 } else
2637 tf->flags &= ~ATA_TFLAG_LBA48;
2638
2639 /*
2640 * Always copy low byte, device and command registers.
2641 */
2642 tf->feature = scsicmd[4];
2643 tf->nsect = scsicmd[6];
2644 tf->lbal = scsicmd[8];
2645 tf->lbam = scsicmd[10];
2646 tf->lbah = scsicmd[12];
2647 tf->device = scsicmd[13];
2648 tf->command = scsicmd[14];
2649 } else {
2650 /*
2651 * 12-byte CDB - incapable of extended commands.
2652 */
2653 tf->flags &= ~ATA_TFLAG_LBA48;
2654
2655 tf->feature = scsicmd[3];
2656 tf->nsect = scsicmd[4];
2657 tf->lbal = scsicmd[5];
2658 tf->lbam = scsicmd[6];
2659 tf->lbah = scsicmd[7];
2660 tf->device = scsicmd[8];
2661 tf->command = scsicmd[9];
2662 }
2663 /*
2664 * If slave is possible, enforce correct master/slave bit
2665 */
2666 if (qc->ap->flags & ATA_FLAG_SLAVE_POSS)
2667 tf->device = qc->dev->devno ?
2668 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
2669
2670 /*
2671 * Filter SET_FEATURES - XFER MODE command -- otherwise,
2672 * SET_FEATURES - XFER MODE must be preceded/succeeded
2673 * by an update to hardware-specific registers for each
2674 * controller (i.e. the reason for ->set_piomode(),
2675 * ->set_dmamode(), and ->post_set_mode() hooks).
2676 */
2677 if ((tf->command == ATA_CMD_SET_FEATURES)
2678 && (tf->feature == SETFEATURES_XFER))
2679 goto invalid_fld;
2680
2681 /*
2682 * Set flags so that all registers will be written,
2683 * and pass on write indication (used for PIO/DMA
2684 * setup.)
2685 */
2686 tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);
2687
2688 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2689 tf->flags |= ATA_TFLAG_WRITE;
2690
2691 /*
2692 * Set transfer length.
2693 *
2694 * TODO: find out if we need to do more here to
2695 * cover scatter/gather case.
2696 */
2697 qc->nsect = cmd->request_bufflen / ATA_SECT_SIZE;
2698
2699 /* request result TF */
2700 qc->flags |= ATA_QCFLAG_RESULT_TF;
2701
2702 return 0;
2703
2704 invalid_fld:
2705 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x00);
2706 /* "Invalid field in cdb" */
2707 return 1;
2708}
2709
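For a concrete view from the initiator side, here is a user-space sketch (not part of libata; minimal error handling, SG_IO interface from <scsi/sg.h> assumed, CDB flag byte following the SAT t_dir/byte_block/t_length encoding) that exercises this translation by sending IDENTIFY DEVICE through an ATA_16 CDB:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	int main(int argc, char **argv)
	{
		unsigned char cdb[16], id[512], sense[32];
		struct sg_io_hdr io;
		int fd;

		if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
			return 1;

		memset(cdb, 0, sizeof(cdb));
		cdb[0] = 0x85;		/* ATA_16 opcode */
		cdb[1] = 4 << 1;	/* protocol 4: PIO Data-in, as mapped above */
		cdb[2] = 0x0e;		/* t_dir=in, byte_block, t_length=nsect field */
		cdb[6] = 1;		/* nsect: transfer one sector */
		cdb[14] = 0xec;		/* ATA IDENTIFY DEVICE */

		memset(&io, 0, sizeof(io));
		io.interface_id = 'S';
		io.cmd_len = sizeof(cdb);
		io.cmdp = cdb;
		io.dxfer_direction = SG_DXFER_FROM_DEV;
		io.dxferp = id;
		io.dxfer_len = sizeof(id);
		io.sbp = sense;
		io.mx_sb_len = sizeof(sense);
		io.timeout = 5000;	/* milliseconds */

		if (ioctl(fd, SG_IO, &io) < 0)
			return 1;

		/* words 27-46 of the IDENTIFY data hold the model string */
		printf("model word 27: %02x%02x\n", id[54], id[55]);
		return 0;
	}
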
2710/**
2711 * ata_get_xlat_func - check if SCSI to ATA translation is possible
2712 * @dev: ATA device
2713 * @cmd: SCSI command opcode to consider
2714 *
2715 * Look up the SCSI command given, and determine whether the
2716 * SCSI command is to be translated or simulated.
2717 *
2718 * RETURNS:
2719 * Pointer to translation function if possible, %NULL if not.
2720 */
2721
2722static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
2723{
2724 switch (cmd) {
2725 case READ_6:
2726 case READ_10:
2727 case READ_16:
2728
2729 case WRITE_6:
2730 case WRITE_10:
2731 case WRITE_16:
2732 return ata_scsi_rw_xlat;
2733
2734 case SYNCHRONIZE_CACHE:
2735 if (ata_try_flush_cache(dev))
2736 return ata_scsi_flush_xlat;
2737 break;
2738
2739 case VERIFY:
2740 case VERIFY_16:
2741 return ata_scsi_verify_xlat;
2742
2743 case ATA_12:
2744 case ATA_16:
2745 return ata_scsi_pass_thru;
2746
2747 case START_STOP:
2748 return ata_scsi_start_stop_xlat;
2749 }
2750
2751 return NULL;
2752}
2753
2754/**
2755 * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
2756 * @ap: ATA port to which the command was being sent
2757 * @cmd: SCSI command to dump
2758 *
2759 * Prints the contents of a SCSI command via printk().
2760 */
2761
2762static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2763 struct scsi_cmnd *cmd)
2764{
2765#ifdef ATA_DEBUG
2766 struct scsi_device *scsidev = cmd->device;
2767 u8 *scsicmd = cmd->cmnd;
2768
2769 DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
2770 ap->id,
2771 scsidev->channel, scsidev->id, scsidev->lun,
2772 scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
2773 scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
2774 scsicmd[8]);
2775#endif
2776}
2777
2778static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2779 void (*done)(struct scsi_cmnd *),
2780 struct ata_device *dev)
2781{
2782 int rc = 0;
2783
2784 if (dev->class == ATA_DEV_ATA) {
2785 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2786 cmd->cmnd[0]);
2787
2788 if (xlat_func)
2789 rc = ata_scsi_translate(dev, cmd, done, xlat_func);
2790 else
2791 ata_scsi_simulate(dev, cmd, done);
2792 } else
2793 rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
2794
2795 return rc;
2796}
2797
2798/**
2799 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
2800 * @cmd: SCSI command to be sent
2801 * @done: Completion function, called when command is complete
2802 *
2803 * In some cases, this function translates SCSI commands into
2804 * ATA taskfiles, and queues the taskfiles to be sent to
2805 * hardware. In other cases, this function simulates a
2806 * SCSI device by evaluating and responding to certain
2807 * SCSI commands. This creates the overall effect of
2808 * ATA and ATAPI devices appearing as SCSI devices.
2809 *
2810 * LOCKING:
2811 * Releases scsi-layer-held lock, and obtains host_set lock.
2812 *
2813 * RETURNS:
2814 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
2815 * 0 otherwise.
2816 */
2817int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2818{
2819 struct ata_port *ap;
2820 struct ata_device *dev;
2821 struct scsi_device *scsidev = cmd->device;
2822 struct Scsi_Host *shost = scsidev->host;
2823 int rc = 0;
2824
2825 ap = ata_shost_to_port(shost);
2826
2827 spin_unlock(shost->host_lock);
2828 spin_lock(ap->lock);
2829
2830 ata_scsi_dump_cdb(ap, cmd);
2831
2832 dev = ata_scsi_find_dev(ap, scsidev);
2833 if (likely(dev))
2834 rc = __ata_scsi_queuecmd(cmd, done, dev);
2835 else {
2836 cmd->result = (DID_BAD_TARGET << 16);
2837 done(cmd);
2838 }
2839
2840 spin_unlock(ap->lock);
2841 spin_lock(shost->host_lock);
2842 return rc;
2843}
2844
2845/**
2846 * ata_scsi_simulate - simulate SCSI command on ATA device
2847 * @dev: the target device
2848 * @cmd: SCSI command being sent to device.
2849 * @done: SCSI command completion function.
2850 *
2851 * Interprets and directly executes a select list of SCSI commands
2852 * that can be handled internally.
2853 *
2854 * LOCKING:
2855 * spin_lock_irqsave(host_set lock)
2856 */
2857
2858void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2859 void (*done)(struct scsi_cmnd *))
2860{
2861 struct ata_scsi_args args;
2862 const u8 *scsicmd = cmd->cmnd;
2863
2864 args.dev = dev;
2865 args.id = dev->id;
2866 args.cmd = cmd;
2867 args.done = done;
2868
2869 	switch (scsicmd[0]) {
2870 /* no-op's, complete with success */
2871 case SYNCHRONIZE_CACHE:
2872 case REZERO_UNIT:
2873 case SEEK_6:
2874 case SEEK_10:
2875 case TEST_UNIT_READY:
2876 case FORMAT_UNIT: /* FIXME: correct? */
2877 case SEND_DIAGNOSTIC: /* FIXME: correct? */
2878 ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
2879 break;
2880
2881 case INQUIRY:
2882 if (scsicmd[1] & 2) /* is CmdDt set? */
2883 ata_scsi_invalid_field(cmd, done);
2884 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
2885 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
2886 else if (scsicmd[2] == 0x00)
2887 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
2888 else if (scsicmd[2] == 0x80)
2889 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
2890 else if (scsicmd[2] == 0x83)
2891 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
2892 else
2893 ata_scsi_invalid_field(cmd, done);
2894 break;
2895
2896 case MODE_SENSE:
2897 case MODE_SENSE_10:
2898 ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
2899 break;
2900
2901 case MODE_SELECT: /* unconditionally return */
2902 case MODE_SELECT_10: /* bad-field-in-cdb */
2903 ata_scsi_invalid_field(cmd, done);
2904 break;
2905
2906 case READ_CAPACITY:
2907 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
2908 break;
2909
2910 case SERVICE_ACTION_IN:
2911 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
2912 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
2913 else
2914 ata_scsi_invalid_field(cmd, done);
2915 break;
2916
2917 case REPORT_LUNS:
2918 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
2919 break;
2920
2921 /* mandatory commands we haven't implemented yet */
2922 case REQUEST_SENSE:
2923
2924 /* all other commands */
2925 default:
2926 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
2927 /* "Invalid command operation code" */
2928 done(cmd);
2929 break;
2930 }
2931}
2932
2933void ata_scsi_scan_host(struct ata_port *ap)
2934{
2935 unsigned int i;
2936
2937 if (ap->flags & ATA_FLAG_DISABLED)
2938 return;
2939
2940 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2941 struct ata_device *dev = &ap->device[i];
2942 struct scsi_device *sdev;
2943
2944 if (!ata_dev_enabled(dev) || dev->sdev)
2945 continue;
2946
2947 sdev = __scsi_add_device(ap->host, 0, i, 0, NULL);
2948 if (!IS_ERR(sdev)) {
2949 dev->sdev = sdev;
2950 scsi_device_put(sdev);
2951 }
2952 }
2953}
2954
2955/**
2956 * ata_scsi_offline_dev - offline attached SCSI device
2957 * @dev: ATA device to offline attached SCSI device for
2958 *
2959 * This function is called from ata_eh_hotplug() and is responsible
2960 * for taking the SCSI device attached to @dev offline. This
2961 * function is called with host_set lock which protects dev->sdev
2962 * against clearing.
2963 *
2964 * LOCKING:
2965 * spin_lock_irqsave(host_set lock)
2966 *
2967 * RETURNS:
2968 * 1 if attached SCSI device exists, 0 otherwise.
2969 */
2970int ata_scsi_offline_dev(struct ata_device *dev)
2971{
2972 if (dev->sdev) {
2973 scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
2974 return 1;
2975 }
2976 return 0;
2977}
2978
2979/**
2980 * ata_scsi_remove_dev - remove attached SCSI device
2981 * @dev: ATA device to remove attached SCSI device for
2982 *
2983 * This function is called from ata_eh_scsi_hotplug() and
2984 * is responsible for removing the SCSI device attached to @dev.
2985 *
2986 * LOCKING:
2987 * Kernel thread context (may sleep).
2988 */
2989static void ata_scsi_remove_dev(struct ata_device *dev)
2990{
2991 struct ata_port *ap = dev->ap;
2992 struct scsi_device *sdev;
2993 unsigned long flags;
2994
2995 /* Alas, we need to grab scan_mutex to ensure SCSI device
2996 * state doesn't change underneath us and thus
2997 * scsi_device_get() always succeeds. The mutex locking can
2998 * be removed if there is __scsi_device_get() interface which
2999 * increments reference counts regardless of device state.
3000 */
3001 mutex_lock(&ap->host->scan_mutex);
3002 spin_lock_irqsave(ap->lock, flags);
3003
3004 /* clearing dev->sdev is protected by host_set lock */
3005 sdev = dev->sdev;
3006 dev->sdev = NULL;
3007
3008 if (sdev) {
3009 /* If user initiated unplug races with us, sdev can go
3010 * away underneath us after the host_set lock and
3011 * scan_mutex are released. Hold onto it.
3012 */
3013 if (scsi_device_get(sdev) == 0) {
3014 /* The following ensures the attached sdev is
3015 * offline on return from ata_scsi_offline_dev()
3016 			 * regardless of whether it wins or loses the race
3017 * against this function.
3018 */
3019 scsi_device_set_state(sdev, SDEV_OFFLINE);
3020 } else {
3021 WARN_ON(1);
3022 sdev = NULL;
3023 }
3024 }
3025
3026 spin_unlock_irqrestore(ap->lock, flags);
3027 mutex_unlock(&ap->host->scan_mutex);
3028
3029 if (sdev) {
3030 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
3031 sdev->sdev_gendev.bus_id);
3032
3033 scsi_remove_device(sdev);
3034 scsi_device_put(sdev);
3035 }
3036}
3037
3038/**
3039 * ata_scsi_hotplug - SCSI part of hotplug
3040 * @data: Pointer to ATA port to perform SCSI hotplug on
3041 *
3042 * Perform SCSI part of hotplug. It's executed from a separate
3043 * workqueue after EH completes. This is necessary because SCSI
3044 * hot plugging requires a working EH, and hot unplugging is
3045 * serialized against hot plugging with a mutex.
3046 *
3047 * LOCKING:
3048 * Kernel thread context (may sleep).
3049 */
3050void ata_scsi_hotplug(void *data)
3051{
3052 struct ata_port *ap = data;
3053 int i;
3054
3055 if (ap->pflags & ATA_PFLAG_UNLOADING) {
3056 DPRINTK("ENTER/EXIT - unloading\n");
3057 return;
3058 }
3059
3060 DPRINTK("ENTER\n");
3061
3062 /* unplug detached devices */
3063 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3064 struct ata_device *dev = &ap->device[i];
3065 unsigned long flags;
3066
3067 if (!(dev->flags & ATA_DFLAG_DETACHED))
3068 continue;
3069
3070 spin_lock_irqsave(ap->lock, flags);
3071 dev->flags &= ~ATA_DFLAG_DETACHED;
3072 spin_unlock_irqrestore(ap->lock, flags);
3073
3074 ata_scsi_remove_dev(dev);
3075 }
3076
3077 /* scan for new ones */
3078 ata_scsi_scan_host(ap);
3079
3080 /* If we scanned while EH was in progress, scan would have
3081 * failed silently. Requeue if there are enabled but
3082 * unattached devices.
3083 */
3084 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3085 struct ata_device *dev = &ap->device[i];
3086 if (ata_dev_enabled(dev) && !dev->sdev) {
3087 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ);
3088 break;
3089 }
3090 }
3091
3092 DPRINTK("EXIT\n");
3093}
3094
3095/**
3096 * ata_scsi_user_scan - indication for user-initiated bus scan
3097 * @shost: SCSI host to scan
3098 * @channel: Channel to scan
3099 * @id: ID to scan
3100 * @lun: LUN to scan
3101 *
3102 * This function is called when user explicitly requests bus
3103 * scan. Set probe pending flag and invoke EH.
3104 *
3105 * LOCKING:
3106 * SCSI layer (we don't care)
3107 *
3108 * RETURNS:
3109 * Zero.
3110 */
3111static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3112 unsigned int id, unsigned int lun)
3113{
3114 struct ata_port *ap = ata_shost_to_port(shost);
3115 unsigned long flags;
3116 int rc = 0;
3117
3118 if (!ap->ops->error_handler)
3119 return -EOPNOTSUPP;
3120
3121 if ((channel != SCAN_WILD_CARD && channel != 0) ||
3122 (lun != SCAN_WILD_CARD && lun != 0))
3123 return -EINVAL;
3124
3125 spin_lock_irqsave(ap->lock, flags);
3126
3127 if (id == SCAN_WILD_CARD) {
3128 ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
3129 ap->eh_info.action |= ATA_EH_SOFTRESET;
3130 } else {
3131 struct ata_device *dev = ata_find_dev(ap, id);
3132
3133 if (dev) {
3134 ap->eh_info.probe_mask |= 1 << dev->devno;
3135 ap->eh_info.action |= ATA_EH_SOFTRESET;
3136 ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
3137 } else
3138 rc = -EINVAL;
3139 }
3140
3141 if (rc == 0)
3142 ata_port_schedule_eh(ap);
3143
3144 spin_unlock_irqrestore(ap->lock, flags);
3145
3146 return rc;
3147}
3148
3149/**
3150 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
3151 * @data: Pointer to ATA port to perform scsi_rescan_device()
3152 *
3153 * After ATA pass thru (SAT) commands are executed successfully,
3154 * libata needs to propagate the changes to the SCSI layer. This
3155 * function must be executed from ata_aux_wq such that sdev
3156 * attach/detach don't race with rescan.
3157 *
3158 * LOCKING:
3159 * Kernel thread context (may sleep).
3160 */
3161void ata_scsi_dev_rescan(void *data)
3162{
3163 struct ata_port *ap = data;
3164 struct ata_device *dev;
3165 unsigned int i;
3166
3167 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3168 dev = &ap->device[i];
3169
3170 if (ata_dev_enabled(dev) && dev->sdev)
3171 scsi_rescan_device(&(dev->sdev->sdev_gendev));
3172 }
3173}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
deleted file mode 100644
index c325679d9b54..000000000000
--- a/drivers/scsi/libata.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * libata.h - helper library for ATA
3 *
4 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 * Copyright 2003-2004 Jeff Garzik
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 */
27
28#ifndef __LIBATA_H__
29#define __LIBATA_H__
30
31#define DRV_NAME "libata"
32#define DRV_VERSION "2.00" /* must be exactly four chars */
33
34struct ata_scsi_args {
35 struct ata_device *dev;
36 u16 *id;
37 struct scsi_cmnd *cmd;
38 void (*done)(struct scsi_cmnd *);
39};
40
41/* libata-core.c */
42extern struct workqueue_struct *ata_aux_wq;
43extern int atapi_enabled;
44extern int atapi_dmadir;
45extern int libata_fua;
46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_dev_disable(struct ata_device *dev);
49extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen);
53extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
54extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
55 int post_reset, u16 *id);
56extern int ata_dev_configure(struct ata_device *dev, int print_info);
57extern int sata_down_spd_limit(struct ata_port *ap);
58extern int sata_set_spd_needed(struct ata_port *ap);
59extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
60extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
61extern void ata_qc_free(struct ata_queued_cmd *qc);
62extern void ata_qc_issue(struct ata_queued_cmd *qc);
63extern void __ata_qc_complete(struct ata_queued_cmd *qc);
64extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
65extern void ata_dev_select(struct ata_port *ap, unsigned int device,
66 unsigned int wait, unsigned int can_sleep);
67extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
68extern int ata_flush_cache(struct ata_device *dev);
69extern void ata_dev_init(struct ata_device *dev);
70extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
71extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
72
73
74/* libata-scsi.c */
75extern struct scsi_transport_template ata_scsi_transport_template;
76
77extern void ata_scsi_scan_host(struct ata_port *ap);
78extern int ata_scsi_offline_dev(struct ata_device *dev);
79extern void ata_scsi_hotplug(void *data);
80extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
81 unsigned int buflen);
82
83extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
84 unsigned int buflen);
85
86extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
87 unsigned int buflen);
88extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
89 unsigned int buflen);
90extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
91 unsigned int buflen);
92extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
93 unsigned int buflen);
94extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
95 unsigned int buflen);
96extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
97 unsigned int buflen);
98extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
99 unsigned int buflen);
100extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
101 void (*done)(struct scsi_cmnd *),
102 u8 asc, u8 ascq);
103extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
104 u8 sk, u8 asc, u8 ascq);
105extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
106 unsigned int (*actor) (struct ata_scsi_args *args,
107 u8 *rbuf, unsigned int buflen));
108extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
109extern void ata_scsi_dev_rescan(void *data);
110
111/* libata-eh.c */
112extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
113extern void ata_scsi_error(struct Scsi_Host *host);
114extern void ata_port_wait_eh(struct ata_port *ap);
115extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
116
117#endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
deleted file mode 100644
index efc8fff1d250..000000000000
--- a/drivers/scsi/pdc_adma.c
+++ /dev/null
@@ -1,740 +0,0 @@
1/*
2 * pdc_adma.c - Pacific Digital Corporation ADMA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Mark Lord
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 *
27 * Supports ATA disks in single-packet ADMA mode.
28 * Uses PIO for everything else.
29 *
30 * TODO: Use ADMA transfers for ATAPI devices, when possible.
31 * This requires careful attention to a number of quirks of the chip.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/device.h>
44#include <scsi/scsi_host.h>
45#include <asm/io.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "pdc_adma"
49#define DRV_VERSION "0.04"
50
51/* macro to calculate base address for ATA regs */
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
53
54/* macro to calculate base address for ADMA regs */
55#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
56
57enum {
58 ADMA_PORTS = 2,
59 ADMA_CPB_BYTES = 40,
60 ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
61 ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES,
62
63 ADMA_DMA_BOUNDARY = 0xffffffff,
64
65 /* global register offsets */
66 ADMA_MODE_LOCK = 0x00c7,
67
68 /* per-channel register offsets */
69 ADMA_CONTROL = 0x0000, /* ADMA control */
70 ADMA_STATUS = 0x0002, /* ADMA status */
71 ADMA_CPB_COUNT = 0x0004, /* CPB count */
72 ADMA_CPB_CURRENT = 0x000c, /* current CPB address */
73 ADMA_CPB_NEXT = 0x000c, /* next CPB address */
74 ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */
75 ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */
76 ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */
77
78 /* ADMA_CONTROL register bits */
79 aNIEN = (1 << 8), /* irq mask: 1==masked */
80 aGO = (1 << 7), /* packet trigger ("Go!") */
81 aRSTADM = (1 << 5), /* ADMA logic reset */
82 aPIOMD4 = 0x0003, /* PIO mode 4 */
83
84 /* ADMA_STATUS register bits */
85 aPSD = (1 << 6),
86 aUIRQ = (1 << 4),
87 aPERR = (1 << 0),
88
89 /* CPB bits */
90 cDONE = (1 << 0),
91 cVLD = (1 << 0),
92 cDAT = (1 << 2),
93 cIEN = (1 << 3),
94
95 /* PRD bits */
96 pORD = (1 << 4),
97 pDIRO = (1 << 5),
98 pEND = (1 << 7),
99
100 /* ATA register flags */
101 rIGN = (1 << 5),
102 rEND = (1 << 7),
103
104 /* ATA register addresses */
105 ADMA_REGS_CONTROL = 0x0e,
106 ADMA_REGS_SECTOR_COUNT = 0x12,
107 ADMA_REGS_LBA_LOW = 0x13,
108 ADMA_REGS_LBA_MID = 0x14,
109 ADMA_REGS_LBA_HIGH = 0x15,
110 ADMA_REGS_DEVICE = 0x16,
111 ADMA_REGS_COMMAND = 0x17,
112
113 /* PCI device IDs */
114 board_1841_idx = 0, /* ADMA 2-port controller */
115};
116
117typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
118
119struct adma_port_priv {
120 u8 *pkt;
121 dma_addr_t pkt_dma;
122 adma_state_t state;
123};
124
125static int adma_ata_init_one (struct pci_dev *pdev,
126 const struct pci_device_id *ent);
127static irqreturn_t adma_intr (int irq, void *dev_instance,
128 struct pt_regs *regs);
129static int adma_port_start(struct ata_port *ap);
130static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap);
138static void adma_irq_clear(struct ata_port *ap);
139static void adma_eng_timeout(struct ata_port *ap);
140
141static struct scsi_host_template adma_ata_sht = {
142 .module = THIS_MODULE,
143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd,
146 .can_queue = ATA_DEF_QUEUE,
147 .this_id = ATA_SHT_THIS_ID,
148 .sg_tablesize = LIBATA_MAX_PRD,
149 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
150 .emulated = ATA_SHT_EMULATED,
151 .use_clustering = ENABLE_CLUSTERING,
152 .proc_name = DRV_NAME,
153 .dma_boundary = ADMA_DMA_BOUNDARY,
154 .slave_configure = ata_scsi_slave_config,
155 .slave_destroy = ata_scsi_slave_destroy,
156 .bios_param = ata_std_bios_param,
157};
158
159static const struct ata_port_operations adma_ata_ops = {
160 .port_disable = ata_port_disable,
161 .tf_load = ata_tf_load,
162 .tf_read = ata_tf_read,
163 .check_status = ata_check_status,
164 .check_atapi_dma = adma_check_atapi_dma,
165 .exec_command = ata_exec_command,
166 .dev_select = ata_std_dev_select,
167 .phy_reset = adma_phy_reset,
168 .qc_prep = adma_qc_prep,
169 .qc_issue = adma_qc_issue,
170 .eng_timeout = adma_eng_timeout,
171 .data_xfer = ata_mmio_data_xfer,
172 .irq_handler = adma_intr,
173 .irq_clear = adma_irq_clear,
174 .port_start = adma_port_start,
175 .port_stop = adma_port_stop,
176 .host_stop = adma_host_stop,
177 .bmdma_stop = adma_bmdma_stop,
178 .bmdma_status = adma_bmdma_status,
179};
180
181static struct ata_port_info adma_port_info[] = {
182 /* board_1841_idx */
183 {
184 .sht = &adma_ata_sht,
185 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
187 ATA_FLAG_PIO_POLLING,
188 .pio_mask = 0x10, /* pio4 */
189 .udma_mask = 0x1f, /* udma0-4 */
190 .port_ops = &adma_ata_ops,
191 },
192};
193
194static const struct pci_device_id adma_ata_pci_tbl[] = {
195 { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
196 board_1841_idx },
197
198 { } /* terminate list */
199};
200
201static struct pci_driver adma_ata_pci_driver = {
202 .name = DRV_NAME,
203 .id_table = adma_ata_pci_tbl,
204 .probe = adma_ata_init_one,
205 .remove = ata_pci_remove_one,
206};
207
208static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
209{
210 return 1; /* ATAPI DMA not yet supported */
211}
212
213static void adma_bmdma_stop(struct ata_queued_cmd *qc)
214{
215 /* nothing */
216}
217
218static u8 adma_bmdma_status(struct ata_port *ap)
219{
220 return 0;
221}
222
223static void adma_irq_clear(struct ata_port *ap)
224{
225 /* nothing */
226}
227
228static void adma_reset_engine(void __iomem *chan)
229{
230 /* reset ADMA to idle state */
231 writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
232 udelay(2);
233 writew(aPIOMD4, chan + ADMA_CONTROL);
234 udelay(2);
235}
236
237static void adma_reinit_engine(struct ata_port *ap)
238{
239 struct adma_port_priv *pp = ap->private_data;
240 void __iomem *mmio_base = ap->host_set->mmio_base;
241 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
242
243 /* mask/clear ATA interrupts */
244 writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
245 ata_check_status(ap);
246
247 /* reset the ADMA engine */
248 adma_reset_engine(chan);
249
250 /* set in-FIFO threshold to 0x100 */
251 writew(0x100, chan + ADMA_FIFO_IN);
252
253 /* set CPB pointer */
254 writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
255
256 /* set out-FIFO threshold to 0x100 */
257 writew(0x100, chan + ADMA_FIFO_OUT);
258
259 /* set CPB count */
260 writew(1, chan + ADMA_CPB_COUNT);
261
262 /* read/discard ADMA status */
263 readb(chan + ADMA_STATUS);
264}
265
266static inline void adma_enter_reg_mode(struct ata_port *ap)
267{
268 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
269
270 writew(aPIOMD4, chan + ADMA_CONTROL);
271 readb(chan + ADMA_STATUS); /* flush */
272}
273
274static void adma_phy_reset(struct ata_port *ap)
275{
276 struct adma_port_priv *pp = ap->private_data;
277
278 pp->state = adma_state_idle;
279 adma_reinit_engine(ap);
280 ata_port_probe(ap);
281 ata_bus_reset(ap);
282}
283
284static void adma_eng_timeout(struct ata_port *ap)
285{
286 struct adma_port_priv *pp = ap->private_data;
287
288 if (pp->state != adma_state_idle) /* healthy paranoia */
289 pp->state = adma_state_mmio;
290 adma_reinit_engine(ap);
291 ata_eng_timeout(ap);
292}
293
294static int adma_fill_sg(struct ata_queued_cmd *qc)
295{
296 struct scatterlist *sg;
297 struct ata_port *ap = qc->ap;
298 struct adma_port_priv *pp = ap->private_data;
299 u8 *buf = pp->pkt;
300 int i = (2 + buf[3]) * 8;
301 u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
302
303 ata_for_each_sg(sg, qc) {
304 u32 addr;
305 u32 len;
306
307 addr = (u32)sg_dma_address(sg);
308 *(__le32 *)(buf + i) = cpu_to_le32(addr);
309 i += 4;
310
311 len = sg_dma_len(sg) >> 3;
312 *(__le32 *)(buf + i) = cpu_to_le32(len);
313 i += 4;
314
315 if (ata_sg_is_last(sg, qc))
316 pFLAGS |= pEND;
317 buf[i++] = pFLAGS;
318 buf[i++] = qc->dev->dma_mode & 0xf;
319 buf[i++] = 0; /* pPKLW */
320 buf[i++] = 0; /* reserved */
321
322 *(__le32 *)(buf + i)
323 = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
324 i += 4;
325
326 VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
327 (unsigned long)addr, len);
328 }
329 return i;
330}
331
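Read back from the stores above, each 16-byte PRD entry holds: a 32-bit bus address, a 32-bit length counted in 8-byte units (hence the >> 3), a flags byte (pORD, pDIRO on writes, pEND on the final entry), the device's DMA mode nibble, two zero filler bytes, and a 32-bit link to the next entry (zero terminates the chain).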
332static void adma_qc_prep(struct ata_queued_cmd *qc)
333{
334 struct adma_port_priv *pp = qc->ap->private_data;
335 u8 *buf = pp->pkt;
336 u32 pkt_dma = (u32)pp->pkt_dma;
337 int i = 0;
338
339 VPRINTK("ENTER\n");
340
341 adma_enter_reg_mode(qc->ap);
342 if (qc->tf.protocol != ATA_PROT_DMA) {
343 ata_qc_prep(qc);
344 return;
345 }
346
347 buf[i++] = 0; /* Response flags */
348 buf[i++] = 0; /* reserved */
349 buf[i++] = cVLD | cDAT | cIEN;
350 i++; /* cLEN, gets filled in below */
351
352 *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */
353 i += 4; /* cNCPB */
354 i += 4; /* cPRD, gets filled in below */
355
356 buf[i++] = 0; /* reserved */
357 buf[i++] = 0; /* reserved */
358 buf[i++] = 0; /* reserved */
359 buf[i++] = 0; /* reserved */
360
361 /* ATA registers; must be a multiple of 4 */
362 buf[i++] = qc->tf.device;
363 buf[i++] = ADMA_REGS_DEVICE;
364 if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
365 buf[i++] = qc->tf.hob_nsect;
366 buf[i++] = ADMA_REGS_SECTOR_COUNT;
367 buf[i++] = qc->tf.hob_lbal;
368 buf[i++] = ADMA_REGS_LBA_LOW;
369 buf[i++] = qc->tf.hob_lbam;
370 buf[i++] = ADMA_REGS_LBA_MID;
371 buf[i++] = qc->tf.hob_lbah;
372 buf[i++] = ADMA_REGS_LBA_HIGH;
373 }
374 buf[i++] = qc->tf.nsect;
375 buf[i++] = ADMA_REGS_SECTOR_COUNT;
376 buf[i++] = qc->tf.lbal;
377 buf[i++] = ADMA_REGS_LBA_LOW;
378 buf[i++] = qc->tf.lbam;
379 buf[i++] = ADMA_REGS_LBA_MID;
380 buf[i++] = qc->tf.lbah;
381 buf[i++] = ADMA_REGS_LBA_HIGH;
382 buf[i++] = 0;
383 buf[i++] = ADMA_REGS_CONTROL;
384 buf[i++] = rIGN;
385 buf[i++] = 0;
386 buf[i++] = qc->tf.command;
387 buf[i++] = ADMA_REGS_COMMAND | rEND;
388
389 buf[3] = (i >> 3) - 2; /* cLEN */
390 *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */
391
392 i = adma_fill_sg(qc);
393 wmb(); /* flush PRDs and pkt to memory */
394#if 0
395 /* dump out CPB + PRDs for debug */
396 {
397 int j, len = 0;
398 static char obuf[2048];
399 for (j = 0; j < i; ++j) {
400 len += sprintf(obuf+len, "%02x ", buf[j]);
401 if ((j & 7) == 7) {
402 printk("%s\n", obuf);
403 len = 0;
404 }
405 }
406 if (len)
407 printk("%s\n", obuf);
408 }
409#endif
410}
411
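Likewise, the CPB assembled above decodes as follows (a reading of this code, not a datasheet quote):

	bytes 0-3    response flags, reserved, control bits (cVLD|cDAT|cIEN), cLEN
	bytes 4-7    cNCPB: bus address of the (single) CPB
	bytes 8-11   cPRD: bus address of the PRD table built by adma_fill_sg()
	bytes 12-15  reserved
	bytes 16-    (value, ATA register address) byte pairs, rEND on the last

cLEN is the packet length in 8-byte units, less the two header quadwords.
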
412static inline void adma_packet_start(struct ata_queued_cmd *qc)
413{
414 struct ata_port *ap = qc->ap;
415 void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
416
417 VPRINTK("ENTER, ap %p\n", ap);
418
419 /* fire up the ADMA engine */
420 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
421}
422
423static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
424{
425 struct adma_port_priv *pp = qc->ap->private_data;
426
427 switch (qc->tf.protocol) {
428 case ATA_PROT_DMA:
429 pp->state = adma_state_pkt;
430 adma_packet_start(qc);
431 return 0;
432
433 case ATA_PROT_ATAPI_DMA:
434 BUG();
435 break;
436
437 default:
438 break;
439 }
440
441 pp->state = adma_state_mmio;
442 return ata_qc_issue_prot(qc);
443}
444
445static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
446{
447 unsigned int handled = 0, port_no;
448 u8 __iomem *mmio_base = host_set->mmio_base;
449
450 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
451 struct ata_port *ap = host_set->ports[port_no];
452 struct adma_port_priv *pp;
453 struct ata_queued_cmd *qc;
454 void __iomem *chan = ADMA_REGS(mmio_base, port_no);
455 u8 status = readb(chan + ADMA_STATUS);
456
457 if (status == 0)
458 continue;
459 handled = 1;
460 adma_enter_reg_mode(ap);
461 if (ap->flags & ATA_FLAG_DISABLED)
462 continue;
463 pp = ap->private_data;
464 if (!pp || pp->state != adma_state_pkt)
465 continue;
466 qc = ata_qc_from_tag(ap, ap->active_tag);
467 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
468 if ((status & (aPERR | aPSD | aUIRQ)))
469 qc->err_mask |= AC_ERR_OTHER;
470 else if (pp->pkt[0] != cDONE)
471 qc->err_mask |= AC_ERR_OTHER;
472
473 ata_qc_complete(qc);
474 }
475 }
476 return handled;
477}
478
479static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
480{
481 unsigned int handled = 0, port_no;
482
483 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
484 struct ata_port *ap;
485 ap = host_set->ports[port_no];
486 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
487 struct ata_queued_cmd *qc;
488 struct adma_port_priv *pp = ap->private_data;
489 if (!pp || pp->state != adma_state_mmio)
490 continue;
491 qc = ata_qc_from_tag(ap, ap->active_tag);
492 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
493
494 /* check main status, clearing INTRQ */
495 u8 status = ata_check_status(ap);
496 if ((status & ATA_BUSY))
497 continue;
498 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
499 ap->id, qc->tf.protocol, status);
500
501 /* complete taskfile transaction */
502 pp->state = adma_state_idle;
503 qc->err_mask |= ac_err_mask(status);
504 ata_qc_complete(qc);
505 handled = 1;
506 }
507 }
508 }
509 return handled;
510}
511
512static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
513{
514 struct ata_host_set *host_set = dev_instance;
515 unsigned int handled = 0;
516
517 VPRINTK("ENTER\n");
518
519 spin_lock(&host_set->lock);
520 handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
521 spin_unlock(&host_set->lock);
522
523 VPRINTK("EXIT\n");
524
525 return IRQ_RETVAL(handled);
526}
527
528static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
529{
530 port->cmd_addr =
531 port->data_addr = base + 0x000;
532 port->error_addr =
533 port->feature_addr = base + 0x004;
534 port->nsect_addr = base + 0x008;
535 port->lbal_addr = base + 0x00c;
536 port->lbam_addr = base + 0x010;
537 port->lbah_addr = base + 0x014;
538 port->device_addr = base + 0x018;
539 port->status_addr =
540 port->command_addr = base + 0x01c;
541 port->altstatus_addr =
542 port->ctl_addr = base + 0x038;
543}
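/* For reference, as the assignments above show: the ADMA BAR exposes
 * the shadow taskfile registers at 4-byte strides from the port base
 * (data at +0x00 through status/command at +0x1c), with the
 * altstatus/control pair off on its own at +0x38.
 */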
544
545static int adma_port_start(struct ata_port *ap)
546{
547 struct device *dev = ap->host_set->dev;
548 struct adma_port_priv *pp;
549 int rc;
550
551 rc = ata_port_start(ap);
552 if (rc)
553 return rc;
554 adma_enter_reg_mode(ap);
555 rc = -ENOMEM;
556 	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
557 if (!pp)
558 goto err_out;
559 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
560 GFP_KERNEL);
561 if (!pp->pkt)
562 goto err_out_kfree;
563 /* paranoia? */
564 if ((pp->pkt_dma & 7) != 0) {
565 		printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
566 (u32)pp->pkt_dma);
567 dma_free_coherent(dev, ADMA_PKT_BYTES,
568 pp->pkt, pp->pkt_dma);
569 goto err_out_kfree;
570 }
571 memset(pp->pkt, 0, ADMA_PKT_BYTES);
572 ap->private_data = pp;
573 adma_reinit_engine(ap);
574 return 0;
575
576err_out_kfree:
577 kfree(pp);
578err_out:
579 ata_port_stop(ap);
580 return rc;
581}
582
583static void adma_port_stop(struct ata_port *ap)
584{
585 struct device *dev = ap->host_set->dev;
586 struct adma_port_priv *pp = ap->private_data;
587
588 adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
589 if (pp != NULL) {
590 ap->private_data = NULL;
591 if (pp->pkt != NULL)
592 dma_free_coherent(dev, ADMA_PKT_BYTES,
593 pp->pkt, pp->pkt_dma);
594 kfree(pp);
595 }
596 ata_port_stop(ap);
597}
598
599static void adma_host_stop(struct ata_host_set *host_set)
600{
601 unsigned int port_no;
602
603 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
604 adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));
605
606 ata_pci_host_stop(host_set);
607}
608
609static void adma_host_init(unsigned int chip_id,
610 struct ata_probe_ent *probe_ent)
611{
612 unsigned int port_no;
613 void __iomem *mmio_base = probe_ent->mmio_base;
614
615 /* enable/lock aGO operation */
616 writeb(7, mmio_base + ADMA_MODE_LOCK);
617
618 /* reset the ADMA logic */
619 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
620 adma_reset_engine(ADMA_REGS(mmio_base, port_no));
621}
622
623static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
624{
625 int rc;
626
627 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
628 if (rc) {
629 dev_printk(KERN_ERR, &pdev->dev,
630 "32-bit DMA enable failed\n");
631 return rc;
632 }
633 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
634 if (rc) {
635 dev_printk(KERN_ERR, &pdev->dev,
636 "32-bit consistent DMA enable failed\n");
637 return rc;
638 }
639 return 0;
640}
641
642static int adma_ata_init_one(struct pci_dev *pdev,
643 const struct pci_device_id *ent)
644{
645 static int printed_version;
646 struct ata_probe_ent *probe_ent = NULL;
647 void __iomem *mmio_base;
648 unsigned int board_idx = (unsigned int) ent->driver_data;
649 int rc, port_no;
650
651 if (!printed_version++)
652 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
653
654 rc = pci_enable_device(pdev);
655 if (rc)
656 return rc;
657
658 rc = pci_request_regions(pdev, DRV_NAME);
659 if (rc)
660 goto err_out;
661
662 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
663 rc = -ENODEV;
664 goto err_out_regions;
665 }
666
667 mmio_base = pci_iomap(pdev, 4, 0);
668 if (mmio_base == NULL) {
669 rc = -ENOMEM;
670 goto err_out_regions;
671 }
672
673 rc = adma_set_dma_masks(pdev, mmio_base);
674 if (rc)
675 goto err_out_iounmap;
676
677 	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
678 if (probe_ent == NULL) {
679 rc = -ENOMEM;
680 goto err_out_iounmap;
681 }
682
683 probe_ent->dev = pci_dev_to_dev(pdev);
684 INIT_LIST_HEAD(&probe_ent->node);
685
686 probe_ent->sht = adma_port_info[board_idx].sht;
687 probe_ent->host_flags = adma_port_info[board_idx].host_flags;
688 probe_ent->pio_mask = adma_port_info[board_idx].pio_mask;
689 probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask;
690 probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;
691 probe_ent->port_ops = adma_port_info[board_idx].port_ops;
692
693 probe_ent->irq = pdev->irq;
694 probe_ent->irq_flags = IRQF_SHARED;
695 probe_ent->mmio_base = mmio_base;
696 probe_ent->n_ports = ADMA_PORTS;
697
698 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
699 adma_ata_setup_port(&probe_ent->port[port_no],
700 ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
701 }
702
703 pci_set_master(pdev);
704
705 /* initialize adapter */
706 adma_host_init(board_idx, probe_ent);
707
708 rc = ata_device_add(probe_ent);
709 kfree(probe_ent);
710 if (rc != ADMA_PORTS)
711 goto err_out_iounmap;
712 return 0;
713
714err_out_iounmap:
715 pci_iounmap(pdev, mmio_base);
716err_out_regions:
717 pci_release_regions(pdev);
718err_out:
719 pci_disable_device(pdev);
720 return rc;
721}
722
723static int __init adma_ata_init(void)
724{
725 	return pci_register_driver(&adma_ata_pci_driver);
726}
727
728static void __exit adma_ata_exit(void)
729{
730 pci_unregister_driver(&adma_ata_pci_driver);
731}
732
733MODULE_AUTHOR("Mark Lord");
734MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
735MODULE_LICENSE("GPL");
736MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
737MODULE_VERSION(DRV_VERSION);
738
739module_init(adma_ata_init);
740module_exit(adma_ata_exit);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
deleted file mode 100644
index fa38a413d16b..000000000000
--- a/drivers/scsi/sata_mv.c
+++ /dev/null
@@ -1,2467 +0,0 @@
1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/blkdev.h>
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/sched.h>
32#include <linux/dma-mapping.h>
33#include <linux/device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_cmnd.h>
36#include <linux/libata.h>
37#include <asm/io.h>
38
39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.7"
41
42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */
44 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
45 MV_IO_BAR = 2, /* offset 0x18: IO space */
46 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
47
48 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
49 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
50
51 MV_PCI_REG_BASE = 0,
52 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
53 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
54 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
55 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
56 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
57 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
58
59 MV_SATAHC0_REG_BASE = 0x20000,
60 MV_FLASH_CTL = 0x1046c,
61 MV_GPIO_PORT_CTL = 0x104f0,
62 MV_RESET_CFG = 0x180d8,
63
64 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
65 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
66 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
67 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
68
69 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
70
71 MV_MAX_Q_DEPTH = 32,
72 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
73
74 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
75 * CRPB needs alignment on a 256B boundary. Size == 256B
76 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
77 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
78 */
79 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
80 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
81 MV_MAX_SG_CT = 176,
82 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
83 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
84
85 MV_PORTS_PER_HC = 4,
86 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
87 MV_PORT_HC_SHIFT = 2,
88 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
89 MV_PORT_MASK = 3,
90
91 /* Host Flags */
92 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
96 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
98
99 CRQB_FLAG_READ = (1 << 0),
100 CRQB_TAG_SHIFT = 1,
101 CRQB_CMD_ADDR_SHIFT = 8,
102 CRQB_CMD_CS = (0x2 << 11),
103 CRQB_CMD_LAST = (1 << 15),
104
105 CRPB_FLAG_STATUS_SHIFT = 8,
106
107 EPRD_FLAG_END_OF_TBL = (1 << 31),
108
109 /* PCI interface registers */
110
111 PCI_COMMAND_OFS = 0xc00,
112
113 PCI_MAIN_CMD_STS_OFS = 0xd30,
114 STOP_PCI_MASTER = (1 << 2),
115 PCI_MASTER_EMPTY = (1 << 3),
116 GLOB_SFT_RST = (1 << 4),
117
118 MV_PCI_MODE = 0xd00,
119 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
120 MV_PCI_DISC_TIMER = 0xd04,
121 MV_PCI_MSI_TRIGGER = 0xc38,
122 MV_PCI_SERR_MASK = 0xc28,
123 MV_PCI_XBAR_TMOUT = 0x1d04,
124 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
125 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
126 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
127 MV_PCI_ERR_COMMAND = 0x1d50,
128
129 PCI_IRQ_CAUSE_OFS = 0x1d58,
130 PCI_IRQ_MASK_OFS = 0x1d5c,
131 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
132
133 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
134 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
135 PORT0_ERR = (1 << 0), /* shift by port # */
136 PORT0_DONE = (1 << 1), /* shift by port # */
137 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
138 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
139 PCI_ERR = (1 << 18),
140 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
141 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
142 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
143 GPIO_INT = (1 << 22),
144 SELF_INT = (1 << 23),
145 TWSI_INT = (1 << 24),
146 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
147 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
148 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
149 HC_MAIN_RSVD),
150
151 /* SATAHC registers */
152 HC_CFG_OFS = 0,
153
154 HC_IRQ_CAUSE_OFS = 0x14,
155 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
156 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
157 DEV_IRQ = (1 << 8), /* shift by port # */
158
159 /* Shadow block registers */
160 SHD_BLK_OFS = 0x100,
161 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
162
163 /* SATA registers */
164 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
165 SATA_ACTIVE_OFS = 0x350,
166 PHY_MODE3 = 0x310,
167 PHY_MODE4 = 0x314,
168 PHY_MODE2 = 0x330,
169 MV5_PHY_MODE = 0x74,
170 MV5_LT_MODE = 0x30,
171 MV5_PHY_CTL = 0x0C,
172 SATA_INTERFACE_CTL = 0x050,
173
174 MV_M2_PREAMP_MASK = 0x7e0,
175
176 /* Port registers */
177 EDMA_CFG_OFS = 0,
178 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
179 EDMA_CFG_NCQ = (1 << 5),
180 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
181 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
182 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
183
184 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
185 EDMA_ERR_IRQ_MASK_OFS = 0xc,
186 EDMA_ERR_D_PAR = (1 << 0),
187 EDMA_ERR_PRD_PAR = (1 << 1),
188 EDMA_ERR_DEV = (1 << 2),
189 EDMA_ERR_DEV_DCON = (1 << 3),
190 EDMA_ERR_DEV_CON = (1 << 4),
191 EDMA_ERR_SERR = (1 << 5),
192 EDMA_ERR_SELF_DIS = (1 << 7),
193 EDMA_ERR_BIST_ASYNC = (1 << 8),
194 EDMA_ERR_CRBQ_PAR = (1 << 9),
195 EDMA_ERR_CRPB_PAR = (1 << 10),
196 EDMA_ERR_INTRL_PAR = (1 << 11),
197 EDMA_ERR_IORDY = (1 << 12),
198 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
199 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
200 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
201 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
202 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
203 EDMA_ERR_TRANS_PROTO = (1 << 31),
204 EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
205 EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
206 EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
207 EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
208 EDMA_ERR_LNK_DATA_RX |
209 EDMA_ERR_LNK_DATA_TX |
210 EDMA_ERR_TRANS_PROTO),
211
212 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
213 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
214
215 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
216 EDMA_REQ_Q_PTR_SHIFT = 5,
217
218 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
219 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
220 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
221 EDMA_RSP_Q_PTR_SHIFT = 3,
222
223 EDMA_CMD_OFS = 0x28,
224 EDMA_EN = (1 << 0),
225 EDMA_DS = (1 << 1),
226 ATA_RST = (1 << 2),
227
228 EDMA_IORDY_TMOUT = 0x34,
229 EDMA_ARB_CFG = 0x38,
230
231 /* Host private flags (hp_flags) */
232 MV_HP_FLAG_MSI = (1 << 0),
233 MV_HP_ERRATA_50XXB0 = (1 << 1),
234 MV_HP_ERRATA_50XXB2 = (1 << 2),
235 MV_HP_ERRATA_60X1B2 = (1 << 3),
236 MV_HP_ERRATA_60X1C0 = (1 << 4),
237 MV_HP_ERRATA_XX42A0 = (1 << 5),
238 MV_HP_50XX = (1 << 6),
239 MV_HP_GEN_IIE = (1 << 7),
240
241 /* Port private flags (pp_flags) */
242 MV_PP_FLAG_EDMA_EN = (1 << 0),
243 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
244};
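/* Worked sizing example, derived purely from the constants above:
 * MV_CRQB_Q_SZ = 32 * 32 = 1024 B, MV_CRPB_Q_SZ = 8 * 32 = 256 B,
 * MV_SG_TBL_SZ = 16 * 176 = 2816 B, so MV_PORT_PRIV_DMA_SZ =
 * 1024 + 256 + 2816 = 4096 B -- the 4KB mentioned in the comment
 * above, and the unit in which mv_port_start() allocates per port.
 */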
245
246#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
247#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
248#define IS_GEN_I(hpriv) IS_50XX(hpriv)
249#define IS_GEN_II(hpriv) IS_60XX(hpriv)
250#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
251
252enum {
253 /* Our DMA boundary is determined by an ePRD being unable to handle
254 * anything larger than 64KB
255 */
256 MV_DMA_BOUNDARY = 0xffffU,
257
258 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
259
260 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
261};
262
263enum chip_type {
264 chip_504x,
265 chip_508x,
266 chip_5080,
267 chip_604x,
268 chip_608x,
269 chip_6042,
270 chip_7042,
271};
272
273/* Command ReQuest Block: 32B */
274struct mv_crqb {
275 __le32 sg_addr;
276 __le32 sg_addr_hi;
277 __le16 ctrl_flags;
278 __le16 ata_cmd[11];
279};
280
281struct mv_crqb_iie {
282 __le32 addr;
283 __le32 addr_hi;
284 __le32 flags;
285 __le32 len;
286 __le32 ata_cmd[4];
287};
288
289/* Command ResPonse Block: 8B */
290struct mv_crpb {
291 __le16 id;
292 __le16 flags;
293 __le32 tmstmp;
294};
295
296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
297struct mv_sg {
298 __le32 addr;
299 __le32 flags_size;
300 __le32 addr_hi;
301 __le32 reserved;
302};
303
304struct mv_port_priv {
305 struct mv_crqb *crqb;
306 dma_addr_t crqb_dma;
307 struct mv_crpb *crpb;
308 dma_addr_t crpb_dma;
309 struct mv_sg *sg_tbl;
310 dma_addr_t sg_tbl_dma;
311 u32 pp_flags;
312};
313
314struct mv_port_signal {
315 u32 amps;
316 u32 pre;
317};
318
319struct mv_host_priv;
320struct mv_hw_ops {
321 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
322 unsigned int port);
323 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
324 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
325 void __iomem *mmio);
326 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
327 unsigned int n_hc);
328 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
329 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
330};
331
332struct mv_host_priv {
333 u32 hp_flags;
334 struct mv_port_signal signal[8];
335 const struct mv_hw_ops *ops;
336};
337
338static void mv_irq_clear(struct ata_port *ap);
339static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
340static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
341static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
342static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
343static void mv_phy_reset(struct ata_port *ap);
344static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
345static void mv_host_stop(struct ata_host_set *host_set);
346static int mv_port_start(struct ata_port *ap);
347static void mv_port_stop(struct ata_port *ap);
348static void mv_qc_prep(struct ata_queued_cmd *qc);
349static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
350static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
351static irqreturn_t mv_interrupt(int irq, void *dev_instance,
352 struct pt_regs *regs);
353static void mv_eng_timeout(struct ata_port *ap);
354static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
355
356static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
357 unsigned int port);
358static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
359static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
360 void __iomem *mmio);
361static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
362 unsigned int n_hc);
363static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
364static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
365
366static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
367 unsigned int port);
368static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
369static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
370 void __iomem *mmio);
371static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
372 unsigned int n_hc);
373static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
374static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
375static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
376 unsigned int port_no);
377static void mv_stop_and_reset(struct ata_port *ap);
378
379static struct scsi_host_template mv_sht = {
380 .module = THIS_MODULE,
381 .name = DRV_NAME,
382 .ioctl = ata_scsi_ioctl,
383 .queuecommand = ata_scsi_queuecmd,
384 .can_queue = MV_USE_Q_DEPTH,
385 .this_id = ATA_SHT_THIS_ID,
386 .sg_tablesize = MV_MAX_SG_CT / 2,
387 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
388 .emulated = ATA_SHT_EMULATED,
389 .use_clustering = ATA_SHT_USE_CLUSTERING,
390 .proc_name = DRV_NAME,
391 .dma_boundary = MV_DMA_BOUNDARY,
392 .slave_configure = ata_scsi_slave_config,
393 .slave_destroy = ata_scsi_slave_destroy,
394 .bios_param = ata_std_bios_param,
395};
396
397static const struct ata_port_operations mv5_ops = {
398 .port_disable = ata_port_disable,
399
400 .tf_load = ata_tf_load,
401 .tf_read = ata_tf_read,
402 .check_status = ata_check_status,
403 .exec_command = ata_exec_command,
404 .dev_select = ata_std_dev_select,
405
406 .phy_reset = mv_phy_reset,
407
408 .qc_prep = mv_qc_prep,
409 .qc_issue = mv_qc_issue,
410 .data_xfer = ata_mmio_data_xfer,
411
412 .eng_timeout = mv_eng_timeout,
413
414 .irq_handler = mv_interrupt,
415 .irq_clear = mv_irq_clear,
416
417 .scr_read = mv5_scr_read,
418 .scr_write = mv5_scr_write,
419
420 .port_start = mv_port_start,
421 .port_stop = mv_port_stop,
422 .host_stop = mv_host_stop,
423};
424
425static const struct ata_port_operations mv6_ops = {
426 .port_disable = ata_port_disable,
427
428 .tf_load = ata_tf_load,
429 .tf_read = ata_tf_read,
430 .check_status = ata_check_status,
431 .exec_command = ata_exec_command,
432 .dev_select = ata_std_dev_select,
433
434 .phy_reset = mv_phy_reset,
435
436 .qc_prep = mv_qc_prep,
437 .qc_issue = mv_qc_issue,
438 .data_xfer = ata_mmio_data_xfer,
439
440 .eng_timeout = mv_eng_timeout,
441
442 .irq_handler = mv_interrupt,
443 .irq_clear = mv_irq_clear,
444
445 .scr_read = mv_scr_read,
446 .scr_write = mv_scr_write,
447
448 .port_start = mv_port_start,
449 .port_stop = mv_port_stop,
450 .host_stop = mv_host_stop,
451};
452
453static const struct ata_port_operations mv_iie_ops = {
454 .port_disable = ata_port_disable,
455
456 .tf_load = ata_tf_load,
457 .tf_read = ata_tf_read,
458 .check_status = ata_check_status,
459 .exec_command = ata_exec_command,
460 .dev_select = ata_std_dev_select,
461
462 .phy_reset = mv_phy_reset,
463
464 .qc_prep = mv_qc_prep_iie,
465 .qc_issue = mv_qc_issue,
466
467 .eng_timeout = mv_eng_timeout,
468
469 .irq_handler = mv_interrupt,
470 .irq_clear = mv_irq_clear,
471
472 .scr_read = mv_scr_read,
473 .scr_write = mv_scr_write,
474
475 .port_start = mv_port_start,
476 .port_stop = mv_port_stop,
477 .host_stop = mv_host_stop,
478};
479
480static const struct ata_port_info mv_port_info[] = {
481 { /* chip_504x */
482 .sht = &mv_sht,
483 .host_flags = MV_COMMON_FLAGS,
484 .pio_mask = 0x1f, /* pio0-4 */
485 .udma_mask = 0x7f, /* udma0-6 */
486 .port_ops = &mv5_ops,
487 },
488 { /* chip_508x */
489 .sht = &mv_sht,
490 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
491 .pio_mask = 0x1f, /* pio0-4 */
492 .udma_mask = 0x7f, /* udma0-6 */
493 .port_ops = &mv5_ops,
494 },
495 { /* chip_5080 */
496 .sht = &mv_sht,
497 .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
498 .pio_mask = 0x1f, /* pio0-4 */
499 .udma_mask = 0x7f, /* udma0-6 */
500 .port_ops = &mv5_ops,
501 },
502 { /* chip_604x */
503 .sht = &mv_sht,
504 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
505 .pio_mask = 0x1f, /* pio0-4 */
506 .udma_mask = 0x7f, /* udma0-6 */
507 .port_ops = &mv6_ops,
508 },
509 { /* chip_608x */
510 .sht = &mv_sht,
511 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
512 MV_FLAG_DUAL_HC),
513 .pio_mask = 0x1f, /* pio0-4 */
514 .udma_mask = 0x7f, /* udma0-6 */
515 .port_ops = &mv6_ops,
516 },
517 { /* chip_6042 */
518 .sht = &mv_sht,
519 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
520 .pio_mask = 0x1f, /* pio0-4 */
521 .udma_mask = 0x7f, /* udma0-6 */
522 .port_ops = &mv_iie_ops,
523 },
524 { /* chip_7042 */
525 .sht = &mv_sht,
526 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
527 MV_FLAG_DUAL_HC),
528 .pio_mask = 0x1f, /* pio0-4 */
529 .udma_mask = 0x7f, /* udma0-6 */
530 .port_ops = &mv_iie_ops,
531 },
532};
533
534static const struct pci_device_id mv_pci_tbl[] = {
535 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},
539
540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
541 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
542 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
543 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
544 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
545
546 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
547 {} /* terminate list */
548};
549
550static struct pci_driver mv_pci_driver = {
551 .name = DRV_NAME,
552 .id_table = mv_pci_tbl,
553 .probe = mv_init_one,
554 .remove = ata_pci_remove_one,
555};
556
557static const struct mv_hw_ops mv5xxx_ops = {
558 .phy_errata = mv5_phy_errata,
559 .enable_leds = mv5_enable_leds,
560 .read_preamp = mv5_read_preamp,
561 .reset_hc = mv5_reset_hc,
562 .reset_flash = mv5_reset_flash,
563 .reset_bus = mv5_reset_bus,
564};
565
566static const struct mv_hw_ops mv6xxx_ops = {
567 .phy_errata = mv6_phy_errata,
568 .enable_leds = mv6_enable_leds,
569 .read_preamp = mv6_read_preamp,
570 .reset_hc = mv6_reset_hc,
571 .reset_flash = mv6_reset_flash,
572 .reset_bus = mv_reset_pci_bus,
573};
574
575/*
576 * module options
577 */
578static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
579
580
581/*
582 * Functions
583 */
584
585static inline void writelfl(unsigned long data, void __iomem *addr)
586{
587 writel(data, addr);
588 (void) readl(addr); /* flush to avoid PCI posted write */
589}
590
591static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
592{
593 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
594}
595
596static inline unsigned int mv_hc_from_port(unsigned int port)
597{
598 return port >> MV_PORT_HC_SHIFT;
599}
600
601static inline unsigned int mv_hardport_from_port(unsigned int port)
602{
603 return port & MV_PORT_MASK;
604}
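/* Example, using nothing beyond the two helpers above: for global
 * port 6 on a dual-HC chip, mv_hc_from_port(6) == 6 >> 2 == 1 and
 * mv_hardport_from_port(6) == 6 & 3 == 2, i.e. hard port 2 on the
 * second host controller.
 */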
605
606static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
607 unsigned int port)
608{
609 return mv_hc_base(base, mv_hc_from_port(port));
610}
611
612static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
613{
614 return mv_hc_base_from_port(base, port) +
615 MV_SATAHC_ARBTR_REG_SZ +
616 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
617}
618
619static inline void __iomem *mv_ap_base(struct ata_port *ap)
620{
621 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
622}
623
624static inline int mv_get_hc_count(unsigned long host_flags)
625{
626 return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
627}
628
629static void mv_irq_clear(struct ata_port *ap)
630{
631}
632
633/**
634 * mv_start_dma - Enable eDMA engine
635 * @base: port base address
636 * @pp: port private data
637 *
638 * Verify the local cache of the eDMA state is accurate with a
639 * WARN_ON.
640 *
641 * LOCKING:
642 * Inherited from caller.
643 */
644static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
645{
646 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
647 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
648 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
649 }
650 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
651}
652
653/**
654 * mv_stop_dma - Disable eDMA engine
655 * @ap: ATA channel to manipulate
656 *
657 * Verify the local cache of the eDMA state is accurate with a
658 * WARN_ON.
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663static void mv_stop_dma(struct ata_port *ap)
664{
665 void __iomem *port_mmio = mv_ap_base(ap);
666 struct mv_port_priv *pp = ap->private_data;
667 u32 reg;
668 int i;
669
670 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
671 /* Disable EDMA if active. The disable bit auto clears.
672 */
673 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
674 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
675 } else {
676 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
677 }
678
679 /* now properly wait for the eDMA to stop */
680 for (i = 1000; i > 0; i--) {
681 reg = readl(port_mmio + EDMA_CMD_OFS);
682 if (!(EDMA_EN & reg)) {
683 break;
684 }
685 udelay(100);
686 }
687
688 if (EDMA_EN & reg) {
689 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
690 /* FIXME: Consider doing a reset here to recover */
691 }
692}
693
694#ifdef ATA_DEBUG
695static void mv_dump_mem(void __iomem *start, unsigned bytes)
696{
697 int b, w;
698 for (b = 0; b < bytes; ) {
699 DPRINTK("%p: ", start + b);
700 for (w = 0; b < bytes && w < 4; w++) {
701 			printk("%08x ", readl(start + b));
702 b += sizeof(u32);
703 }
704 printk("\n");
705 }
706}
707#endif
708
709static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
710{
711#ifdef ATA_DEBUG
712 int b, w;
713 u32 dw;
714 for (b = 0; b < bytes; ) {
715 DPRINTK("%02x: ", b);
716 for (w = 0; b < bytes && w < 4; w++) {
717 			(void) pci_read_config_dword(pdev, b, &dw);
718 			printk("%08x ", dw);
719 b += sizeof(u32);
720 }
721 printk("\n");
722 }
723#endif
724}
725static void mv_dump_all_regs(void __iomem *mmio_base, int port,
726 struct pci_dev *pdev)
727{
728#ifdef ATA_DEBUG
729 void __iomem *hc_base = mv_hc_base(mmio_base,
730 port >> MV_PORT_HC_SHIFT);
731 void __iomem *port_base;
732 int start_port, num_ports, p, start_hc, num_hcs, hc;
733
734 if (0 > port) {
735 start_hc = start_port = 0;
736 		num_ports = 8;	/* should be benign for 4-port devices */
737 num_hcs = 2;
738 } else {
739 start_hc = port >> MV_PORT_HC_SHIFT;
740 start_port = port;
741 num_ports = num_hcs = 1;
742 }
743 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
744 num_ports > 1 ? num_ports - 1 : start_port);
745
746 if (NULL != pdev) {
747 DPRINTK("PCI config space regs:\n");
748 mv_dump_pci_cfg(pdev, 0x68);
749 }
750 DPRINTK("PCI regs:\n");
751 mv_dump_mem(mmio_base+0xc00, 0x3c);
752 mv_dump_mem(mmio_base+0xd00, 0x34);
753 mv_dump_mem(mmio_base+0xf00, 0x4);
754 mv_dump_mem(mmio_base+0x1d00, 0x6c);
755 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
756 hc_base = mv_hc_base(mmio_base, hc);
757 DPRINTK("HC regs (HC %i):\n", hc);
758 mv_dump_mem(hc_base, 0x1c);
759 }
760 for (p = start_port; p < start_port + num_ports; p++) {
761 port_base = mv_port_base(mmio_base, p);
762 		DPRINTK("EDMA regs (port %i):\n", p);
763 		mv_dump_mem(port_base, 0x54);
764 		DPRINTK("SATA regs (port %i):\n", p);
765 		mv_dump_mem(port_base + 0x300, 0x60);
766 }
767#endif
768}
769
770static unsigned int mv_scr_offset(unsigned int sc_reg_in)
771{
772 unsigned int ofs;
773
774 switch (sc_reg_in) {
775 case SCR_STATUS:
776 case SCR_CONTROL:
777 case SCR_ERROR:
778 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
779 break;
780 case SCR_ACTIVE:
781 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
782 break;
783 default:
784 ofs = 0xffffffffU;
785 break;
786 }
787 return ofs;
788}
789
790static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
791{
792 unsigned int ofs = mv_scr_offset(sc_reg_in);
793
794 if (0xffffffffU != ofs) {
795 return readl(mv_ap_base(ap) + ofs);
796 } else {
797 return (u32) ofs;
798 }
799}
800
801static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
802{
803 unsigned int ofs = mv_scr_offset(sc_reg_in);
804
805 if (0xffffffffU != ofs) {
806 writelfl(val, mv_ap_base(ap) + ofs);
807 }
808}
809
810/**
811 * mv_host_stop - Host specific cleanup/stop routine.
812 * @host_set: host data structure
813 *
814 * Disable ints, cleanup host memory, call general purpose
815 * host_stop.
816 *
817 * LOCKING:
818 * Inherited from caller.
819 */
820static void mv_host_stop(struct ata_host_set *host_set)
821{
822 struct mv_host_priv *hpriv = host_set->private_data;
823 struct pci_dev *pdev = to_pci_dev(host_set->dev);
824
825 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
826 pci_disable_msi(pdev);
827 } else {
828 pci_intx(pdev, 0);
829 }
830 kfree(hpriv);
831 ata_host_stop(host_set);
832}
833
834static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
835{
836 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
837}
838
839static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
840{
841 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
842
843 /* set up non-NCQ EDMA configuration */
844 cfg &= ~0x1f; /* clear queue depth */
845 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
846 cfg &= ~(1 << 9); /* disable equeue */
847
848 if (IS_GEN_I(hpriv))
849 cfg |= (1 << 8); /* enab config burst size mask */
850
851 else if (IS_GEN_II(hpriv))
852 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
853
854 else if (IS_GEN_IIE(hpriv)) {
855 cfg |= (1 << 23); /* dis RX PM port mask */
856 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
857 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
858 cfg |= (1 << 18); /* enab early completion */
859 cfg |= (1 << 17); /* enab host q cache */
860 cfg |= (1 << 22); /* enab cutthrough */
861 }
862
863 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
864}
865
866/**
867 * mv_port_start - Port specific init/start routine.
868 * @ap: ATA channel to manipulate
869 *
870 * Allocate and point to DMA memory, init port private memory,
871 * zero indices.
872 *
873 * LOCKING:
874 * Inherited from caller.
875 */
876static int mv_port_start(struct ata_port *ap)
877{
878 struct device *dev = ap->host_set->dev;
879 struct mv_host_priv *hpriv = ap->host_set->private_data;
880 struct mv_port_priv *pp;
881 void __iomem *port_mmio = mv_ap_base(ap);
882 void *mem;
883 dma_addr_t mem_dma;
884 int rc = -ENOMEM;
885
886 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
887 if (!pp)
888 goto err_out;
889 memset(pp, 0, sizeof(*pp));
890
891 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
892 GFP_KERNEL);
893 if (!mem)
894 goto err_out_pp;
895 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
896
897 rc = ata_pad_alloc(ap, dev);
898 if (rc)
899 goto err_out_priv;
900
901 /* First item in chunk of DMA memory:
902 * 32-slot command request table (CRQB), 32 bytes each in size
903 */
904 pp->crqb = mem;
905 pp->crqb_dma = mem_dma;
906 mem += MV_CRQB_Q_SZ;
907 mem_dma += MV_CRQB_Q_SZ;
908
909 /* Second item:
910 * 32-slot command response table (CRPB), 8 bytes each in size
911 */
912 pp->crpb = mem;
913 pp->crpb_dma = mem_dma;
914 mem += MV_CRPB_Q_SZ;
915 mem_dma += MV_CRPB_Q_SZ;
916
917 /* Third item:
918 * Table of scatter-gather descriptors (ePRD), 16 bytes each
919 */
920 pp->sg_tbl = mem;
921 pp->sg_tbl_dma = mem_dma;
922
923 mv_edma_cfg(hpriv, port_mmio);
924
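	/* Note: "(x >> 16) >> 16" below extracts the high 32 bits of a
	 * dma_addr_t without provoking undefined behavior (or a compiler
	 * warning) from a ">> 32" when dma_addr_t is only 32 bits wide.
	 */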
925 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
926 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
927 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
928
929 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
930 writelfl(pp->crqb_dma & 0xffffffff,
931 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
932 else
933 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
934
935 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
936
937 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
938 writelfl(pp->crpb_dma & 0xffffffff,
939 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
940 else
941 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
942
943 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
944 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
945
946 /* Don't turn on EDMA here...do it before DMA commands only. Else
947 * we'll be unable to send non-data, PIO, etc due to restricted access
948 * to shadow regs.
949 */
950 ap->private_data = pp;
951 return 0;
952
953err_out_priv:
954 mv_priv_free(pp, dev);
955err_out_pp:
956 kfree(pp);
957err_out:
958 return rc;
959}
960
961/**
962 * mv_port_stop - Port specific cleanup/stop routine.
963 * @ap: ATA channel to manipulate
964 *
965 * Stop DMA, cleanup port memory.
966 *
967 * LOCKING:
968 * This routine uses the host_set lock to protect the DMA stop.
969 */
970static void mv_port_stop(struct ata_port *ap)
971{
972 struct device *dev = ap->host_set->dev;
973 struct mv_port_priv *pp = ap->private_data;
974 unsigned long flags;
975
976 spin_lock_irqsave(&ap->host_set->lock, flags);
977 mv_stop_dma(ap);
978 spin_unlock_irqrestore(&ap->host_set->lock, flags);
979
980 ap->private_data = NULL;
981 ata_pad_free(ap, dev);
982 mv_priv_free(pp, dev);
983 kfree(pp);
984}
985
986/**
987 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
988 * @qc: queued command whose SG list to source from
989 *
990 * Populate the SG list and mark the last entry.
991 *
992 * LOCKING:
993 * Inherited from caller.
994 */
995static void mv_fill_sg(struct ata_queued_cmd *qc)
996{
997 struct mv_port_priv *pp = qc->ap->private_data;
998 unsigned int i = 0;
999 struct scatterlist *sg;
1000
1001 ata_for_each_sg(sg, qc) {
1002 dma_addr_t addr;
1003 u32 sg_len, len, offset;
1004
1005 addr = sg_dma_address(sg);
1006 sg_len = sg_dma_len(sg);
1007
1008 while (sg_len) {
1009 offset = addr & MV_DMA_BOUNDARY;
1010 len = sg_len;
1011 if ((offset + sg_len) > 0x10000)
1012 len = 0x10000 - offset;
1013
1014 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
1015 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1016 pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
1017
1018 sg_len -= len;
1019 addr += len;
1020
1021 if (!sg_len && ata_sg_is_last(sg, qc))
1022 pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1023
1024 i++;
1025 }
1026 }
1027}
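/* Boundary-split example (pure arithmetic on the loop above, with
 * MV_DMA_BOUNDARY == 0xffff): an SG segment of 0x300 bytes at bus
 * address 0x1ff00 straddles a 64KB boundary, so it is emitted as two
 * ePRDs: 0x100 bytes at 0x1ff00, then 0x200 bytes at 0x20000.
 */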
1028
1029static inline unsigned mv_inc_q_index(unsigned index)
1030{
1031 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1032}
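/* e.g. with MV_MAX_Q_DEPTH_MASK == 31: mv_inc_q_index(5) == 6 and
 * mv_inc_q_index(31) == 0, wrapping the 32-entry ring.
 */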
1033
1034static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1035{
1036 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1037 (last ? CRQB_CMD_LAST : 0);
1038 *cmdw = cpu_to_le16(tmp);
1039}
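/* Worked packing example (values taken from the CRQB_* constants
 * above; ATA_REG_CMD is 0x07 in <linux/ata.h> of this era):
 *   mv_crqb_pack_cmd(cw, 0xca, ATA_REG_CMD, 1)
 * yields 0xca | (0x07 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST
 *     == 0x00ca | 0x0700 | 0x1000 | 0x8000 == 0x97ca,
 * stored little-endian as the final command word of the CRQB.
 */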
1040
1041/**
1042 * mv_qc_prep - Host specific command preparation.
1043 * @qc: queued command to prepare
1044 *
1045 * This routine returns early if the command is not DMA.
1046 * Otherwise, it handles prep of the CRQB
1047 * (command request block), does some sanity checking, and calls
1048 * the SG load routine.
1049 *
1050 * LOCKING:
1051 * Inherited from caller.
1052 */
1053static void mv_qc_prep(struct ata_queued_cmd *qc)
1054{
1055 struct ata_port *ap = qc->ap;
1056 struct mv_port_priv *pp = ap->private_data;
1057 __le16 *cw;
1058 struct ata_taskfile *tf;
1059 u16 flags = 0;
1060 unsigned in_index;
1061
1062 if (ATA_PROT_DMA != qc->tf.protocol)
1063 return;
1064
1065 /* Fill in command request block
1066 */
1067 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1068 flags |= CRQB_FLAG_READ;
1069 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1070 flags |= qc->tag << CRQB_TAG_SHIFT;
1071
1072 /* get current queue index from hardware */
1073 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1074 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1075
1076 pp->crqb[in_index].sg_addr =
1077 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1078 pp->crqb[in_index].sg_addr_hi =
1079 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1080 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1081
1082 cw = &pp->crqb[in_index].ata_cmd[0];
1083 tf = &qc->tf;
1084
1085 	/* Sadly, the CRQB cannot accommodate all registers--there are
1086 * only 11 bytes...so we must pick and choose required
1087 * registers based on the command. So, we drop feature and
1088 * hob_feature for [RW] DMA commands, but they are needed for
1089 * NCQ. NCQ will drop hob_nsect.
1090 */
1091 switch (tf->command) {
1092 case ATA_CMD_READ:
1093 case ATA_CMD_READ_EXT:
1094 case ATA_CMD_WRITE:
1095 case ATA_CMD_WRITE_EXT:
1096 case ATA_CMD_WRITE_FUA_EXT:
1097 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1098 break;
1099#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1100 case ATA_CMD_FPDMA_READ:
1101 case ATA_CMD_FPDMA_WRITE:
1102 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1103 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1104 break;
1105#endif /* FIXME: remove this line when NCQ added */
1106 default:
1107 /* The only other commands EDMA supports in non-queued and
1108 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1109 * of which are defined/used by Linux. If we get here, this
1110 * driver needs work.
1111 *
1112 * FIXME: modify libata to give qc_prep a return value and
1113 * return error here.
1114 */
1115 BUG_ON(tf->command);
1116 break;
1117 }
1118 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1119 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1120 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1121 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1122 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1123 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1124 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1125 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1126 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1127
1128 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1129 return;
1130 mv_fill_sg(qc);
1131}
1132
1133/**
1134 * mv_qc_prep_iie - Host specific command preparation.
1135 * @qc: queued command to prepare
1136 *
1137 * This routine returns early if the command is not DMA.
1138 * Otherwise, it handles prep of the Gen IIE CRQB
1139 * (command request block), does some sanity checking, and calls
1140 * the SG load routine.
1141 *
1142 * LOCKING:
1143 * Inherited from caller.
1144 */
1145static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1146{
1147 struct ata_port *ap = qc->ap;
1148 struct mv_port_priv *pp = ap->private_data;
1149 struct mv_crqb_iie *crqb;
1150 struct ata_taskfile *tf;
1151 unsigned in_index;
1152 u32 flags = 0;
1153
1154 if (ATA_PROT_DMA != qc->tf.protocol)
1155 return;
1156
1157 /* Fill in Gen IIE command request block
1158 */
1159 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1160 flags |= CRQB_FLAG_READ;
1161
1162 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1163 flags |= qc->tag << CRQB_TAG_SHIFT;
1164
1165 /* get current queue index from hardware */
1166 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1167 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1168
1169 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1170 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1171 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1172 crqb->flags = cpu_to_le32(flags);
1173
1174 tf = &qc->tf;
1175 crqb->ata_cmd[0] = cpu_to_le32(
1176 (tf->command << 16) |
1177 (tf->feature << 24)
1178 );
1179 crqb->ata_cmd[1] = cpu_to_le32(
1180 (tf->lbal << 0) |
1181 (tf->lbam << 8) |
1182 (tf->lbah << 16) |
1183 (tf->device << 24)
1184 );
1185 crqb->ata_cmd[2] = cpu_to_le32(
1186 (tf->hob_lbal << 0) |
1187 (tf->hob_lbam << 8) |
1188 (tf->hob_lbah << 16) |
1189 (tf->hob_feature << 24)
1190 );
1191 crqb->ata_cmd[3] = cpu_to_le32(
1192 (tf->nsect << 0) |
1193 (tf->hob_nsect << 8)
1194 );
1195
1196 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1197 return;
1198 mv_fill_sg(qc);
1199}
1200
1201/**
1202 * mv_qc_issue - Initiate a command to the host
1203 * @qc: queued command to start
1204 *
1205 * This routine simply redirects to the general purpose routine
1206 * if command is not DMA. Else, it sanity checks our local
1207 * caches of the request producer/consumer indices then enables
1208 * DMA and bumps the request producer index.
1209 *
1210 * LOCKING:
1211 * Inherited from caller.
1212 */
1213static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1214{
1215 void __iomem *port_mmio = mv_ap_base(qc->ap);
1216 struct mv_port_priv *pp = qc->ap->private_data;
1217 unsigned in_index;
1218 u32 in_ptr;
1219
1220 if (ATA_PROT_DMA != qc->tf.protocol) {
1221 /* We're about to send a non-EDMA capable command to the
1222 * port. Turn off EDMA so there won't be problems accessing
1223 * shadow block, etc registers.
1224 */
1225 mv_stop_dma(qc->ap);
1226 return ata_qc_issue_prot(qc);
1227 }
1228
1229 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1230 in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1231
1232 /* until we do queuing, the queue should be empty at this point */
1233 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1234 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1235
1236 in_index = mv_inc_q_index(in_index); /* now incr producer index */
1237
1238 mv_start_dma(port_mmio, pp);
1239
1240 /* and write the request in pointer to kick the EDMA to life */
1241 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1242 in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1243 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1244
1245 return 0;
1246}
1247
1248/**
1249 * mv_get_crpb_status - get status from most recently completed cmd
1250 * @ap: ATA channel to manipulate
1251 *
1252 * This routine is for use when the port is in DMA mode, when it
1253 * will be using the CRPB (command response block) method of
1254 * returning command completion information. We check indices
1255 * are good, grab status, and bump the response consumer index to
1256 * prove that we're up to date.
1257 *
1258 * LOCKING:
1259 * Inherited from caller.
1260 */
1261static u8 mv_get_crpb_status(struct ata_port *ap)
1262{
1263 void __iomem *port_mmio = mv_ap_base(ap);
1264 struct mv_port_priv *pp = ap->private_data;
1265 unsigned out_index;
1266 u32 out_ptr;
1267 u8 ata_status;
1268
1269 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1270 out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1271
1272 ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1273 >> CRPB_FLAG_STATUS_SHIFT;
1274
1275 /* increment our consumer index... */
1276 out_index = mv_inc_q_index(out_index);
1277
1278 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1279 WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1280 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1281
1282 /* write out our inc'd consumer index so EDMA knows we're caught up */
1283 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
1284 out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
1285 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1286
1287 /* Return ATA status register for completed CRPB */
1288 return ata_status;
1289}
1290
1291/**
1292 * mv_err_intr - Handle error interrupts on the port
1293 * @ap: ATA channel to manipulate
1294 * @reset_allowed: bool: 0 == don't trigger from reset here
1295 *
1296 * In most cases, just clear the interrupt and move on. However,
1297 * some cases require an eDMA reset, which is done right before
1298 * the COMRESET in mv_phy_reset(). The SERR case requires a
1299 * clear of pending errors in the SATA SERROR register. Finally,
1300 * if the port disabled DMA, update our cached copy to match.
1301 *
1302 * LOCKING:
1303 * Inherited from caller.
1304 */
1305static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1306{
1307 void __iomem *port_mmio = mv_ap_base(ap);
1308 u32 edma_err_cause, serr = 0;
1309
1310 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1311
1312 if (EDMA_ERR_SERR & edma_err_cause) {
1313 sata_scr_read(ap, SCR_ERROR, &serr);
1314 sata_scr_write_flush(ap, SCR_ERROR, serr);
1315 }
1316 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1317 struct mv_port_priv *pp = ap->private_data;
1318 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1319 }
1320 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1321 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
1322
1323 /* Clear EDMA now that SERR cleanup done */
1324 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1325
1326 /* check for fatal here and recover if needed */
1327 if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
1328 mv_stop_and_reset(ap);
1329}
1330
1331/**
1332 * mv_host_intr - Handle all interrupts on the given host controller
1333 * @host_set: host specific structure
1334 * @relevant: port error bits relevant to this host controller
1335 * @hc: which host controller we're to look at
1336 *
1337 * Read then write clear the HC interrupt status then walk each
1338 * port connected to the HC and see if it needs servicing. Port
1339 * success ints are reported in the HC interrupt status reg, the
1340 * port error ints are reported in the higher level main
1341 * interrupt status register and thus are passed in via the
1342 * 'relevant' argument.
1343 *
1344 * LOCKING:
1345 * Inherited from caller.
1346 */
1347static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1348 unsigned int hc)
1349{
1350 void __iomem *mmio = host_set->mmio_base;
1351 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1352 struct ata_queued_cmd *qc;
1353 u32 hc_irq_cause;
1354 int shift, port, port0, hard_port, handled;
1355 unsigned int err_mask;
1356
1357 if (hc == 0) {
1358 port0 = 0;
1359 } else {
1360 port0 = MV_PORTS_PER_HC;
1361 }
1362
1363 /* we'll need the HC success int register in most cases */
1364 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1365 if (hc_irq_cause) {
1366 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1367 }
1368
1369 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1370 		hc, relevant, hc_irq_cause);
1371
1372 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1373 u8 ata_status = 0;
1374 struct ata_port *ap = host_set->ports[port];
1375 struct mv_port_priv *pp = ap->private_data;
1376
1377 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1378 handled = 0; /* ensure ata_status is set if handled++ */
1379
1380 /* Note that DEV_IRQ might happen spuriously during EDMA,
1381 * and should be ignored in such cases.
1382 * The cause of this is still under investigation.
1383 */
1384 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1385 /* EDMA: check for response queue interrupt */
1386 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
1387 ata_status = mv_get_crpb_status(ap);
1388 handled = 1;
1389 }
1390 } else {
1391 /* PIO: check for device (drive) interrupt */
1392 if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1393 ata_status = readb((void __iomem *)
1394 ap->ioaddr.status_addr);
1395 handled = 1;
1396 /* ignore spurious intr if drive still BUSY */
1397 if (ata_status & ATA_BUSY) {
1398 ata_status = 0;
1399 handled = 0;
1400 }
1401 }
1402 }
1403
1404 if (ap && (ap->flags & ATA_FLAG_DISABLED))
1405 continue;
1406
1407 err_mask = ac_err_mask(ata_status);
1408
1409 shift = port << 1; /* (port * 2) */
1410 if (port >= MV_PORTS_PER_HC) {
1411 shift++; /* skip bit 8 in the HC Main IRQ reg */
1412 }
1413 if ((PORT0_ERR << shift) & relevant) {
1414 mv_err_intr(ap, 1);
1415 err_mask |= AC_ERR_OTHER;
1416 handled = 1;
1417 }
1418
1419 if (handled) {
1420 qc = ata_qc_from_tag(ap, ap->active_tag);
1421 if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
1422 VPRINTK("port %u IRQ found for qc, "
1423 "ata_status 0x%x\n", port,ata_status);
1424 /* mark qc status appropriately */
1425 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1426 qc->err_mask |= err_mask;
1427 ata_qc_complete(qc);
1428 }
1429 }
1430 }
1431 }
1432 VPRINTK("EXIT\n");
1433}
1434
1435/**
1436 * mv_interrupt - Main interrupt event handler
1437 * @irq: unused
1438 * @dev_instance: private data; in this case the host structure
1439 * @regs: unused
1440 *
1441 * Read the read-only main IRQ cause register to determine if any host
1442 * controllers have pending interrupts. If so, call lower level
1443 * routine to handle. Also check for PCI errors which are only
1444 * reported here.
1445 *
1446 * LOCKING:
1447 * This routine holds the host_set lock while processing pending
1448 * interrupts.
1449 */
1450static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1451 struct pt_regs *regs)
1452{
1453 struct ata_host_set *host_set = dev_instance;
1454 unsigned int hc, handled = 0, n_hcs;
1455 void __iomem *mmio = host_set->mmio_base;
1456 struct mv_host_priv *hpriv;
1457 u32 irq_stat;
1458
1459 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1460
1461 /* check the cases where we either have nothing pending or have read
1462 * a bogus register value which can indicate HW removal or PCI fault
1463 */
1464 if (!irq_stat || (0xffffffffU == irq_stat)) {
1465 return IRQ_NONE;
1466 }
1467
1468 n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
1469 spin_lock(&host_set->lock);
1470
1471 for (hc = 0; hc < n_hcs; hc++) {
1472 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1473 if (relevant) {
1474 mv_host_intr(host_set, relevant, hc);
1475 handled++;
1476 }
1477 }
1478
1479 hpriv = host_set->private_data;
1480 if (IS_60XX(hpriv)) {
1481 /* deal with the interrupt coalescing bits */
1482 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1483 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1484 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1485 writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1486 }
1487 }
1488
1489 if (PCI_ERR & irq_stat) {
1490 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1491 readl(mmio + PCI_IRQ_CAUSE_OFS));
1492
1493 DPRINTK("All regs @ PCI error\n");
1494 mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
1495
1496 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1497 handled++;
1498 }
1499 spin_unlock(&host_set->lock);
1500
1501 return IRQ_RETVAL(handled);
1502}
1503
1504static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1505{
1506 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1507 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1508
1509 return hc_mmio + ofs;
1510}
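/* Example, from the two lines above: for port 5, hc_mmio points at
 * HC1 (5 >> 2 == 1) and ofs == (1 + 1) * 0x100 == 0x200, so the 50xx
 * per-port PHY/SCR block sits 0x200 bytes into its HC register area.
 */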
1511
1512static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1513{
1514 unsigned int ofs;
1515
1516 switch (sc_reg_in) {
1517 case SCR_STATUS:
1518 case SCR_ERROR:
1519 case SCR_CONTROL:
1520 ofs = sc_reg_in * sizeof(u32);
1521 break;
1522 default:
1523 ofs = 0xffffffffU;
1524 break;
1525 }
1526 return ofs;
1527}
1528
1529static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1530{
1531 void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
1532 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1533
1534 if (ofs != 0xffffffffU)
1535 return readl(mmio + ofs);
1536 else
1537 return (u32) ofs;
1538}
1539
1540static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1541{
1542 void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
1543 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1544
1545 if (ofs != 0xffffffffU)
1546 writelfl(val, mmio + ofs);
1547}
1548
1549static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1550{
1551 u8 rev_id;
1552 int early_5080;
1553
1554 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1555
1556 early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1557
1558 if (!early_5080) {
1559 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1560 tmp |= (1 << 0);
1561 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1562 }
1563
1564 mv_reset_pci_bus(pdev, mmio);
1565}
1566
1567static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1568{
1569 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1570}
1571
1572static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1573 void __iomem *mmio)
1574{
1575 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1576 u32 tmp;
1577
1578 tmp = readl(phy_mmio + MV5_PHY_MODE);
1579
1580 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1581 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1582}
1583
1584static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1585{
1586 u32 tmp;
1587
1588 writel(0, mmio + MV_GPIO_PORT_CTL);
1589
1590 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1591
1592 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1593 tmp |= ~(1 << 0);
1594 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1595}
1596
1597static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1598 unsigned int port)
1599{
1600 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1601 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1602 u32 tmp;
1603 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1604
1605 if (fix_apm_sq) {
1606 tmp = readl(phy_mmio + MV5_LT_MODE);
1607 tmp |= (1 << 19);
1608 writel(tmp, phy_mmio + MV5_LT_MODE);
1609
1610 tmp = readl(phy_mmio + MV5_PHY_CTL);
1611 tmp &= ~0x3;
1612 tmp |= 0x1;
1613 writel(tmp, phy_mmio + MV5_PHY_CTL);
1614 }
1615
1616 tmp = readl(phy_mmio + MV5_PHY_MODE);
1617 tmp &= ~mask;
1618 tmp |= hpriv->signal[port].pre;
1619 tmp |= hpriv->signal[port].amps;
1620 writel(tmp, phy_mmio + MV5_PHY_MODE);
1621}
1622
1623
1624#undef ZERO
1625#define ZERO(reg) writel(0, port_mmio + (reg))
1626static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1627 unsigned int port)
1628{
1629 void __iomem *port_mmio = mv_port_base(mmio, port);
1630
1631 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1632
1633 mv_channel_reset(hpriv, mmio, port);
1634
1635 ZERO(0x028); /* command */
1636 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1637 ZERO(0x004); /* timer */
1638 ZERO(0x008); /* irq err cause */
1639 ZERO(0x00c); /* irq err mask */
1640 ZERO(0x010); /* rq bah */
1641 ZERO(0x014); /* rq inp */
1642 ZERO(0x018); /* rq outp */
1643 ZERO(0x01c); /* respq bah */
1644 ZERO(0x024); /* respq outp */
1645 ZERO(0x020); /* respq inp */
1646 ZERO(0x02c); /* test control */
1647 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1648}
1649#undef ZERO
1650
1651#define ZERO(reg) writel(0, hc_mmio + (reg))
1652static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1653 unsigned int hc)
1654{
1655 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1656 u32 tmp;
1657
1658 ZERO(0x00c);
1659 ZERO(0x010);
1660 ZERO(0x014);
1661 ZERO(0x018);
1662
1663 tmp = readl(hc_mmio + 0x20);
1664 tmp &= 0x1c1c1c1c;
1665 tmp |= 0x03030303;
1666 writel(tmp, hc_mmio + 0x20);
1667}
1668#undef ZERO
1669
1670static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1671 unsigned int n_hc)
1672{
1673 unsigned int hc, port;
1674
1675 for (hc = 0; hc < n_hc; hc++) {
1676 for (port = 0; port < MV_PORTS_PER_HC; port++)
1677 mv5_reset_hc_port(hpriv, mmio,
1678 (hc * MV_PORTS_PER_HC) + port);
1679
1680 mv5_reset_one_hc(hpriv, mmio, hc);
1681 }
1682
1683 return 0;
1684}
1685
1686#undef ZERO
1687#define ZERO(reg) writel(0, mmio + (reg))
1688static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1689{
1690 u32 tmp;
1691
1692 tmp = readl(mmio + MV_PCI_MODE);
1693 tmp &= 0xff00ffff;
1694 writel(tmp, mmio + MV_PCI_MODE);
1695
1696 ZERO(MV_PCI_DISC_TIMER);
1697 ZERO(MV_PCI_MSI_TRIGGER);
1698 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1699 ZERO(HC_MAIN_IRQ_MASK_OFS);
1700 ZERO(MV_PCI_SERR_MASK);
1701 ZERO(PCI_IRQ_CAUSE_OFS);
1702 ZERO(PCI_IRQ_MASK_OFS);
1703 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1704 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1705 ZERO(MV_PCI_ERR_ATTRIBUTE);
1706 ZERO(MV_PCI_ERR_COMMAND);
1707}
1708#undef ZERO
1709
1710static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1711{
1712 u32 tmp;
1713
1714 mv5_reset_flash(hpriv, mmio);
1715
1716 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1717 tmp &= 0x3;
1718 tmp |= (1 << 5) | (1 << 6);
1719 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1720}
1721
1722/**
1723 * mv6_reset_hc - Perform the 6xxx global soft reset
1724 * @mmio: base address of the HBA
1725 *
1726 * This routine only applies to 6xxx parts.
1727 *
1728 * LOCKING:
1729 * Inherited from caller.
1730 */
1731static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1732 unsigned int n_hc)
1733{
1734 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1735 int i, rc = 0;
1736 u32 t;
1737
1738	/* Follow the procedure defined in the PCI "main command and status
1739	 * register" table.
1740 */
1741 t = readl(reg);
1742 writel(t | STOP_PCI_MASTER, reg);
1743
1744 for (i = 0; i < 1000; i++) {
1745 udelay(1);
1746 t = readl(reg);
1747 if (PCI_MASTER_EMPTY & t) {
1748 break;
1749 }
1750 }
1751 if (!(PCI_MASTER_EMPTY & t)) {
1752 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1753 rc = 1;
1754 goto done;
1755 }
1756
1757 /* set reset */
1758 i = 5;
1759 do {
1760 writel(t | GLOB_SFT_RST, reg);
1761 t = readl(reg);
1762 udelay(1);
1763 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
1764
1765 if (!(GLOB_SFT_RST & t)) {
1766 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1767 rc = 1;
1768 goto done;
1769 }
1770
1771 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
1772 i = 5;
1773 do {
1774 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1775 t = readl(reg);
1776 udelay(1);
1777 } while ((GLOB_SFT_RST & t) && (i-- > 0));
1778
1779 if (GLOB_SFT_RST & t) {
1780 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1781 rc = 1;
1782 }
1783done:
1784 return rc;
1785}
1786
1787static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
1788 void __iomem *mmio)
1789{
1790 void __iomem *port_mmio;
1791 u32 tmp;
1792
1793 tmp = readl(mmio + MV_RESET_CFG);
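	/* With bit 0 of MV_RESET_CFG clear, use fixed default pre-amp and
	 * amplitude values rather than reading them back from PHY_MODE2.
	 */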
1794 if ((tmp & (1 << 0)) == 0) {
1795 hpriv->signal[idx].amps = 0x7 << 8;
1796 hpriv->signal[idx].pre = 0x1 << 5;
1797 return;
1798 }
1799
1800 port_mmio = mv_port_base(mmio, idx);
1801 tmp = readl(port_mmio + PHY_MODE2);
1802
1803 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
1804 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
1805}
1806
1807static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1808{
1809 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
1810}
1811
1812static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1813 unsigned int port)
1814{
1815 void __iomem *port_mmio = mv_port_base(mmio, port);
1816
1817 u32 hp_flags = hpriv->hp_flags;
1818 int fix_phy_mode2 =
1819 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1820 int fix_phy_mode4 =
1821 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1822 u32 m2, tmp;
1823
1824 if (fix_phy_mode2) {
1825 m2 = readl(port_mmio + PHY_MODE2);
1826 m2 &= ~(1 << 16);
1827 m2 |= (1 << 31);
1828 writel(m2, port_mmio + PHY_MODE2);
1829
1830 udelay(200);
1831
1832 m2 = readl(port_mmio + PHY_MODE2);
1833 m2 &= ~((1 << 16) | (1 << 31));
1834 writel(m2, port_mmio + PHY_MODE2);
1835
1836 udelay(200);
1837 }
1838
1839 /* who knows what this magic does */
1840 tmp = readl(port_mmio + PHY_MODE3);
1841 tmp &= ~0x7F800000;
1842 tmp |= 0x2A800000;
1843 writel(tmp, port_mmio + PHY_MODE3);
1844
1845 if (fix_phy_mode4) {
1846 u32 m4;
1847
1848 m4 = readl(port_mmio + PHY_MODE4);
1849
1850 if (hp_flags & MV_HP_ERRATA_60X1B2)
1851 tmp = readl(port_mmio + 0x310);
1852
1853 m4 = (m4 & ~(1 << 1)) | (1 << 0);
1854
1855 writel(m4, port_mmio + PHY_MODE4);
1856
1857 if (hp_flags & MV_HP_ERRATA_60X1B2)
1858 writel(tmp, port_mmio + 0x310);
1859 }
1860
1861 /* Revert values of pre-emphasis and signal amps to the saved ones */
1862 m2 = readl(port_mmio + PHY_MODE2);
1863
1864 m2 &= ~MV_M2_PREAMP_MASK;
1865 m2 |= hpriv->signal[port].amps;
1866 m2 |= hpriv->signal[port].pre;
1867 m2 &= ~(1 << 16);
1868
1869 /* according to mvSata 3.6.1, some IIE values are fixed */
1870 if (IS_GEN_IIE(hpriv)) {
1871 m2 &= ~0xC30FF01F;
1872 m2 |= 0x0000900F;
1873 }
1874
1875 writel(m2, port_mmio + PHY_MODE2);
1876}
1877
1878static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1879 unsigned int port_no)
1880{
1881 void __iomem *port_mmio = mv_port_base(mmio, port_no);
1882
1883 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
1884
1885 if (IS_60XX(hpriv)) {
1886 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1887 ifctl |= (1 << 7); /* enable gen2i speed */
1888 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1889 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1890 }
1891
1892 udelay(25); /* allow reset propagation */
1893
1894 /* Spec never mentions clearing the bit. Marvell's driver does
1895 * clear the bit, however.
1896 */
1897 writelfl(0, port_mmio + EDMA_CMD_OFS);
1898
1899 hpriv->ops->phy_errata(hpriv, mmio, port_no);
1900
1901 if (IS_50XX(hpriv))
1902 mdelay(1);
1903}
1904
1905static void mv_stop_and_reset(struct ata_port *ap)
1906{
1907 struct mv_host_priv *hpriv = ap->host_set->private_data;
1908 void __iomem *mmio = ap->host_set->mmio_base;
1909
1910 mv_stop_dma(ap);
1911
1912 mv_channel_reset(hpriv, mmio, ap->port_no);
1913
1914 __mv_phy_reset(ap, 0);
1915}
1916
1917static inline void __msleep(unsigned int msec, int can_sleep)
1918{
1919 if (can_sleep)
1920 msleep(msec);
1921 else
1922 mdelay(msec);
1923}
1924
1925/**
1926 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
1927 * @ap: ATA channel to manipulate
1928 *
1929 * Part of this is taken from __sata_phy_reset and modified to
1930 * not sleep since this routine gets called from interrupt level.
1931 *
1932 * LOCKING:
1933 * Inherited from caller.  This is coded to be safe to call at
1934 * interrupt level, i.e. it does not sleep.
1935 */
1936static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1937{
1938 struct mv_port_priv *pp = ap->private_data;
1939 struct mv_host_priv *hpriv = ap->host_set->private_data;
1940 void __iomem *port_mmio = mv_ap_base(ap);
1941 struct ata_taskfile tf;
1942 struct ata_device *dev = &ap->device[0];
1943 unsigned long timeout;
1944 int retry = 5;
1945 u32 sstatus;
1946
1947 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
1948
1949 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1950 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1951 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1952
1953 /* Issue COMRESET via SControl */
1954comreset_retry:
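	/* SControl 0x301: IPM = 3 (disable partial/slumber transitions),
	 * DET = 1 (perform interface initialization, i.e. COMRESET);
	 * writing DET back to 0 below releases the reset.
	 */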
1955 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1956 __msleep(1, can_sleep);
1957
1958 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1959 __msleep(20, can_sleep);
1960
1961 timeout = jiffies + msecs_to_jiffies(200);
1962 do {
1963 sata_scr_read(ap, SCR_STATUS, &sstatus);
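		/* DET (low bits of SStatus): 3 == device present and phy
		 * communication established, 0 == no device detected.
		 */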
1964 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
1965 break;
1966
1967 __msleep(1, can_sleep);
1968 } while (time_before(jiffies, timeout));
1969
1970 /* work around errata */
1971 if (IS_60XX(hpriv) &&
1972 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
1973 (retry-- > 0))
1974 goto comreset_retry;
1975
1976 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
1977 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1978 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1979
1980 if (ata_port_online(ap)) {
1981 ata_port_probe(ap);
1982 } else {
1983 sata_scr_read(ap, SCR_STATUS, &sstatus);
1984 ata_port_printk(ap, KERN_INFO,
1985 "no device found (phy stat %08x)\n", sstatus);
1986 ata_port_disable(ap);
1987 return;
1988 }
1989 ap->cbl = ATA_CBL_SATA;
1990
1991	/* Even after SStatus reflects that the device is ready, it
1992	 * seems to take a while for the link to become fully established
1993	 * (and thus for Status to stop reading 0x80/0x7F), so we poll
1994	 * a bit for that here.
1995	 */
1996 retry = 20;
1997 while (1) {
1998 u8 drv_stat = ata_check_status(ap);
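		/* 0x80 == only BSY set; 0x7f == everything but BSY set.
		 * Per the comment above, both mean the link is not fully
		 * up yet.
		 */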
1999 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2000 break;
2001 __msleep(500, can_sleep);
2002 if (retry-- <= 0)
2003 break;
2004 }
2005
2006 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
2007 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
2008 tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
2009 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
2010
2011 dev->class = ata_dev_classify(&tf);
2012 if (!ata_dev_enabled(dev)) {
2013 VPRINTK("Port disabled post-sig: No device present.\n");
2014 ata_port_disable(ap);
2015 }
2016
2017 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2018
2019 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2020
2021 VPRINTK("EXIT\n");
2022}
2023
2024static void mv_phy_reset(struct ata_port *ap)
2025{
2026 __mv_phy_reset(ap, 1);
2027}
2028
2029/**
2030 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2031 * @ap: ATA channel to manipulate
2032 *
2033 * Intent is to clear all pending error conditions, reset the
2034 * chip/bus, fail the command, and move on.
2035 *
2036 * LOCKING:
2037 * This routine holds the host_set lock while failing the command.
2038 */
2039static void mv_eng_timeout(struct ata_port *ap)
2040{
2041 struct ata_queued_cmd *qc;
2042 unsigned long flags;
2043
2044 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2045 DPRINTK("All regs @ start of eng_timeout\n");
2046 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
2047 to_pci_dev(ap->host_set->dev));
2048
2049 qc = ata_qc_from_tag(ap, ap->active_tag);
2050 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2051 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
2052 &qc->scsicmd->cmnd);
2053
2054 spin_lock_irqsave(&ap->host_set->lock, flags);
2055 mv_err_intr(ap, 0);
2056 mv_stop_and_reset(ap);
2057 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2058
2059 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2060 if (qc->flags & ATA_QCFLAG_ACTIVE) {
2061 qc->err_mask |= AC_ERR_TIMEOUT;
2062 ata_eh_qc_complete(qc);
2063 }
2064}
2065
2066/**
2067 * mv_port_init - Perform some early initialization on a single port.
2068 * @port: libata data structure storing shadow register addresses
2069 * @port_mmio: base address of the port
2070 *
2071 * Initialize shadow register mmio addresses, clear outstanding
2072 * interrupts on the port, and unmask interrupts for the future
2073 * start of the port.
2074 *
2075 * LOCKING:
2076 * Inherited from caller.
2077 */
2078static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2079{
2080 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
2081 unsigned serr_ofs;
2082
2083 /* PIO related setup
2084 */
2085 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2086 port->error_addr =
2087 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2088 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2089 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2090 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2091 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2092 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2093 port->status_addr =
2094 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2095 /* special case: control/altstatus doesn't have ATA_REG_ address */
2096 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2097
2098 /* unused: */
2099 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
2100
2101 /* Clear any currently outstanding port interrupt conditions */
2102 serr_ofs = mv_scr_offset(SCR_ERROR);
2103 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2104 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2105
2106 /* unmask all EDMA error interrupts */
2107 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2108
2109 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2110 readl(port_mmio + EDMA_CFG_OFS),
2111 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2112 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2113}
2114
2115static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
2116 unsigned int board_idx)
2117{
2118 u8 rev_id;
2119 u32 hp_flags = hpriv->hp_flags;
2120
2121 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2122
2123 switch(board_idx) {
2124 case chip_5080:
2125 hpriv->ops = &mv5xxx_ops;
2126 hp_flags |= MV_HP_50XX;
2127
2128 switch (rev_id) {
2129 case 0x1:
2130 hp_flags |= MV_HP_ERRATA_50XXB0;
2131 break;
2132 case 0x3:
2133 hp_flags |= MV_HP_ERRATA_50XXB2;
2134 break;
2135 default:
2136 dev_printk(KERN_WARNING, &pdev->dev,
2137 "Applying 50XXB2 workarounds to unknown rev\n");
2138 hp_flags |= MV_HP_ERRATA_50XXB2;
2139 break;
2140 }
2141 break;
2142
2143 case chip_504x:
2144 case chip_508x:
2145 hpriv->ops = &mv5xxx_ops;
2146 hp_flags |= MV_HP_50XX;
2147
2148 switch (rev_id) {
2149 case 0x0:
2150 hp_flags |= MV_HP_ERRATA_50XXB0;
2151 break;
2152 case 0x3:
2153 hp_flags |= MV_HP_ERRATA_50XXB2;
2154 break;
2155 default:
2156 dev_printk(KERN_WARNING, &pdev->dev,
2157			   "Applying 50XXB2 workarounds to unknown rev\n");
2158 hp_flags |= MV_HP_ERRATA_50XXB2;
2159 break;
2160 }
2161 break;
2162
2163 case chip_604x:
2164 case chip_608x:
2165 hpriv->ops = &mv6xxx_ops;
2166
2167 switch (rev_id) {
2168 case 0x7:
2169 hp_flags |= MV_HP_ERRATA_60X1B2;
2170 break;
2171 case 0x9:
2172 hp_flags |= MV_HP_ERRATA_60X1C0;
2173 break;
2174 default:
2175 dev_printk(KERN_WARNING, &pdev->dev,
2176			   "Applying 60X1B2 workarounds to unknown rev\n");
2177 hp_flags |= MV_HP_ERRATA_60X1B2;
2178 break;
2179 }
2180 break;
2181
2182 case chip_7042:
2183 case chip_6042:
2184 hpriv->ops = &mv6xxx_ops;
2185
2186 hp_flags |= MV_HP_GEN_IIE;
2187
2188 switch (rev_id) {
2189 case 0x0:
2190 hp_flags |= MV_HP_ERRATA_XX42A0;
2191 break;
2192 case 0x1:
2193 hp_flags |= MV_HP_ERRATA_60X1C0;
2194 break;
2195 default:
2196 dev_printk(KERN_WARNING, &pdev->dev,
2197 "Applying 60X1C0 workarounds to unknown rev\n");
2198 hp_flags |= MV_HP_ERRATA_60X1C0;
2199 break;
2200 }
2201 break;
2202
2203 default:
2204 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2205 return 1;
2206 }
2207
2208 hpriv->hp_flags = hp_flags;
2209
2210 return 0;
2211}
2212
2213/**
2214 * mv_init_host - Perform some early initialization of the host.
2215 * @pdev: host PCI device
2216 * @probe_ent: early data struct representing the host
2217 *
2218 * If possible, do an early global reset of the host. Then do
2219 * our port init and clear/unmask all/relevant host interrupts.
2220 *
2221 * LOCKING:
2222 * Inherited from caller.
2223 */
2224static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2225 unsigned int board_idx)
2226{
2227 int rc = 0, n_hc, port, hc;
2228 void __iomem *mmio = probe_ent->mmio_base;
2229 struct mv_host_priv *hpriv = probe_ent->private_data;
2230
2231 /* global interrupt mask */
2232 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2233
2234 rc = mv_chip_id(pdev, hpriv, board_idx);
2235 if (rc)
2236 goto done;
2237
2238 n_hc = mv_get_hc_count(probe_ent->host_flags);
2239 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
2240
2241 for (port = 0; port < probe_ent->n_ports; port++)
2242 hpriv->ops->read_preamp(hpriv, port, mmio);
2243
2244 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2245 if (rc)
2246 goto done;
2247
2248 hpriv->ops->reset_flash(hpriv, mmio);
2249 hpriv->ops->reset_bus(pdev, mmio);
2250 hpriv->ops->enable_leds(hpriv, mmio);
2251
2252 for (port = 0; port < probe_ent->n_ports; port++) {
2253 if (IS_60XX(hpriv)) {
2254 void __iomem *port_mmio = mv_port_base(mmio, port);
2255
2256 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2257 ifctl |= (1 << 7); /* enable gen2i speed */
2258 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2259 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2260 }
2261
2262 hpriv->ops->phy_errata(hpriv, mmio, port);
2263 }
2264
2265 for (port = 0; port < probe_ent->n_ports; port++) {
2266 void __iomem *port_mmio = mv_port_base(mmio, port);
2267 mv_port_init(&probe_ent->port[port], port_mmio);
2268 }
2269
2270 for (hc = 0; hc < n_hc; hc++) {
2271 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2272
2273 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2274 "(before clear)=0x%08x\n", hc,
2275 readl(hc_mmio + HC_CFG_OFS),
2276 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2277
2278 /* Clear any currently outstanding hc interrupt conditions */
2279 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2280 }
2281
2282 /* Clear any currently outstanding host interrupt conditions */
2283 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2284
2285 /* and unmask interrupt generation for host regs */
2286 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2287 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2288
2289 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2290 "PCI int cause/mask=0x%08x/0x%08x\n",
2291 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2292 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2293 readl(mmio + PCI_IRQ_CAUSE_OFS),
2294 readl(mmio + PCI_IRQ_MASK_OFS));
2295
2296done:
2297 return rc;
2298}
2299
2300/**
2301 * mv_print_info - Dump key info to kernel log for perusal.
2302 * @probe_ent: early data struct representing the host
2303 *
2304 * FIXME: complete this.
2305 *
2306 * LOCKING:
2307 * Inherited from caller.
2308 */
2309static void mv_print_info(struct ata_probe_ent *probe_ent)
2310{
2311 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
2312 struct mv_host_priv *hpriv = probe_ent->private_data;
2313 u8 rev_id, scc;
2314 const char *scc_s;
2315
2316 /* Use this to determine the HW stepping of the chip so we know
2317	 * what errata to work around.
2318 */
2319 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2320
2321 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2322 if (scc == 0)
2323 scc_s = "SCSI";
2324 else if (scc == 0x01)
2325 scc_s = "RAID";
2326 else
2327 scc_s = "unknown";
2328
2329 dev_printk(KERN_INFO, &pdev->dev,
2330 "%u slots %u ports %s mode IRQ via %s\n",
2331 (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
2332 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2333}
2334
2335/**
2336 * mv_init_one - handle a positive probe of a Marvell host
2337 * @pdev: PCI device found
2338 * @ent: PCI device ID entry for the matched host
2339 *
2340 * LOCKING:
2341 * Inherited from caller.
2342 */
2343static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2344{
2345 static int printed_version = 0;
2346 struct ata_probe_ent *probe_ent = NULL;
2347 struct mv_host_priv *hpriv;
2348 unsigned int board_idx = (unsigned int)ent->driver_data;
2349 void __iomem *mmio_base;
2350 int pci_dev_busy = 0, rc;
2351
2352 if (!printed_version++)
2353 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2354
2355 rc = pci_enable_device(pdev);
2356 if (rc) {
2357 return rc;
2358 }
2359 pci_set_master(pdev);
2360
2361 rc = pci_request_regions(pdev, DRV_NAME);
2362 if (rc) {
2363 pci_dev_busy = 1;
2364 goto err_out;
2365 }
2366
2367 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
2368 if (probe_ent == NULL) {
2369 rc = -ENOMEM;
2370 goto err_out_regions;
2371 }
2372
2373 memset(probe_ent, 0, sizeof(*probe_ent));
2374 probe_ent->dev = pci_dev_to_dev(pdev);
2375 INIT_LIST_HEAD(&probe_ent->node);
2376
2377 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
2378 if (mmio_base == NULL) {
2379 rc = -ENOMEM;
2380 goto err_out_free_ent;
2381 }
2382
2383 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
2384 if (!hpriv) {
2385 rc = -ENOMEM;
2386 goto err_out_iounmap;
2387 }
2388 memset(hpriv, 0, sizeof(*hpriv));
2389
2390 probe_ent->sht = mv_port_info[board_idx].sht;
2391 probe_ent->host_flags = mv_port_info[board_idx].host_flags;
2392 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
2393 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
2394 probe_ent->port_ops = mv_port_info[board_idx].port_ops;
2395
2396 probe_ent->irq = pdev->irq;
2397 probe_ent->irq_flags = IRQF_SHARED;
2398 probe_ent->mmio_base = mmio_base;
2399 probe_ent->private_data = hpriv;
2400
2401 /* initialize adapter */
2402 rc = mv_init_host(pdev, probe_ent, board_idx);
2403 if (rc) {
2404 goto err_out_hpriv;
2405 }
2406
2407 /* Enable interrupts */
2408 if (msi && pci_enable_msi(pdev) == 0) {
2409 hpriv->hp_flags |= MV_HP_FLAG_MSI;
2410 } else {
2411 pci_intx(pdev, 1);
2412 }
2413
2414 mv_dump_pci_cfg(pdev, 0x68);
2415 mv_print_info(probe_ent);
2416
2417 if (ata_device_add(probe_ent) == 0) {
2418 rc = -ENODEV; /* No devices discovered */
2419 goto err_out_dev_add;
2420 }
2421
2422 kfree(probe_ent);
2423 return 0;
2424
2425err_out_dev_add:
2426 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
2427 pci_disable_msi(pdev);
2428 } else {
2429 pci_intx(pdev, 0);
2430 }
2431err_out_hpriv:
2432 kfree(hpriv);
2433err_out_iounmap:
2434 pci_iounmap(pdev, mmio_base);
2435err_out_free_ent:
2436 kfree(probe_ent);
2437err_out_regions:
2438 pci_release_regions(pdev);
2439err_out:
2440 if (!pci_dev_busy) {
2441 pci_disable_device(pdev);
2442 }
2443
2444 return rc;
2445}
2446
2447static int __init mv_init(void)
2448{
2449 return pci_module_init(&mv_pci_driver);
2450}
2451
2452static void __exit mv_exit(void)
2453{
2454 pci_unregister_driver(&mv_pci_driver);
2455}
2456
2457MODULE_AUTHOR("Brett Russ");
2458MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2459MODULE_LICENSE("GPL");
2460MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2461MODULE_VERSION(DRV_VERSION);
2462
2463module_param(msi, int, 0444);
2464MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
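/* Usage note: e.g. "modprobe sata_mv msi=1" requests MSI; mv_init_one
 * falls back to legacy INTx if pci_enable_msi() fails.
 */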
2465
2466module_init(mv_init);
2467module_exit(mv_exit);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
deleted file mode 100644
index 56da25581f31..000000000000
--- a/drivers/scsi/sata_nv.c
+++ /dev/null
@@ -1,595 +0,0 @@
1/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion to other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/init.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/interrupt.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <linux/libata.h>
44
45#define DRV_NAME "sata_nv"
46#define DRV_VERSION "2.0"
47
48enum {
49 NV_PORTS = 2,
50 NV_PIO_MASK = 0x1f,
51 NV_MWDMA_MASK = 0x07,
52 NV_UDMA_MASK = 0x7f,
53 NV_PORT0_SCR_REG_OFFSET = 0x00,
54 NV_PORT1_SCR_REG_OFFSET = 0x40,
55
56 /* INT_STATUS/ENABLE */
57 NV_INT_STATUS = 0x10,
58 NV_INT_ENABLE = 0x11,
59 NV_INT_STATUS_CK804 = 0x440,
60 NV_INT_ENABLE_CK804 = 0x441,
61
62 /* INT_STATUS/ENABLE bits */
63 NV_INT_DEV = 0x01,
64 NV_INT_PM = 0x02,
65 NV_INT_ADDED = 0x04,
66 NV_INT_REMOVED = 0x08,
67
68 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
69
70 NV_INT_ALL = 0x0f,
71 NV_INT_MASK = NV_INT_DEV |
72 NV_INT_ADDED | NV_INT_REMOVED,
73
74 /* INT_CONFIG */
75 NV_INT_CONFIG = 0x12,
76	NV_INT_CONFIG_METHD	= 0x01, /* 0 = INT, 1 = SMI */
77
78	/* For PCI config register 20 */
79 NV_MCP_SATA_CFG_20 = 0x50,
80 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
81};
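
/* Per-port layout of the interrupt status/enable registers, given
 * NV_INT_PORT_SHIFT == 4:
 *
 *	bits 7:4  port 1  (DEV/PM/ADDED/REMOVED)
 *	bits 3:0  port 0  (DEV/PM/ADDED/REMOVED)
 */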
82
83static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
84static void nv_ck804_host_stop(struct ata_host_set *host_set);
85static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
86 struct pt_regs *regs);
87static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
88 struct pt_regs *regs);
89static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
90 struct pt_regs *regs);
91static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
92static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
93
94static void nv_nf2_freeze(struct ata_port *ap);
95static void nv_nf2_thaw(struct ata_port *ap);
96static void nv_ck804_freeze(struct ata_port *ap);
97static void nv_ck804_thaw(struct ata_port *ap);
98static void nv_error_handler(struct ata_port *ap);
99
100enum nv_host_type
101{
102 GENERIC,
103 NFORCE2,
104 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
105 CK804
106};
107
108static const struct pci_device_id nv_pci_tbl[] = {
109 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
111 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
113 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
115 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
117 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
119 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
121 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
123 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
125 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
127 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
129 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
131 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
133 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
135 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
137 { PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
138 { PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
139 { PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
140 { PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
141 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
142 PCI_ANY_ID, PCI_ANY_ID,
143 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
144 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
145 PCI_ANY_ID, PCI_ANY_ID,
146 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
147 { 0, } /* terminate list */
148};
149
150static struct pci_driver nv_pci_driver = {
151 .name = DRV_NAME,
152 .id_table = nv_pci_tbl,
153 .probe = nv_init_one,
154 .remove = ata_pci_remove_one,
155};
156
157static struct scsi_host_template nv_sht = {
158 .module = THIS_MODULE,
159 .name = DRV_NAME,
160 .ioctl = ata_scsi_ioctl,
161 .queuecommand = ata_scsi_queuecmd,
162 .can_queue = ATA_DEF_QUEUE,
163 .this_id = ATA_SHT_THIS_ID,
164 .sg_tablesize = LIBATA_MAX_PRD,
165 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
166 .emulated = ATA_SHT_EMULATED,
167 .use_clustering = ATA_SHT_USE_CLUSTERING,
168 .proc_name = DRV_NAME,
169 .dma_boundary = ATA_DMA_BOUNDARY,
170 .slave_configure = ata_scsi_slave_config,
171 .slave_destroy = ata_scsi_slave_destroy,
172 .bios_param = ata_std_bios_param,
173};
174
175static const struct ata_port_operations nv_generic_ops = {
176 .port_disable = ata_port_disable,
177 .tf_load = ata_tf_load,
178 .tf_read = ata_tf_read,
179 .exec_command = ata_exec_command,
180 .check_status = ata_check_status,
181 .dev_select = ata_std_dev_select,
182 .bmdma_setup = ata_bmdma_setup,
183 .bmdma_start = ata_bmdma_start,
184 .bmdma_stop = ata_bmdma_stop,
185 .bmdma_status = ata_bmdma_status,
186 .qc_prep = ata_qc_prep,
187 .qc_issue = ata_qc_issue_prot,
188 .freeze = ata_bmdma_freeze,
189 .thaw = ata_bmdma_thaw,
190 .error_handler = nv_error_handler,
191 .post_internal_cmd = ata_bmdma_post_internal_cmd,
192 .data_xfer = ata_pio_data_xfer,
193 .irq_handler = nv_generic_interrupt,
194 .irq_clear = ata_bmdma_irq_clear,
195 .scr_read = nv_scr_read,
196 .scr_write = nv_scr_write,
197 .port_start = ata_port_start,
198 .port_stop = ata_port_stop,
199 .host_stop = ata_pci_host_stop,
200};
201
202static const struct ata_port_operations nv_nf2_ops = {
203 .port_disable = ata_port_disable,
204 .tf_load = ata_tf_load,
205 .tf_read = ata_tf_read,
206 .exec_command = ata_exec_command,
207 .check_status = ata_check_status,
208 .dev_select = ata_std_dev_select,
209 .bmdma_setup = ata_bmdma_setup,
210 .bmdma_start = ata_bmdma_start,
211 .bmdma_stop = ata_bmdma_stop,
212 .bmdma_status = ata_bmdma_status,
213 .qc_prep = ata_qc_prep,
214 .qc_issue = ata_qc_issue_prot,
215 .freeze = nv_nf2_freeze,
216 .thaw = nv_nf2_thaw,
217 .error_handler = nv_error_handler,
218 .post_internal_cmd = ata_bmdma_post_internal_cmd,
219 .data_xfer = ata_pio_data_xfer,
220 .irq_handler = nv_nf2_interrupt,
221 .irq_clear = ata_bmdma_irq_clear,
222 .scr_read = nv_scr_read,
223 .scr_write = nv_scr_write,
224 .port_start = ata_port_start,
225 .port_stop = ata_port_stop,
226 .host_stop = ata_pci_host_stop,
227};
228
229static const struct ata_port_operations nv_ck804_ops = {
230 .port_disable = ata_port_disable,
231 .tf_load = ata_tf_load,
232 .tf_read = ata_tf_read,
233 .exec_command = ata_exec_command,
234 .check_status = ata_check_status,
235 .dev_select = ata_std_dev_select,
236 .bmdma_setup = ata_bmdma_setup,
237 .bmdma_start = ata_bmdma_start,
238 .bmdma_stop = ata_bmdma_stop,
239 .bmdma_status = ata_bmdma_status,
240 .qc_prep = ata_qc_prep,
241 .qc_issue = ata_qc_issue_prot,
242 .freeze = nv_ck804_freeze,
243 .thaw = nv_ck804_thaw,
244 .error_handler = nv_error_handler,
245 .post_internal_cmd = ata_bmdma_post_internal_cmd,
246 .data_xfer = ata_pio_data_xfer,
247 .irq_handler = nv_ck804_interrupt,
248 .irq_clear = ata_bmdma_irq_clear,
249 .scr_read = nv_scr_read,
250 .scr_write = nv_scr_write,
251 .port_start = ata_port_start,
252 .port_stop = ata_port_stop,
253 .host_stop = nv_ck804_host_stop,
254};
255
256static struct ata_port_info nv_port_info[] = {
257 /* generic */
258 {
259 .sht = &nv_sht,
260 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
261 .pio_mask = NV_PIO_MASK,
262 .mwdma_mask = NV_MWDMA_MASK,
263 .udma_mask = NV_UDMA_MASK,
264 .port_ops = &nv_generic_ops,
265 },
266 /* nforce2/3 */
267 {
268 .sht = &nv_sht,
269 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
270 .pio_mask = NV_PIO_MASK,
271 .mwdma_mask = NV_MWDMA_MASK,
272 .udma_mask = NV_UDMA_MASK,
273 .port_ops = &nv_nf2_ops,
274 },
275 /* ck804 */
276 {
277 .sht = &nv_sht,
278 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
279 .pio_mask = NV_PIO_MASK,
280 .mwdma_mask = NV_MWDMA_MASK,
281 .udma_mask = NV_UDMA_MASK,
282 .port_ops = &nv_ck804_ops,
283 },
284};
285
286MODULE_AUTHOR("NVIDIA");
287MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
288MODULE_LICENSE("GPL");
289MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
290MODULE_VERSION(DRV_VERSION);
291
292static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
293 struct pt_regs *regs)
294{
295 struct ata_host_set *host_set = dev_instance;
296 unsigned int i;
297 unsigned int handled = 0;
298 unsigned long flags;
299
300 spin_lock_irqsave(&host_set->lock, flags);
301
302 for (i = 0; i < host_set->n_ports; i++) {
303 struct ata_port *ap;
304
305 ap = host_set->ports[i];
306 if (ap &&
307 !(ap->flags & ATA_FLAG_DISABLED)) {
308 struct ata_queued_cmd *qc;
309
310 qc = ata_qc_from_tag(ap, ap->active_tag);
311 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
312 handled += ata_host_intr(ap, qc);
313 else
314				/* No request pending?  Clear interrupt status
315				 * anyway, in case there's one pending. */
316 ap->ops->check_status(ap);
317 }
318
319 }
320
321 spin_unlock_irqrestore(&host_set->lock, flags);
322
323 return IRQ_RETVAL(handled);
324}
325
326static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
327{
328 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
329 int handled;
330
331 /* freeze if hotplugged */
332 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
333 ata_port_freeze(ap);
334 return 1;
335 }
336
337 /* bail out if not our interrupt */
338 if (!(irq_stat & NV_INT_DEV))
339 return 0;
340
341 /* DEV interrupt w/ no active qc? */
342 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
343 ata_check_status(ap);
344 return 1;
345 }
346
347 /* handle interrupt */
348 handled = ata_host_intr(ap, qc);
349 if (unlikely(!handled)) {
350 /* spurious, clear it */
351 ata_check_status(ap);
352 }
353
354 return 1;
355}
356
357static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
358{
359 int i, handled = 0;
360
361 for (i = 0; i < host_set->n_ports; i++) {
362 struct ata_port *ap = host_set->ports[i];
363
364 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
365 handled += nv_host_intr(ap, irq_stat);
366
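		/* advance to the next port's 4-bit status nibble */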
367 irq_stat >>= NV_INT_PORT_SHIFT;
368 }
369
370 return IRQ_RETVAL(handled);
371}
372
373static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
374 struct pt_regs *regs)
375{
376 struct ata_host_set *host_set = dev_instance;
377 u8 irq_stat;
378 irqreturn_t ret;
379
380 spin_lock(&host_set->lock);
381 irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
382 ret = nv_do_interrupt(host_set, irq_stat);
383 spin_unlock(&host_set->lock);
384
385 return ret;
386}
387
388static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
389 struct pt_regs *regs)
390{
391 struct ata_host_set *host_set = dev_instance;
392 u8 irq_stat;
393 irqreturn_t ret;
394
395 spin_lock(&host_set->lock);
396 irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
397 ret = nv_do_interrupt(host_set, irq_stat);
398 spin_unlock(&host_set->lock);
399
400 return ret;
401}
402
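/* SCR registers live at scr_addr as consecutive 32-bit words in SCR_*
 * enum order (STATUS, ERROR, CONTROL, ...); registers past SCR_CONTROL
 * are rejected below.
 */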
403static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
404{
405 if (sc_reg > SCR_CONTROL)
406 return 0xffffffffU;
407
408 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
409}
410
411static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
412{
413 if (sc_reg > SCR_CONTROL)
414 return;
415
416 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
417}
418
419static void nv_nf2_freeze(struct ata_port *ap)
420{
421 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
422 int shift = ap->port_no * NV_INT_PORT_SHIFT;
423 u8 mask;
424
425 mask = inb(scr_addr + NV_INT_ENABLE);
426 mask &= ~(NV_INT_ALL << shift);
427 outb(mask, scr_addr + NV_INT_ENABLE);
428}
429
430static void nv_nf2_thaw(struct ata_port *ap)
431{
432 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
433 int shift = ap->port_no * NV_INT_PORT_SHIFT;
434 u8 mask;
435
436 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
437
438 mask = inb(scr_addr + NV_INT_ENABLE);
439 mask |= (NV_INT_MASK << shift);
440 outb(mask, scr_addr + NV_INT_ENABLE);
441}
442
443static void nv_ck804_freeze(struct ata_port *ap)
444{
445 void __iomem *mmio_base = ap->host_set->mmio_base;
446 int shift = ap->port_no * NV_INT_PORT_SHIFT;
447 u8 mask;
448
449 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
450 mask &= ~(NV_INT_ALL << shift);
451 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
452}
453
454static void nv_ck804_thaw(struct ata_port *ap)
455{
456 void __iomem *mmio_base = ap->host_set->mmio_base;
457 int shift = ap->port_no * NV_INT_PORT_SHIFT;
458 u8 mask;
459
460 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
461
462 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
463 mask |= (NV_INT_MASK << shift);
464 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
465}
466
467static int nv_hardreset(struct ata_port *ap, unsigned int *class)
468{
469 unsigned int dummy;
470
471 /* SATA hardreset fails to retrieve proper device signature on
472 * some controllers. Don't classify on hardreset. For more
473 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
474 */
475 return sata_std_hardreset(ap, &dummy);
476}
477
478static void nv_error_handler(struct ata_port *ap)
479{
480 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
481 nv_hardreset, ata_std_postreset);
482}
483
484static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
485{
486 static int printed_version = 0;
487 struct ata_port_info *ppi;
488 struct ata_probe_ent *probe_ent;
489 int pci_dev_busy = 0;
490 int rc;
491 u32 bar;
492 unsigned long base;
493
494	/* Make sure this is a SATA controller by counting the number of bars
495	 * (NVIDIA SATA controllers will always have six bars).  Otherwise,
496	 * it's an IDE controller and we ignore it. */
497 for (bar=0; bar<6; bar++)
498 if (pci_resource_start(pdev, bar) == 0)
499 return -ENODEV;
500
501 if (!printed_version++)
502 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
503
504 rc = pci_enable_device(pdev);
505 if (rc)
506 goto err_out;
507
508 rc = pci_request_regions(pdev, DRV_NAME);
509 if (rc) {
510 pci_dev_busy = 1;
511 goto err_out_disable;
512 }
513
514 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
515 if (rc)
516 goto err_out_regions;
517 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
518 if (rc)
519 goto err_out_regions;
520
521 rc = -ENOMEM;
522
523 ppi = &nv_port_info[ent->driver_data];
524 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
525 if (!probe_ent)
526 goto err_out_regions;
527
528 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
529 if (!probe_ent->mmio_base) {
530 rc = -EIO;
531 goto err_out_free_ent;
532 }
533
534 base = (unsigned long)probe_ent->mmio_base;
535
536 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
537 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
538
539 /* enable SATA space for CK804 */
540 if (ent->driver_data == CK804) {
541 u8 regval;
542
543 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
544 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
545 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
546 }
547
548 pci_set_master(pdev);
549
550 rc = ata_device_add(probe_ent);
551 if (rc != NV_PORTS)
552 goto err_out_iounmap;
553
554 kfree(probe_ent);
555
556 return 0;
557
558err_out_iounmap:
559 pci_iounmap(pdev, probe_ent->mmio_base);
560err_out_free_ent:
561 kfree(probe_ent);
562err_out_regions:
563 pci_release_regions(pdev);
564err_out_disable:
565 if (!pci_dev_busy)
566 pci_disable_device(pdev);
567err_out:
568 return rc;
569}
570
571static void nv_ck804_host_stop(struct ata_host_set *host_set)
572{
573 struct pci_dev *pdev = to_pci_dev(host_set->dev);
574 u8 regval;
575
576 /* disable SATA space for CK804 */
577 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
578 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
579 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
580
581 ata_pci_host_stop(host_set);
582}
583
584static int __init nv_init(void)
585{
586 return pci_module_init(&nv_pci_driver);
587}
588
589static void __exit nv_exit(void)
590{
591 pci_unregister_driver(&nv_pci_driver);
592}
593
594module_init(nv_init);
595module_exit(nv_exit);
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
deleted file mode 100644
index 4776f4e55839..000000000000
--- a/drivers/scsi/sata_promise.c
+++ /dev/null
@@ -1,844 +0,0 @@
1/*
2 * sata_promise.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware information only available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.04"
50
51
52enum {
53 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
54 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
55 PDC_TBG_MODE = 0x41, /* TBG mode */
56 PDC_FLASH_CTL = 0x44, /* Flash control register */
57 PDC_PCI_CTL = 0x48, /* PCI control and status register */
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
63
64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
65 (1<<8) | (1<<9) | (1<<10),
66
67 board_2037x = 0, /* FastTrak S150 TX2plus */
68 board_20319 = 1, /* FastTrak S150 TX4 */
69 board_20619 = 2, /* FastTrak TX4000 */
70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
73
74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
75
76 PDC_RESET = (1 << 11), /* HDMA reset */
77
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING,
81};
82
83
84struct pdc_port_priv {
85 u8 *pkt;
86 dma_addr_t pkt_dma;
87};
88
89struct pdc_host_priv {
90 int hotplug_offset;
91};
92
93static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
94static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
95static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
96static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
97static void pdc_eng_timeout(struct ata_port *ap);
98static int pdc_port_start(struct ata_port *ap);
99static void pdc_port_stop(struct ata_port *ap);
100static void pdc_pata_phy_reset(struct ata_port *ap);
101static void pdc_sata_phy_reset(struct ata_port *ap);
102static void pdc_qc_prep(struct ata_queued_cmd *qc);
103static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
104static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
105static void pdc_irq_clear(struct ata_port *ap);
106static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
107static void pdc_host_stop(struct ata_host_set *host_set);
108
109
110static struct scsi_host_template pdc_ata_sht = {
111 .module = THIS_MODULE,
112 .name = DRV_NAME,
113 .ioctl = ata_scsi_ioctl,
114 .queuecommand = ata_scsi_queuecmd,
115 .can_queue = ATA_DEF_QUEUE,
116 .this_id = ATA_SHT_THIS_ID,
117 .sg_tablesize = LIBATA_MAX_PRD,
118 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
119 .emulated = ATA_SHT_EMULATED,
120 .use_clustering = ATA_SHT_USE_CLUSTERING,
121 .proc_name = DRV_NAME,
122 .dma_boundary = ATA_DMA_BOUNDARY,
123 .slave_configure = ata_scsi_slave_config,
124 .slave_destroy = ata_scsi_slave_destroy,
125 .bios_param = ata_std_bios_param,
126};
127
128static const struct ata_port_operations pdc_sata_ops = {
129 .port_disable = ata_port_disable,
130 .tf_load = pdc_tf_load_mmio,
131 .tf_read = ata_tf_read,
132 .check_status = ata_check_status,
133 .exec_command = pdc_exec_command_mmio,
134 .dev_select = ata_std_dev_select,
135
136 .phy_reset = pdc_sata_phy_reset,
137
138 .qc_prep = pdc_qc_prep,
139 .qc_issue = pdc_qc_issue_prot,
140 .eng_timeout = pdc_eng_timeout,
141 .data_xfer = ata_mmio_data_xfer,
142 .irq_handler = pdc_interrupt,
143 .irq_clear = pdc_irq_clear,
144
145 .scr_read = pdc_sata_scr_read,
146 .scr_write = pdc_sata_scr_write,
147 .port_start = pdc_port_start,
148 .port_stop = pdc_port_stop,
149 .host_stop = pdc_host_stop,
150};
151
152static const struct ata_port_operations pdc_pata_ops = {
153 .port_disable = ata_port_disable,
154 .tf_load = pdc_tf_load_mmio,
155 .tf_read = ata_tf_read,
156 .check_status = ata_check_status,
157 .exec_command = pdc_exec_command_mmio,
158 .dev_select = ata_std_dev_select,
159
160 .phy_reset = pdc_pata_phy_reset,
161
162 .qc_prep = pdc_qc_prep,
163 .qc_issue = pdc_qc_issue_prot,
164 .data_xfer = ata_mmio_data_xfer,
165 .eng_timeout = pdc_eng_timeout,
166 .irq_handler = pdc_interrupt,
167 .irq_clear = pdc_irq_clear,
168
169 .port_start = pdc_port_start,
170 .port_stop = pdc_port_stop,
171 .host_stop = pdc_host_stop,
172};
173
174static const struct ata_port_info pdc_port_info[] = {
175 /* board_2037x */
176 {
177 .sht = &pdc_ata_sht,
178 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
179 .pio_mask = 0x1f, /* pio0-4 */
180 .mwdma_mask = 0x07, /* mwdma0-2 */
181 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
182 .port_ops = &pdc_sata_ops,
183 },
184
185 /* board_20319 */
186 {
187 .sht = &pdc_ata_sht,
188 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
189 .pio_mask = 0x1f, /* pio0-4 */
190 .mwdma_mask = 0x07, /* mwdma0-2 */
191 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
192 .port_ops = &pdc_sata_ops,
193 },
194
195 /* board_20619 */
196 {
197 .sht = &pdc_ata_sht,
198 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
199 .pio_mask = 0x1f, /* pio0-4 */
200 .mwdma_mask = 0x07, /* mwdma0-2 */
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_pata_ops,
203 },
204
205 /* board_20771 */
206 {
207 .sht = &pdc_ata_sht,
208 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
209 .pio_mask = 0x1f, /* pio0-4 */
210 .mwdma_mask = 0x07, /* mwdma0-2 */
211 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
212 .port_ops = &pdc_sata_ops,
213 },
214
215 /* board_2057x */
216 {
217 .sht = &pdc_ata_sht,
218 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
219 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
222 .port_ops = &pdc_sata_ops,
223 },
224
225 /* board_40518 */
226 {
227 .sht = &pdc_ata_sht,
228 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
229 .pio_mask = 0x1f, /* pio0-4 */
230 .mwdma_mask = 0x07, /* mwdma0-2 */
231 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
232 .port_ops = &pdc_sata_ops,
233 },
234};
235
236static const struct pci_device_id pdc_ata_pci_tbl[] = {
237 { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_2037x },
239 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
240 board_2037x },
241 { PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
242 board_2037x },
243 { PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
244 board_2037x },
245 { PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
246 board_2037x },
247 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
248 board_2037x },
249 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
250 board_2057x },
251 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
252 board_2057x },
253 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
254 board_2037x },
255
256 { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
257 board_20319 },
258 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
259 board_20319 },
260 { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
261 board_20319 },
262 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
263 board_20319 },
264 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
265 board_20319 },
266 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
267 board_40518 },
268
269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
270 board_20619 },
271
272/* TODO: remove all associated board_20771 code, as it completely
273 * duplicates board_2037x code, unless reason for separation can be
274 * divined.
275 */
276#if 0
277 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
278 board_20771 },
279#endif
280
281 { } /* terminate list */
282};
283
284
285static struct pci_driver pdc_ata_pci_driver = {
286 .name = DRV_NAME,
287 .id_table = pdc_ata_pci_tbl,
288 .probe = pdc_ata_init_one,
289 .remove = ata_pci_remove_one,
290};
291
292
293static int pdc_port_start(struct ata_port *ap)
294{
295 struct device *dev = ap->host_set->dev;
296 struct pdc_port_priv *pp;
297 int rc;
298
299 rc = ata_port_start(ap);
300 if (rc)
301 return rc;
302
303 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
304 if (!pp) {
305 rc = -ENOMEM;
306 goto err_out;
307 }
308
309 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
310 if (!pp->pkt) {
311 rc = -ENOMEM;
312 goto err_out_kfree;
313 }
314
315 ap->private_data = pp;
316
317 return 0;
318
319err_out_kfree:
320 kfree(pp);
321err_out:
322 ata_port_stop(ap);
323 return rc;
324}
325
326
327static void pdc_port_stop(struct ata_port *ap)
328{
329 struct device *dev = ap->host_set->dev;
330 struct pdc_port_priv *pp = ap->private_data;
331
332 ap->private_data = NULL;
333 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
334 kfree(pp);
335 ata_port_stop(ap);
336}
337
338
339static void pdc_host_stop(struct ata_host_set *host_set)
340{
341 struct pdc_host_priv *hp = host_set->private_data;
342
343 ata_pci_host_stop(host_set);
344
345 kfree(hp);
346}
347
348
349static void pdc_reset_port(struct ata_port *ap)
350{
351 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
352 unsigned int i;
353 u32 tmp;
354
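	/* Assert the HDMA reset bit and poll (up to ~1.1ms) until it
	 * reads back as set, then clear it again below to release the
	 * reset.
	 */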
355 for (i = 11; i > 0; i--) {
356 tmp = readl(mmio);
357 if (tmp & PDC_RESET)
358 break;
359
360 udelay(100);
361
362 tmp |= PDC_RESET;
363 writel(tmp, mmio);
364 }
365
366 tmp &= ~PDC_RESET;
367 writel(tmp, mmio);
368 readl(mmio); /* flush */
369}
370
371static void pdc_sata_phy_reset(struct ata_port *ap)
372{
373 pdc_reset_port(ap);
374 sata_phy_reset(ap);
375}
376
377static void pdc_pata_cbl_detect(struct ata_port *ap)
378{
379 u8 tmp;
380	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
381
382 tmp = readb(mmio);
383
384 if (tmp & 0x01) {
385 ap->cbl = ATA_CBL_PATA40;
386 ap->udma_mask &= ATA_UDMA_MASK_40C;
387 } else
388 ap->cbl = ATA_CBL_PATA80;
389}
390
391static void pdc_pata_phy_reset(struct ata_port *ap)
392{
393 pdc_pata_cbl_detect(ap);
394 pdc_reset_port(ap);
395 ata_port_probe(ap);
396 ata_bus_reset(ap);
397}
398
399static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
400{
401 if (sc_reg > SCR_CONTROL)
402 return 0xffffffffU;
403 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
404}
405
406
407static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
408 u32 val)
409{
410 if (sc_reg > SCR_CONTROL)
411 return;
412 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
413}
414
415static void pdc_qc_prep(struct ata_queued_cmd *qc)
416{
417 struct pdc_port_priv *pp = qc->ap->private_data;
418 unsigned int i;
419
420 VPRINTK("ENTER\n");
421
422 switch (qc->tf.protocol) {
423 case ATA_PROT_DMA:
424 ata_qc_prep(qc);
425 /* fall through */
426
427 case ATA_PROT_NODATA:
428 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
429 qc->dev->devno, pp->pkt);
430
431 if (qc->tf.flags & ATA_TFLAG_LBA48)
432 i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
433 else
434 i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
435
436 pdc_pkt_footer(&qc->tf, pp->pkt, i);
437 break;
438
439 default:
440 break;
441 }
442}
443
444static void pdc_eng_timeout(struct ata_port *ap)
445{
446 struct ata_host_set *host_set = ap->host_set;
447 u8 drv_stat;
448 struct ata_queued_cmd *qc;
449 unsigned long flags;
450
451 DPRINTK("ENTER\n");
452
453 spin_lock_irqsave(&host_set->lock, flags);
454
455 qc = ata_qc_from_tag(ap, ap->active_tag);
456
457 switch (qc->tf.protocol) {
458 case ATA_PROT_DMA:
459 case ATA_PROT_NODATA:
460 ata_port_printk(ap, KERN_ERR, "command timeout\n");
461 drv_stat = ata_wait_idle(ap);
462 qc->err_mask |= __ac_err_mask(drv_stat);
463 break;
464
465 default:
466 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
467
468 ata_port_printk(ap, KERN_ERR,
469 "unknown timeout, cmd 0x%x stat 0x%x\n",
470 qc->tf.command, drv_stat);
471
472 qc->err_mask |= ac_err_mask(drv_stat);
473 break;
474 }
475
476 spin_unlock_irqrestore(&host_set->lock, flags);
477 ata_eh_qc_complete(qc);
478 DPRINTK("EXIT\n");
479}
480
481static inline unsigned int pdc_host_intr( struct ata_port *ap,
482 struct ata_queued_cmd *qc)
483{
484 unsigned int handled = 0;
485 u32 tmp;
486 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
487
488 tmp = readl(mmio);
489 if (tmp & PDC_ERR_MASK) {
490 qc->err_mask |= AC_ERR_DEV;
491 pdc_reset_port(ap);
492 }
493
494 switch (qc->tf.protocol) {
495 case ATA_PROT_DMA:
496 case ATA_PROT_NODATA:
497 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
498 ata_qc_complete(qc);
499 handled = 1;
500 break;
501
502 default:
503 ap->stats.idle_irq++;
504 break;
505 }
506
507 return handled;
508}
509
510static void pdc_irq_clear(struct ata_port *ap)
511{
512 struct ata_host_set *host_set = ap->host_set;
513 void __iomem *mmio = host_set->mmio_base;
514
515 readl(mmio + PDC_INT_SEQMASK);
516}
517
518static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
519{
520 struct ata_host_set *host_set = dev_instance;
521 struct ata_port *ap;
522 u32 mask = 0;
523 unsigned int i, tmp;
524 unsigned int handled = 0;
525 void __iomem *mmio_base;
526
527 VPRINTK("ENTER\n");
528
529 if (!host_set || !host_set->mmio_base) {
530 VPRINTK("QUICK EXIT\n");
531 return IRQ_NONE;
532 }
533
534 mmio_base = host_set->mmio_base;
535
536 /* reading should also clear interrupts */
537 mask = readl(mmio_base + PDC_INT_SEQMASK);
538
539 if (mask == 0xffffffff) {
540 VPRINTK("QUICK EXIT 2\n");
541 return IRQ_NONE;
542 }
543
544 spin_lock(&host_set->lock);
545
546 mask &= 0xffff; /* only 16 tags possible */
547 if (!mask) {
548 VPRINTK("QUICK EXIT 3\n");
549 goto done_irq;
550 }
551
552 writel(mask, mmio_base + PDC_INT_SEQMASK);
553
554 for (i = 0; i < host_set->n_ports; i++) {
555 VPRINTK("port %u\n", i);
556 ap = host_set->ports[i];
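		/* SEQ ids start at 1 (see pdc_packet_start), hence the
		 * bit (i + 1) test below.
		 */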
557 tmp = mask & (1 << (i + 1));
558 if (tmp && ap &&
559 !(ap->flags & ATA_FLAG_DISABLED)) {
560 struct ata_queued_cmd *qc;
561
562 qc = ata_qc_from_tag(ap, ap->active_tag);
563 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
564 handled += pdc_host_intr(ap, qc);
565 }
566 }
567
568 VPRINTK("EXIT\n");
569
570done_irq:
571 spin_unlock(&host_set->lock);
572 return IRQ_RETVAL(handled);
573}
574
575static inline void pdc_packet_start(struct ata_queued_cmd *qc)
576{
577 struct ata_port *ap = qc->ap;
578 struct pdc_port_priv *pp = ap->private_data;
579 unsigned int port_no = ap->port_no;
580 u8 seq = (u8) (port_no + 1);
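	/* Each port tags its packet with SEQ id port_no + 1; byte 2 of
	 * the command packet carries the same id, so completion status
	 * lands in the matching PDC_INT_SEQMASK bit.
	 */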
581
582 VPRINTK("ENTER, ap %p\n", ap);
583
584 writel(0x00000001, ap->host_set->mmio_base + (seq * 4));
585 readl(ap->host_set->mmio_base + (seq * 4)); /* flush */
586
587 pp->pkt[2] = seq;
588 wmb(); /* flush PRD, pkt writes */
589 writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
590 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
591}
592
593static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
594{
595 switch (qc->tf.protocol) {
596 case ATA_PROT_DMA:
597 case ATA_PROT_NODATA:
598 pdc_packet_start(qc);
599 return 0;
600
601 case ATA_PROT_ATAPI_DMA:
602 BUG();
603 break;
604
605 default:
606 break;
607 }
608
609 return ata_qc_issue_prot(qc);
610}
611
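/*
 * ATA_PROT_DMA and ATA_PROT_NODATA commands are issued through the
 * packet engine (pdc_packet_start() above), never through the taskfile;
 * the WARN_ONs below catch violations of that invariant.
 */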
612static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
613{
614	WARN_ON(tf->protocol == ATA_PROT_DMA ||
615		tf->protocol == ATA_PROT_NODATA);
616 ata_tf_load(ap, tf);
617}
618
619
620static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
621{
622	WARN_ON(tf->protocol == ATA_PROT_DMA ||
623		tf->protocol == ATA_PROT_NODATA);
624 ata_exec_command(ap, tf);
625}
626
627
628static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
629{
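	/* the chip exposes the ATA shadow registers on 32-bit (4-byte) boundaries */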
630 port->cmd_addr = base;
631 port->data_addr = base;
632 port->feature_addr =
633 port->error_addr = base + 0x4;
634 port->nsect_addr = base + 0x8;
635 port->lbal_addr = base + 0xc;
636 port->lbam_addr = base + 0x10;
637 port->lbah_addr = base + 0x14;
638 port->device_addr = base + 0x18;
639 port->command_addr =
640 port->status_addr = base + 0x1c;
641 port->altstatus_addr =
642 port->ctl_addr = base + 0x38;
643}
644
645
646static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
647{
648 void __iomem *mmio = pe->mmio_base;
649 struct pdc_host_priv *hp = pe->private_data;
650 int hotplug_offset = hp->hotplug_offset;
651 u32 tmp;
652
653 /*
654 * Except for the hotplug stuff, this is voodoo from the
655 * Promise driver. Label this entire section
656 * "TODO: figure out why we do this"
657 */
658
659 /* change FIFO_SHD to 8 dwords, enable BMR_BURST */
660 tmp = readl(mmio + PDC_FLASH_CTL);
661	tmp |= 0x12000;	/* set bits 16 (FIFO 8 dwords) and 13 (BMR burst?) */
662 writel(tmp, mmio + PDC_FLASH_CTL);
663
664 /* clear plug/unplug flags for all ports */
665 tmp = readl(mmio + hotplug_offset);
666 writel(tmp | 0xff, mmio + hotplug_offset);
667
668 /* mask plug/unplug ints */
669 tmp = readl(mmio + hotplug_offset);
670 writel(tmp | 0xff0000, mmio + hotplug_offset);
671
672	/* reduce TBG clock to 133 MHz */
673	tmp = readl(mmio + PDC_TBG_MODE);
674	tmp &= ~0x30000; /* clear bits 17:16 */
675	tmp |= 0x10000;  /* set bits 17:16 = 01b */
676 writel(tmp, mmio + PDC_TBG_MODE);
677
678 readl(mmio + PDC_TBG_MODE); /* flush */
679 msleep(10);
680
681 /* adjust slew rate control register. */
682 tmp = readl(mmio + PDC_SLEW_CTL);
683	tmp &= 0xFFFFF03F; /* clear bits 11:6 */
684	tmp |= 0x00000900; /* set bits 11:9 = 100b, bits 8:6 = 100b */
685 writel(tmp, mmio + PDC_SLEW_CTL);
686}
687
688static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
689{
690 static int printed_version;
691 struct ata_probe_ent *probe_ent = NULL;
692 struct pdc_host_priv *hp;
693 unsigned long base;
694 void __iomem *mmio_base;
695 unsigned int board_idx = (unsigned int) ent->driver_data;
696 int pci_dev_busy = 0;
697 int rc;
698
699 if (!printed_version++)
700 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
701
702 rc = pci_enable_device(pdev);
703 if (rc)
704 return rc;
705
706 rc = pci_request_regions(pdev, DRV_NAME);
707 if (rc) {
708 pci_dev_busy = 1;
709 goto err_out;
710 }
711
712 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
713 if (rc)
714 goto err_out_regions;
715 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
716 if (rc)
717 goto err_out_regions;
718
719 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
720 if (probe_ent == NULL) {
721 rc = -ENOMEM;
722 goto err_out_regions;
723 }
724
725 probe_ent->dev = pci_dev_to_dev(pdev);
726 INIT_LIST_HEAD(&probe_ent->node);
727
728 mmio_base = pci_iomap(pdev, 3, 0);
729 if (mmio_base == NULL) {
730 rc = -ENOMEM;
731 goto err_out_free_ent;
732 }
733 base = (unsigned long) mmio_base;
734
735 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
736 if (hp == NULL) {
737 rc = -ENOMEM;
738 goto err_out_free_ent;
739 }
740
741 /* Set default hotplug offset */
742 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
743 probe_ent->private_data = hp;
744
745 probe_ent->sht = pdc_port_info[board_idx].sht;
746 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
747 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
748 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
749 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
750 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
751
752 probe_ent->irq = pdev->irq;
753 probe_ent->irq_flags = IRQF_SHARED;
754 probe_ent->mmio_base = mmio_base;
755
756 pdc_ata_setup_port(&probe_ent->port[0], base + 0x200);
757 pdc_ata_setup_port(&probe_ent->port[1], base + 0x280);
758
759 probe_ent->port[0].scr_addr = base + 0x400;
760 probe_ent->port[1].scr_addr = base + 0x500;
761
762 /* notice 4-port boards */
763 switch (board_idx) {
764 case board_40518:
765 /* Override hotplug offset for SATAII150 */
766 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
767 /* Fall through */
768 case board_20319:
769 probe_ent->n_ports = 4;
770
771 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
772 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
773
774 probe_ent->port[2].scr_addr = base + 0x600;
775 probe_ent->port[3].scr_addr = base + 0x700;
776 break;
777 case board_2057x:
778 /* Override hotplug offset for SATAII150 */
779 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
780 /* Fall through */
781 case board_2037x:
782 probe_ent->n_ports = 2;
783 break;
784 case board_20771:
785 probe_ent->n_ports = 2;
786 break;
787 case board_20619:
788 probe_ent->n_ports = 4;
789
790 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
791 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
792
793 probe_ent->port[2].scr_addr = base + 0x600;
794 probe_ent->port[3].scr_addr = base + 0x700;
795 break;
796 default:
797 BUG();
798 break;
799 }
800
801 pci_set_master(pdev);
802
803 /* initialize adapter */
804 pdc_host_init(board_idx, probe_ent);
805
806	/* FIXME: is anything besides hp left to free here? */
807 if (!ata_device_add(probe_ent))
808 kfree(hp);
809
810 kfree(probe_ent);
811
812 return 0;
813
814err_out_free_ent:
815 kfree(probe_ent);
816err_out_regions:
817 pci_release_regions(pdev);
818err_out:
819 if (!pci_dev_busy)
820 pci_disable_device(pdev);
821 return rc;
822}
823
824
825static int __init pdc_ata_init(void)
826{
827 return pci_module_init(&pdc_ata_pci_driver);
828}
829
830
831static void __exit pdc_ata_exit(void)
832{
833 pci_unregister_driver(&pdc_ata_pci_driver);
834}
835
836
837MODULE_AUTHOR("Jeff Garzik");
838MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
839MODULE_LICENSE("GPL");
840MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
841MODULE_VERSION(DRV_VERSION);
842
843module_init(pdc_ata_init);
844module_exit(pdc_ata_exit);
diff --git a/drivers/scsi/sata_promise.h b/drivers/scsi/sata_promise.h
deleted file mode 100644
index 6ee5e190262d..000000000000
--- a/drivers/scsi/sata_promise.h
+++ /dev/null
@@ -1,157 +0,0 @@
1/*
2 * sata_promise.h - Promise SATA common definitions and inline funcs
3 *
4 * Copyright 2003-2004 Red Hat, Inc.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 *
22 * libata documentation is available via 'make {ps|pdf}docs',
23 * as Documentation/DocBook/libata.*
24 *
25 */
26
27#ifndef __SATA_PROMISE_H__
28#define __SATA_PROMISE_H__
29
30#include <linux/ata.h>
31
32enum pdc_packet_bits {
33 PDC_PKT_READ = (1 << 2),
34 PDC_PKT_NODATA = (1 << 3),
35
36 PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5),
37 PDC_PKT_CLEAR_BSY = (1 << 4),
38 PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4),
39 PDC_LAST_REG = (1 << 3),
40
41 PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1),
42};
43
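/*
 * Packet layout, as built by the helpers below: dword 0 holds the
 * control bits and sequence IDs, dword 1 the S/G table address,
 * dword 2 a next-packet pointer (unused); from byte 12 onward the
 * packet is (register, value) byte pairs, ending with the command.
 */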
44static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
45 dma_addr_t sg_table,
46 unsigned int devno, u8 *buf)
47{
48 u8 dev_reg;
49 u32 *buf32 = (u32 *) buf;
50
51 /* set control bits (byte 0), zero delay seq id (byte 3),
52 * and seq id (byte 2)
53 */
54 switch (tf->protocol) {
55 case ATA_PROT_DMA:
56 if (!(tf->flags & ATA_TFLAG_WRITE))
57 buf32[0] = cpu_to_le32(PDC_PKT_READ);
58 else
59 buf32[0] = 0;
60 break;
61
62 case ATA_PROT_NODATA:
63 buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
64 break;
65
66 default:
67 BUG();
68 break;
69 }
70
71 buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
72 buf32[2] = 0; /* no next-packet */
73
74 if (devno == 0)
75 dev_reg = ATA_DEVICE_OBS;
76 else
77 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
78
79 /* select device */
80 buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
81 buf[13] = dev_reg;
82
83 /* device control register */
84 buf[14] = (1 << 5) | PDC_REG_DEVCTL;
85 buf[15] = tf->ctl;
86
87 return 16; /* offset of next byte */
88}
89
90static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
91 unsigned int i)
92{
93 if (tf->flags & ATA_TFLAG_DEVICE) {
94 buf[i++] = (1 << 5) | ATA_REG_DEVICE;
95 buf[i++] = tf->device;
96 }
97
98 /* and finally the command itself; also includes end-of-pkt marker */
99 buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
100 buf[i++] = tf->command;
101
102 return i;
103}
104
105static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i)
106{
107 /* the "(1 << 5)" should be read "(count << 5)" */
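	/* e.g. pdc_prep_lba48() below uses (2 << 5) to emit the hob and low bytes in one go */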
108
109 /* ATA command block registers */
110 buf[i++] = (1 << 5) | ATA_REG_FEATURE;
111 buf[i++] = tf->feature;
112
113 buf[i++] = (1 << 5) | ATA_REG_NSECT;
114 buf[i++] = tf->nsect;
115
116 buf[i++] = (1 << 5) | ATA_REG_LBAL;
117 buf[i++] = tf->lbal;
118
119 buf[i++] = (1 << 5) | ATA_REG_LBAM;
120 buf[i++] = tf->lbam;
121
122 buf[i++] = (1 << 5) | ATA_REG_LBAH;
123 buf[i++] = tf->lbah;
124
125 return i;
126}
127
128static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i)
129{
130 /* the "(2 << 5)" should be read "(count << 5)" */
131
132 /* ATA command block registers */
133 buf[i++] = (2 << 5) | ATA_REG_FEATURE;
134 buf[i++] = tf->hob_feature;
135 buf[i++] = tf->feature;
136
137 buf[i++] = (2 << 5) | ATA_REG_NSECT;
138 buf[i++] = tf->hob_nsect;
139 buf[i++] = tf->nsect;
140
141 buf[i++] = (2 << 5) | ATA_REG_LBAL;
142 buf[i++] = tf->hob_lbal;
143 buf[i++] = tf->lbal;
144
145 buf[i++] = (2 << 5) | ATA_REG_LBAM;
146 buf[i++] = tf->hob_lbam;
147 buf[i++] = tf->lbam;
148
149 buf[i++] = (2 << 5) | ATA_REG_LBAH;
150 buf[i++] = tf->hob_lbah;
151 buf[i++] = tf->lbah;
152
153 return i;
154}
155
156
157#endif /* __SATA_PROMISE_H__ */
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
deleted file mode 100644
index d374c1db0cf3..000000000000
--- a/drivers/scsi/sata_qstor.c
+++ /dev/null
@@ -1,730 +0,0 @@
1/*
2 * sata_qstor.c - Pacific Digital Corporation QStor SATA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Pacific Digital Corporation.
7 * (OSL/GPL code release authorized by Jalil Fadavi).
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 *
25 * libata documentation is available via 'make {ps|pdf}docs',
26 * as Documentation/DocBook/libata.*
27 *
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
38#include <linux/device.h>
39#include <scsi/scsi_host.h>
40#include <asm/io.h>
41#include <linux/libata.h>
42
43#define DRV_NAME "sata_qstor"
44#define DRV_VERSION "0.06"
45
46enum {
47 QS_PORTS = 4,
48 QS_MAX_PRD = LIBATA_MAX_PRD,
49 QS_CPB_ORDER = 6,
50 QS_CPB_BYTES = (1 << QS_CPB_ORDER),
51 QS_PRD_BYTES = QS_MAX_PRD * 16,
52 QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
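	/* per-port packet buffer: one CPB immediately followed by its PRD table */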
53
54 /* global register offsets */
55 QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
56 QS_HID_HPHY = 0x0004, /* host physical interface info */
57 QS_HCT_CTRL = 0x00e4, /* global interrupt mask offset */
58 QS_HST_SFF = 0x0100, /* host status fifo offset */
59 QS_HVS_SERD3 = 0x0393, /* PHY enable offset */
60
61 /* global control bits */
62 QS_HPHY_64BIT = (1 << 1), /* 64-bit bus detected */
63 QS_CNFG3_GSRST = 0x01, /* global chip reset */
64	QS_SERD3_PHY_ENA	= 0xf0, /* PHY detection ENAble */
65
66 /* per-channel register offsets */
67 QS_CCF_CPBA = 0x0710, /* chan CPB base address */
68 QS_CCF_CSEP = 0x0718, /* chan CPB separation factor */
69 QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */
70 QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */
71 QS_CFC_DUFT = 0x0808, /* dev upstream fifo threshold */
72 QS_CFC_DDFT = 0x080c, /* dev downstream fifo threshold */
73 QS_CCT_CTR0 = 0x0900, /* chan control-0 offset */
74 QS_CCT_CTR1 = 0x0901, /* chan control-1 offset */
75 QS_CCT_CFF = 0x0a00, /* chan command fifo offset */
76
77 /* channel control bits */
78 QS_CTR0_REG = (1 << 1), /* register mode (vs. pkt mode) */
79 QS_CTR0_CLER = (1 << 2), /* clear channel errors */
80 QS_CTR1_RDEV = (1 << 1), /* sata phy/comms reset */
81 QS_CTR1_RCHN = (1 << 4), /* reset channel logic */
82 QS_CCF_RUN_PKT = 0x107, /* RUN a new dma PKT */
83
84 /* pkt sub-field headers */
85 QS_HCB_HDR = 0x01, /* Host Control Block header */
86 QS_DCB_HDR = 0x02, /* Device Control Block header */
87
88 /* pkt HCB flag bits */
89 QS_HF_DIRO = (1 << 0), /* data DIRection Out */
90 QS_HF_DAT = (1 << 3), /* DATa pkt */
91 QS_HF_IEN = (1 << 4), /* Interrupt ENable */
92 QS_HF_VLD = (1 << 5), /* VaLiD pkt */
93
94 /* pkt DCB flag bits */
95 QS_DF_PORD = (1 << 2), /* Pio OR Dma */
96 QS_DF_ELBA = (1 << 3), /* Extended LBA (lba48) */
97
98 /* PCI device IDs */
99 board_2068_idx = 0, /* QStor 4-port SATA/RAID */
100};
101
102enum {
103 QS_DMA_BOUNDARY = ~0UL
104};
105
106typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;
107
108struct qs_port_priv {
109 u8 *pkt;
110 dma_addr_t pkt_dma;
111 qs_state_t state;
112};
113
114static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg);
115static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
116static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
117static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
118static int qs_port_start(struct ata_port *ap);
119static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap);
127static void qs_irq_clear(struct ata_port *ap);
128static void qs_eng_timeout(struct ata_port *ap);
129
130static struct scsi_host_template qs_ata_sht = {
131 .module = THIS_MODULE,
132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd,
135 .can_queue = ATA_DEF_QUEUE,
136 .this_id = ATA_SHT_THIS_ID,
137 .sg_tablesize = QS_MAX_PRD,
138 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
139 .emulated = ATA_SHT_EMULATED,
140 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
141 .use_clustering = ENABLE_CLUSTERING,
142 .proc_name = DRV_NAME,
143 .dma_boundary = QS_DMA_BOUNDARY,
144 .slave_configure = ata_scsi_slave_config,
145 .slave_destroy = ata_scsi_slave_destroy,
146 .bios_param = ata_std_bios_param,
147};
148
149static const struct ata_port_operations qs_ata_ops = {
150 .port_disable = ata_port_disable,
151 .tf_load = ata_tf_load,
152 .tf_read = ata_tf_read,
153 .check_status = ata_check_status,
154 .check_atapi_dma = qs_check_atapi_dma,
155 .exec_command = ata_exec_command,
156 .dev_select = ata_std_dev_select,
157 .phy_reset = qs_phy_reset,
158 .qc_prep = qs_qc_prep,
159 .qc_issue = qs_qc_issue,
160 .data_xfer = ata_mmio_data_xfer,
161 .eng_timeout = qs_eng_timeout,
162 .irq_handler = qs_intr,
163 .irq_clear = qs_irq_clear,
164 .scr_read = qs_scr_read,
165 .scr_write = qs_scr_write,
166 .port_start = qs_port_start,
167 .port_stop = qs_port_stop,
168 .host_stop = qs_host_stop,
169 .bmdma_stop = qs_bmdma_stop,
170 .bmdma_status = qs_bmdma_status,
171};
172
173static const struct ata_port_info qs_port_info[] = {
174 /* board_2068_idx */
175 {
176 .sht = &qs_ata_sht,
177 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
178 ATA_FLAG_SATA_RESET |
179 //FIXME ATA_FLAG_SRST |
180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
181 .pio_mask = 0x10, /* pio4 */
182 .udma_mask = 0x7f, /* udma0-6 */
183 .port_ops = &qs_ata_ops,
184 },
185};
186
187static const struct pci_device_id qs_ata_pci_tbl[] = {
188 { PCI_VENDOR_ID_PDC, 0x2068, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
189 board_2068_idx },
190
191 { } /* terminate list */
192};
193
194static struct pci_driver qs_ata_pci_driver = {
195 .name = DRV_NAME,
196 .id_table = qs_ata_pci_tbl,
197 .probe = qs_ata_init_one,
198 .remove = ata_pci_remove_one,
199};
200
201static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
202{
203 return 1; /* ATAPI DMA not supported */
204}
205
206static void qs_bmdma_stop(struct ata_queued_cmd *qc)
207{
208 /* nothing */
209}
210
211static u8 qs_bmdma_status(struct ata_port *ap)
212{
213 return 0;
214}
215
216static void qs_irq_clear(struct ata_port *ap)
217{
218 /* nothing */
219}
220
221static inline void qs_enter_reg_mode(struct ata_port *ap)
222{
223 u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
224
225 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
226 readb(chan + QS_CCT_CTR0); /* flush */
227}
228
229static inline void qs_reset_channel_logic(struct ata_port *ap)
230{
231 u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
232
233 writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
234 readb(chan + QS_CCT_CTR0); /* flush */
235 qs_enter_reg_mode(ap);
236}
237
238static void qs_phy_reset(struct ata_port *ap)
239{
240 struct qs_port_priv *pp = ap->private_data;
241
242 pp->state = qs_state_idle;
243 qs_reset_channel_logic(ap);
244 sata_phy_reset(ap);
245}
246
247static void qs_eng_timeout(struct ata_port *ap)
248{
249 struct qs_port_priv *pp = ap->private_data;
250
251 if (pp->state != qs_state_idle) /* healthy paranoia */
252 pp->state = qs_state_mmio;
253 qs_reset_channel_logic(ap);
254 ata_eng_timeout(ap);
255}
256
257static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
258{
259 if (sc_reg > SCR_CONTROL)
260 return ~0U;
261 return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
262}
263
264static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
265{
266 if (sc_reg > SCR_CONTROL)
267 return;
268 writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
269}
270
271static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
272{
273 struct scatterlist *sg;
274 struct ata_port *ap = qc->ap;
275 struct qs_port_priv *pp = ap->private_data;
276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278
279 WARN_ON(qc->__sg == NULL);
280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281
282 nelem = 0;
283 ata_for_each_sg(sg, qc) {
284 u64 addr;
285 u32 len;
286
287 addr = sg_dma_address(sg);
288 *(__le64 *)prd = cpu_to_le64(addr);
289 prd += sizeof(u64);
290
291 len = sg_dma_len(sg);
292 *(__le32 *)prd = cpu_to_le32(len);
293 prd += sizeof(u64);
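		/* advancing by 8 again skips 4 unused bytes: each PRD entry is 16 bytes */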
294
295 VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
296 (unsigned long long)addr, len);
297 nelem++;
298 }
299
300 return nelem;
301}
302
303static void qs_qc_prep(struct ata_queued_cmd *qc)
304{
305 struct qs_port_priv *pp = qc->ap->private_data;
306 u8 dflags = QS_DF_PORD, *buf = pp->pkt;
307 u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
308 u64 addr;
309 unsigned int nelem;
310
311 VPRINTK("ENTER\n");
312
313 qs_enter_reg_mode(qc->ap);
314 if (qc->tf.protocol != ATA_PROT_DMA) {
315 ata_qc_prep(qc);
316 return;
317 }
318
319 nelem = qs_fill_sg(qc);
320
321 if ((qc->tf.flags & ATA_TFLAG_WRITE))
322 hflags |= QS_HF_DIRO;
323 if ((qc->tf.flags & ATA_TFLAG_LBA48))
324 dflags |= QS_DF_ELBA;
325
326 /* host control block (HCB) */
327 buf[ 0] = QS_HCB_HDR;
328 buf[ 1] = hflags;
329 *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nsect * ATA_SECT_SIZE);
330 *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
331 addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
332 *(__le64 *)(&buf[16]) = cpu_to_le64(addr);
333
334 /* device control block (DCB) */
335 buf[24] = QS_DCB_HDR;
336 buf[28] = dflags;
337
338 /* frame information structure (FIS) */
339 ata_tf_to_fis(&qc->tf, &buf[32], 0);
340}
341
342static inline void qs_packet_start(struct ata_queued_cmd *qc)
343{
344 struct ata_port *ap = qc->ap;
345 u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
346
347 VPRINTK("ENTER, ap %p\n", ap);
348
349 writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
350 wmb(); /* flush PRDs and pkt to memory */
351 writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
352 readl(chan + QS_CCT_CFF); /* flush */
353}
354
355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{
357 struct qs_port_priv *pp = qc->ap->private_data;
358
359 switch (qc->tf.protocol) {
360 case ATA_PROT_DMA:
361
362 pp->state = qs_state_pkt;
363 qs_packet_start(qc);
364 return 0;
365
366 case ATA_PROT_ATAPI_DMA:
367 BUG();
368 break;
369
370 default:
371 break;
372 }
373
374 pp->state = qs_state_mmio;
375 return ata_qc_issue_prot(qc);
376}
377
378static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
379{
380 unsigned int handled = 0;
381 u8 sFFE;
382 u8 __iomem *mmio_base = host_set->mmio_base;
383
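	/* drain the host status FIFO until its empty flag (sFFE) is set */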
384 do {
385 u32 sff0 = readl(mmio_base + QS_HST_SFF);
386 u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
387 u8 sEVLD = (sff1 >> 30) & 0x01; /* valid flag */
388 sFFE = sff1 >> 31; /* empty flag */
389
390 if (sEVLD) {
391 u8 sDST = sff0 >> 16; /* dev status */
392 u8 sHST = sff1 & 0x3f; /* host status */
393 unsigned int port_no = (sff1 >> 8) & 0x03;
394 struct ata_port *ap = host_set->ports[port_no];
395
396 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
397 sff1, sff0, port_no, sHST, sDST);
398 handled = 1;
399 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
400 struct ata_queued_cmd *qc;
401 struct qs_port_priv *pp = ap->private_data;
402 if (!pp || pp->state != qs_state_pkt)
403 continue;
404 qc = ata_qc_from_tag(ap, ap->active_tag);
405 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
406 switch (sHST) {
407 case 0: /* successful CPB */
408 case 3: /* device error */
409 pp->state = qs_state_idle;
410 qs_enter_reg_mode(qc->ap);
411 qc->err_mask |= ac_err_mask(sDST);
412 ata_qc_complete(qc);
413 break;
414 default:
415 break;
416 }
417 }
418 }
419 }
420 } while (!sFFE);
421 return handled;
422}
423
424static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
425{
426 unsigned int handled = 0, port_no;
427
428 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
429 struct ata_port *ap;
430 ap = host_set->ports[port_no];
431 if (ap &&
432 !(ap->flags & ATA_FLAG_DISABLED)) {
433 struct ata_queued_cmd *qc;
434 struct qs_port_priv *pp = ap->private_data;
435 if (!pp || pp->state != qs_state_mmio)
436 continue;
437 qc = ata_qc_from_tag(ap, ap->active_tag);
438 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
439
440 /* check main status, clearing INTRQ */
441 u8 status = ata_check_status(ap);
442 if ((status & ATA_BUSY))
443 continue;
444 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
445 ap->id, qc->tf.protocol, status);
446
447 /* complete taskfile transaction */
448 pp->state = qs_state_idle;
449 qc->err_mask |= ac_err_mask(status);
450 ata_qc_complete(qc);
451 handled = 1;
452 }
453 }
454 }
455 return handled;
456}
457
458static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
459{
460 struct ata_host_set *host_set = dev_instance;
461 unsigned int handled = 0;
462
463 VPRINTK("ENTER\n");
464
465 spin_lock(&host_set->lock);
466 handled = qs_intr_pkt(host_set) | qs_intr_mmio(host_set);
467 spin_unlock(&host_set->lock);
468
469 VPRINTK("EXIT\n");
470
471 return IRQ_RETVAL(handled);
472}
473
474static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
475{
476 port->cmd_addr =
477 port->data_addr = base + 0x400;
478 port->error_addr =
479 port->feature_addr = base + 0x408; /* hob_feature = 0x409 */
480 port->nsect_addr = base + 0x410; /* hob_nsect = 0x411 */
481 port->lbal_addr = base + 0x418; /* hob_lbal = 0x419 */
482 port->lbam_addr = base + 0x420; /* hob_lbam = 0x421 */
483 port->lbah_addr = base + 0x428; /* hob_lbah = 0x429 */
484 port->device_addr = base + 0x430;
485 port->status_addr =
486 port->command_addr = base + 0x438;
487 port->altstatus_addr =
488 port->ctl_addr = base + 0x440;
489 port->scr_addr = base + 0xc00;
490}
491
492static int qs_port_start(struct ata_port *ap)
493{
494 struct device *dev = ap->host_set->dev;
495 struct qs_port_priv *pp;
496 void __iomem *mmio_base = ap->host_set->mmio_base;
497 void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
498 u64 addr;
499 int rc;
500
501 rc = ata_port_start(ap);
502 if (rc)
503 return rc;
504 qs_enter_reg_mode(ap);
505 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
506 if (!pp) {
507 rc = -ENOMEM;
508 goto err_out;
509 }
510 pp->pkt = dma_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
511 GFP_KERNEL);
512 if (!pp->pkt) {
513 rc = -ENOMEM;
514 goto err_out_kfree;
515 }
516 memset(pp->pkt, 0, QS_PKT_BYTES);
517 ap->private_data = pp;
518
519 addr = (u64)pp->pkt_dma;
520 writel((u32) addr, chan + QS_CCF_CPBA);
521 writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
522 return 0;
523
524err_out_kfree:
525 kfree(pp);
526err_out:
527 ata_port_stop(ap);
528 return rc;
529}
530
531static void qs_port_stop(struct ata_port *ap)
532{
533 struct device *dev = ap->host_set->dev;
534 struct qs_port_priv *pp = ap->private_data;
535
536 if (pp != NULL) {
537 ap->private_data = NULL;
538 if (pp->pkt != NULL)
539 dma_free_coherent(dev, QS_PKT_BYTES, pp->pkt,
540 pp->pkt_dma);
541 kfree(pp);
542 }
543 ata_port_stop(ap);
544}
545
546static void qs_host_stop(struct ata_host_set *host_set)
547{
548 void __iomem *mmio_base = host_set->mmio_base;
549 struct pci_dev *pdev = to_pci_dev(host_set->dev);
550
551 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
552 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
553
554 pci_iounmap(pdev, mmio_base);
555}
556
557static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
558{
559 void __iomem *mmio_base = pe->mmio_base;
560 unsigned int port_no;
561
562 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
563 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
564
565 /* reset each channel in turn */
566 for (port_no = 0; port_no < pe->n_ports; ++port_no) {
567 u8 __iomem *chan = mmio_base + (port_no * 0x4000);
568 writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
569 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
570 readb(chan + QS_CCT_CTR0); /* flush */
571 }
572 writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */
573
574 for (port_no = 0; port_no < pe->n_ports; ++port_no) {
575 u8 __iomem *chan = mmio_base + (port_no * 0x4000);
576 /* set FIFO depths to same settings as Windows driver */
577 writew(32, chan + QS_CFC_HUFT);
578 writew(32, chan + QS_CFC_HDFT);
579 writew(10, chan + QS_CFC_DUFT);
580 writew( 8, chan + QS_CFC_DDFT);
581 /* set CPB size in bytes, as a power of two */
582 writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
583 }
584 writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
585}
586
587/*
588 * The QStor understands 64-bit buses, and uses 64-bit fields
589 * for DMA pointers regardless of bus width. We just have to
590 * make sure our DMA masks are set appropriately for whatever
591 * bridge lies between us and the QStor, and then the DMA mapping
592 * code will ensure we only ever "see" appropriate buffer addresses.
593 * If we're 32-bit limited somewhere, then our 64-bit fields will
594 * just end up with zeros in the upper 32 bits, without any special
595 * logic required outside of this routine (below).
596 */
597static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
598{
599 u32 bus_info = readl(mmio_base + QS_HID_HPHY);
600 int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
601
602 if (have_64bit_bus &&
603 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
604 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
605 if (rc) {
606 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
607 if (rc) {
608 dev_printk(KERN_ERR, &pdev->dev,
609 "64-bit DMA enable failed\n");
610 return rc;
611 }
612 }
613 } else {
614 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
615 if (rc) {
616 dev_printk(KERN_ERR, &pdev->dev,
617 "32-bit DMA enable failed\n");
618 return rc;
619 }
620 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
621 if (rc) {
622 dev_printk(KERN_ERR, &pdev->dev,
623 "32-bit consistent DMA enable failed\n");
624 return rc;
625 }
626 }
627 return 0;
628}
629
630static int qs_ata_init_one(struct pci_dev *pdev,
631 const struct pci_device_id *ent)
632{
633 static int printed_version;
634 struct ata_probe_ent *probe_ent = NULL;
635 void __iomem *mmio_base;
636 unsigned int board_idx = (unsigned int) ent->driver_data;
637 int rc, port_no;
638
639 if (!printed_version++)
640 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
641
642 rc = pci_enable_device(pdev);
643 if (rc)
644 return rc;
645
646 rc = pci_request_regions(pdev, DRV_NAME);
647 if (rc)
648 goto err_out;
649
650 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
651 rc = -ENODEV;
652 goto err_out_regions;
653 }
654
655 mmio_base = pci_iomap(pdev, 4, 0);
656 if (mmio_base == NULL) {
657 rc = -ENOMEM;
658 goto err_out_regions;
659 }
660
661 rc = qs_set_dma_masks(pdev, mmio_base);
662 if (rc)
663 goto err_out_iounmap;
664
665 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
666 if (probe_ent == NULL) {
667 rc = -ENOMEM;
668 goto err_out_iounmap;
669 }
670
671 memset(probe_ent, 0, sizeof(*probe_ent));
672 probe_ent->dev = pci_dev_to_dev(pdev);
673 INIT_LIST_HEAD(&probe_ent->node);
674
675 probe_ent->sht = qs_port_info[board_idx].sht;
676 probe_ent->host_flags = qs_port_info[board_idx].host_flags;
677 probe_ent->pio_mask = qs_port_info[board_idx].pio_mask;
678 probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask;
679 probe_ent->udma_mask = qs_port_info[board_idx].udma_mask;
680 probe_ent->port_ops = qs_port_info[board_idx].port_ops;
681
682 probe_ent->irq = pdev->irq;
683 probe_ent->irq_flags = IRQF_SHARED;
684 probe_ent->mmio_base = mmio_base;
685 probe_ent->n_ports = QS_PORTS;
686
687 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
688 unsigned long chan = (unsigned long)mmio_base +
689 (port_no * 0x4000);
690 qs_ata_setup_port(&probe_ent->port[port_no], chan);
691 }
692
693 pci_set_master(pdev);
694
695 /* initialize adapter */
696 qs_host_init(board_idx, probe_ent);
697
698 rc = ata_device_add(probe_ent);
699 kfree(probe_ent);
700 if (rc != QS_PORTS)
701 goto err_out_iounmap;
702 return 0;
703
704err_out_iounmap:
705 pci_iounmap(pdev, mmio_base);
706err_out_regions:
707 pci_release_regions(pdev);
708err_out:
709 pci_disable_device(pdev);
710 return rc;
711}
712
713static int __init qs_ata_init(void)
714{
715 return pci_module_init(&qs_ata_pci_driver);
716}
717
718static void __exit qs_ata_exit(void)
719{
720 pci_unregister_driver(&qs_ata_pci_driver);
721}
722
723MODULE_AUTHOR("Mark Lord");
724MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
725MODULE_LICENSE("GPL");
726MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
727MODULE_VERSION(DRV_VERSION);
728
729module_init(qs_ata_init);
730module_exit(qs_ata_exit);
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
deleted file mode 100644
index d0a85073ebf7..000000000000
--- a/drivers/scsi/sata_sil.c
+++ /dev/null
@@ -1,727 +0,0 @@
1/*
2 * sata_sil.c - Silicon Image SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2005 Red Hat, Inc.
9 * Copyright 2003 Benjamin Herrenschmidt
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Documentation for SiI 3112:
31 * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
32 *
33 * Other errata and documentation available under NDA.
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "2.0"
50
51enum {
52 /*
53 * host flags
54 */
55 SIL_FLAG_NO_SATA_IRQ = (1 << 28),
56 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
57 SIL_FLAG_MOD15WRITE = (1 << 30),
58
59 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
60 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
61
62 /*
63 * Controller IDs
64 */
65 sil_3112 = 0,
66 sil_3112_no_sata_irq = 1,
67 sil_3512 = 2,
68 sil_3114 = 3,
69
70 /*
71 * Register offsets
72 */
73 SIL_SYSCFG = 0x48,
74
75 /*
76 * Register bits
77 */
78 /* SYSCFG */
79 SIL_MASK_IDE0_INT = (1 << 22),
80 SIL_MASK_IDE1_INT = (1 << 23),
81 SIL_MASK_IDE2_INT = (1 << 24),
82 SIL_MASK_IDE3_INT = (1 << 25),
83 SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
84 SIL_MASK_4PORT = SIL_MASK_2PORT |
85 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
86
87 /* BMDMA/BMDMA2 */
88 SIL_INTR_STEERING = (1 << 1),
89
90 SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
91 SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
92 SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
93 SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
94 SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
95 SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
96 SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
97 SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
98 SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
99 SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
100
101 /* SIEN */
102 SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
103
104 /*
105 * Others
106 */
107 SIL_QUIRK_MOD15WRITE = (1 << 0),
108 SIL_QUIRK_UDMA5MAX = (1 << 1),
109};
110
111static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
112static int sil_pci_device_resume(struct pci_dev *pdev);
113static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
114static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
115static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
116static void sil_post_set_mode (struct ata_port *ap);
117static irqreturn_t sil_interrupt(int irq, void *dev_instance,
118 struct pt_regs *regs);
119static void sil_freeze(struct ata_port *ap);
120static void sil_thaw(struct ata_port *ap);
121
122
123static const struct pci_device_id sil_pci_tbl[] = {
124 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
125 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
126 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
127 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
128 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
129 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
130 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
131 { } /* terminate list */
132};
133
134
135/* TODO firmware versions should be added - eric */
136static const struct sil_drivelist {
137	const char *product;
138	unsigned int quirk;
139} sil_blacklist[] = {
140 { "ST320012AS", SIL_QUIRK_MOD15WRITE },
141 { "ST330013AS", SIL_QUIRK_MOD15WRITE },
142 { "ST340017AS", SIL_QUIRK_MOD15WRITE },
143 { "ST360015AS", SIL_QUIRK_MOD15WRITE },
144 { "ST380013AS", SIL_QUIRK_MOD15WRITE },
145 { "ST380023AS", SIL_QUIRK_MOD15WRITE },
146 { "ST3120023AS", SIL_QUIRK_MOD15WRITE },
147 { "ST3160023AS", SIL_QUIRK_MOD15WRITE },
148 { "ST3120026AS", SIL_QUIRK_MOD15WRITE },
149 { "ST3200822AS", SIL_QUIRK_MOD15WRITE },
150 { "ST340014ASL", SIL_QUIRK_MOD15WRITE },
151 { "ST360014ASL", SIL_QUIRK_MOD15WRITE },
152 { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
153 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
154 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
155 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
156 { }
157};
158
159static struct pci_driver sil_pci_driver = {
160 .name = DRV_NAME,
161 .id_table = sil_pci_tbl,
162 .probe = sil_init_one,
163 .remove = ata_pci_remove_one,
164 .suspend = ata_pci_device_suspend,
165 .resume = sil_pci_device_resume,
166};
167
168static struct scsi_host_template sil_sht = {
169 .module = THIS_MODULE,
170 .name = DRV_NAME,
171 .ioctl = ata_scsi_ioctl,
172 .queuecommand = ata_scsi_queuecmd,
173 .can_queue = ATA_DEF_QUEUE,
174 .this_id = ATA_SHT_THIS_ID,
175 .sg_tablesize = LIBATA_MAX_PRD,
176 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
177 .emulated = ATA_SHT_EMULATED,
178 .use_clustering = ATA_SHT_USE_CLUSTERING,
179 .proc_name = DRV_NAME,
180 .dma_boundary = ATA_DMA_BOUNDARY,
181 .slave_configure = ata_scsi_slave_config,
182 .slave_destroy = ata_scsi_slave_destroy,
183 .bios_param = ata_std_bios_param,
184 .suspend = ata_scsi_device_suspend,
185 .resume = ata_scsi_device_resume,
186};
187
188static const struct ata_port_operations sil_ops = {
189 .port_disable = ata_port_disable,
190 .dev_config = sil_dev_config,
191 .tf_load = ata_tf_load,
192 .tf_read = ata_tf_read,
193 .check_status = ata_check_status,
194 .exec_command = ata_exec_command,
195 .dev_select = ata_std_dev_select,
196 .post_set_mode = sil_post_set_mode,
197 .bmdma_setup = ata_bmdma_setup,
198 .bmdma_start = ata_bmdma_start,
199 .bmdma_stop = ata_bmdma_stop,
200 .bmdma_status = ata_bmdma_status,
201 .qc_prep = ata_qc_prep,
202 .qc_issue = ata_qc_issue_prot,
203 .data_xfer = ata_mmio_data_xfer,
204 .freeze = sil_freeze,
205 .thaw = sil_thaw,
206 .error_handler = ata_bmdma_error_handler,
207 .post_internal_cmd = ata_bmdma_post_internal_cmd,
208 .irq_handler = sil_interrupt,
209 .irq_clear = ata_bmdma_irq_clear,
210 .scr_read = sil_scr_read,
211 .scr_write = sil_scr_write,
212 .port_start = ata_port_start,
213 .port_stop = ata_port_stop,
214 .host_stop = ata_pci_host_stop,
215};
216
217static const struct ata_port_info sil_port_info[] = {
218 /* sil_3112 */
219 {
220 .sht = &sil_sht,
221 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
222 .pio_mask = 0x1f, /* pio0-4 */
223 .mwdma_mask = 0x07, /* mwdma0-2 */
224 .udma_mask = 0x3f, /* udma0-5 */
225 .port_ops = &sil_ops,
226 },
227 /* sil_3112_no_sata_irq */
228 {
229 .sht = &sil_sht,
230 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE |
231 SIL_FLAG_NO_SATA_IRQ,
232 .pio_mask = 0x1f, /* pio0-4 */
233 .mwdma_mask = 0x07, /* mwdma0-2 */
234 .udma_mask = 0x3f, /* udma0-5 */
235 .port_ops = &sil_ops,
236 },
237 /* sil_3512 */
238 {
239 .sht = &sil_sht,
240 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
241 .pio_mask = 0x1f, /* pio0-4 */
242 .mwdma_mask = 0x07, /* mwdma0-2 */
243 .udma_mask = 0x3f, /* udma0-5 */
244 .port_ops = &sil_ops,
245 },
246 /* sil_3114 */
247 {
248 .sht = &sil_sht,
249 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
250 .pio_mask = 0x1f, /* pio0-4 */
251 .mwdma_mask = 0x07, /* mwdma0-2 */
252 .udma_mask = 0x3f, /* udma0-5 */
253 .port_ops = &sil_ops,
254 },
255};
256
257/* per-port register offsets */
258/* TODO: we can probably calculate rather than use a table */
259static const struct {
260 unsigned long tf; /* ATA taskfile register block */
261 unsigned long ctl; /* ATA control/altstatus register block */
262 unsigned long bmdma; /* DMA register block */
263 unsigned long bmdma2; /* DMA register block #2 */
264 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
265 unsigned long scr; /* SATA control register block */
266 unsigned long sien; /* SATA Interrupt Enable register */
267 unsigned long xfer_mode;/* data transfer mode register */
268 unsigned long sfis_cfg; /* SATA FIS reception config register */
269} sil_port[] = {
270 /* port 0 ... */
271 { 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
272 { 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
273 { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
274 { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
275 /* ... port 3 */
276};
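/*
 * Per the TODO above: the port 2/3 rows are the port 0/1 rows plus
 * 0x200, i.e. every column satisfies
 * offset(i) == offset(i & 1) + (i >> 1) * 0x200.
 */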
277
278MODULE_AUTHOR("Jeff Garzik");
279MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
280MODULE_LICENSE("GPL");
281MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
282MODULE_VERSION(DRV_VERSION);
283
284static int slow_down = 0;
285module_param(slow_down, int, 0444);
286MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems by limiting commands to 15 sectors (0=off, 1=on)");
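/* e.g. "modprobe sata_sil slow_down=1" applies the 15-sector limit to every drive */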
287
288
289static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
290{
291 u8 cache_line = 0;
292 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
293 return cache_line;
294}
295
296static void sil_post_set_mode (struct ata_port *ap)
297{
298 struct ata_host_set *host_set = ap->host_set;
299 struct ata_device *dev;
300 void __iomem *addr =
301 host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
302 u32 tmp, dev_mode[2];
303 unsigned int i;
304
305 for (i = 0; i < 2; i++) {
306 dev = &ap->device[i];
307 if (!ata_dev_enabled(dev))
308 dev_mode[i] = 0; /* PIO0/1/2 */
309 else if (dev->flags & ATA_DFLAG_PIO)
310 dev_mode[i] = 1; /* PIO3/4 */
311 else
312 dev_mode[i] = 3; /* UDMA */
313 /* value 2 indicates MDMA */
314 }
315
316 tmp = readl(addr);
317 tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
318 tmp |= dev_mode[0];
319 tmp |= (dev_mode[1] << 4);
320 writel(tmp, addr);
321 readl(addr); /* flush */
322}
323
324static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
325{
326 unsigned long offset = ap->ioaddr.scr_addr;
327
328 switch (sc_reg) {
329 case SCR_STATUS:
330 return offset + 4;
331 case SCR_ERROR:
332 return offset + 8;
333 case SCR_CONTROL:
334 return offset;
335 default:
336 /* do nothing */
337 break;
338 }
339
340 return 0;
341}
342
343static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
344{
345 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
346 if (mmio)
347 return readl(mmio);
348 return 0xffffffffU;
349}
350
351static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
352{
353	void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
354 if (mmio)
355 writel(val, mmio);
356}
357
358static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
359{
360 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
361 u8 status;
362
363 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
364 u32 serror;
365
366 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
367 * controllers continue to assert IRQ as long as
368 * SError bits are pending. Clear SError immediately.
369 */
370 serror = sil_scr_read(ap, SCR_ERROR);
371 sil_scr_write(ap, SCR_ERROR, serror);
372
373 /* Trigger hotplug and accumulate SError only if the
374 * port isn't already frozen. Otherwise, PHY events
375	 * during hardreset make controllers with broken SIEN
376 * repeat probing needlessly.
377 */
378 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
379 ata_ehi_hotplugged(&ap->eh_info);
380 ap->eh_info.serror |= serror;
381 }
382
383 goto freeze;
384 }
385
386 if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
387 goto freeze;
388
389	/* Check whether we are expecting an interrupt in this state */
390 switch (ap->hsm_task_state) {
391 case HSM_ST_FIRST:
392 /* Some pre-ATAPI-4 devices assert INTRQ
393 * at this state when ready to receive CDB.
394 */
395
396		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
397		 * The flag is set only for ATAPI devices, so there is no
398		 * need to check is_atapi_taskfile(&qc->tf) again.
399		 */
400 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
401 goto err_hsm;
402 break;
403 case HSM_ST_LAST:
404 if (qc->tf.protocol == ATA_PROT_DMA ||
405 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
406 /* clear DMA-Start bit */
407 ap->ops->bmdma_stop(qc);
408
409 if (bmdma2 & SIL_DMA_ERROR) {
410 qc->err_mask |= AC_ERR_HOST_BUS;
411 ap->hsm_task_state = HSM_ST_ERR;
412 }
413 }
414 break;
415 case HSM_ST:
416 break;
417 default:
418 goto err_hsm;
419 }
420
421 /* check main status, clearing INTRQ */
422 status = ata_chk_status(ap);
423 if (unlikely(status & ATA_BUSY))
424 goto err_hsm;
425
426 /* ack bmdma irq events */
427 ata_bmdma_irq_clear(ap);
428
429 /* kick HSM in the ass */
430 ata_hsm_move(ap, qc, status, 0);
431
432 return;
433
434 err_hsm:
435 qc->err_mask |= AC_ERR_HSM;
436 freeze:
437 ata_port_freeze(ap);
438}
439
440static irqreturn_t sil_interrupt(int irq, void *dev_instance,
441 struct pt_regs *regs)
442{
443 struct ata_host_set *host_set = dev_instance;
444 void __iomem *mmio_base = host_set->mmio_base;
445 int handled = 0;
446 int i;
447
448 spin_lock(&host_set->lock);
449
450 for (i = 0; i < host_set->n_ports; i++) {
451 struct ata_port *ap = host_set->ports[i];
452 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
453
454 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
455 continue;
456
457 /* turn off SATA_IRQ if not supported */
458 if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
459 bmdma2 &= ~SIL_DMA_SATA_IRQ;
460
461 if (bmdma2 == 0xffffffff ||
462 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
463 continue;
464
465 sil_host_intr(ap, bmdma2);
466 handled = 1;
467 }
468
469 spin_unlock(&host_set->lock);
470
471 return IRQ_RETVAL(handled);
472}
473
474static void sil_freeze(struct ata_port *ap)
475{
476 void __iomem *mmio_base = ap->host_set->mmio_base;
477 u32 tmp;
478
479 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
480 writel(0, mmio_base + sil_port[ap->port_no].sien);
481
482 /* plug IRQ */
483 tmp = readl(mmio_base + SIL_SYSCFG);
484 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
485 writel(tmp, mmio_base + SIL_SYSCFG);
486 readl(mmio_base + SIL_SYSCFG); /* flush */
487}
488
489static void sil_thaw(struct ata_port *ap)
490{
491 void __iomem *mmio_base = ap->host_set->mmio_base;
492 u32 tmp;
493
494 /* clear IRQ */
495 ata_chk_status(ap);
496 ata_bmdma_irq_clear(ap);
497
498 /* turn on SATA IRQ if supported */
499 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
500 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
501
502 /* turn on IRQ */
503 tmp = readl(mmio_base + SIL_SYSCFG);
504 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
505 writel(tmp, mmio_base + SIL_SYSCFG);
506}
507
508/**
509 * sil_dev_config - Apply device/host-specific errata fixups
510 * @ap: Port containing device to be examined
511 * @dev: Device to be examined
512 *
513 * After the IDENTIFY [PACKET] DEVICE step is complete, and a
514 * device is known to be present, this function is called.
515 * We apply two errata fixups specific to Silicon Image:
516 * a Seagate fixup and a Maxtor fixup.
517 *
518 * For certain Seagate devices, we must limit the maximum sectors
519 * to under 8K.
520 *
521 * For certain Maxtor devices, we must not program the drive
522 * beyond udma5.
523 *
524 * Both fixups are unfairly pessimistic. As soon as I get more
525 * information on these errata, I will create a more exhaustive
526 * list, and apply the fixups to only the specific
527 * devices/hosts/firmwares that need it.
528 *
529 * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted.
530 * The Maxtor quirk is in the blacklist, but I'm keeping the original
531 * pessimistic fix for the following reasons...
532 * - There seems to be less info on it, only one device gleaned off the
533 * Windows driver, maybe only one is affected. More info would be greatly
534 * appreciated.
535 * - But then again UDMA5 is hardly anything to complain about
536 */
537static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
538{
539 unsigned int n, quirks = 0;
540 unsigned char model_num[41];
541
542 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
543
544 for (n = 0; sil_blacklist[n].product; n++)
545 if (!strcmp(sil_blacklist[n].product, model_num)) {
546 quirks = sil_blacklist[n].quirk;
547 break;
548 }
549
550 /* limit requests to 15 sectors */
551 if (slow_down ||
552 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
553 (quirks & SIL_QUIRK_MOD15WRITE))) {
554 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
555 "(mod15write workaround)\n");
556 dev->max_sectors = 15;
557 return;
558 }
559
560 /* limit to udma5 */
561 if (quirks & SIL_QUIRK_UDMA5MAX) {
562 ata_dev_printk(dev, KERN_INFO,
563 "applying Maxtor errata fix %s\n", model_num);
564 dev->udma_mask &= ATA_UDMA5;
565 return;
566 }
567}
568
569static void sil_init_controller(struct pci_dev *pdev,
570 int n_ports, unsigned long host_flags,
571 void __iomem *mmio_base)
572{
573 u8 cls;
574 u32 tmp;
575 int i;
576
577 /* Initialize FIFO PCI bus arbitration */
578 cls = sil_get_device_cache_line(pdev);
579 if (cls) {
580 cls >>= 3;
581 cls++; /* cls = (line_size/8)+1 */
582 for (i = 0; i < n_ports; i++)
583 writew(cls << 8 | cls,
584 mmio_base + sil_port[i].fifo_cfg);
585 } else
586 dev_printk(KERN_WARNING, &pdev->dev,
587 "cache line size not set. Driver may not function\n");
588
589 /* Apply R_ERR on DMA activate FIS errata workaround */
590 if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
591 int cnt;
592
593 for (i = 0, cnt = 0; i < n_ports; i++) {
594 tmp = readl(mmio_base + sil_port[i].sfis_cfg);
595 if ((tmp & 0x3) != 0x01)
596 continue;
597 if (!cnt)
598 dev_printk(KERN_INFO, &pdev->dev,
599 "Applying R_ERR on DMA activate "
600 "FIS errata fix\n");
601 writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
602 cnt++;
603 }
604 }
605
606 if (n_ports == 4) {
607 /* flip the magic "make 4 ports work" bit */
608 tmp = readl(mmio_base + sil_port[2].bmdma);
609 if ((tmp & SIL_INTR_STEERING) == 0)
610 writel(tmp | SIL_INTR_STEERING,
611 mmio_base + sil_port[2].bmdma);
612 }
613}
614
615static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
616{
617 static int printed_version;
618 struct ata_probe_ent *probe_ent = NULL;
619 unsigned long base;
620 void __iomem *mmio_base;
621 int rc;
622 unsigned int i;
623 int pci_dev_busy = 0;
624
625 if (!printed_version++)
626 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
627
628 rc = pci_enable_device(pdev);
629 if (rc)
630 return rc;
631
632 rc = pci_request_regions(pdev, DRV_NAME);
633 if (rc) {
634 pci_dev_busy = 1;
635 goto err_out;
636 }
637
638 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
639 if (rc)
640 goto err_out_regions;
641 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
642 if (rc)
643 goto err_out_regions;
644
645 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
646 if (probe_ent == NULL) {
647 rc = -ENOMEM;
648 goto err_out_regions;
649 }
650
651 INIT_LIST_HEAD(&probe_ent->node);
652 probe_ent->dev = pci_dev_to_dev(pdev);
653 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
654 probe_ent->sht = sil_port_info[ent->driver_data].sht;
655 probe_ent->n_ports = (ent->driver_data == sil_3114) ? 4 : 2;
656 probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
657 probe_ent->mwdma_mask = sil_port_info[ent->driver_data].mwdma_mask;
658 probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
659 probe_ent->irq = pdev->irq;
660 probe_ent->irq_flags = IRQF_SHARED;
661 probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
662
663 mmio_base = pci_iomap(pdev, 5, 0);
664 if (mmio_base == NULL) {
665 rc = -ENOMEM;
666 goto err_out_free_ent;
667 }
668
669 probe_ent->mmio_base = mmio_base;
670
671 base = (unsigned long) mmio_base;
672
673 for (i = 0; i < probe_ent->n_ports; i++) {
674 probe_ent->port[i].cmd_addr = base + sil_port[i].tf;
675 probe_ent->port[i].altstatus_addr =
676 probe_ent->port[i].ctl_addr = base + sil_port[i].ctl;
677 probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma;
678 probe_ent->port[i].scr_addr = base + sil_port[i].scr;
679 ata_std_ports(&probe_ent->port[i]);
680 }
681
682 sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
683 mmio_base);
684
685 pci_set_master(pdev);
686
687 /* FIXME: check ata_device_add return value */
688 ata_device_add(probe_ent);
689 kfree(probe_ent);
690
691 return 0;
692
693err_out_free_ent:
694 kfree(probe_ent);
695err_out_regions:
696 pci_release_regions(pdev);
697err_out:
698 if (!pci_dev_busy)
699 pci_disable_device(pdev);
700 return rc;
701}
702
703static int sil_pci_device_resume(struct pci_dev *pdev)
704{
705 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
706
707 ata_pci_device_do_resume(pdev);
708 sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags,
709 host_set->mmio_base);
710 ata_host_set_resume(host_set);
711
712 return 0;
713}
714
715static int __init sil_init(void)
716{
717 return pci_module_init(&sil_pci_driver);
718}
719
720static void __exit sil_exit(void)
721{
722 pci_unregister_driver(&sil_pci_driver);
723}
724
725
726module_init(sil_init);
727module_exit(sil_exit);
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
deleted file mode 100644
index 3f368c7d3ef9..000000000000
--- a/drivers/scsi/sata_sil24.c
+++ /dev/null
@@ -1,1222 +0,0 @@
1/*
2 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
3 *
4 * Copyright 2005 Tejun Heo
5 *
6 * Based on preview driver from Silicon Image.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h>
30#include <linux/libata.h>
31#include <asm/io.h>
32
33#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.3"
35
36/*
37 * Port request block (PRB) 32 bytes
38 */
39struct sil24_prb {
40 __le16 ctrl;
41 __le16 prot;
42 __le32 rx_cnt;
43 u8 fis[6 * 4];
44};
45
46/*
47 * Scatter gather entry (SGE) 16 bytes
48 */
49struct sil24_sge {
50 __le64 addr;
51 __le32 cnt;
52 __le32 flags;
53};
54
55/*
56 * Port multiplier
57 */
58struct sil24_port_multiplier {
59 __le32 diag;
60 __le32 sactive;
61};
62
63enum {
64 /*
65 * Global controller registers (128 bytes @ BAR0)
66 */
67 /* 32 bit regs */
68 HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
69 HOST_CTRL = 0x40,
70 HOST_IRQ_STAT = 0x44,
71 HOST_PHY_CFG = 0x48,
72 HOST_BIST_CTRL = 0x50,
73 HOST_BIST_PTRN = 0x54,
74 HOST_BIST_STAT = 0x58,
75 HOST_MEM_BIST_STAT = 0x5c,
76 HOST_FLASH_CMD = 0x70,
77 /* 8 bit regs */
78 HOST_FLASH_DATA = 0x74,
79 HOST_TRANSITION_DETECT = 0x75,
80 HOST_GPIO_CTRL = 0x76,
81 HOST_I2C_ADDR = 0x78, /* 32 bit */
82 HOST_I2C_DATA = 0x7c,
83 HOST_I2C_XFER_CNT = 0x7e,
84 HOST_I2C_CTRL = 0x7f,
85
86 /* HOST_SLOT_STAT bits */
87 HOST_SSTAT_ATTN = (1 << 31),
88
89 /* HOST_CTRL bits */
90 HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
91 HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
95 HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */
96
97 /*
98 * Port registers
99 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
100 */
101 PORT_REGS_SIZE = 0x2000,
102
103 PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */
104 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
105
106 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
107 /* 32 bit regs */
108 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
109 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
110 PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
111 PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
112 PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
113 PORT_ACTIVATE_UPPER_ADDR= 0x101c,
114 PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
115 PORT_CMD_ERR = 0x1024, /* command error number */
116 PORT_FIS_CFG = 0x1028,
117 PORT_FIFO_THRES = 0x102c,
118 /* 16 bit regs */
119 PORT_DECODE_ERR_CNT = 0x1040,
120 PORT_DECODE_ERR_THRESH = 0x1042,
121 PORT_CRC_ERR_CNT = 0x1044,
122 PORT_CRC_ERR_THRESH = 0x1046,
123 PORT_HSHK_ERR_CNT = 0x1048,
124 PORT_HSHK_ERR_THRESH = 0x104a,
125 /* 32 bit regs */
126 PORT_PHY_CFG = 0x1050,
127 PORT_SLOT_STAT = 0x1800,
128 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
129 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
130 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
131 PORT_SCONTROL = 0x1f00,
132 PORT_SSTATUS = 0x1f04,
133 PORT_SERROR = 0x1f08,
134 PORT_SACTIVE = 0x1f0c,
135
136 /* PORT_CTRL_STAT bits */
137 PORT_CS_PORT_RST = (1 << 0), /* port reset */
138 PORT_CS_DEV_RST = (1 << 1), /* device reset */
139 PORT_CS_INIT = (1 << 2), /* port initialize */
140 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
141 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
142 PORT_CS_RESUME = (1 << 6), /* port resume */
143 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
144 PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */
145 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
146
147 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
148 /* bits[11:0] are masked */
149 PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
150 PORT_IRQ_ERROR = (1 << 1), /* command execution error */
151 PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
152 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
153 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
154 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
155 PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
156 PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
157 PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
158 PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
159 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
160 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
161
162 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
163 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
164 PORT_IRQ_UNK_FIS,
165
166 /* bits[27:16] are unmasked (raw) */
167 PORT_IRQ_RAW_SHIFT = 16,
168 PORT_IRQ_MASKED_MASK = 0x7ff,
169 PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
170
171 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
172 PORT_IRQ_STEER_SHIFT = 30,
173 PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
174
175 /* PORT_CMD_ERR constants */
176 PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
177 PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
178 PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
179 PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
180 PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
181 PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
182 PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
183 PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
184 PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
185 PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
186 PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
187 PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
188 PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
189 PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
190 PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
191 PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
192 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
193 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
194 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
195 PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
 196 	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
197 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
198
199 /* bits of PRB control field */
200 PRB_CTRL_PROTOCOL = (1 << 0), /* override def. ATA protocol */
201 PRB_CTRL_PACKET_READ = (1 << 4), /* PACKET cmd read */
202 PRB_CTRL_PACKET_WRITE = (1 << 5), /* PACKET cmd write */
203 PRB_CTRL_NIEN = (1 << 6), /* Mask completion irq */
204 PRB_CTRL_SRST = (1 << 7), /* Soft reset request (ign BSY?) */
205
206 /* PRB protocol field */
207 PRB_PROT_PACKET = (1 << 0),
208 PRB_PROT_TCQ = (1 << 1),
209 PRB_PROT_NCQ = (1 << 2),
210 PRB_PROT_READ = (1 << 3),
211 PRB_PROT_WRITE = (1 << 4),
212 PRB_PROT_TRANSPARENT = (1 << 5),
213
214 /*
215 * Other constants
216 */
217 SGE_TRM = (1 << 31), /* Last SGE in chain */
218 SGE_LNK = (1 << 30), /* linked list
219 Points to SGT, not SGE */
220 SGE_DRD = (1 << 29), /* discard data read (/dev/null)
221 data address ignored */
222
223 SIL24_MAX_CMDS = 31,
224
225 /* board id */
226 BID_SIL3124 = 0,
227 BID_SIL3132 = 1,
228 BID_SIL3131 = 2,
229
230 /* host flags */
231 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
232 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
233 ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY,
234 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
235
236 IRQ_STAT_4PORTS = 0xf,
237};
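/*
 * Note on PORT_IRQ_STAT layout: bits 0-11 report events after masking
 * by PORT_IRQ_ENABLE, while bits 16-27 mirror the same events raw.
 * The softreset path below polls the raw copy (via PORT_IRQ_RAW_SHIFT)
 * so it works even while the port's interrupts are masked.
 */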
238
239struct sil24_ata_block {
240 struct sil24_prb prb;
241 struct sil24_sge sge[LIBATA_MAX_PRD];
242};
243
244struct sil24_atapi_block {
245 struct sil24_prb prb;
246 u8 cdb[16];
247 struct sil24_sge sge[LIBATA_MAX_PRD - 1];
248};
249
250union sil24_cmd_block {
251 struct sil24_ata_block ata;
252 struct sil24_atapi_block atapi;
253};
254
255static struct sil24_cerr_info {
256 unsigned int err_mask, action;
257 const char *desc;
258} sil24_cerr_db[] = {
259 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
260 "device error" },
261 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
262 "device error via D2H FIS" },
263 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
264 "device error via SDB FIS" },
265 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
266 "error in data FIS" },
267 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
268 "failed to transmit command FIS" },
269 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
270 "protocol mismatch" },
271 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
 272 				    "data direction mismatch" },
273 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
274 "ran out of SGEs while writing" },
275 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
276 "ran out of SGEs while reading" },
277 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
 278 				    "invalid data direction for ATAPI CDB" },
279 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
 280 				    "SGT not on qword boundary" },
281 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
282 "PCI target abort while fetching SGT" },
283 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
284 "PCI master abort while fetching SGT" },
285 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
286 "PCI parity error while fetching SGT" },
287 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
288 "PRB not on qword boundary" },
289 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
290 "PCI target abort while fetching PRB" },
291 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
292 "PCI master abort while fetching PRB" },
293 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
294 "PCI parity error while fetching PRB" },
295 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
296 "undefined error while transferring data" },
297 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
298 "PCI target abort while transferring data" },
299 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
300 "PCI master abort while transferring data" },
301 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
302 "PCI parity error while transferring data" },
303 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
304 "FIS received while sending service FIS" },
305};
306
307/*
308 * ap->private_data
309 *
 310 * The preview driver always returned 0 for status.  We emulate the
 311 * taskfile here from the FIS captured at the previous interrupt.
312 */
313struct sil24_port_priv {
314 union sil24_cmd_block *cmd_block; /* 32 cmd blocks */
315 dma_addr_t cmd_block_dma; /* DMA base addr for them */
316 struct ata_taskfile tf; /* Cached taskfile registers */
317};
318
319/* ap->host_set->private_data */
320struct sil24_host_priv {
321 void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
322 void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
323};
324
325static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev);
326static u8 sil24_check_status(struct ata_port *ap);
327static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
328static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
329static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
330static void sil24_qc_prep(struct ata_queued_cmd *qc);
331static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
332static void sil24_irq_clear(struct ata_port *ap);
333static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
334static void sil24_freeze(struct ata_port *ap);
335static void sil24_thaw(struct ata_port *ap);
336static void sil24_error_handler(struct ata_port *ap);
337static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
338static int sil24_port_start(struct ata_port *ap);
339static void sil24_port_stop(struct ata_port *ap);
340static void sil24_host_stop(struct ata_host_set *host_set);
341static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
342static int sil24_pci_device_resume(struct pci_dev *pdev);
343
344static const struct pci_device_id sil24_pci_tbl[] = {
345 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
346 { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
347 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
348 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
349 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
350 { } /* terminate list */
351};
352
353static struct pci_driver sil24_pci_driver = {
354 .name = DRV_NAME,
355 .id_table = sil24_pci_tbl,
356 .probe = sil24_init_one,
357 .remove = ata_pci_remove_one, /* safe? */
358 .suspend = ata_pci_device_suspend,
359 .resume = sil24_pci_device_resume,
360};
361
362static struct scsi_host_template sil24_sht = {
363 .module = THIS_MODULE,
364 .name = DRV_NAME,
365 .ioctl = ata_scsi_ioctl,
366 .queuecommand = ata_scsi_queuecmd,
367 .change_queue_depth = ata_scsi_change_queue_depth,
368 .can_queue = SIL24_MAX_CMDS,
369 .this_id = ATA_SHT_THIS_ID,
370 .sg_tablesize = LIBATA_MAX_PRD,
371 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
372 .emulated = ATA_SHT_EMULATED,
373 .use_clustering = ATA_SHT_USE_CLUSTERING,
374 .proc_name = DRV_NAME,
375 .dma_boundary = ATA_DMA_BOUNDARY,
376 .slave_configure = ata_scsi_slave_config,
377 .slave_destroy = ata_scsi_slave_destroy,
378 .bios_param = ata_std_bios_param,
379 .suspend = ata_scsi_device_suspend,
380 .resume = ata_scsi_device_resume,
381};
382
383static const struct ata_port_operations sil24_ops = {
384 .port_disable = ata_port_disable,
385
386 .dev_config = sil24_dev_config,
387
388 .check_status = sil24_check_status,
389 .check_altstatus = sil24_check_status,
390 .dev_select = ata_noop_dev_select,
391
392 .tf_read = sil24_tf_read,
393
394 .qc_prep = sil24_qc_prep,
395 .qc_issue = sil24_qc_issue,
396
397 .irq_handler = sil24_interrupt,
398 .irq_clear = sil24_irq_clear,
399
400 .scr_read = sil24_scr_read,
401 .scr_write = sil24_scr_write,
402
403 .freeze = sil24_freeze,
404 .thaw = sil24_thaw,
405 .error_handler = sil24_error_handler,
406 .post_internal_cmd = sil24_post_internal_cmd,
407
408 .port_start = sil24_port_start,
409 .port_stop = sil24_port_stop,
410 .host_stop = sil24_host_stop,
411};
412
413/*
414 * Use bits 30-31 of host_flags to encode available port numbers.
 415 * Current maximum is 4.
416 */
417#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
418#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
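/*
 * For example (illustrative only): SIL24_NPORTS2FLAG(4) stores 3 in
 * bits 30-31, and SIL24_FLAG2NPORTS() adds the 1 back:
 *
 *	unsigned long flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4);
 *	int nports = SIL24_FLAG2NPORTS(flags);	-> nports == 4
 */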
419
420static struct ata_port_info sil24_port_info[] = {
421 /* sil_3124 */
422 {
423 .sht = &sil24_sht,
424 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
425 SIL24_FLAG_PCIX_IRQ_WOC,
426 .pio_mask = 0x1f, /* pio0-4 */
427 .mwdma_mask = 0x07, /* mwdma0-2 */
428 .udma_mask = 0x3f, /* udma0-5 */
429 .port_ops = &sil24_ops,
430 },
431 /* sil_3132 */
432 {
433 .sht = &sil24_sht,
434 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
435 .pio_mask = 0x1f, /* pio0-4 */
436 .mwdma_mask = 0x07, /* mwdma0-2 */
437 .udma_mask = 0x3f, /* udma0-5 */
438 .port_ops = &sil24_ops,
439 },
440 /* sil_3131/sil_3531 */
441 {
442 .sht = &sil24_sht,
443 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
444 .pio_mask = 0x1f, /* pio0-4 */
445 .mwdma_mask = 0x07, /* mwdma0-2 */
446 .udma_mask = 0x3f, /* udma0-5 */
447 .port_ops = &sil24_ops,
448 },
449};
450
451static int sil24_tag(int tag)
452{
453 if (unlikely(ata_tag_internal(tag)))
454 return 0;
455 return tag;
456}
457
458static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
459{
460 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
461
462 if (dev->cdb_len == 16)
463 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
464 else
465 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
466}
467
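/*
 * The controller writes the received FIS for slot 0 at the start of
 * the port's LRAM; sil24_update_tf() snapshots it into the cached
 * taskfile that sil24_check_status() and sil24_tf_read() report from.
 */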
468static inline void sil24_update_tf(struct ata_port *ap)
469{
470 struct sil24_port_priv *pp = ap->private_data;
471 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
472 struct sil24_prb __iomem *prb = port;
473 u8 fis[6 * 4];
474
475 memcpy_fromio(fis, prb->fis, 6 * 4);
476 ata_tf_from_fis(fis, &pp->tf);
477}
478
479static u8 sil24_check_status(struct ata_port *ap)
480{
481 struct sil24_port_priv *pp = ap->private_data;
482 return pp->tf.command;
483}
484
485static int sil24_scr_map[] = {
486 [SCR_CONTROL] = 0,
487 [SCR_STATUS] = 1,
488 [SCR_ERROR] = 2,
489 [SCR_ACTIVE] = 3,
490};
491
492static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
493{
494 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
495 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
496 void __iomem *addr;
497 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
 498 		return readl(addr);
499 }
500 return 0xffffffffU;
501}
502
503static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
504{
505 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
506 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
507 void __iomem *addr;
508 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
 509 		writel(val, addr);
510 }
511}
512
513static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
514{
515 struct sil24_port_priv *pp = ap->private_data;
516 *tf = pp->tf;
517}
518
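/*
 * Bring the port to a known state: set PORT_CS_INIT, wait (up to 100ms
 * each) for INIT to assert and for the port to come ready; the port is
 * usable only if RDY ends up set with INIT clear.
 */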
519static int sil24_init_port(struct ata_port *ap)
520{
521 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
522 u32 tmp;
523
524 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
525 ata_wait_register(port + PORT_CTRL_STAT,
526 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
527 tmp = ata_wait_register(port + PORT_CTRL_STAT,
528 PORT_CS_RDY, 0, 10, 100);
529
530 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
531 return -EIO;
532 return 0;
533}
534
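/*
 * Softreset on sil24 is itself a command: a PRB with PRB_CTRL_SRST set
 * is activated in slot 0 and the raw COMPLETE/ERROR bits of
 * PORT_IRQ_STAT are polled for the outcome; the device signature is
 * then classified from the updated taskfile.
 */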
535static int sil24_softreset(struct ata_port *ap, unsigned int *class)
536{
537 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
538 struct sil24_port_priv *pp = ap->private_data;
539 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
540 dma_addr_t paddr = pp->cmd_block_dma;
541 u32 mask, irq_stat;
542 const char *reason;
543
544 DPRINTK("ENTER\n");
545
546 if (ata_port_offline(ap)) {
547 DPRINTK("PHY reports no device\n");
548 *class = ATA_DEV_NONE;
549 goto out;
550 }
551
552 /* put the port into known state */
553 if (sil24_init_port(ap)) {
 554 		reason = "port not ready";
555 goto err;
556 }
557
558 /* do SRST */
559 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
560 prb->fis[1] = 0; /* no PM yet */
561
562 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
563 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
564
565 mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
566 irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
567 100, ATA_TMOUT_BOOT / HZ * 1000);
568
569 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
570 irq_stat >>= PORT_IRQ_RAW_SHIFT;
571
572 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
573 if (irq_stat & PORT_IRQ_ERROR)
574 reason = "SRST command error";
575 else
576 reason = "timeout";
577 goto err;
578 }
579
580 sil24_update_tf(ap);
581 *class = ata_dev_classify(&pp->tf);
582
583 if (*class == ATA_DEV_UNKNOWN)
584 *class = ATA_DEV_NONE;
585
586 out:
587 DPRINTK("EXIT, class=%u\n", *class);
588 return 0;
589
590 err:
591 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
592 return -EIO;
593}
594
595static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
596{
597 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
598 const char *reason;
599 int tout_msec, rc;
600 u32 tmp;
601
602 /* sil24 does the right thing(tm) without any protection */
603 sata_set_spd(ap);
604
605 tout_msec = 100;
606 if (ata_port_online(ap))
607 tout_msec = 5000;
608
609 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
610 tmp = ata_wait_register(port + PORT_CTRL_STAT,
611 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
612
613 /* SStatus oscillates between zero and valid status after
614 * DEV_RST, debounce it.
615 */
616 rc = sata_phy_debounce(ap, sata_deb_timing_long);
617 if (rc) {
618 reason = "PHY debouncing failed";
619 goto err;
620 }
621
622 if (tmp & PORT_CS_DEV_RST) {
623 if (ata_port_offline(ap))
624 return 0;
625 reason = "link not ready";
626 goto err;
627 }
628
629 /* Sil24 doesn't store signature FIS after hardreset, so we
630 * can't wait for BSY to clear. Some devices take a long time
631 * to get ready and those devices will choke if we don't wait
632 * for BSY clearance here. Tell libata to perform follow-up
633 * softreset.
634 */
635 return -EAGAIN;
636
637 err:
638 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
639 return -EIO;
640}
641
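/*
 * Fill the SGE array from the scatterlist in order; only the final
 * entry carries SGE_TRM so the controller knows where the chain ends.
 */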
642static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
643 struct sil24_sge *sge)
644{
645 struct scatterlist *sg;
646 unsigned int idx = 0;
647
648 ata_for_each_sg(sg, qc) {
649 sge->addr = cpu_to_le64(sg_dma_address(sg));
650 sge->cnt = cpu_to_le32(sg_dma_len(sg));
651 if (ata_sg_is_last(sg, qc))
652 sge->flags = cpu_to_le32(SGE_TRM);
653 else
654 sge->flags = 0;
655
656 sge++;
657 idx++;
658 }
659}
660
661static void sil24_qc_prep(struct ata_queued_cmd *qc)
662{
663 struct ata_port *ap = qc->ap;
664 struct sil24_port_priv *pp = ap->private_data;
665 union sil24_cmd_block *cb;
666 struct sil24_prb *prb;
667 struct sil24_sge *sge;
668 u16 ctrl = 0;
669
670 cb = &pp->cmd_block[sil24_tag(qc->tag)];
671
672 switch (qc->tf.protocol) {
673 case ATA_PROT_PIO:
674 case ATA_PROT_DMA:
675 case ATA_PROT_NCQ:
676 case ATA_PROT_NODATA:
677 prb = &cb->ata.prb;
678 sge = cb->ata.sge;
679 break;
680
681 case ATA_PROT_ATAPI:
682 case ATA_PROT_ATAPI_DMA:
683 case ATA_PROT_ATAPI_NODATA:
684 prb = &cb->atapi.prb;
685 sge = cb->atapi.sge;
686 memset(cb->atapi.cdb, 0, 32);
687 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
688
689 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
690 if (qc->tf.flags & ATA_TFLAG_WRITE)
691 ctrl = PRB_CTRL_PACKET_WRITE;
692 else
693 ctrl = PRB_CTRL_PACKET_READ;
694 }
695 break;
696
697 default:
698 prb = NULL; /* shut up, gcc */
699 sge = NULL;
700 BUG();
701 }
702
703 prb->ctrl = cpu_to_le16(ctrl);
704 ata_tf_to_fis(&qc->tf, prb->fis, 0);
705
706 if (qc->flags & ATA_QCFLAG_DMAMAP)
707 sil24_fill_sg(qc, sge);
708}
709
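/*
 * Issuing a command is a single 64-bit activation: the PRB's bus
 * address is written to the tag's slot in PORT_CMD_ACTIVATE (low dword
 * first, then high) and the controller fetches and executes the PRB on
 * its own.
 */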
710static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
711{
712 struct ata_port *ap = qc->ap;
713 struct sil24_port_priv *pp = ap->private_data;
714 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
715 unsigned int tag = sil24_tag(qc->tag);
716 dma_addr_t paddr;
717 void __iomem *activate;
718
719 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
720 activate = port + PORT_CMD_ACTIVATE + tag * 8;
721
722 writel((u32)paddr, activate);
723 writel((u64)paddr >> 32, activate + 4);
724
725 return 0;
726}
727
728static void sil24_irq_clear(struct ata_port *ap)
729{
730 /* unused */
731}
732
733static void sil24_freeze(struct ata_port *ap)
734{
735 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
736
737 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
738 * PORT_IRQ_ENABLE instead.
739 */
740 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
741}
742
743static void sil24_thaw(struct ata_port *ap)
744{
745 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
746 u32 tmp;
747
748 /* clear IRQ */
749 tmp = readl(port + PORT_IRQ_STAT);
750 writel(tmp, port + PORT_IRQ_STAT);
751
752 /* turn IRQ back on */
753 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
754}
755
756static void sil24_error_intr(struct ata_port *ap)
757{
758 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
759 struct ata_eh_info *ehi = &ap->eh_info;
760 int freeze = 0;
761 u32 irq_stat;
762
763 /* on error, we need to clear IRQ explicitly */
764 irq_stat = readl(port + PORT_IRQ_STAT);
765 writel(irq_stat, port + PORT_IRQ_STAT);
766
767 /* first, analyze and record host port events */
768 ata_ehi_clear_desc(ehi);
769
770 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
771
772 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
773 ata_ehi_hotplugged(ehi);
774 ata_ehi_push_desc(ehi, ", %s",
775 irq_stat & PORT_IRQ_PHYRDY_CHG ?
776 "PHY RDY changed" : "device exchanged");
777 freeze = 1;
778 }
779
780 if (irq_stat & PORT_IRQ_UNK_FIS) {
781 ehi->err_mask |= AC_ERR_HSM;
782 ehi->action |= ATA_EH_SOFTRESET;
 783 		ata_ehi_push_desc(ehi, ", unknown FIS");
784 freeze = 1;
785 }
786
787 /* deal with command error */
788 if (irq_stat & PORT_IRQ_ERROR) {
789 struct sil24_cerr_info *ci = NULL;
790 unsigned int err_mask = 0, action = 0;
791 struct ata_queued_cmd *qc;
792 u32 cerr;
793
794 /* analyze CMD_ERR */
795 cerr = readl(port + PORT_CMD_ERR);
796 if (cerr < ARRAY_SIZE(sil24_cerr_db))
797 ci = &sil24_cerr_db[cerr];
798
799 if (ci && ci->desc) {
800 err_mask |= ci->err_mask;
801 action |= ci->action;
802 ata_ehi_push_desc(ehi, ", %s", ci->desc);
803 } else {
804 err_mask |= AC_ERR_OTHER;
805 action |= ATA_EH_SOFTRESET;
806 ata_ehi_push_desc(ehi, ", unknown command error %d",
807 cerr);
808 }
809
810 /* record error info */
811 qc = ata_qc_from_tag(ap, ap->active_tag);
812 if (qc) {
813 sil24_update_tf(ap);
814 qc->err_mask |= err_mask;
815 } else
816 ehi->err_mask |= err_mask;
817
818 ehi->action |= action;
819 }
820
821 /* freeze or abort */
822 if (freeze)
823 ata_port_freeze(ap);
824 else
825 ata_port_abort(ap);
826}
827
828static void sil24_finish_qc(struct ata_queued_cmd *qc)
829{
830 if (qc->flags & ATA_QCFLAG_RESULT_TF)
831 sil24_update_tf(qc->ap);
832}
833
834static inline void sil24_host_intr(struct ata_port *ap)
835{
836 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
837 u32 slot_stat, qc_active;
838 int rc;
839
840 slot_stat = readl(port + PORT_SLOT_STAT);
841
842 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
843 sil24_error_intr(ap);
844 return;
845 }
846
847 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
848 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
849
850 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
851 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
852 if (rc > 0)
853 return;
854 if (rc < 0) {
855 struct ata_eh_info *ehi = &ap->eh_info;
856 ehi->err_mask |= AC_ERR_HSM;
857 ehi->action |= ATA_EH_SOFTRESET;
858 ata_port_freeze(ap);
859 return;
860 }
861
862 if (ata_ratelimit())
863 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
864 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
865 slot_stat, ap->active_tag, ap->sactive);
866}
867
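/*
 * Top-level IRQ handler: the low bits of HOST_IRQ_STAT indicate which
 * ports need service, and an all-ones readback means the chip has
 * dropped off the bus (PCI fault or hot removal).
 */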
868static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
869{
870 struct ata_host_set *host_set = dev_instance;
871 struct sil24_host_priv *hpriv = host_set->private_data;
872 unsigned handled = 0;
873 u32 status;
874 int i;
875
876 status = readl(hpriv->host_base + HOST_IRQ_STAT);
877
878 if (status == 0xffffffff) {
879 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
880 "PCI fault or device removal?\n");
881 goto out;
882 }
883
884 if (!(status & IRQ_STAT_4PORTS))
885 goto out;
886
887 spin_lock(&host_set->lock);
888
889 for (i = 0; i < host_set->n_ports; i++)
890 if (status & (1 << i)) {
891 struct ata_port *ap = host_set->ports[i];
892 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 893 				sil24_host_intr(ap);
894 handled++;
895 } else
896 printk(KERN_ERR DRV_NAME
897 ": interrupt from disabled port %d\n", i);
898 }
899
900 spin_unlock(&host_set->lock);
901 out:
902 return IRQ_RETVAL(handled);
903}
904
905static void sil24_error_handler(struct ata_port *ap)
906{
907 struct ata_eh_context *ehc = &ap->eh_context;
908
909 if (sil24_init_port(ap)) {
910 ata_eh_freeze_port(ap);
911 ehc->i.action |= ATA_EH_HARDRESET;
912 }
913
914 /* perform recovery */
915 ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
916 ata_std_postreset);
917}
918
919static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
920{
921 struct ata_port *ap = qc->ap;
922
923 if (qc->flags & ATA_QCFLAG_FAILED)
924 qc->err_mask |= AC_ERR_OTHER;
925
926 /* make DMA engine forget about the failed command */
927 if (qc->err_mask)
928 sil24_init_port(ap);
929}
930
931static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
932{
933 const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
934
935 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
936}
937
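/*
 * Per-port setup: one DMA-coherent array of SIL24_MAX_CMDS command
 * blocks is allocated up front; sil24_qc_prep() indexes into it by tag
 * and sil24_qc_issue() hands the matching bus address to the hardware.
 */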
938static int sil24_port_start(struct ata_port *ap)
939{
940 struct device *dev = ap->host_set->dev;
941 struct sil24_port_priv *pp;
942 union sil24_cmd_block *cb;
943 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
944 dma_addr_t cb_dma;
945 int rc = -ENOMEM;
946
947 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
948 if (!pp)
949 goto err_out;
950
951 pp->tf.command = ATA_DRDY;
952
953 cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
954 if (!cb)
955 goto err_out_pp;
956 memset(cb, 0, cb_size);
957
958 rc = ata_pad_alloc(ap, dev);
959 if (rc)
960 goto err_out_pad;
961
962 pp->cmd_block = cb;
963 pp->cmd_block_dma = cb_dma;
964
965 ap->private_data = pp;
966
967 return 0;
968
969err_out_pad:
970 sil24_cblk_free(pp, dev);
971err_out_pp:
972 kfree(pp);
973err_out:
974 return rc;
975}
976
977static void sil24_port_stop(struct ata_port *ap)
978{
979 struct device *dev = ap->host_set->dev;
980 struct sil24_port_priv *pp = ap->private_data;
981
982 sil24_cblk_free(pp, dev);
983 ata_pad_free(ap, dev);
984 kfree(pp);
985}
986
987static void sil24_host_stop(struct ata_host_set *host_set)
988{
989 struct sil24_host_priv *hpriv = host_set->private_data;
990 struct pci_dev *pdev = to_pci_dev(host_set->dev);
991
992 pci_iounmap(pdev, hpriv->host_base);
993 pci_iounmap(pdev, hpriv->port_base);
994 kfree(hpriv);
995}
996
997static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
998 unsigned long host_flags,
999 void __iomem *host_base,
1000 void __iomem *port_base)
1001{
1002 u32 tmp;
1003 int i;
1004
1005 /* GPIO off */
1006 writel(0, host_base + HOST_FLASH_CMD);
1007
1008 /* clear global reset & mask interrupts during initialization */
1009 writel(0, host_base + HOST_CTRL);
1010
1011 /* init ports */
1012 for (i = 0; i < n_ports; i++) {
1013 void __iomem *port = port_base + i * PORT_REGS_SIZE;
1014
1015 /* Initial PHY setting */
1016 writel(0x20c, port + PORT_PHY_CFG);
1017
1018 /* Clear port RST */
1019 tmp = readl(port + PORT_CTRL_STAT);
1020 if (tmp & PORT_CS_PORT_RST) {
1021 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
1022 tmp = ata_wait_register(port + PORT_CTRL_STAT,
1023 PORT_CS_PORT_RST,
1024 PORT_CS_PORT_RST, 10, 100);
1025 if (tmp & PORT_CS_PORT_RST)
1026 dev_printk(KERN_ERR, &pdev->dev,
1027 "failed to clear port RST\n");
1028 }
1029
1030 /* Configure IRQ WoC */
1031 if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1032 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1033 else
1034 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1035
1036 /* Zero error counters. */
1037 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
1038 writel(0x8000, port + PORT_CRC_ERR_THRESH);
1039 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
1040 writel(0x0000, port + PORT_DECODE_ERR_CNT);
1041 writel(0x0000, port + PORT_CRC_ERR_CNT);
1042 writel(0x0000, port + PORT_HSHK_ERR_CNT);
1043
1044 /* Always use 64bit activation */
1045 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1046
1047 /* Clear port multiplier enable and resume bits */
1048 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
1049 }
1050
1051 /* Turn on interrupts */
1052 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
1053}
1054
1055static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1056{
1057 static int printed_version = 0;
1058 unsigned int board_id = (unsigned int)ent->driver_data;
1059 struct ata_port_info *pinfo = &sil24_port_info[board_id];
1060 struct ata_probe_ent *probe_ent = NULL;
1061 struct sil24_host_priv *hpriv = NULL;
1062 void __iomem *host_base = NULL;
1063 void __iomem *port_base = NULL;
1064 int i, rc;
1065 u32 tmp;
1066
1067 if (!printed_version++)
1068 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1069
1070 rc = pci_enable_device(pdev);
1071 if (rc)
1072 return rc;
1073
1074 rc = pci_request_regions(pdev, DRV_NAME);
1075 if (rc)
1076 goto out_disable;
1077
1078 rc = -ENOMEM;
1079 /* map mmio registers */
1080 host_base = pci_iomap(pdev, 0, 0);
1081 if (!host_base)
1082 goto out_free;
1083 port_base = pci_iomap(pdev, 2, 0);
1084 if (!port_base)
1085 goto out_free;
1086
1087 /* allocate & init probe_ent and hpriv */
1088 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
1089 if (!probe_ent)
1090 goto out_free;
1091
1092 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
1093 if (!hpriv)
1094 goto out_free;
1095
1096 probe_ent->dev = pci_dev_to_dev(pdev);
1097 INIT_LIST_HEAD(&probe_ent->node);
1098
1099 probe_ent->sht = pinfo->sht;
1100 probe_ent->host_flags = pinfo->host_flags;
1101 probe_ent->pio_mask = pinfo->pio_mask;
1102 probe_ent->mwdma_mask = pinfo->mwdma_mask;
1103 probe_ent->udma_mask = pinfo->udma_mask;
1104 probe_ent->port_ops = pinfo->port_ops;
1105 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
1106
1107 probe_ent->irq = pdev->irq;
1108 probe_ent->irq_flags = IRQF_SHARED;
1109 probe_ent->private_data = hpriv;
1110
1111 hpriv->host_base = host_base;
1112 hpriv->port_base = port_base;
1113
1114 /*
1115 * Configure the device
1116 */
1117 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1118 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1119 if (rc) {
1120 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1121 if (rc) {
1122 dev_printk(KERN_ERR, &pdev->dev,
1123 "64-bit DMA enable failed\n");
1124 goto out_free;
1125 }
1126 }
1127 } else {
1128 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1129 if (rc) {
1130 dev_printk(KERN_ERR, &pdev->dev,
1131 "32-bit DMA enable failed\n");
1132 goto out_free;
1133 }
1134 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1135 if (rc) {
1136 dev_printk(KERN_ERR, &pdev->dev,
1137 "32-bit consistent DMA enable failed\n");
1138 goto out_free;
1139 }
1140 }
1141
1142 /* Apply workaround for completion IRQ loss on PCI-X errata */
1143 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1144 tmp = readl(host_base + HOST_CTRL);
1145 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
1146 			dev_printk(KERN_INFO, &pdev->dev,
1147 				   "Applying fix for completion IRQ "
1148 				   "loss on PCI-X errata\n");
1149 else
1150 probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1151 }
1152
1153 for (i = 0; i < probe_ent->n_ports; i++) {
1154 unsigned long portu =
1155 (unsigned long)port_base + i * PORT_REGS_SIZE;
1156
1157 probe_ent->port[i].cmd_addr = portu;
1158 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
1159
1160 ata_std_ports(&probe_ent->port[i]);
1161 }
1162
1163 sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
1164 host_base, port_base);
1165
1166 pci_set_master(pdev);
1167
1168 /* FIXME: check ata_device_add return value */
1169 ata_device_add(probe_ent);
1170
1171 kfree(probe_ent);
1172 return 0;
1173
1174 out_free:
1175 if (host_base)
1176 pci_iounmap(pdev, host_base);
1177 if (port_base)
1178 pci_iounmap(pdev, port_base);
1179 kfree(probe_ent);
1180 kfree(hpriv);
1181 pci_release_regions(pdev);
1182 out_disable:
1183 pci_disable_device(pdev);
1184 return rc;
1185}
1186
1187static int sil24_pci_device_resume(struct pci_dev *pdev)
1188{
1189 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
1190 struct sil24_host_priv *hpriv = host_set->private_data;
1191
1192 ata_pci_device_do_resume(pdev);
1193
1194 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
1195 writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
1196
1197 sil24_init_controller(pdev, host_set->n_ports,
1198 host_set->ports[0]->flags,
1199 hpriv->host_base, hpriv->port_base);
1200
1201 ata_host_set_resume(host_set);
1202
1203 return 0;
1204}
1205
1206static int __init sil24_init(void)
1207{
1208 return pci_module_init(&sil24_pci_driver);
1209}
1210
1211static void __exit sil24_exit(void)
1212{
1213 pci_unregister_driver(&sil24_pci_driver);
1214}
1215
1216MODULE_AUTHOR("Tejun Heo");
1217MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
1218MODULE_LICENSE("GPL");
1219MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
1220
1221module_init(sil24_init);
1222module_exit(sil24_exit);
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
deleted file mode 100644
index ee6b5df41d30..000000000000
--- a/drivers/scsi/sata_sis.c
+++ /dev/null
@@ -1,347 +0,0 @@
1/*
2 * sata_sis.c - Silicon Integrated Systems SATA
3 *
4 * Maintained by: Uwe Koziolek
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 Uwe Koziolek
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/device.h>
41#include <scsi/scsi_host.h>
42#include <linux/libata.h>
43
44#define DRV_NAME "sata_sis"
45#define DRV_VERSION "0.6"
46
47enum {
48 sis_180 = 0,
49 SIS_SCR_PCI_BAR = 5,
50
51 /* PCI configuration registers */
52 SIS_GENCTL = 0x54, /* IDE General Control register */
53 SIS_SCR_BASE = 0xc0, /* sata0 phy SCR registers */
54 SIS180_SATA1_OFS = 0x10, /* offset from sata0->sata1 phy regs */
55 SIS182_SATA1_OFS = 0x20, /* offset from sata0->sata1 phy regs */
56 SIS_PMR = 0x90, /* port mapping register */
57 SIS_PMR_COMBINED = 0x30,
58
59 /* random bits */
60 SIS_FLAG_CFGSCR = (1 << 30), /* host flag: SCRs via PCI cfg */
61
62 GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */
63};
64
65static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
66static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg);
67static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
68
69static const struct pci_device_id sis_pci_tbl[] = {
70 { PCI_VENDOR_ID_SI, 0x180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
71 { PCI_VENDOR_ID_SI, 0x181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
72 { PCI_VENDOR_ID_SI, 0x182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
73 { } /* terminate list */
74};
75
76
77static struct pci_driver sis_pci_driver = {
78 .name = DRV_NAME,
79 .id_table = sis_pci_tbl,
80 .probe = sis_init_one,
81 .remove = ata_pci_remove_one,
82};
83
84static struct scsi_host_template sis_sht = {
85 .module = THIS_MODULE,
86 .name = DRV_NAME,
87 .ioctl = ata_scsi_ioctl,
88 .queuecommand = ata_scsi_queuecmd,
89 .can_queue = ATA_DEF_QUEUE,
90 .this_id = ATA_SHT_THIS_ID,
91 .sg_tablesize = ATA_MAX_PRD,
92 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
93 .emulated = ATA_SHT_EMULATED,
94 .use_clustering = ATA_SHT_USE_CLUSTERING,
95 .proc_name = DRV_NAME,
96 .dma_boundary = ATA_DMA_BOUNDARY,
97 .slave_configure = ata_scsi_slave_config,
98 .slave_destroy = ata_scsi_slave_destroy,
99 .bios_param = ata_std_bios_param,
100};
101
102static const struct ata_port_operations sis_ops = {
103 .port_disable = ata_port_disable,
104 .tf_load = ata_tf_load,
105 .tf_read = ata_tf_read,
106 .check_status = ata_check_status,
107 .exec_command = ata_exec_command,
108 .dev_select = ata_std_dev_select,
109 .bmdma_setup = ata_bmdma_setup,
110 .bmdma_start = ata_bmdma_start,
111 .bmdma_stop = ata_bmdma_stop,
112 .bmdma_status = ata_bmdma_status,
113 .qc_prep = ata_qc_prep,
114 .qc_issue = ata_qc_issue_prot,
115 .data_xfer = ata_pio_data_xfer,
116 .freeze = ata_bmdma_freeze,
117 .thaw = ata_bmdma_thaw,
118 .error_handler = ata_bmdma_error_handler,
119 .post_internal_cmd = ata_bmdma_post_internal_cmd,
120 .irq_handler = ata_interrupt,
121 .irq_clear = ata_bmdma_irq_clear,
122 .scr_read = sis_scr_read,
123 .scr_write = sis_scr_write,
124 .port_start = ata_port_start,
125 .port_stop = ata_port_stop,
126 .host_stop = ata_host_stop,
127};
128
129static struct ata_port_info sis_port_info = {
130 .sht = &sis_sht,
131 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
132 .pio_mask = 0x1f,
133 .mwdma_mask = 0x7,
134 .udma_mask = 0x7f,
135 .port_ops = &sis_ops,
136};
137
138
139MODULE_AUTHOR("Uwe Koziolek");
 140MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
141MODULE_LICENSE("GPL");
142MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
143MODULE_VERSION(DRV_VERSION);
144
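/*
 * SCRs live in PCI config space starting at SIS_SCR_BASE (0xc0); the
 * second port's bank is offset by 0x10 (SiS 180/181) or 0x20 (SiS 182).
 * For example, SCR_STATUS (sc_reg 0) of port 1 on a 182 lands at
 * 0xc0 + 4 * 0 + 0x20 = 0xe0.
 */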
145static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg, int device)
146{
147 unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
148
149 if (port_no) {
150 if (device == 0x182)
151 addr += SIS182_SATA1_OFS;
152 else
153 addr += SIS180_SATA1_OFS;
154 }
155
156 return addr;
157}
158
159static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
160{
161 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
162 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
163 u32 val, val2 = 0;
164 u8 pmr;
165
166 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
167 return 0xffffffff;
168
169 pci_read_config_byte(pdev, SIS_PMR, &pmr);
170
171 pci_read_config_dword(pdev, cfg_addr, &val);
172
173 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
174 pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
175
176 return val|val2;
177}
178
179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
180{
181 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
182 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device);
183 u8 pmr;
184
185 if (scr == SCR_ERROR) /* doesn't exist in PCI cfg space */
186 return;
187
188 pci_read_config_byte(pdev, SIS_PMR, &pmr);
189
190 pci_write_config_dword(pdev, cfg_addr, val);
191
192 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
193 pci_write_config_dword(pdev, cfg_addr+0x10, val);
194}
195
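/*
 * In combined mode (and always on the 182) each SCR is mirrored in a
 * second bank 0x10 higher: reads OR the two values together and writes
 * are duplicated to both banks.
 */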
196static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
197{
198 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
199 u32 val, val2 = 0;
200 u8 pmr;
201
202 if (sc_reg > SCR_CONTROL)
203 return 0xffffffffU;
204
205 if (ap->flags & SIS_FLAG_CFGSCR)
206 return sis_scr_cfg_read(ap, sc_reg);
207
208 pci_read_config_byte(pdev, SIS_PMR, &pmr);
209
210 val = inl(ap->ioaddr.scr_addr + (sc_reg * 4));
211
212 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
213 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
214
215 return val | val2;
216}
217
218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
219{
220 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
221 u8 pmr;
222
223 if (sc_reg > SCR_CONTROL)
224 return;
225
226 pci_read_config_byte(pdev, SIS_PMR, &pmr);
227
228 if (ap->flags & SIS_FLAG_CFGSCR)
229 sis_scr_cfg_write(ap, sc_reg, val);
230 else {
231 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
232 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
233 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
234 }
235}
236
237static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
238{
239 static int printed_version;
240 struct ata_probe_ent *probe_ent = NULL;
241 int rc;
242 u32 genctl;
243 struct ata_port_info *ppi;
244 int pci_dev_busy = 0;
245 u8 pmr;
246 u8 port2_start;
247
248 if (!printed_version++)
249 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
250
251 rc = pci_enable_device(pdev);
252 if (rc)
253 return rc;
254
255 rc = pci_request_regions(pdev, DRV_NAME);
256 if (rc) {
257 pci_dev_busy = 1;
258 goto err_out;
259 }
260
261 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
262 if (rc)
263 goto err_out_regions;
264 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
265 if (rc)
266 goto err_out_regions;
267
268 ppi = &sis_port_info;
269 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
270 if (!probe_ent) {
271 rc = -ENOMEM;
272 goto err_out_regions;
273 }
274
275 /* check and see if the SCRs are in IO space or PCI cfg space */
276 pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
277 if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
278 probe_ent->host_flags |= SIS_FLAG_CFGSCR;
279
280 /* if hardware thinks SCRs are in IO space, but there are
281 * no IO resources assigned, change to PCI cfg space.
282 */
283 if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) &&
284 ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
285 (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
286 genctl &= ~GENCTL_IOMAPPED_SCR;
287 pci_write_config_dword(pdev, SIS_GENCTL, genctl);
288 probe_ent->host_flags |= SIS_FLAG_CFGSCR;
289 }
290
291 pci_read_config_byte(pdev, SIS_PMR, &pmr);
292 if (ent->device != 0x182) {
293 if ((pmr & SIS_PMR_COMBINED) == 0) {
294 dev_printk(KERN_INFO, &pdev->dev,
295 "Detected SiS 180/181 chipset in SATA mode\n");
296 port2_start = 64;
297 }
298 else {
299 dev_printk(KERN_INFO, &pdev->dev,
300 "Detected SiS 180/181 chipset in combined mode\n");
 301 			port2_start = 0;
302 }
303 }
304 else {
305 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n");
306 port2_start = 0x20;
307 }
308
309 if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) {
310 probe_ent->port[0].scr_addr =
311 pci_resource_start(pdev, SIS_SCR_PCI_BAR);
312 probe_ent->port[1].scr_addr =
313 pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start;
314 }
315
316 pci_set_master(pdev);
317 pci_intx(pdev, 1);
318
319 /* FIXME: check ata_device_add return value */
320 ata_device_add(probe_ent);
321 kfree(probe_ent);
322
323 return 0;
324
325err_out_regions:
326 pci_release_regions(pdev);
327
328err_out:
329 if (!pci_dev_busy)
330 pci_disable_device(pdev);
331 return rc;
332
333}
334
335static int __init sis_init(void)
336{
337 return pci_module_init(&sis_pci_driver);
338}
339
340static void __exit sis_exit(void)
341{
342 pci_unregister_driver(&sis_pci_driver);
343}
344
345module_init(sis_init);
346module_exit(sis_exit);
347
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
deleted file mode 100644
index 6b70c3c76dfd..000000000000
--- a/drivers/scsi/sata_svw.c
+++ /dev/null
@@ -1,508 +0,0 @@
1/*
2 * sata_svw.c - ServerWorks / Apple K2 SATA
3 *
4 * Maintained by: Benjamin Herrenschmidt <benh@kernel.crashing.org> and
5 * Jeff Garzik <jgarzik@pobox.com>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 *
11 * Bits from Jeff Garzik, Copyright RedHat, Inc.
12 *
13 * This driver probably works with non-Apple versions of the
14 * Broadcom chipset...
15 *
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; see the file COPYING. If not, write to
29 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
30 *
31 *
32 * libata documentation is available via 'make {ps|pdf}docs',
33 * as Documentation/DocBook/libata.*
34 *
35 * Hardware documentation available under NDA.
36 *
37 */
38
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
46#include <linux/device.h>
47#include <scsi/scsi_host.h>
48#include <linux/libata.h>
49
50#ifdef CONFIG_PPC_OF
51#include <asm/prom.h>
52#include <asm/pci-bridge.h>
53#endif /* CONFIG_PPC_OF */
54
55#define DRV_NAME "sata_svw"
56#define DRV_VERSION "2.0"
57
58enum {
59 /* Taskfile registers offsets */
60 K2_SATA_TF_CMD_OFFSET = 0x00,
61 K2_SATA_TF_DATA_OFFSET = 0x00,
62 K2_SATA_TF_ERROR_OFFSET = 0x04,
63 K2_SATA_TF_NSECT_OFFSET = 0x08,
64 K2_SATA_TF_LBAL_OFFSET = 0x0c,
65 K2_SATA_TF_LBAM_OFFSET = 0x10,
66 K2_SATA_TF_LBAH_OFFSET = 0x14,
67 K2_SATA_TF_DEVICE_OFFSET = 0x18,
68 K2_SATA_TF_CMDSTAT_OFFSET = 0x1c,
69 K2_SATA_TF_CTL_OFFSET = 0x20,
70
71 /* DMA base */
72 K2_SATA_DMA_CMD_OFFSET = 0x30,
73
74 /* SCRs base */
75 K2_SATA_SCR_STATUS_OFFSET = 0x40,
76 K2_SATA_SCR_ERROR_OFFSET = 0x44,
77 K2_SATA_SCR_CONTROL_OFFSET = 0x48,
78
79 /* Others */
80 K2_SATA_SICR1_OFFSET = 0x80,
81 K2_SATA_SICR2_OFFSET = 0x84,
82 K2_SATA_SIM_OFFSET = 0x88,
83
84 /* Port stride */
85 K2_SATA_PORT_OFFSET = 0x100,
86};
87
88static u8 k2_stat_check_status(struct ata_port *ap);
89
90
91static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
92{
93 if (sc_reg > SCR_CONTROL)
94 return 0xffffffffU;
  95 	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
96}
97
98
99static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
100 u32 val)
101{
102 if (sc_reg > SCR_CONTROL)
103 return;
 104 	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
105}
106
107
108static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
109{
110 struct ata_ioports *ioaddr = &ap->ioaddr;
111 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
112
113 if (tf->ctl != ap->last_ctl) {
114 writeb(tf->ctl, ioaddr->ctl_addr);
115 ap->last_ctl = tf->ctl;
116 ata_wait_idle(ap);
117 }
118 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
119 writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
120 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
121 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
122 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
123 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
124 } else if (is_addr) {
125 writew(tf->feature, ioaddr->feature_addr);
126 writew(tf->nsect, ioaddr->nsect_addr);
127 writew(tf->lbal, ioaddr->lbal_addr);
128 writew(tf->lbam, ioaddr->lbam_addr);
129 writew(tf->lbah, ioaddr->lbah_addr);
130 }
131
132 if (tf->flags & ATA_TFLAG_DEVICE)
133 writeb(tf->device, ioaddr->device_addr);
134
135 ata_wait_idle(ap);
136}
137
138
139static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
140{
141 struct ata_ioports *ioaddr = &ap->ioaddr;
142 u16 nsect, lbal, lbam, lbah, feature;
143
144 tf->command = k2_stat_check_status(ap);
145 tf->device = readw(ioaddr->device_addr);
146 feature = readw(ioaddr->error_addr);
147 nsect = readw(ioaddr->nsect_addr);
148 lbal = readw(ioaddr->lbal_addr);
149 lbam = readw(ioaddr->lbam_addr);
150 lbah = readw(ioaddr->lbah_addr);
151
152 tf->feature = feature;
153 tf->nsect = nsect;
154 tf->lbal = lbal;
155 tf->lbam = lbam;
156 tf->lbah = lbah;
157
158 if (tf->flags & ATA_TFLAG_LBA48) {
159 tf->hob_feature = feature >> 8;
160 tf->hob_nsect = nsect >> 8;
161 tf->hob_lbal = lbal >> 8;
162 tf->hob_lbam = lbam >> 8;
163 tf->hob_lbah = lbah >> 8;
164 }
165}
166
167/**
168 * k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
169 * @qc: Info associated with this ATA transaction.
170 *
171 * LOCKING:
172 * spin_lock_irqsave(host_set lock)
173 */
174
175static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
176{
177 struct ata_port *ap = qc->ap;
178 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
179 u8 dmactl;
 180 	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
181 /* load PRD table addr. */
182 mb(); /* make sure PRD table writes are visible to controller */
183 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
184
185 /* specify data direction, triple-check start bit is clear */
186 dmactl = readb(mmio + ATA_DMA_CMD);
187 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
188 if (!rw)
189 dmactl |= ATA_DMA_WR;
190 writeb(dmactl, mmio + ATA_DMA_CMD);
191
 192 	/* issue r/w command if this is not an ATA DMA command */
193 if (qc->tf.protocol != ATA_PROT_DMA)
194 ap->ops->exec_command(ap, &qc->tf);
195}
196
197/**
198 * k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
199 * @qc: Info associated with this ATA transaction.
200 *
201 * LOCKING:
202 * spin_lock_irqsave(host_set lock)
203 */
204
205static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
206{
207 struct ata_port *ap = qc->ap;
 208 	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
209 u8 dmactl;
210
211 /* start host DMA transaction */
212 dmactl = readb(mmio + ATA_DMA_CMD);
213 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
 214 	/* There is a race condition in certain SATA controllers that shows
 215 	 * up when the r/w command is issued to the controller before the
 216 	 * host DMA is started.  On a read command, the controller may forward
 217 	 * the command to the drive before it sees the DMA start.  With very
 218 	 * fast drives, or when the request hits in the drive cache, the
 219 	 * drive can return part or all of the requested data before the DMA
 220 	 * start is issued.  The controller then does not know what to do with
 221 	 * the data.  In the worst case, when all the data is returned, the
 222 	 * controller can hang; in other cases it can return partial data,
 223 	 * resulting in data corruption.  This has been seen on PPC systems
 224 	 * and can also appear on a system with very fast disks where the
 225 	 * SATA controller sits behind a number of bridges, making the
 226 	 * latency between the r/w command and the start command significant.
 227 	 */
 228 	/* issue r/w command after the DMA start, if this is an ATA DMA command */
229 if (qc->tf.protocol == ATA_PROT_DMA)
230 ap->ops->exec_command(ap, &qc->tf);
231}
232
233
234static u8 k2_stat_check_status(struct ata_port *ap)
235{
 236 	return readl((void __iomem *) ap->ioaddr.status_addr);
237}
238
239#ifdef CONFIG_PPC_OF
240/*
241 * k2_sata_proc_info
242 * inout : decides on the direction of the dataflow and the meaning of the
243 * variables
244 * buffer: If inout==FALSE data is being written to it else read from it
245 * *start: If inout==FALSE start of the valid data in the buffer
246 * offset: If inout==FALSE offset from the beginning of the imaginary file
247 * from which we start writing into the buffer
248 * length: If inout==FALSE max number of bytes to be written into the buffer
249 * else number of bytes in the buffer
250 */
251static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
252 off_t offset, int count, int inout)
253{
254 struct ata_port *ap;
255 struct device_node *np;
256 int len, index;
257
258 /* Find the ata_port */
259 ap = ata_shost_to_port(shost);
260 if (ap == NULL)
261 return 0;
262
263 /* Find the OF node for the PCI device proper */
264 np = pci_device_to_OF_node(to_pci_dev(ap->host_set->dev));
265 if (np == NULL)
266 return 0;
267
268 /* Match it to a port node */
269 index = (ap == ap->host_set->ports[0]) ? 0 : 1;
270 for (np = np->child; np != NULL; np = np->sibling) {
271 const u32 *reg = get_property(np, "reg", NULL);
272 if (!reg)
273 continue;
274 if (index == *reg)
275 break;
276 }
277 if (np == NULL)
278 return 0;
279
280 len = sprintf(page, "devspec: %s\n", np->full_name);
281
282 return len;
283}
284#endif /* CONFIG_PPC_OF */
285
286
287static struct scsi_host_template k2_sata_sht = {
288 .module = THIS_MODULE,
289 .name = DRV_NAME,
290 .ioctl = ata_scsi_ioctl,
291 .queuecommand = ata_scsi_queuecmd,
292 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD,
295 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
296 .emulated = ATA_SHT_EMULATED,
297 .use_clustering = ATA_SHT_USE_CLUSTERING,
298 .proc_name = DRV_NAME,
299 .dma_boundary = ATA_DMA_BOUNDARY,
300 .slave_configure = ata_scsi_slave_config,
301 .slave_destroy = ata_scsi_slave_destroy,
302#ifdef CONFIG_PPC_OF
303 .proc_info = k2_sata_proc_info,
304#endif
305 .bios_param = ata_std_bios_param,
306};
307
308
309static const struct ata_port_operations k2_sata_ops = {
310 .port_disable = ata_port_disable,
311 .tf_load = k2_sata_tf_load,
312 .tf_read = k2_sata_tf_read,
313 .check_status = k2_stat_check_status,
314 .exec_command = ata_exec_command,
315 .dev_select = ata_std_dev_select,
316 .bmdma_setup = k2_bmdma_setup_mmio,
317 .bmdma_start = k2_bmdma_start_mmio,
318 .bmdma_stop = ata_bmdma_stop,
319 .bmdma_status = ata_bmdma_status,
320 .qc_prep = ata_qc_prep,
321 .qc_issue = ata_qc_issue_prot,
322 .data_xfer = ata_mmio_data_xfer,
323 .freeze = ata_bmdma_freeze,
324 .thaw = ata_bmdma_thaw,
325 .error_handler = ata_bmdma_error_handler,
326 .post_internal_cmd = ata_bmdma_post_internal_cmd,
327 .irq_handler = ata_interrupt,
328 .irq_clear = ata_bmdma_irq_clear,
329 .scr_read = k2_sata_scr_read,
330 .scr_write = k2_sata_scr_write,
331 .port_start = ata_port_start,
332 .port_stop = ata_port_stop,
333 .host_stop = ata_pci_host_stop,
334};
335
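/*
 * Each K2 port is a 0x100-byte register window in BAR 5.  The taskfile
 * registers sit on 4-byte boundaries so a single 16-bit write can
 * carry an LBA48 high-order byte in the upper half (see
 * k2_sata_tf_load() above).
 */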
336static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
337{
338 port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET;
339 port->data_addr = base + K2_SATA_TF_DATA_OFFSET;
340 port->feature_addr =
341 port->error_addr = base + K2_SATA_TF_ERROR_OFFSET;
342 port->nsect_addr = base + K2_SATA_TF_NSECT_OFFSET;
343 port->lbal_addr = base + K2_SATA_TF_LBAL_OFFSET;
344 port->lbam_addr = base + K2_SATA_TF_LBAM_OFFSET;
345 port->lbah_addr = base + K2_SATA_TF_LBAH_OFFSET;
346 port->device_addr = base + K2_SATA_TF_DEVICE_OFFSET;
347 port->command_addr =
348 port->status_addr = base + K2_SATA_TF_CMDSTAT_OFFSET;
349 port->altstatus_addr =
350 port->ctl_addr = base + K2_SATA_TF_CTL_OFFSET;
351 port->bmdma_addr = base + K2_SATA_DMA_CMD_OFFSET;
352 port->scr_addr = base + K2_SATA_SCR_STATUS_OFFSET;
353}
354
355
356static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
357{
358 static int printed_version;
359 struct ata_probe_ent *probe_ent = NULL;
360 unsigned long base;
361 void __iomem *mmio_base;
362 int pci_dev_busy = 0;
363 int rc;
364 int i;
365
366 if (!printed_version++)
367 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
368
369 /*
370 * If this driver happens to only be useful on Apple's K2, then
371 * we should check that here as it has a normal Serverworks ID
372 */
373 rc = pci_enable_device(pdev);
374 if (rc)
375 return rc;
376 /*
377 * Check if we have resources mapped at all (second function may
378 * have been disabled by firmware)
379 */
380 if (pci_resource_len(pdev, 5) == 0)
381 return -ENODEV;
382
383 /* Request PCI regions */
384 rc = pci_request_regions(pdev, DRV_NAME);
385 if (rc) {
386 pci_dev_busy = 1;
387 goto err_out;
388 }
389
390 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
391 if (rc)
392 goto err_out_regions;
393 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
394 if (rc)
395 goto err_out_regions;
396
397 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
398 if (probe_ent == NULL) {
399 rc = -ENOMEM;
400 goto err_out_regions;
401 }
402
403 memset(probe_ent, 0, sizeof(*probe_ent));
404 probe_ent->dev = pci_dev_to_dev(pdev);
405 INIT_LIST_HEAD(&probe_ent->node);
406
407 mmio_base = pci_iomap(pdev, 5, 0);
408 if (mmio_base == NULL) {
409 rc = -ENOMEM;
410 goto err_out_free_ent;
411 }
412 base = (unsigned long) mmio_base;
413
414	/* Clear a magic bit in SCR1 according to Darwin; this helps
415	 * some funky Seagate drives (though so far, the firmware on the
416	 * machines I had access to had already taken care of it)
417	 */
418 writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
419 mmio_base + K2_SATA_SICR1_OFFSET);
420
421 /* Clear SATA error & interrupts we don't use */
422 writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
423 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
424
425 probe_ent->sht = &k2_sata_sht;
426 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
427 ATA_FLAG_MMIO;
428 probe_ent->port_ops = &k2_sata_ops;
429 probe_ent->n_ports = 4;
430 probe_ent->irq = pdev->irq;
431 probe_ent->irq_flags = IRQF_SHARED;
432 probe_ent->mmio_base = mmio_base;
433
434 /* We don't care much about the PIO/UDMA masks, but the core won't like us
435 * if we don't fill these
436 */
437 probe_ent->pio_mask = 0x1f;
438 probe_ent->mwdma_mask = 0x7;
439 probe_ent->udma_mask = 0x7f;
440
441	/* Different controllers have a different number of ports, currently 4 or 8 */
442	/* All ports are on the same PCI function; the multi-function variant is no
443	 * longer available and should not be seen in any system. */
444 for (i = 0; i < ent->driver_data; i++)
445 k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET);
446
447 pci_set_master(pdev);
448
449 /* FIXME: check ata_device_add return value */
450 ata_device_add(probe_ent);
451 kfree(probe_ent);
452
453 return 0;
454
455err_out_free_ent:
456 kfree(probe_ent);
457err_out_regions:
458 pci_release_regions(pdev);
459err_out:
460 if (!pci_dev_busy)
461 pci_disable_device(pdev);
462 return rc;
463}
464
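
The kmalloc/memset pair in the probe routine above predates kzalloc(); on kernels that provide it (2.6.14 and later) the allocation and clearing collapse into one call. A minimal sketch of the equivalent, reusing the surrounding variables:

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);
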
465/* 0x240 is device ID for Apple K2 device
466 * 0x241 is device ID for Serverworks Frodo4
467 * 0x242 is device ID for Serverworks Frodo8
468 * 0x24a is device ID for BCM5785 (aka HT1000) HT southbridge integrated SATA
469 * controller
470 */
471static const struct pci_device_id k2_sata_pci_tbl[] = {
472 { 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
473 { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
474 { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
475 { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
476 { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
477 { }
478};
479
480
481static struct pci_driver k2_sata_pci_driver = {
482 .name = DRV_NAME,
483 .id_table = k2_sata_pci_tbl,
484 .probe = k2_sata_init_one,
485 .remove = ata_pci_remove_one,
486};
487
488
489static int __init k2_sata_init(void)
490{
491 return pci_module_init(&k2_sata_pci_driver);
492}
493
494
495static void __exit k2_sata_exit(void)
496{
497 pci_unregister_driver(&k2_sata_pci_driver);
498}
499
500
501MODULE_AUTHOR("Benjamin Herrenschmidt");
502MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
503MODULE_LICENSE("GPL");
504MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
505MODULE_VERSION(DRV_VERSION);
506
507module_init(k2_sata_init);
508module_exit(k2_sata_exit);
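
pci_module_init() above was a backwards-compatibility wrapper around pci_register_driver() and was removed from the kernel not long after this code moved; a sketch of the modern form of the init routine:

	static int __init k2_sata_init(void)
	{
		return pci_register_driver(&k2_sata_pci_driver);
	}
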
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
deleted file mode 100644
index ccc8cad24f7d..000000000000
--- a/drivers/scsi/sata_sx4.c
+++ /dev/null
@@ -1,1502 +0,0 @@
1/*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_sx4"
49#define DRV_VERSION "0.9"
50
51
52enum {
53 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
54
55 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
56 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
57 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
58 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
59
60 PDC_20621_SEQCTL = 0x400,
61 PDC_20621_SEQMASK = 0x480,
62 PDC_20621_GENERAL_CTL = 0x484,
63 PDC_20621_PAGE_SIZE = (32 * 1024),
64
65 /* chosen, not constant, values; we design our own DIMM mem map */
66 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
67 PDC_20621_DIMM_BASE = 0x00200000,
68 PDC_20621_DIMM_DATA = (64 * 1024),
69 PDC_DIMM_DATA_STEP = (256 * 1024),
70 PDC_DIMM_WINDOW_STEP = (8 * 1024),
71 PDC_DIMM_HOST_PRD = (6 * 1024),
72 PDC_DIMM_HOST_PKT = (128 * 0),
73 PDC_DIMM_HPKT_PRD = (128 * 1),
74 PDC_DIMM_ATA_PKT = (128 * 2),
75 PDC_DIMM_APKT_PRD = (128 * 3),
76 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
77 PDC_PAGE_WINDOW = 0x40,
78 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
79 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
80 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
81
82 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
83
84 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
85 (1<<23),
86
87 board_20621 = 0, /* FastTrak S150 SX4 */
88
89 PDC_RESET = (1 << 11), /* HDMA reset */
90
91 PDC_MAX_HDMA = 32,
92 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
93
94 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
95 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
96 PDC_MAX_DIMM_MODULE = 0x02,
97 PDC_I2C_CONTROL_OFFSET = 0x48,
98 PDC_I2C_ADDR_DATA_OFFSET = 0x4C,
99 PDC_DIMM0_CONTROL_OFFSET = 0x80,
100 PDC_DIMM1_CONTROL_OFFSET = 0x84,
101 PDC_SDRAM_CONTROL_OFFSET = 0x88,
102 PDC_I2C_WRITE = 0x00000000,
103 PDC_I2C_READ = 0x00000040,
104 PDC_I2C_START = 0x00000080,
105 PDC_I2C_MASK_INT = 0x00000020,
106 PDC_I2C_COMPLETE = 0x00010000,
107 PDC_I2C_NO_ACK = 0x00100000,
108 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
109 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
110 PDC_DIMM_SPD_ROW_NUM = 3,
111 PDC_DIMM_SPD_COLUMN_NUM = 4,
112 PDC_DIMM_SPD_MODULE_ROW = 5,
113 PDC_DIMM_SPD_TYPE = 11,
114 PDC_DIMM_SPD_FRESH_RATE = 12,
115 PDC_DIMM_SPD_BANK_NUM = 17,
116 PDC_DIMM_SPD_CAS_LATENCY = 18,
117 PDC_DIMM_SPD_ATTRIBUTE = 21,
118 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
119 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
120 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
121 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
122 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
123 PDC_CTL_STATUS = 0x08,
124 PDC_DIMM_WINDOW_CTLR = 0x0C,
125 PDC_TIME_CONTROL = 0x3C,
126 PDC_TIME_PERIOD = 0x40,
127 PDC_TIME_COUNTER = 0x44,
128 PDC_GENERAL_CTLR = 0x484,
129 PCI_PLL_INIT = 0x8A531824,
130 PCI_X_TCOUNT = 0xEE1E5CFF
131};
132
133
134struct pdc_port_priv {
135 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
136 u8 *pkt;
137 dma_addr_t pkt_dma;
138};
139
140struct pdc_host_priv {
141 void __iomem *dimm_mmio;
142
143 unsigned int doing_hdma;
144 unsigned int hdma_prod;
145 unsigned int hdma_cons;
146 struct {
147 struct ata_queued_cmd *qc;
148 unsigned int seq;
149 unsigned long pkt_ofs;
150 } hdma[32];
151};
152
153
154static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
155static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
156static void pdc_eng_timeout(struct ata_port *ap);
157static void pdc_20621_phy_reset (struct ata_port *ap);
158static int pdc_port_start(struct ata_port *ap);
159static void pdc_port_stop(struct ata_port *ap);
160static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
161static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
162static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
163static void pdc20621_host_stop(struct ata_host_set *host_set);
164static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
165static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
166static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
167 u32 device, u32 subaddr, u32 *pdata);
168static int pdc20621_prog_dimm0(struct ata_probe_ent *pe);
169static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe);
170#ifdef ATA_VERBOSE_DEBUG
171static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
172 void *psource, u32 offset, u32 size);
173#endif
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap);
177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178
179
180static struct scsi_host_template pdc_sata_sht = {
181 .module = THIS_MODULE,
182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd,
185 .can_queue = ATA_DEF_QUEUE,
186 .this_id = ATA_SHT_THIS_ID,
187 .sg_tablesize = LIBATA_MAX_PRD,
188 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
189 .emulated = ATA_SHT_EMULATED,
190 .use_clustering = ATA_SHT_USE_CLUSTERING,
191 .proc_name = DRV_NAME,
192 .dma_boundary = ATA_DMA_BOUNDARY,
193 .slave_configure = ata_scsi_slave_config,
194 .slave_destroy = ata_scsi_slave_destroy,
195 .bios_param = ata_std_bios_param,
196};
197
198static const struct ata_port_operations pdc_20621_ops = {
199 .port_disable = ata_port_disable,
200 .tf_load = pdc_tf_load_mmio,
201 .tf_read = ata_tf_read,
202 .check_status = ata_check_status,
203 .exec_command = pdc_exec_command_mmio,
204 .dev_select = ata_std_dev_select,
205 .phy_reset = pdc_20621_phy_reset,
206 .qc_prep = pdc20621_qc_prep,
207 .qc_issue = pdc20621_qc_issue_prot,
208 .data_xfer = ata_mmio_data_xfer,
209 .eng_timeout = pdc_eng_timeout,
210 .irq_handler = pdc20621_interrupt,
211 .irq_clear = pdc20621_irq_clear,
212 .port_start = pdc_port_start,
213 .port_stop = pdc_port_stop,
214 .host_stop = pdc20621_host_stop,
215};
216
217static const struct ata_port_info pdc_port_info[] = {
218 /* board_20621 */
219 {
220 .sht = &pdc_sata_sht,
221 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
222 ATA_FLAG_SRST | ATA_FLAG_MMIO |
223 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
224 .pio_mask = 0x1f, /* pio0-4 */
225 .mwdma_mask = 0x07, /* mwdma0-2 */
226 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
227 .port_ops = &pdc_20621_ops,
228 },
229
230};
231
232static const struct pci_device_id pdc_sata_pci_tbl[] = {
233 { PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
234 board_20621 },
235 { } /* terminate list */
236};
237
238
239static struct pci_driver pdc_sata_pci_driver = {
240 .name = DRV_NAME,
241 .id_table = pdc_sata_pci_tbl,
242 .probe = pdc_sata_init_one,
243 .remove = ata_pci_remove_one,
244};
245
246
247static void pdc20621_host_stop(struct ata_host_set *host_set)
248{
249 struct pci_dev *pdev = to_pci_dev(host_set->dev);
250 struct pdc_host_priv *hpriv = host_set->private_data;
251 void __iomem *dimm_mmio = hpriv->dimm_mmio;
252
253 pci_iounmap(pdev, dimm_mmio);
254 kfree(hpriv);
255
256 pci_iounmap(pdev, host_set->mmio_base);
257}
258
259static int pdc_port_start(struct ata_port *ap)
260{
261 struct device *dev = ap->host_set->dev;
262 struct pdc_port_priv *pp;
263 int rc;
264
265 rc = ata_port_start(ap);
266 if (rc)
267 return rc;
268
269 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
270 if (!pp) {
271 rc = -ENOMEM;
272 goto err_out;
273 }
274 memset(pp, 0, sizeof(*pp));
275
276 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
277 if (!pp->pkt) {
278 rc = -ENOMEM;
279 goto err_out_kfree;
280 }
281
282 ap->private_data = pp;
283
284 return 0;
285
286err_out_kfree:
287 kfree(pp);
288err_out:
289 ata_port_stop(ap);
290 return rc;
291}
292
293
294static void pdc_port_stop(struct ata_port *ap)
295{
296 struct device *dev = ap->host_set->dev;
297 struct pdc_port_priv *pp = ap->private_data;
298
299 ap->private_data = NULL;
300 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
301 kfree(pp);
302 ata_port_stop(ap);
303}
304
305
306static void pdc_20621_phy_reset (struct ata_port *ap)
307{
308 VPRINTK("ENTER\n");
309 ap->cbl = ATA_CBL_SATA;
310 ata_port_probe(ap);
311 ata_bus_reset(ap);
312}
313
314static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
315 unsigned int portno,
316 unsigned int total_len)
317{
318 u32 addr;
319 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
320 u32 *buf32 = (u32 *) buf;
321
322 /* output ATA packet S/G table */
323 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
324 (PDC_DIMM_DATA_STEP * portno);
325 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
326 buf32[dw] = cpu_to_le32(addr);
327 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
328
329 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
330 PDC_20621_DIMM_BASE +
331 (PDC_DIMM_WINDOW_STEP * portno) +
332 PDC_DIMM_APKT_PRD,
333 buf32[dw], buf32[dw + 1]);
334}
335
336static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
337 unsigned int portno,
338 unsigned int total_len)
339{
340 u32 addr;
341 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
342 u32 *buf32 = (u32 *) buf;
343
344 /* output Host DMA packet S/G table */
345 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
346 (PDC_DIMM_DATA_STEP * portno);
347
348 buf32[dw] = cpu_to_le32(addr);
349 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
350
351 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
352 PDC_20621_DIMM_BASE +
353 (PDC_DIMM_WINDOW_STEP * portno) +
354 PDC_DIMM_HPKT_PRD,
355 buf32[dw], buf32[dw + 1]);
356}
357
358static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
359 unsigned int devno, u8 *buf,
360 unsigned int portno)
361{
362 unsigned int i, dw;
363 u32 *buf32 = (u32 *) buf;
364 u8 dev_reg;
365
366 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
367 (PDC_DIMM_WINDOW_STEP * portno) +
368 PDC_DIMM_APKT_PRD;
369 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
370
371 i = PDC_DIMM_ATA_PKT;
372
373 /*
374 * Set up ATA packet
375 */
376 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
377 buf[i++] = PDC_PKT_READ;
378 else if (tf->protocol == ATA_PROT_NODATA)
379 buf[i++] = PDC_PKT_NODATA;
380 else
381 buf[i++] = 0;
382 buf[i++] = 0; /* reserved */
383 buf[i++] = portno + 1; /* seq. id */
384 buf[i++] = 0xff; /* delay seq. id */
385
386 /* dimm dma S/G, and next-pkt */
387 dw = i >> 2;
388 if (tf->protocol == ATA_PROT_NODATA)
389 buf32[dw] = 0;
390 else
391 buf32[dw] = cpu_to_le32(dimm_sg);
392 buf32[dw + 1] = 0;
393 i += 8;
394
395 if (devno == 0)
396 dev_reg = ATA_DEVICE_OBS;
397 else
398 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
399
400 /* select device */
401 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
402 buf[i++] = dev_reg;
403
404 /* device control register */
405 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
406 buf[i++] = tf->ctl;
407
408 return i;
409}
410
411static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
412 unsigned int portno)
413{
414 unsigned int dw;
415 u32 tmp, *buf32 = (u32 *) buf;
416
417 unsigned int host_sg = PDC_20621_DIMM_BASE +
418 (PDC_DIMM_WINDOW_STEP * portno) +
419 PDC_DIMM_HOST_PRD;
420 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
421 (PDC_DIMM_WINDOW_STEP * portno) +
422 PDC_DIMM_HPKT_PRD;
423 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
424 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
425
426 dw = PDC_DIMM_HOST_PKT >> 2;
427
428 /*
429 * Set up Host DMA packet
430 */
431 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
432 tmp = PDC_PKT_READ;
433 else
434 tmp = 0;
435 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
436 tmp |= (0xff << 24); /* delay seq. id */
437 buf32[dw + 0] = cpu_to_le32(tmp);
438 buf32[dw + 1] = cpu_to_le32(host_sg);
439 buf32[dw + 2] = cpu_to_le32(dimm_sg);
440 buf32[dw + 3] = 0;
441
442 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
443 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
444 PDC_DIMM_HOST_PKT,
445 buf32[dw + 0],
446 buf32[dw + 1],
447 buf32[dw + 2],
448 buf32[dw + 3]);
449}
450
451static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
452{
453 struct scatterlist *sg;
454 struct ata_port *ap = qc->ap;
455 struct pdc_port_priv *pp = ap->private_data;
456 void __iomem *mmio = ap->host_set->mmio_base;
457 struct pdc_host_priv *hpriv = ap->host_set->private_data;
458 void __iomem *dimm_mmio = hpriv->dimm_mmio;
459 unsigned int portno = ap->port_no;
460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462
463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464
465 VPRINTK("ata%u: ENTER\n", ap->id);
466
467 /* hard-code chip #0 */
468 mmio += PDC_CHIP0_OFS;
469
470 /*
471 * Build S/G table
472 */
473 idx = 0;
474 ata_for_each_sg(sg, qc) {
475 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
476 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
477 total_len += sg_dma_len(sg);
478 }
479 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
480 sgt_len = idx * 4;
481
482 /*
483 * Build ATA, host DMA packets
484 */
485 pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
486 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
487
488 pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
489 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
490
491 if (qc->tf.flags & ATA_TFLAG_LBA48)
492 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
493 else
494 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
495
496 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
497
498 /* copy three S/G tables and two packets to DIMM MMIO window */
499 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
500 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
501 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
502 PDC_DIMM_HOST_PRD,
503 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
504
505 /* force host FIFO dump */
506 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
507
508 readl(dimm_mmio); /* MMIO PCI posting flush */
509
510 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
511}
512
513static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
514{
515 struct ata_port *ap = qc->ap;
516 struct pdc_port_priv *pp = ap->private_data;
517 void __iomem *mmio = ap->host_set->mmio_base;
518 struct pdc_host_priv *hpriv = ap->host_set->private_data;
519 void __iomem *dimm_mmio = hpriv->dimm_mmio;
520 unsigned int portno = ap->port_no;
521 unsigned int i;
522
523 VPRINTK("ata%u: ENTER\n", ap->id);
524
525 /* hard-code chip #0 */
526 mmio += PDC_CHIP0_OFS;
527
528 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
529
530 if (qc->tf.flags & ATA_TFLAG_LBA48)
531 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
532 else
533 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
534
535 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
536
537	/* copy the ATA packet to the DIMM MMIO window */
538 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
539 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
540
541 /* force host FIFO dump */
542 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
543
544 readl(dimm_mmio); /* MMIO PCI posting flush */
545
546 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
547}
548
549static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
550{
551 switch (qc->tf.protocol) {
552 case ATA_PROT_DMA:
553 pdc20621_dma_prep(qc);
554 break;
555 case ATA_PROT_NODATA:
556 pdc20621_nodata_prep(qc);
557 break;
558 default:
559 break;
560 }
561}
562
563static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
564 unsigned int seq,
565 u32 pkt_ofs)
566{
567 struct ata_port *ap = qc->ap;
568 struct ata_host_set *host_set = ap->host_set;
569 void __iomem *mmio = host_set->mmio_base;
570
571 /* hard-code chip #0 */
572 mmio += PDC_CHIP0_OFS;
573
574 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
575 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
576
577 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
578 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
579}
580
581static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
582 unsigned int seq,
583 u32 pkt_ofs)
584{
585 struct ata_port *ap = qc->ap;
586 struct pdc_host_priv *pp = ap->host_set->private_data;
587 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
588
589 if (!pp->doing_hdma) {
590 __pdc20621_push_hdma(qc, seq, pkt_ofs);
591 pp->doing_hdma = 1;
592 return;
593 }
594
595 pp->hdma[idx].qc = qc;
596 pp->hdma[idx].seq = seq;
597 pp->hdma[idx].pkt_ofs = pkt_ofs;
598 pp->hdma_prod++;
599}
600
601static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
602{
603 struct ata_port *ap = qc->ap;
604 struct pdc_host_priv *pp = ap->host_set->private_data;
605 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
606
607 /* if nothing on queue, we're done */
608 if (pp->hdma_prod == pp->hdma_cons) {
609 pp->doing_hdma = 0;
610 return;
611 }
612
613 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
614 pp->hdma[idx].pkt_ofs);
615 pp->hdma_cons++;
616}
617
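
The push/pop pair above implements the 32-slot host-DMA queue with free-running producer/consumer counters; because PDC_MAX_HDMA is a power of two, masking with PDC_HDMA_Q_MASK (31) folds the counters into array indices without any modulo. A self-contained sketch of the same ring technique, generic rather than driver code:

	#define RING_SIZE 32			/* must be a power of two */
	#define RING_MASK (RING_SIZE - 1)

	struct ring {
		unsigned int prod, cons;	/* free-running counters */
		void *slot[RING_SIZE];
	};

	static int ring_push(struct ring *r, void *item)
	{
		if (r->prod - r->cons >= RING_SIZE)
			return -1;		/* full */
		r->slot[r->prod++ & RING_MASK] = item;
		return 0;
	}

	static void *ring_pop(struct ring *r)
	{
		if (r->prod == r->cons)
			return NULL;		/* empty */
		return r->slot[r->cons++ & RING_MASK];
	}

The driver's version never checks for a full ring; since libata keeps at most one active command per port here, the queue stays far below 32 entries.
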
618#ifdef ATA_VERBOSE_DEBUG
619static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
620{
621 struct ata_port *ap = qc->ap;
622 unsigned int port_no = ap->port_no;
623 struct pdc_host_priv *hpriv = ap->host_set->private_data;
624 void *dimm_mmio = hpriv->dimm_mmio;
625
626 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
627 dimm_mmio += PDC_DIMM_HOST_PKT;
628
629 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
630 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
631 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
632 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
633}
634#else
635static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
636#endif /* ATA_VERBOSE_DEBUG */
637
638static void pdc20621_packet_start(struct ata_queued_cmd *qc)
639{
640 struct ata_port *ap = qc->ap;
641 struct ata_host_set *host_set = ap->host_set;
642 unsigned int port_no = ap->port_no;
643 void __iomem *mmio = host_set->mmio_base;
644 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
645 u8 seq = (u8) (port_no + 1);
646 unsigned int port_ofs;
647
648 /* hard-code chip #0 */
649 mmio += PDC_CHIP0_OFS;
650
651 VPRINTK("ata%u: ENTER\n", ap->id);
652
653 wmb(); /* flush PRD, pkt writes */
654
655 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
656
657 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
658 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
659 seq += 4;
660
661 pdc20621_dump_hdma(qc);
662 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
663 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
664 port_ofs + PDC_DIMM_HOST_PKT,
665 port_ofs + PDC_DIMM_HOST_PKT,
666 seq);
667 } else {
668 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
669 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
670
671 writel(port_ofs + PDC_DIMM_ATA_PKT,
672 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
674 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
675 port_ofs + PDC_DIMM_ATA_PKT,
676 port_ofs + PDC_DIMM_ATA_PKT,
677 seq);
678 }
679}
680
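
Reads and writes traverse the board's DIMM staging buffer in opposite orders, which is what packet_start and the interrupt handler below coordinate: a write first host-DMAs the data into the DIMM (seq ids 5-8, i.e. port_no + 1 + 4), then runs the ATA packet (seq ids 1-4); a read runs the ATA packet first and drains the DIMM afterwards. A one-line sketch of the dispatch rule, using the libata taskfile fields the driver already checks:

	/* sketch: does this command start on the host-DMA engine? */
	static int first_step_is_host_dma(const struct ata_taskfile *tf)
	{
		return tf->protocol == ATA_PROT_DMA &&
		       (tf->flags & ATA_TFLAG_WRITE);
	}
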
681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{
683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA:
685 case ATA_PROT_NODATA:
686 pdc20621_packet_start(qc);
687 return 0;
688
689 case ATA_PROT_ATAPI_DMA:
690 BUG();
691 break;
692
693 default:
694 break;
695 }
696
697 return ata_qc_issue_prot(qc);
698}
699
700static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
701 struct ata_queued_cmd *qc,
702 unsigned int doing_hdma,
703 void __iomem *mmio)
704{
705 unsigned int port_no = ap->port_no;
706 unsigned int port_ofs =
707 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
708 u8 status;
709 unsigned int handled = 0;
710
711 VPRINTK("ENTER\n");
712
713 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
714 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
715
716 /* step two - DMA from DIMM to host */
717 if (doing_hdma) {
718 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id,
719 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
720 /* get drive status; clear intr; complete txn */
721 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
722 ata_qc_complete(qc);
723 pdc20621_pop_hdma(qc);
724 }
725
726 /* step one - exec ATA command */
727 else {
728 u8 seq = (u8) (port_no + 1 + 4);
729 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id,
730 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
731
732 /* submit hdma pkt */
733 pdc20621_dump_hdma(qc);
734 pdc20621_push_hdma(qc, seq,
735 port_ofs + PDC_DIMM_HOST_PKT);
736 }
737 handled = 1;
738
739 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
740
741 /* step one - DMA from host to DIMM */
742 if (doing_hdma) {
743 u8 seq = (u8) (port_no + 1);
744 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id,
745 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
746
747 /* submit ata pkt */
748 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
749 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
750 writel(port_ofs + PDC_DIMM_ATA_PKT,
751 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
752 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
753 }
754
755 /* step two - execute ATA command */
756 else {
757 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id,
758 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
759 /* get drive status; clear intr; complete txn */
760 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
761 ata_qc_complete(qc);
762 pdc20621_pop_hdma(qc);
763 }
764 handled = 1;
765
766 /* command completion, but no data xfer */
767 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
768
769 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
770 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
771 qc->err_mask |= ac_err_mask(status);
772 ata_qc_complete(qc);
773 handled = 1;
774
775 } else {
776 ap->stats.idle_irq++;
777 }
778
779 return handled;
780}
781
782static void pdc20621_irq_clear(struct ata_port *ap)
783{
784 struct ata_host_set *host_set = ap->host_set;
785 void __iomem *mmio = host_set->mmio_base;
786
787 mmio += PDC_CHIP0_OFS;
788
789 readl(mmio + PDC_20621_SEQMASK);
790}
791
792static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
793{
794 struct ata_host_set *host_set = dev_instance;
795 struct ata_port *ap;
796 u32 mask = 0;
797 unsigned int i, tmp, port_no;
798 unsigned int handled = 0;
799 void __iomem *mmio_base;
800
801 VPRINTK("ENTER\n");
802
803 if (!host_set || !host_set->mmio_base) {
804 VPRINTK("QUICK EXIT\n");
805 return IRQ_NONE;
806 }
807
808 mmio_base = host_set->mmio_base;
809
810 /* reading should also clear interrupts */
811 mmio_base += PDC_CHIP0_OFS;
812 mask = readl(mmio_base + PDC_20621_SEQMASK);
813 VPRINTK("mask == 0x%x\n", mask);
814
815 if (mask == 0xffffffff) {
816 VPRINTK("QUICK EXIT 2\n");
817 return IRQ_NONE;
818 }
819 mask &= 0xffff; /* only 16 tags possible */
820 if (!mask) {
821 VPRINTK("QUICK EXIT 3\n");
822 return IRQ_NONE;
823 }
824
825 spin_lock(&host_set->lock);
826
827 for (i = 1; i < 9; i++) {
828 port_no = i - 1;
829 if (port_no > 3)
830 port_no -= 4;
831 if (port_no >= host_set->n_ports)
832 ap = NULL;
833 else
834 ap = host_set->ports[port_no];
835 tmp = mask & (1 << i);
836 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
837 if (tmp && ap &&
838 !(ap->flags & ATA_FLAG_DISABLED)) {
839 struct ata_queued_cmd *qc;
840
841 qc = ata_qc_from_tag(ap, ap->active_tag);
842 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
843 handled += pdc20621_host_intr(ap, qc, (i > 4),
844 mmio_base);
845 }
846 }
847
848 spin_unlock(&host_set->lock);
849
850 VPRINTK("mask == 0x%x\n", mask);
851
852 VPRINTK("EXIT\n");
853
854 return IRQ_RETVAL(handled);
855}
856
857static void pdc_eng_timeout(struct ata_port *ap)
858{
859 u8 drv_stat;
860 struct ata_host_set *host_set = ap->host_set;
861 struct ata_queued_cmd *qc;
862 unsigned long flags;
863
864 DPRINTK("ENTER\n");
865
866 spin_lock_irqsave(&host_set->lock, flags);
867
868 qc = ata_qc_from_tag(ap, ap->active_tag);
869
870 switch (qc->tf.protocol) {
871 case ATA_PROT_DMA:
872 case ATA_PROT_NODATA:
873 ata_port_printk(ap, KERN_ERR, "command timeout\n");
874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
875 break;
876
877 default:
878 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
879
880 ata_port_printk(ap, KERN_ERR,
881 "unknown timeout, cmd 0x%x stat 0x%x\n",
882 qc->tf.command, drv_stat);
883
884 qc->err_mask |= ac_err_mask(drv_stat);
885 break;
886 }
887
888 spin_unlock_irqrestore(&host_set->lock, flags);
889 ata_eh_qc_complete(qc);
890 DPRINTK("EXIT\n");
891}
892
893static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
894{
895 WARN_ON (tf->protocol == ATA_PROT_DMA ||
896 tf->protocol == ATA_PROT_NODATA);
897 ata_tf_load(ap, tf);
898}
899
900
901static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
902{
903 WARN_ON (tf->protocol == ATA_PROT_DMA ||
904 tf->protocol == ATA_PROT_NODATA);
905 ata_exec_command(ap, tf);
906}
907
908
909static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
910{
911 port->cmd_addr = base;
912 port->data_addr = base;
913 port->feature_addr =
914 port->error_addr = base + 0x4;
915 port->nsect_addr = base + 0x8;
916 port->lbal_addr = base + 0xc;
917 port->lbam_addr = base + 0x10;
918 port->lbah_addr = base + 0x14;
919 port->device_addr = base + 0x18;
920 port->command_addr =
921 port->status_addr = base + 0x1c;
922 port->altstatus_addr =
923 port->ctl_addr = base + 0x38;
924}
925
926
927#ifdef ATA_VERBOSE_DEBUG
928static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
929 u32 offset, u32 size)
930{
931 u32 window_size;
932 u16 idx;
933 u8 page_mask;
934 long dist;
935 void __iomem *mmio = pe->mmio_base;
936 struct pdc_host_priv *hpriv = pe->private_data;
937 void __iomem *dimm_mmio = hpriv->dimm_mmio;
938
939 /* hard-code chip #0 */
940 mmio += PDC_CHIP0_OFS;
941
942 page_mask = 0x00;
943 window_size = 0x2000 * 4; /* 32K byte uchar size */
944 idx = (u16) (offset / window_size);
945
946 writel(0x01, mmio + PDC_GENERAL_CTLR);
947 readl(mmio + PDC_GENERAL_CTLR);
948 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
949 readl(mmio + PDC_DIMM_WINDOW_CTLR);
950
951 offset -= (idx * window_size);
952 idx++;
953 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
954 (long) (window_size - offset);
955 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
956 dist);
957
958 psource += dist;
959 size -= dist;
960	while ((long) size >= (long) window_size) {
961 writel(0x01, mmio + PDC_GENERAL_CTLR);
962 readl(mmio + PDC_GENERAL_CTLR);
963 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
964 readl(mmio + PDC_DIMM_WINDOW_CTLR);
965 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
966 window_size / 4);
967 psource += window_size;
968 size -= window_size;
969		idx++;
970 }
971
972 if (size) {
973 writel(0x01, mmio + PDC_GENERAL_CTLR);
974 readl(mmio + PDC_GENERAL_CTLR);
975 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
976 readl(mmio + PDC_DIMM_WINDOW_CTLR);
977 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
978 size / 4);
979 }
980}
981#endif
982
983
984static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
985 u32 offset, u32 size)
986{
987 u32 window_size;
988 u16 idx;
989 u8 page_mask;
990 long dist;
991 void __iomem *mmio = pe->mmio_base;
992 struct pdc_host_priv *hpriv = pe->private_data;
993 void __iomem *dimm_mmio = hpriv->dimm_mmio;
994
995 /* hard-code chip #0 */
996 mmio += PDC_CHIP0_OFS;
997
998 page_mask = 0x00;
999 window_size = 0x2000 * 4; /* 32K byte uchar size */
1000 idx = (u16) (offset / window_size);
1001
1002 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1003 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1004 offset -= (idx * window_size);
1005 idx++;
1006 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1007 (long) (window_size - offset);
1008 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1009 writel(0x01, mmio + PDC_GENERAL_CTLR);
1010 readl(mmio + PDC_GENERAL_CTLR);
1011
1012 psource += dist;
1013 size -= dist;
1014	while ((long) size >= (long) window_size) {
1015 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1016 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1017 memcpy_toio(dimm_mmio, psource, window_size / 4);
1018 writel(0x01, mmio + PDC_GENERAL_CTLR);
1019 readl(mmio + PDC_GENERAL_CTLR);
1020 psource += window_size;
1021 size -= window_size;
1022		idx++;
1023 }
1024
1025 if (size) {
1026 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1027 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1028 memcpy_toio(dimm_mmio, psource, size / 4);
1029 writel(0x01, mmio + PDC_GENERAL_CTLR);
1030 readl(mmio + PDC_GENERAL_CTLR);
1031 }
1032}
1033
1034
1035static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1036 u32 subaddr, u32 *pdata)
1037{
1038 void __iomem *mmio = pe->mmio_base;
1039 u32 i2creg = 0;
1040 u32 status;
1041	u32 count = 0;
1042
1043 /* hard-code chip #0 */
1044 mmio += PDC_CHIP0_OFS;
1045
1046 i2creg |= device << 24;
1047 i2creg |= subaddr << 16;
1048
1049 /* Set the device and subaddress */
1050 writel(i2creg, mmio + PDC_I2C_ADDR_DATA_OFFSET);
1051 readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
1052
1053 /* Write Control to perform read operation, mask int */
1054 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1055 mmio + PDC_I2C_CONTROL_OFFSET);
1056
1057	for (count = 0; count <= 1000; count++) {
1058 status = readl(mmio + PDC_I2C_CONTROL_OFFSET);
1059 if (status & PDC_I2C_COMPLETE) {
1060 status = readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
1061 break;
1062 } else if (count == 1000)
1063 return 0;
1064 }
1065
1066 *pdata = (status >> 8) & 0x000000ff;
1067 return 1;
1068}
1069
1070
1071static int pdc20621_detect_dimm(struct ata_probe_ent *pe)
1072{
1073	u32 data = 0;
1074 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1075 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1076 if (data == 100)
1077 return 100;
1078 } else
1079 return 0;
1080
1081 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1082		if (data <= 0x75)
1083 return 133;
1084 } else
1085 return 0;
1086
1087 return 0;
1088}
1089
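
detect_dimm above infers the DIMM speed from two SPD bytes: byte 126 (the driver's PDC_DIMM_SPD_SYSTEM_FREQ, read directly as 100 for PC100 parts), falling back to byte 9, the minimum SDRAM cycle time encoded as a nibble of whole nanoseconds plus a nibble of tenths, so 0x75 means 7.5 ns, i.e. roughly 133 MHz. A sketch of that decoding, assuming the standard SDR SDRAM SPD layout:

	/* sketch: SPD byte 9 -> clock in MHz (0x75 -> 7.5 ns -> 133) */
	static unsigned int spd_cycle_to_mhz(u8 byte9)
	{
		unsigned int tenths_of_ns = (byte9 >> 4) * 10 + (byte9 & 0x0f);

		return 10000 / tenths_of_ns;	/* truncated */
	}
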
1090
1091static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1092{
1093 u32 spd0[50];
1094 u32 data = 0;
1095 int size, i;
1096 u8 bdimmsize;
1097 void __iomem *mmio = pe->mmio_base;
1098 static const struct {
1099 unsigned int reg;
1100 unsigned int ofs;
1101 } pdc_i2c_read_data [] = {
1102 { PDC_DIMM_SPD_TYPE, 11 },
1103 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1104 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1105 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1106 { PDC_DIMM_SPD_ROW_NUM, 3 },
1107 { PDC_DIMM_SPD_BANK_NUM, 17 },
1108 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1109 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1110 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1111 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1112 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1113 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1114 };
1115
1116 /* hard-code chip #0 */
1117 mmio += PDC_CHIP0_OFS;
1118
1119	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1120 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1121 pdc_i2c_read_data[i].reg,
1122 &spd0[pdc_i2c_read_data[i].ofs]);
1123
1124 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1125 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1126 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1127 data |= (((((spd0[29] > spd0[28])
1128 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1129 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1130
1131 if (spd0[18] & 0x08)
1132 data |= ((0x03) << 14);
1133 else if (spd0[18] & 0x04)
1134 data |= ((0x02) << 14);
1135 else if (spd0[18] & 0x01)
1136 data |= ((0x01) << 14);
1137 else
1138 data |= (0 << 14);
1139
1140	/*
1141	   Calculate bdimmsize, the log2 of the DIMM size in bytes, and
1142	   merge the resulting size into the control word's start/end address field.
1143	*/
1144
1145 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1146 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1147 data |= (((size / 16) - 1) << 16);
1148 data |= (0 << 23);
1149 data |= 8;
1150 writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET);
1151 readl(mmio + PDC_DIMM0_CONTROL_OFFSET);
1152 return size;
1153}
1154
1155
1156static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1157{
1158 u32 data, spd0;
1159 int error, i;
1160 void __iomem *mmio = pe->mmio_base;
1161
1162 /* hard-code chip #0 */
1163 mmio += PDC_CHIP0_OFS;
1164
1165	/*
1166	  Set the DIMM Module Global Control Register to its default (0x022259F1):
1167	  DIMM Arbitration Disable (bit 20)
1168	  DIMM Data/Control Output Driving Selection (bits 12-15)
1169	  Refresh Enable (bit 17)
1170	*/
1171
1172 data = 0x022259F1;
1173 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1174 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1175
1176	/* Turn on ECC if the DIMM supports it (SPD type byte == 0x02) */
1177 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1178 PDC_DIMM_SPD_TYPE, &spd0);
1179 if (spd0 == 0x02) {
1180 data |= (0x01 << 16);
1181 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1182 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1183 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1184 }
1185
1186 /* DIMM Initialization Select/Enable (bit 18/19) */
1187 data &= (~(1<<18));
1188 data |= (1<<19);
1189 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1190
1191 error = 1;
1192 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1193 data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1194 if (!(data & (1<<19))) {
1195 error = 0;
1196 break;
1197 }
1198 msleep(i*100);
1199 }
1200 return error;
1201}
1202
1203
1204static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1205{
1206 int speed, size, length;
1207	u32 addr, spd0, pci_status;
1208	u32 tmp = 0;
1209	u32 time_period = 0;
1210	u32 tcount = 0;
1211	u32 ticks = 0;
1212	u32 clock = 0;
1213	u32 fparam = 0;
1214 void __iomem *mmio = pe->mmio_base;
1215
1216 /* hard-code chip #0 */
1217 mmio += PDC_CHIP0_OFS;
1218
1219 /* Initialize PLL based upon PCI Bus Frequency */
1220
1221 /* Initialize Time Period Register */
1222 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1223 time_period = readl(mmio + PDC_TIME_PERIOD);
1224 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1225
1226 /* Enable timer */
1227 writel(0x00001a0, mmio + PDC_TIME_CONTROL);
1228 readl(mmio + PDC_TIME_CONTROL);
1229
1230 /* Wait 3 seconds */
1231 msleep(3000);
1232
1233	/*
1234	   When the timer is enabled, the counter is decremented every
1235	   internal clock cycle.
1236	*/
1237
1238 tcount = readl(mmio + PDC_TIME_COUNTER);
1239 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1240
1241	/*
1242	   If the SX4 is on a PCI-X bus, then after 3 seconds the timer
1243	   counter register should be >= (0xffffffff - 3x10^8).
1244	*/
1245	if (tcount >= PCI_X_TCOUNT) {
1246 ticks = (time_period - tcount);
1247 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1248
1249 clock = (ticks / 300000);
1250 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1251
1252 clock = (clock * 33);
1253 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1254
1255 /* PLL F Param (bit 22:16) */
1256 fparam = (1400000 / clock) - 2;
1257 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1258
1259 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1260 pci_status = (0x8a001824 | (fparam << 16));
1261 } else
1262 pci_status = PCI_PLL_INIT;
1263
1264 /* Initialize PLL. */
1265 VPRINTK("pci_status: 0x%x\n", pci_status);
1266 writel(pci_status, mmio + PDC_CTL_STATUS);
1267 readl(mmio + PDC_CTL_STATUS);
1268
1269	/*
1270	   Read the DIMM SPD over the I2C interface and program the
1271	   DIMM module controller.
1272	*/
1273 if (!(speed = pdc20621_detect_dimm(pe))) {
1274 printk(KERN_ERR "Detect Local DIMM Fail\n");
1275 return 1; /* DIMM error */
1276 }
1277 VPRINTK("Local DIMM Speed = %d\n", speed);
1278
1279 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1280 size = pdc20621_prog_dimm0(pe);
1281	VPRINTK("Local DIMM Size = %dMB\n", size);
1282
1283 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1284 if (pdc20621_prog_dimm_global(pe)) {
1285 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1286 return 1;
1287 }
1288
1289#ifdef ATA_VERBOSE_DEBUG
1290 {
1291		u8 test_pattern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1292		'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ',
1293		'1','.','1','0',
1294		'9','8','0','3','1','6','1','2',0,0};
1295		u8 test_pattern2[40] = {0};
1296
1297		pdc20621_put_to_dimm(pe, (void *) test_pattern2, 0x10040, 40);
1298		pdc20621_put_to_dimm(pe, (void *) test_pattern2, 0x40, 40);
1299
1300		pdc20621_put_to_dimm(pe, (void *) test_pattern1, 0x10040, 40);
1301		pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x40, 40);
1302		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1303		       test_pattern2[1], &(test_pattern2[2]));
1304		pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x10040,
1305		       40);
1306		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1307		       test_pattern2[1], &(test_pattern2[2]));
1308
1309		pdc20621_put_to_dimm(pe, (void *) test_pattern1, 0x40, 40);
1310		pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x40, 40);
1311		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1312		       test_pattern2[1], &(test_pattern2[2]));
1313 }
1314#endif
1315
1316	/* ECC initialization. */
1317
1318 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1319 PDC_DIMM_SPD_TYPE, &spd0);
1320 if (spd0 == 0x02) {
1321 VPRINTK("Start ECC initialization\n");
1322 addr = 0;
1323 length = size * 1024 * 1024;
1324 while (addr < length) {
1325 pdc20621_put_to_dimm(pe, (void *) &tmp, addr,
1326 sizeof(u32));
1327 addr += sizeof(u32);
1328 }
1329 VPRINTK("Finish ECC initialization\n");
1330 }
1331 return 0;
1332}
1333
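
Worked numbers for the PLL setup above, assuming the nominal 33 MHz internal clock the VPRINTKs describe; all names are local to this illustration:

	u32 ticks  = 33000000 * 3;		/* 99,000,000 decrements in 3 s   */
	u32 clock  = (ticks / 300000) * 33;	/* 330 * 33 = 10890               */
	u32 fparam = (1400000 / clock) - 2;	/* 128 - 2 = 126, fits bits 22:16 */

fparam is then merged into bits 22:16 of the control word, with the OD and R parameters fixed at 0x2 and 0x5 as the in-code comment notes.
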
1334
1335static void pdc_20621_init(struct ata_probe_ent *pe)
1336{
1337 u32 tmp;
1338 void __iomem *mmio = pe->mmio_base;
1339
1340 /* hard-code chip #0 */
1341 mmio += PDC_CHIP0_OFS;
1342
1343 /*
1344 * Select page 0x40 for our 32k DIMM window
1345 */
1346 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1347 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1348 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1349
1350 /*
1351 * Reset Host DMA
1352 */
1353 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1354 tmp |= PDC_RESET;
1355 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1356 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1357
1358 udelay(10);
1359
1360 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1361 tmp &= ~PDC_RESET;
1362 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1363 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1364}
1365
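
The page number programmed here and the DIMM memory map in the enum block are two views of the same arithmetic: page 0x40 times the 32 KB page size is 0x00200000, i.e. PDC_20621_DIMM_BASE, so the BAR4 window exposes exactly the region the packet and data offsets are computed against. A sketch of the relation, with illustrative names:

	#define DIMM_PAGE_SIZE	(32 * 1024)	/* PDC_20621_PAGE_SIZE */

	/* 0x40 * 0x8000 == 0x00200000 == PDC_20621_DIMM_BASE */
	static unsigned long window_base(unsigned int page)
	{
		return (unsigned long) page * DIMM_PAGE_SIZE;
	}
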
1366static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1367{
1368 static int printed_version;
1369 struct ata_probe_ent *probe_ent = NULL;
1370 unsigned long base;
1371 void __iomem *mmio_base;
1372 void __iomem *dimm_mmio = NULL;
1373 struct pdc_host_priv *hpriv = NULL;
1374 unsigned int board_idx = (unsigned int) ent->driver_data;
1375 int pci_dev_busy = 0;
1376 int rc;
1377
1378 if (!printed_version++)
1379 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1380
1381 rc = pci_enable_device(pdev);
1382 if (rc)
1383 return rc;
1384
1385 rc = pci_request_regions(pdev, DRV_NAME);
1386 if (rc) {
1387 pci_dev_busy = 1;
1388 goto err_out;
1389 }
1390
1391 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1392 if (rc)
1393 goto err_out_regions;
1394 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1395 if (rc)
1396 goto err_out_regions;
1397
1398 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1399 if (probe_ent == NULL) {
1400 rc = -ENOMEM;
1401 goto err_out_regions;
1402 }
1403
1404 memset(probe_ent, 0, sizeof(*probe_ent));
1405 probe_ent->dev = pci_dev_to_dev(pdev);
1406 INIT_LIST_HEAD(&probe_ent->node);
1407
1408 mmio_base = pci_iomap(pdev, 3, 0);
1409 if (mmio_base == NULL) {
1410 rc = -ENOMEM;
1411 goto err_out_free_ent;
1412 }
1413 base = (unsigned long) mmio_base;
1414
1415 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1416 if (!hpriv) {
1417 rc = -ENOMEM;
1418 goto err_out_iounmap;
1419 }
1420 memset(hpriv, 0, sizeof(*hpriv));
1421
1422 dimm_mmio = pci_iomap(pdev, 4, 0);
1423 if (!dimm_mmio) {
1424 kfree(hpriv);
1425 rc = -ENOMEM;
1426 goto err_out_iounmap;
1427 }
1428
1429 hpriv->dimm_mmio = dimm_mmio;
1430
1431 probe_ent->sht = pdc_port_info[board_idx].sht;
1432 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
1433 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
1434 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
1435 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
1436 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
1437
1438 probe_ent->irq = pdev->irq;
1439 probe_ent->irq_flags = IRQF_SHARED;
1440 probe_ent->mmio_base = mmio_base;
1441
1442 probe_ent->private_data = hpriv;
1443 base += PDC_CHIP0_OFS;
1444
1445 probe_ent->n_ports = 4;
1446 pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);
1447 pdc_sata_setup_port(&probe_ent->port[1], base + 0x280);
1448 pdc_sata_setup_port(&probe_ent->port[2], base + 0x300);
1449 pdc_sata_setup_port(&probe_ent->port[3], base + 0x380);
1450
1451 pci_set_master(pdev);
1452
1453 /* initialize adapter */
1454 /* initialize local dimm */
1455 if (pdc20621_dimm_init(probe_ent)) {
1456 rc = -ENOMEM;
1457 goto err_out_iounmap_dimm;
1458 }
1459 pdc_20621_init(probe_ent);
1460
1461 /* FIXME: check ata_device_add return value */
1462 ata_device_add(probe_ent);
1463 kfree(probe_ent);
1464
1465 return 0;
1466
1467err_out_iounmap_dimm: /* only get to this label if 20621 */
1468 kfree(hpriv);
1469 pci_iounmap(pdev, dimm_mmio);
1470err_out_iounmap:
1471 pci_iounmap(pdev, mmio_base);
1472err_out_free_ent:
1473 kfree(probe_ent);
1474err_out_regions:
1475 pci_release_regions(pdev);
1476err_out:
1477 if (!pci_dev_busy)
1478 pci_disable_device(pdev);
1479 return rc;
1480}
1481
1482
1483static int __init pdc_sata_init(void)
1484{
1485 return pci_module_init(&pdc_sata_pci_driver);
1486}
1487
1488
1489static void __exit pdc_sata_exit(void)
1490{
1491 pci_unregister_driver(&pdc_sata_pci_driver);
1492}
1493
1494
1495MODULE_AUTHOR("Jeff Garzik");
1496MODULE_DESCRIPTION("Promise SATA low-level driver");
1497MODULE_LICENSE("GPL");
1498MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1499MODULE_VERSION(DRV_VERSION);
1500
1501module_init(pdc_sata_init);
1502module_exit(pdc_sata_exit);
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
deleted file mode 100644
index 33cdb4867ef1..000000000000
--- a/drivers/scsi/sata_uli.c
+++ /dev/null
@@ -1,300 +0,0 @@
1/*
2 * sata_uli.c - ULi Electronics SATA
3 *
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; see the file COPYING. If not, write to
17 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 *
20 * libata documentation is available via 'make {ps|pdf}docs',
21 * as Documentation/DocBook/libata.*
22 *
23 * Hardware documentation available under NDA.
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/device.h>
35#include <scsi/scsi_host.h>
36#include <linux/libata.h>
37
38#define DRV_NAME "sata_uli"
39#define DRV_VERSION "1.0"
40
41enum {
42 uli_5289 = 0,
43 uli_5287 = 1,
44 uli_5281 = 2,
45
46 uli_max_ports = 4,
47
48 /* PCI configuration registers */
49 ULI5287_BASE = 0x90, /* sata0 phy SCR registers */
50 ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */
51 ULI5281_BASE = 0x60, /* sata0 phy SCR registers */
52 ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */
53};
54
55struct uli_priv {
56 unsigned int scr_cfg_addr[uli_max_ports];
57};
58
59static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
60static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
61static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
62
63static const struct pci_device_id uli_pci_tbl[] = {
64 { PCI_VENDOR_ID_AL, 0x5289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5289 },
65 { PCI_VENDOR_ID_AL, 0x5287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5287 },
66 { PCI_VENDOR_ID_AL, 0x5281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5281 },
67 { } /* terminate list */
68};
69
70
71static struct pci_driver uli_pci_driver = {
72 .name = DRV_NAME,
73 .id_table = uli_pci_tbl,
74 .probe = uli_init_one,
75 .remove = ata_pci_remove_one,
76};
77
78static struct scsi_host_template uli_sht = {
79 .module = THIS_MODULE,
80 .name = DRV_NAME,
81 .ioctl = ata_scsi_ioctl,
82 .queuecommand = ata_scsi_queuecmd,
83 .can_queue = ATA_DEF_QUEUE,
84 .this_id = ATA_SHT_THIS_ID,
85 .sg_tablesize = LIBATA_MAX_PRD,
86 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
87 .emulated = ATA_SHT_EMULATED,
88 .use_clustering = ATA_SHT_USE_CLUSTERING,
89 .proc_name = DRV_NAME,
90 .dma_boundary = ATA_DMA_BOUNDARY,
91 .slave_configure = ata_scsi_slave_config,
92 .slave_destroy = ata_scsi_slave_destroy,
93 .bios_param = ata_std_bios_param,
94};
95
96static const struct ata_port_operations uli_ops = {
97 .port_disable = ata_port_disable,
98
99 .tf_load = ata_tf_load,
100 .tf_read = ata_tf_read,
101 .check_status = ata_check_status,
102 .exec_command = ata_exec_command,
103 .dev_select = ata_std_dev_select,
104
105 .bmdma_setup = ata_bmdma_setup,
106 .bmdma_start = ata_bmdma_start,
107 .bmdma_stop = ata_bmdma_stop,
108 .bmdma_status = ata_bmdma_status,
109 .qc_prep = ata_qc_prep,
110 .qc_issue = ata_qc_issue_prot,
111 .data_xfer = ata_pio_data_xfer,
112
113 .freeze = ata_bmdma_freeze,
114 .thaw = ata_bmdma_thaw,
115 .error_handler = ata_bmdma_error_handler,
116 .post_internal_cmd = ata_bmdma_post_internal_cmd,
117
118 .irq_handler = ata_interrupt,
119 .irq_clear = ata_bmdma_irq_clear,
120
121 .scr_read = uli_scr_read,
122 .scr_write = uli_scr_write,
123
124 .port_start = ata_port_start,
125 .port_stop = ata_port_stop,
126 .host_stop = ata_host_stop,
127};
128
129static struct ata_port_info uli_port_info = {
130 .sht = &uli_sht,
131 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
132 .pio_mask = 0x1f, /* pio0-4 */
133 .udma_mask = 0x7f, /* udma0-6 */
134 .port_ops = &uli_ops,
135};
136
137
138MODULE_AUTHOR("Peer Chen");
139MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
140MODULE_LICENSE("GPL");
141MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
142MODULE_VERSION(DRV_VERSION);
143
144static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
145{
146 struct uli_priv *hpriv = ap->host_set->private_data;
147 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
148}
149
150static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
151{
152 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
153 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
154 u32 val;
155
156 pci_read_config_dword(pdev, cfg_addr, &val);
157 return val;
158}
159
160static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
161{
162 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
163 unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
164
165 pci_write_config_dword(pdev, cfg_addr, val);
166}
167
168static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg)
169{
170 if (sc_reg > SCR_CONTROL)
171 return 0xffffffffU;
172
173 return uli_scr_cfg_read(ap, sc_reg);
174}
175
176static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
177{
178	if (sc_reg > SCR_CONTROL)	/* SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 */
179 return;
180
181 uli_scr_cfg_write(ap, sc_reg, val);
182}
183
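
Unlike the MMIO-based drivers above, the ULi parts expose their SATA status/error/control registers through PCI configuration space, which is why scr_read/scr_write funnel into pci_read_config_dword/pci_write_config_dword. A consumer of the returned SStatus word would decode it per the SATA specification, where bits 3:0 (DET) read 0x3 when a device is present and Phy communication is established. A minimal sketch reusing uli_scr_read:

	/* sketch: is the link up, judging by SStatus.DET? */
	static int uli_link_online(struct ata_port *ap)
	{
		u32 sstatus = uli_scr_read(ap, SCR_STATUS);

		return (sstatus & 0xf) == 0x3;
	}
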
184static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
185{
186 static int printed_version;
187 struct ata_probe_ent *probe_ent;
188 struct ata_port_info *ppi;
189 int rc;
190 unsigned int board_idx = (unsigned int) ent->driver_data;
191 int pci_dev_busy = 0;
192 struct uli_priv *hpriv;
193
194 if (!printed_version++)
195 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
196
197 rc = pci_enable_device(pdev);
198 if (rc)
199 return rc;
200
201 rc = pci_request_regions(pdev, DRV_NAME);
202 if (rc) {
203 pci_dev_busy = 1;
204 goto err_out;
205 }
206
207 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
208 if (rc)
209 goto err_out_regions;
210 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
211 if (rc)
212 goto err_out_regions;
213
214 ppi = &uli_port_info;
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) {
217 rc = -ENOMEM;
218 goto err_out_regions;
219 }
220
221 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
222 if (!hpriv) {
223 rc = -ENOMEM;
224 goto err_out_probe_ent;
225 }
226
227 probe_ent->private_data = hpriv;
228
229 switch (board_idx) {
230 case uli_5287:
231 hpriv->scr_cfg_addr[0] = ULI5287_BASE;
232 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
233 probe_ent->n_ports = 4;
234
235 probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
236 probe_ent->port[2].altstatus_addr =
237 probe_ent->port[2].ctl_addr =
238 (pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
239 probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
240 hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
241
242 probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
243 probe_ent->port[3].altstatus_addr =
244 probe_ent->port[3].ctl_addr =
245 (pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
246 probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
247 hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
248
249 ata_std_ports(&probe_ent->port[2]);
250 ata_std_ports(&probe_ent->port[3]);
251 break;
252
253 case uli_5289:
254 hpriv->scr_cfg_addr[0] = ULI5287_BASE;
255 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
256 break;
257
258 case uli_5281:
259 hpriv->scr_cfg_addr[0] = ULI5281_BASE;
260 hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
261 break;
262
263 default:
264 BUG();
265 break;
266 }
267
268 pci_set_master(pdev);
269 pci_intx(pdev, 1);
270
271 /* FIXME: check ata_device_add return value */
272 ata_device_add(probe_ent);
273 kfree(probe_ent);
274
275 return 0;
276
277err_out_probe_ent:
278 kfree(probe_ent);
279err_out_regions:
280 pci_release_regions(pdev);
281err_out:
282 if (!pci_dev_busy)
283 pci_disable_device(pdev);
284 return rc;
285
286}
287
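uli_init_one() above uses the kernel's standard goto-unwind shape: each resource acquisition has a matching error label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A stripped-down sketch of the same shape, using malloc/free so it runs outside the kernel:

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the goto-unwind idiom from uli_init_one(): later failures
 * jump to labels that release earlier acquisitions in reverse order. */
static int init_one(void)
{
	void *regions, *priv;
	int rc;

	regions = malloc(64);		/* stands in for pci_request_regions() */
	if (!regions)
		return -1;

	priv = malloc(32);		/* stands in for the hpriv allocation */
	if (!priv) {
		rc = -1;
		goto err_out_regions;
	}

	free(priv);
	free(regions);
	return 0;

err_out_regions:
	free(regions);
	return rc;
}

int main(void)
{
	return init_one() ? EXIT_FAILURE : EXIT_SUCCESS;
}
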
288static int __init uli_init(void)
289{
290 return pci_module_init(&uli_pci_driver);
291}
292
293static void __exit uli_exit(void)
294{
295 pci_unregister_driver(&uli_pci_driver);
296}
297
298
299module_init(uli_init);
300module_exit(uli_exit);
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
deleted file mode 100644
index a3727af8b9c1..000000000000
--- a/drivers/scsi/sata_via.c
+++ /dev/null
@@ -1,502 +0,0 @@
1/*
2 * sata_via.c - VIA Serial ATA controllers
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available under NDA.
31 *
32 *
33 * To-do list:
34 * - VT6421 PATA support
35 *
36 */
37
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/pci.h>
41#include <linux/init.h>
42#include <linux/blkdev.h>
43#include <linux/delay.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47#include <asm/io.h>
48
49#define DRV_NAME "sata_via"
50#define DRV_VERSION "2.0"
51
52enum board_ids_enum {
53 vt6420,
54 vt6421,
55};
56
57enum {
58 SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
59 SATA_INT_GATE = 0x41, /* SATA interrupt gating */
60 SATA_NATIVE_MODE = 0x42, /* Native mode enable */
61 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */
62
63 PORT0 = (1 << 1),
64 PORT1 = (1 << 0),
65 ALL_PORTS = PORT0 | PORT1,
66 N_PORTS = 2,
67
68 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
69
70 SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
71 SATA_2DEV = (1 << 5), /* SATA is master/slave */
72};
73
74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
75static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
77static void vt6420_error_handler(struct ata_port *ap);
78
79static const struct pci_device_id svia_pci_tbl[] = {
80 { 0x1106, 0x0591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
81 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
82 { 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 },
83
84 { } /* terminate list */
85};
86
87static struct pci_driver svia_pci_driver = {
88 .name = DRV_NAME,
89 .id_table = svia_pci_tbl,
90 .probe = svia_init_one,
91 .remove = ata_pci_remove_one,
92};
93
94static struct scsi_host_template svia_sht = {
95 .module = THIS_MODULE,
96 .name = DRV_NAME,
97 .ioctl = ata_scsi_ioctl,
98 .queuecommand = ata_scsi_queuecmd,
99 .can_queue = ATA_DEF_QUEUE,
100 .this_id = ATA_SHT_THIS_ID,
101 .sg_tablesize = LIBATA_MAX_PRD,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING,
105 .proc_name = DRV_NAME,
106 .dma_boundary = ATA_DMA_BOUNDARY,
107 .slave_configure = ata_scsi_slave_config,
108 .slave_destroy = ata_scsi_slave_destroy,
109 .bios_param = ata_std_bios_param,
110};
111
112static const struct ata_port_operations vt6420_sata_ops = {
113 .port_disable = ata_port_disable,
114
115 .tf_load = ata_tf_load,
116 .tf_read = ata_tf_read,
117 .check_status = ata_check_status,
118 .exec_command = ata_exec_command,
119 .dev_select = ata_std_dev_select,
120
121 .bmdma_setup = ata_bmdma_setup,
122 .bmdma_start = ata_bmdma_start,
123 .bmdma_stop = ata_bmdma_stop,
124 .bmdma_status = ata_bmdma_status,
125
126 .qc_prep = ata_qc_prep,
127 .qc_issue = ata_qc_issue_prot,
128 .data_xfer = ata_pio_data_xfer,
129
130 .freeze = ata_bmdma_freeze,
131 .thaw = ata_bmdma_thaw,
132 .error_handler = vt6420_error_handler,
133 .post_internal_cmd = ata_bmdma_post_internal_cmd,
134
135 .irq_handler = ata_interrupt,
136 .irq_clear = ata_bmdma_irq_clear,
137
138 .port_start = ata_port_start,
139 .port_stop = ata_port_stop,
140 .host_stop = ata_host_stop,
141};
142
143static const struct ata_port_operations vt6421_sata_ops = {
144 .port_disable = ata_port_disable,
145
146 .tf_load = ata_tf_load,
147 .tf_read = ata_tf_read,
148 .check_status = ata_check_status,
149 .exec_command = ata_exec_command,
150 .dev_select = ata_std_dev_select,
151
152 .bmdma_setup = ata_bmdma_setup,
153 .bmdma_start = ata_bmdma_start,
154 .bmdma_stop = ata_bmdma_stop,
155 .bmdma_status = ata_bmdma_status,
156
157 .qc_prep = ata_qc_prep,
158 .qc_issue = ata_qc_issue_prot,
159 .data_xfer = ata_pio_data_xfer,
160
161 .freeze = ata_bmdma_freeze,
162 .thaw = ata_bmdma_thaw,
163 .error_handler = ata_bmdma_error_handler,
164 .post_internal_cmd = ata_bmdma_post_internal_cmd,
165
166 .irq_handler = ata_interrupt,
167 .irq_clear = ata_bmdma_irq_clear,
168
169 .scr_read = svia_scr_read,
170 .scr_write = svia_scr_write,
171
172 .port_start = ata_port_start,
173 .port_stop = ata_port_stop,
174 .host_stop = ata_host_stop,
175};
176
177static struct ata_port_info vt6420_port_info = {
178 .sht = &svia_sht,
179 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
180 .pio_mask = 0x1f,
181 .mwdma_mask = 0x07,
182 .udma_mask = 0x7f,
183 .port_ops = &vt6420_sata_ops,
184};
185
186MODULE_AUTHOR("Jeff Garzik");
187MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
188MODULE_LICENSE("GPL");
189MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
190MODULE_VERSION(DRV_VERSION);
191
192static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg)
193{
194 if (sc_reg > SCR_CONTROL)
195 return 0xffffffffU;
196 return inl(ap->ioaddr.scr_addr + (4 * sc_reg));
197}
198
199static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
200{
201 if (sc_reg > SCR_CONTROL)
202 return;
203 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
204}
205
206/**
207 * vt6420_prereset - prereset for vt6420
208 * @ap: target ATA port
209 *
210 * SCR registers on vt6420 are badly behaved and may hang the
211 * whole machine if accessed with the wrong timing.  To avoid
212 * such a catastrophe, vt6420 doesn't provide generic SCR access
213 * operations; instead, SStatus and SControl are touched only
214 * during boot probing, in a controlled way.
215 *
216 * As the old (pre-EH-update) probing code has proven to work, we
217 * strictly follow its access pattern.
218 *
219 * LOCKING:
220 * Kernel thread context (may sleep)
221 *
222 * RETURNS:
223 * 0 on success, -errno otherwise.
224 */
225static int vt6420_prereset(struct ata_port *ap)
226{
227 struct ata_eh_context *ehc = &ap->eh_context;
228 unsigned long timeout = jiffies + (HZ * 5);
229 u32 sstatus, scontrol;
230 int online;
231
232 /* don't do any SCR stuff if we're not loading */
233	if (!(ap->pflags & ATA_PFLAG_LOADING))
234 goto skip_scr;
235
236 /* Resume phy. This is the old resume sequence from
237 * __sata_phy_reset().
238 */
239 svia_scr_write(ap, SCR_CONTROL, 0x300);
240 svia_scr_read(ap, SCR_CONTROL); /* flush */
241
242 /* wait for phy to become ready, if necessary */
243 do {
244 msleep(200);
245 if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1)
246 break;
247 } while (time_before(jiffies, timeout));
248
249 /* open code sata_print_link_status() */
250 sstatus = svia_scr_read(ap, SCR_STATUS);
251 scontrol = svia_scr_read(ap, SCR_CONTROL);
252
253 online = (sstatus & 0xf) == 0x3;
254
255 ata_port_printk(ap, KERN_INFO,
256 "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
257 online ? "up" : "down", sstatus, scontrol);
258
259 /* SStatus is read one more time */
260 svia_scr_read(ap, SCR_STATUS);
261
262 if (!online) {
263 /* tell EH to bail */
264 ehc->i.action &= ~ATA_EH_RESET_MASK;
265 return 0;
266 }
267
268 skip_scr:
269 /* wait for !BSY */
270 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
271
272 return 0;
273}
274
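vt6420_prereset() decides link state solely from the low nibble of SStatus (the DET field): 1 means a device is detected but the phy is not communicating, 3 means the link is established. A standalone sketch of that decoding; the DET encodings are the standard SATA ones implied by the checks above:

#include <stdio.h>

/* Decode the DET field (bits 3:0) of SATA SStatus, as vt6420_prereset()
 * does: DET == 1 -> device present, no phy comm; DET == 3 -> link up. */
static const char *det_state(unsigned int sstatus)
{
	switch (sstatus & 0xf) {
	case 0x0: return "no device";
	case 0x1: return "device present, phy offline";
	case 0x3: return "link up";
	default:  return "other/unknown";
	}
}

int main(void)
{
	unsigned int samples[] = { 0x0, 0x1, 0x113, 0x123 };
	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("SStatus %03X: %s\n", samples[i], det_state(samples[i]));
	return 0;
}
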
275static void vt6420_error_handler(struct ata_port *ap)
276{
277 return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset,
278 NULL, ata_std_postreset);
279}
280
281static const unsigned int svia_bar_sizes[] = {
282 8, 4, 8, 4, 16, 256
283};
284
285static const unsigned int vt6421_bar_sizes[] = {
286 16, 16, 16, 16, 32, 128
287};
288
289static unsigned long svia_scr_addr(unsigned long addr, unsigned int port)
290{
291 return addr + (port * 128);
292}
293
294static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port)
295{
296 return addr + (port * 64);
297}
298
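The two helpers above encode the only layout difference between the chips: per-port SCR blocks hang off BAR 5 at a 128-byte stride on vt6420 and a 64-byte stride on vt6421. A quick illustration with an invented BAR 5 base:

#include <stdio.h>

/* Per-port SCR block strides from svia_scr_addr()/vt6421_scr_addr():
 * 128 bytes per port on vt6420, 64 on vt6421. Base is hypothetical. */
int main(void)
{
	unsigned long bar5 = 0xd000;	/* illustrative BAR 5 base */

	for (unsigned int port = 0; port < 2; port++)
		printf("vt6420 port %u SCRs at 0x%lx, vt6421 at 0x%lx\n",
		       port, bar5 + port * 128, bar5 + port * 64);
	return 0;
}
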
299static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
300 struct pci_dev *pdev,
301 unsigned int port)
302{
303 unsigned long reg_addr = pci_resource_start(pdev, port);
304 unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8);
305 unsigned long scr_addr;
306
307 probe_ent->port[port].cmd_addr = reg_addr;
308 probe_ent->port[port].altstatus_addr =
309 probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS;
310 probe_ent->port[port].bmdma_addr = bmdma_addr;
311
312 scr_addr = vt6421_scr_addr(pci_resource_start(pdev, 5), port);
313 probe_ent->port[port].scr_addr = scr_addr;
314
315 ata_std_ports(&probe_ent->port[port]);
316}
317
318static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
319{
320 struct ata_probe_ent *probe_ent;
321 struct ata_port_info *ppi = &vt6420_port_info;
322
323 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
324 if (!probe_ent)
325 return NULL;
326
327 probe_ent->port[0].scr_addr =
328 svia_scr_addr(pci_resource_start(pdev, 5), 0);
329 probe_ent->port[1].scr_addr =
330 svia_scr_addr(pci_resource_start(pdev, 5), 1);
331
332 return probe_ent;
333}
334
335static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
336{
337 struct ata_probe_ent *probe_ent;
338 unsigned int i;
339
340	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
341	if (!probe_ent)
342		return NULL;
343
345 probe_ent->dev = pci_dev_to_dev(pdev);
346 INIT_LIST_HEAD(&probe_ent->node);
347
348 probe_ent->sht = &svia_sht;
349 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
350 probe_ent->port_ops = &vt6421_sata_ops;
351 probe_ent->n_ports = N_PORTS;
352 probe_ent->irq = pdev->irq;
353 probe_ent->irq_flags = IRQF_SHARED;
354 probe_ent->pio_mask = 0x1f;
355 probe_ent->mwdma_mask = 0x07;
356 probe_ent->udma_mask = 0x7f;
357
358 for (i = 0; i < N_PORTS; i++)
359 vt6421_init_addrs(probe_ent, pdev, i);
360
361 return probe_ent;
362}
363
364static void svia_configure(struct pci_dev *pdev)
365{
366 u8 tmp8;
367
368 pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
369 dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
370 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
371
372 /* make sure SATA channels are enabled */
373 pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
374 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
375 dev_printk(KERN_DEBUG, &pdev->dev,
376 "enabling SATA channels (0x%x)\n",
377 (int) tmp8);
378 tmp8 |= ALL_PORTS;
379 pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
380 }
381
382 /* make sure interrupts for each channel sent to us */
383 pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
384 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
385 dev_printk(KERN_DEBUG, &pdev->dev,
386 "enabling SATA channel interrupts (0x%x)\n",
387 (int) tmp8);
388 tmp8 |= ALL_PORTS;
389 pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
390 }
391
392 /* make sure native mode is enabled */
393 pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
394 if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
395 dev_printk(KERN_DEBUG, &pdev->dev,
396 "enabling SATA channel native mode (0x%x)\n",
397 (int) tmp8);
398 tmp8 |= NATIVE_MODE_ALL;
399 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
400 }
401}
402
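svia_configure() repeats one read-modify-write shape three times: read a config byte, check whether the wanted bits are already set, and OR them in only when they are not, so rerunning it is harmless. A self-contained sketch of that idempotent enable pattern against a plain byte:

#include <stdio.h>

/* The enable-bits pattern from svia_configure(): read, test, OR in the
 * missing bits, and report only when something actually changed. */
static unsigned char enable_bits(unsigned char reg, unsigned char bits,
				 const char *what)
{
	if ((reg & bits) != bits) {
		printf("enabling %s (was 0x%02x)\n", what, reg);
		reg |= bits;
	}
	return reg;
}

int main(void)
{
	unsigned char chan = 0x01;		/* one port already enabled */
	unsigned char all_ports = 0x03;		/* PORT0 | PORT1 */

	chan = enable_bits(chan, all_ports, "SATA channels");
	printf("final: 0x%02x\n", chan);
	return 0;
}
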
403static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
404{
405 static int printed_version;
406 unsigned int i;
407 int rc;
408 struct ata_probe_ent *probe_ent;
409 int board_id = (int) ent->driver_data;
410	const unsigned int *bar_sizes;
411 int pci_dev_busy = 0;
412 u8 tmp8;
413
414 if (!printed_version++)
415 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
416
417 rc = pci_enable_device(pdev);
418 if (rc)
419 return rc;
420
421 rc = pci_request_regions(pdev, DRV_NAME);
422 if (rc) {
423 pci_dev_busy = 1;
424 goto err_out;
425 }
426
427 if (board_id == vt6420) {
428 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
429 if (tmp8 & SATA_2DEV) {
430 dev_printk(KERN_ERR, &pdev->dev,
431 "SATA master/slave not supported (0x%x)\n",
432 (int) tmp8);
433 rc = -EIO;
434 goto err_out_regions;
435 }
436
437 bar_sizes = &svia_bar_sizes[0];
438 } else {
439 bar_sizes = &vt6421_bar_sizes[0];
440 }
441
442 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
443 if ((pci_resource_start(pdev, i) == 0) ||
444 (pci_resource_len(pdev, i) < bar_sizes[i])) {
445 dev_printk(KERN_ERR, &pdev->dev,
446 "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
447 i,
448 (unsigned long long)pci_resource_start(pdev, i),
449 (unsigned long long)pci_resource_len(pdev, i));
450 rc = -ENODEV;
451 goto err_out_regions;
452 }
453
454 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
455 if (rc)
456 goto err_out_regions;
457 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
458 if (rc)
459 goto err_out_regions;
460
461 if (board_id == vt6420)
462 probe_ent = vt6420_init_probe_ent(pdev);
463 else
464 probe_ent = vt6421_init_probe_ent(pdev);
465
466 if (!probe_ent) {
467 dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
468 rc = -ENOMEM;
469 goto err_out_regions;
470 }
471
472 svia_configure(pdev);
473
474 pci_set_master(pdev);
475
476 /* FIXME: check ata_device_add return value */
477 ata_device_add(probe_ent);
478 kfree(probe_ent);
479
480 return 0;
481
482err_out_regions:
483 pci_release_regions(pdev);
484err_out:
485 if (!pci_dev_busy)
486 pci_disable_device(pdev);
487 return rc;
488}
489
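Before any setup, svia_init_one() validates every BAR against a per-chip minimum-size table and fails with -ENODEV on a mismatch. The same check in standalone form, with invented resource values:

#include <stdio.h>

/* BAR sanity check in the style of svia_init_one(): every BAR must
 * exist and be at least the expected size. Resource values invented. */
static const unsigned int want[6] = { 8, 4, 8, 4, 16, 256 };

int main(void)
{
	unsigned long long start[6] = { 0xd000, 0xd008, 0xd010, 0xd018, 0xd020, 0xd100 };
	unsigned long long len[6]   = { 8, 4, 8, 4, 16, 256 };

	for (unsigned int i = 0; i < 6; i++)
		if (start[i] == 0 || len[i] < want[i]) {
			fprintf(stderr, "invalid PCI BAR %u (sz 0x%llx)\n",
				i, len[i]);
			return 1;
		}
	puts("all BARs ok");
	return 0;
}
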
490static int __init svia_init(void)
491{
492 return pci_module_init(&svia_pci_driver);
493}
494
495static void __exit svia_exit(void)
496{
497 pci_unregister_driver(&svia_pci_driver);
498}
499
500module_init(svia_init);
501module_exit(svia_exit);
502
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
deleted file mode 100644
index ad37871594f5..000000000000
--- a/drivers/scsi/sata_vsc.c
+++ /dev/null
@@ -1,482 +0,0 @@
1/*
2 * sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
3 *
4 * Maintained by: Jeremy Higdon @ SGI
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 SGI
9 *
10 * Bits from Jeff Garzik, Copyright RedHat, Inc.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; see the file COPYING. If not, write to
25 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 *
28 * libata documentation is available via 'make {ps|pdf}docs',
29 * as Documentation/DocBook/libata.*
30 *
31 * Vitesse hardware documentation presumably available under NDA.
32 * Intel 31244 (same hardware interface) documentation presumably
33 * available from http://developer.intel.com/
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/dma-mapping.h>
45#include <linux/device.h>
46#include <scsi/scsi_host.h>
47#include <linux/libata.h>
48
49#define DRV_NAME "sata_vsc"
50#define DRV_VERSION "2.0"
51
52enum {
53 /* Interrupt register offsets (from chip base address) */
54 VSC_SATA_INT_STAT_OFFSET = 0x00,
55 VSC_SATA_INT_MASK_OFFSET = 0x04,
56
57 /* Taskfile registers offsets */
58 VSC_SATA_TF_CMD_OFFSET = 0x00,
59 VSC_SATA_TF_DATA_OFFSET = 0x00,
60 VSC_SATA_TF_ERROR_OFFSET = 0x04,
61 VSC_SATA_TF_FEATURE_OFFSET = 0x06,
62 VSC_SATA_TF_NSECT_OFFSET = 0x08,
63 VSC_SATA_TF_LBAL_OFFSET = 0x0c,
64 VSC_SATA_TF_LBAM_OFFSET = 0x10,
65 VSC_SATA_TF_LBAH_OFFSET = 0x14,
66 VSC_SATA_TF_DEVICE_OFFSET = 0x18,
67 VSC_SATA_TF_STATUS_OFFSET = 0x1c,
68 VSC_SATA_TF_COMMAND_OFFSET = 0x1d,
69 VSC_SATA_TF_ALTSTATUS_OFFSET = 0x28,
70 VSC_SATA_TF_CTL_OFFSET = 0x29,
71
72 /* DMA base */
73 VSC_SATA_UP_DESCRIPTOR_OFFSET = 0x64,
74 VSC_SATA_UP_DATA_BUFFER_OFFSET = 0x6C,
75 VSC_SATA_DMA_CMD_OFFSET = 0x70,
76
77 /* SCRs base */
78 VSC_SATA_SCR_STATUS_OFFSET = 0x100,
79 VSC_SATA_SCR_ERROR_OFFSET = 0x104,
80 VSC_SATA_SCR_CONTROL_OFFSET = 0x108,
81
82 /* Port stride */
83 VSC_SATA_PORT_OFFSET = 0x200,
84
85 /* Error interrupt status bit offsets */
86 VSC_SATA_INT_ERROR_CRC = 0x40,
87 VSC_SATA_INT_ERROR_T = 0x20,
88 VSC_SATA_INT_ERROR_P = 0x10,
89 VSC_SATA_INT_ERROR_R = 0x8,
90 VSC_SATA_INT_ERROR_E = 0x4,
91 VSC_SATA_INT_ERROR_M = 0x2,
92 VSC_SATA_INT_PHY_CHANGE = 0x1,
93 VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC | VSC_SATA_INT_ERROR_T | \
94 VSC_SATA_INT_ERROR_P | VSC_SATA_INT_ERROR_R | \
95 VSC_SATA_INT_ERROR_E | VSC_SATA_INT_ERROR_M | \
96 VSC_SATA_INT_PHY_CHANGE),
97};
98
99
100#define is_vsc_sata_int_err(port_idx, int_status) \
101	((int_status) & (VSC_SATA_INT_ERROR << (8 * (port_idx))))
102
103
104static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
105{
106 if (sc_reg > SCR_CONTROL)
107 return 0xffffffffU;
108 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
109}
110
111
112static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
113 u32 val)
114{
115 if (sc_reg > SCR_CONTROL)
116 return;
117 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
118}
119
120
121static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
122{
123 void __iomem *mask_addr;
124 u8 mask;
125
126 mask_addr = ap->host_set->mmio_base +
127 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
128 mask = readb(mask_addr);
129 if (ctl & ATA_NIEN)
130 mask |= 0x80;
131 else
132 mask &= 0x7F;
133 writeb(mask, mask_addr);
134}
135
136
137static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
138{
139 struct ata_ioports *ioaddr = &ap->ioaddr;
140 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
141
142 /*
143 * The only thing the ctl register is used for is SRST.
144 * That is not enabled or disabled via tf_load.
145 * However, if ATA_NIEN is changed, then we need to change the interrupt register.
146 */
147 if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
148 ap->last_ctl = tf->ctl;
149 vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
150 }
151 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
152 writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
153 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
154 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
155 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
156 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
157 } else if (is_addr) {
158 writew(tf->feature, ioaddr->feature_addr);
159 writew(tf->nsect, ioaddr->nsect_addr);
160 writew(tf->lbal, ioaddr->lbal_addr);
161 writew(tf->lbam, ioaddr->lbam_addr);
162 writew(tf->lbah, ioaddr->lbah_addr);
163 }
164
165 if (tf->flags & ATA_TFLAG_DEVICE)
166 writeb(tf->device, ioaddr->device_addr);
167
168 ata_wait_idle(ap);
169}
170
171
172static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
173{
174 struct ata_ioports *ioaddr = &ap->ioaddr;
175 u16 nsect, lbal, lbam, lbah, feature;
176
177 tf->command = ata_check_status(ap);
178 tf->device = readw(ioaddr->device_addr);
179 feature = readw(ioaddr->error_addr);
180 nsect = readw(ioaddr->nsect_addr);
181 lbal = readw(ioaddr->lbal_addr);
182 lbam = readw(ioaddr->lbam_addr);
183 lbah = readw(ioaddr->lbah_addr);
184
185 tf->feature = feature;
186 tf->nsect = nsect;
187 tf->lbal = lbal;
188 tf->lbam = lbam;
189 tf->lbah = lbah;
190
191 if (tf->flags & ATA_TFLAG_LBA48) {
192 tf->hob_feature = feature >> 8;
193 tf->hob_nsect = nsect >> 8;
194 tf->hob_lbal = lbal >> 8;
195 tf->hob_lbam = lbam >> 8;
196 tf->hob_lbah = lbah >> 8;
197 }
198}
199
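Because the VSC7174 widens each taskfile register to 16 bits, vsc_sata_tf_load() packs the LBA48 HOB ("high-order byte", i.e. previous) value into bits 15:8 alongside the current value in bits 7:0, and vsc_sata_tf_read() splits them back apart. A standalone round-trip of that packing:

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned short u16;

/* Pack/unpack a taskfile register pair the way vsc_sata_tf_load() and
 * vsc_sata_tf_read() do: HOB value in bits 15:8, current in bits 7:0. */
static u16 pack(u8 cur, u8 hob)   { return cur | ((u16)hob << 8); }
static u8  unpack_cur(u16 w)      { return w & 0xff; }
static u8  unpack_hob(u16 w)      { return w >> 8; }

int main(void)
{
	u16 w = pack(0x34, 0x12);	/* nsect = 0x34, hob_nsect = 0x12 */

	printf("word 0x%04x -> cur 0x%02x, hob 0x%02x\n",
	       w, unpack_cur(w), unpack_hob(w));
	return 0;
}
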
200
201/*
202 * vsc_sata_interrupt
203 *
204 * Read the interrupt register and dispatch to the ports that have
205 * interrupts pending.
205 */
206static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
207 struct pt_regs *regs)
208{
209 struct ata_host_set *host_set = dev_instance;
210 unsigned int i;
211 unsigned int handled = 0;
212 u32 int_status;
213
214 spin_lock(&host_set->lock);
215
216 int_status = readl(host_set->mmio_base + VSC_SATA_INT_STAT_OFFSET);
217
218 for (i = 0; i < host_set->n_ports; i++) {
219 if (int_status & ((u32) 0xFF << (8 * i))) {
220 struct ata_port *ap;
221
222 ap = host_set->ports[i];
223
224 if (is_vsc_sata_int_err(i, int_status)) {
225 u32 err_status;
226 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
227				err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
228				if (ap) vsc_sata_scr_write(ap, SCR_ERROR, err_status);
229 handled++;
230 }
231
232 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
233 struct ata_queued_cmd *qc;
234
235 qc = ata_qc_from_tag(ap, ap->active_tag);
236 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
237 handled += ata_host_intr(ap, qc);
238 else if (is_vsc_sata_int_err(i, int_status)) {
239 /*
240 * On some chips (i.e. Intel 31244), an error
241 * interrupt will sneak in at initialization
242 * time (phy state changes). Clearing the SCR
243 * error register is not required, but it prevents
244 * the phy state change interrupts from recurring
245 * later.
246 */
247 u32 err_status;
248 err_status = vsc_sata_scr_read(ap, SCR_ERROR);
249 printk(KERN_DEBUG "%s: clearing interrupt, "
250 "status %x; sata err status %x\n",
251 __FUNCTION__,
252 int_status, err_status);
253 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
254 /* Clear interrupt status */
255 ata_chk_status(ap);
256 handled++;
257 }
258 }
259 }
260 }
261
262 spin_unlock(&host_set->lock);
263
264 return IRQ_RETVAL(handled);
265}
266
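The shared interrupt status word gives each of the four ports its own byte lane, which is why the handler tests int_status against 0xFF << (8 * i) and why is_vsc_sata_int_err() shifts the error mask the same way. A sketch of walking those lanes with an invented status value:

#include <stdio.h>

/* Walk the per-port byte lanes of the VSC7174 interrupt status word,
 * as vsc_sata_interrupt() does: port i owns bits (8*i)..(8*i + 7). */
int main(void)
{
	unsigned int int_status = 0x00400001;	/* invented sample value */

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int lane = (int_status >> (8 * i)) & 0xFF;
		if (lane)
			printf("port %u pending, status byte 0x%02x\n", i, lane);
	}
	return 0;
}
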
267
268static struct scsi_host_template vsc_sata_sht = {
269 .module = THIS_MODULE,
270 .name = DRV_NAME,
271 .ioctl = ata_scsi_ioctl,
272 .queuecommand = ata_scsi_queuecmd,
273 .can_queue = ATA_DEF_QUEUE,
274 .this_id = ATA_SHT_THIS_ID,
275 .sg_tablesize = LIBATA_MAX_PRD,
276 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
277 .emulated = ATA_SHT_EMULATED,
278 .use_clustering = ATA_SHT_USE_CLUSTERING,
279 .proc_name = DRV_NAME,
280 .dma_boundary = ATA_DMA_BOUNDARY,
281 .slave_configure = ata_scsi_slave_config,
282 .slave_destroy = ata_scsi_slave_destroy,
283 .bios_param = ata_std_bios_param,
284};
285
286
287static const struct ata_port_operations vsc_sata_ops = {
288 .port_disable = ata_port_disable,
289 .tf_load = vsc_sata_tf_load,
290 .tf_read = vsc_sata_tf_read,
291 .exec_command = ata_exec_command,
292 .check_status = ata_check_status,
293 .dev_select = ata_std_dev_select,
294 .bmdma_setup = ata_bmdma_setup,
295 .bmdma_start = ata_bmdma_start,
296 .bmdma_stop = ata_bmdma_stop,
297 .bmdma_status = ata_bmdma_status,
298 .qc_prep = ata_qc_prep,
299 .qc_issue = ata_qc_issue_prot,
300 .data_xfer = ata_mmio_data_xfer,
301 .freeze = ata_bmdma_freeze,
302 .thaw = ata_bmdma_thaw,
303 .error_handler = ata_bmdma_error_handler,
304 .post_internal_cmd = ata_bmdma_post_internal_cmd,
305 .irq_handler = vsc_sata_interrupt,
306 .irq_clear = ata_bmdma_irq_clear,
307 .scr_read = vsc_sata_scr_read,
308 .scr_write = vsc_sata_scr_write,
309 .port_start = ata_port_start,
310 .port_stop = ata_port_stop,
311 .host_stop = ata_pci_host_stop,
312};
313
314static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
315{
316 port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
317 port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
318 port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET;
319 port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET;
320 port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET;
321 port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET;
322 port->lbam_addr = base + VSC_SATA_TF_LBAM_OFFSET;
323 port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET;
324 port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET;
325 port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET;
326 port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET;
327 port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET;
328 port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
329 port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
330 port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
331 writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
332 writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
333}
334
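The port setup helper above is fed bases computed in vsc_sata_init_one() further down as base + (N + 1) * 0x200: port 0's register block starts one 0x200 stride into BAR 0, not at offset 0. A tiny sketch of the resulting offsets:

#include <stdio.h>

/* VSC7174 port register blocks are VSC_SATA_PORT_OFFSET (0x200) bytes
 * apart, and port N sits (N + 1) strides into BAR 0, per init_one(). */
int main(void)
{
	for (unsigned int port = 0; port < 4; port++)
		printf("port %u block at BAR0 + 0x%lx\n",
		       port, (unsigned long)(port + 1) * 0x200UL);
	return 0;
}
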
335
336static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
337{
338 static int printed_version;
339 struct ata_probe_ent *probe_ent = NULL;
340 unsigned long base;
341 int pci_dev_busy = 0;
342 void __iomem *mmio_base;
343 int rc;
344
345 if (!printed_version++)
346 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
347
348 rc = pci_enable_device(pdev);
349 if (rc)
350 return rc;
351
352 /*
353	 * Check that the needed resource (BAR 0) is mapped.
354 */
355 if (pci_resource_len(pdev, 0) == 0) {
356 rc = -ENODEV;
357 goto err_out;
358 }
359
360 rc = pci_request_regions(pdev, DRV_NAME);
361 if (rc) {
362 pci_dev_busy = 1;
363 goto err_out;
364 }
365
366 /*
367 * Use 32 bit DMA mask, because 64 bit address support is poor.
368 */
369 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
370 if (rc)
371 goto err_out_regions;
372 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
373 if (rc)
374 goto err_out_regions;
375
376	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
377	if (probe_ent == NULL) {
378		rc = -ENOMEM;
379		goto err_out_regions;
380	}
382 probe_ent->dev = pci_dev_to_dev(pdev);
383 INIT_LIST_HEAD(&probe_ent->node);
384
385 mmio_base = pci_iomap(pdev, 0, 0);
386 if (mmio_base == NULL) {
387 rc = -ENOMEM;
388 goto err_out_free_ent;
389 }
390 base = (unsigned long) mmio_base;
391
392 /*
393 * Due to a bug in the chip, the default cache line size can't be used
394 */
395 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
396
397 probe_ent->sht = &vsc_sata_sht;
398 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
399 ATA_FLAG_MMIO;
400 probe_ent->port_ops = &vsc_sata_ops;
401 probe_ent->n_ports = 4;
402 probe_ent->irq = pdev->irq;
403 probe_ent->irq_flags = IRQF_SHARED;
404 probe_ent->mmio_base = mmio_base;
405
406	/* We don't care much about the PIO/UDMA masks, but the core won't
407	 * like us if we don't fill them in.
408	 */
409 probe_ent->pio_mask = 0x1f;
410 probe_ent->mwdma_mask = 0x07;
411 probe_ent->udma_mask = 0x7f;
412
413 /* We have 4 ports per PCI function */
414 vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET);
415 vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET);
416 vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET);
417 vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET);
418
419 pci_set_master(pdev);
420
421 /*
422 * Config offset 0x98 is "Extended Control and Status Register 0"
423 * Default value is (1 << 28). All bits except bit 28 are reserved in
424 * DPA mode. If bit 28 is set, LED 0 reflects all ports' activity.
425 * If bit 28 is clear, each port has its own LED.
426 */
427 pci_write_config_dword(pdev, 0x98, 0);
428
429 /* FIXME: check ata_device_add return value */
430 ata_device_add(probe_ent);
431 kfree(probe_ent);
432
433 return 0;
434
435err_out_free_ent:
436 kfree(probe_ent);
437err_out_regions:
438 pci_release_regions(pdev);
439err_out:
440 if (!pci_dev_busy)
441 pci_disable_device(pdev);
442 return rc;
443}
444
445
446static const struct pci_device_id vsc_sata_pci_tbl[] = {
447 { PCI_VENDOR_ID_VITESSE, 0x7174,
448 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
449 { PCI_VENDOR_ID_INTEL, 0x3200,
450 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
451 { } /* terminate list */
452};
453
454
455static struct pci_driver vsc_sata_pci_driver = {
456 .name = DRV_NAME,
457 .id_table = vsc_sata_pci_tbl,
458 .probe = vsc_sata_init_one,
459 .remove = ata_pci_remove_one,
460};
461
462
463static int __init vsc_sata_init(void)
464{
465 return pci_module_init(&vsc_sata_pci_driver);
466}
467
468
469static void __exit vsc_sata_exit(void)
470{
471 pci_unregister_driver(&vsc_sata_pci_driver);
472}
473
474
475MODULE_AUTHOR("Jeremy Higdon");
476MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
477MODULE_LICENSE("GPL");
478MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
479MODULE_VERSION(DRV_VERSION);
480
481module_init(vsc_sata_init);
482module_exit(vsc_sata_exit);