aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/ata
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/ata')
-rw-r--r--drivers/ata/Kconfig140
-rw-r--r--drivers/ata/Makefile19
-rw-r--r--drivers/ata/ahci.c1684
-rw-r--r--drivers/ata/ata_piix.c960
-rw-r--r--drivers/ata/libata-bmdma.c1109
-rw-r--r--drivers/ata/libata-core.c6097
-rw-r--r--drivers/ata/libata-eh.c2246
-rw-r--r--drivers/ata/libata-scsi.c3322
-rw-r--r--drivers/ata/libata.h122
-rw-r--r--drivers/ata/pdc_adma.c740
-rw-r--r--drivers/ata/sata_mv.c2468
-rw-r--r--drivers/ata/sata_nv.c595
-rw-r--r--drivers/ata/sata_promise.c844
-rw-r--r--drivers/ata/sata_promise.h157
-rw-r--r--drivers/ata/sata_qstor.c730
-rw-r--r--drivers/ata/sata_sil.c723
-rw-r--r--drivers/ata/sata_sil24.c1222
-rw-r--r--drivers/ata/sata_sis.c347
-rw-r--r--drivers/ata/sata_svw.c508
-rw-r--r--drivers/ata/sata_sx4.c1502
-rw-r--r--drivers/ata/sata_uli.c300
-rw-r--r--drivers/ata/sata_via.c394
-rw-r--r--drivers/ata/sata_vsc.c482
23 files changed, 26711 insertions, 0 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
new file mode 100644
index 000000000000..95d6b417af65
--- /dev/null
+++ b/drivers/ata/Kconfig
@@ -0,0 +1,140 @@
1
2config ATA
3 tristate "ATA device support"
4 depends on SCSI
5 ---help---
6 If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
7 any other ATA device under Linux, say Y and make sure that you know
8 the name of your ATA host adapter (the card inside your computer
9 that "speaks" the ATA protocol, also called ATA controller),
10 because you will be asked for it.
11
12config SCSI_SATA_AHCI
13 tristate "AHCI SATA support"
14 depends on ATA && PCI
15 help
16 This option enables support for AHCI Serial ATA.
17
18 If unsure, say N.
19
20config SCSI_SATA_SVW
21 tristate "ServerWorks Frodo / Apple K2 SATA support"
22 depends on ATA && PCI
23 help
24 This option enables support for Broadcom/Serverworks/Apple K2
25 SATA support.
26
27 If unsure, say N.
28
29config SCSI_ATA_PIIX
30 tristate "Intel PIIX/ICH SATA support"
31 depends on ATA && PCI
32 help
33 This option enables support for ICH5/6/7/8 Serial ATA.
34 If PATA support was enabled previously, this enables
35 support for select Intel PIIX/ICH PATA host controllers.
36
37 If unsure, say N.
38
39config SCSI_SATA_MV
40 tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
41 depends on ATA && PCI && EXPERIMENTAL
42 help
43 This option enables support for the Marvell Serial ATA family.
44 Currently supports 88SX[56]0[48][01] chips.
45
46 If unsure, say N.
47
48config SCSI_SATA_NV
49 tristate "NVIDIA SATA support"
50 depends on ATA && PCI
51 help
52 This option enables support for NVIDIA Serial ATA.
53
54 If unsure, say N.
55
56config SCSI_PDC_ADMA
57 tristate "Pacific Digital ADMA support"
58 depends on ATA && PCI
59 help
60 This option enables support for Pacific Digital ADMA controllers
61
62 If unsure, say N.
63
64config SCSI_SATA_QSTOR
65 tristate "Pacific Digital SATA QStor support"
66 depends on ATA && PCI
67 help
68 This option enables support for Pacific Digital Serial ATA QStor.
69
70 If unsure, say N.
71
72config SCSI_SATA_PROMISE
73 tristate "Promise SATA TX2/TX4 support"
74 depends on ATA && PCI
75 help
76 This option enables support for Promise Serial ATA TX2/TX4.
77
78 If unsure, say N.
79
80config SCSI_SATA_SX4
81 tristate "Promise SATA SX4 support"
82 depends on ATA && PCI && EXPERIMENTAL
83 help
84 This option enables support for Promise Serial ATA SX4.
85
86 If unsure, say N.
87
88config SCSI_SATA_SIL
89 tristate "Silicon Image SATA support"
90 depends on ATA && PCI
91 help
92 This option enables support for Silicon Image Serial ATA.
93
94 If unsure, say N.
95
96config SCSI_SATA_SIL24
97 tristate "Silicon Image 3124/3132 SATA support"
98 depends on ATA && PCI
99 help
100 This option enables support for Silicon Image 3124/3132 Serial ATA.
101
102 If unsure, say N.
103
104config SCSI_SATA_SIS
105 tristate "SiS 964/180 SATA support"
106 depends on ATA && PCI
107 help
108 This option enables support for SiS Serial ATA 964/180.
109
110 If unsure, say N.
111
112config SCSI_SATA_ULI
113 tristate "ULi Electronics SATA support"
114 depends on ATA && PCI
115 help
116 This option enables support for ULi Electronics SATA.
117
118 If unsure, say N.
119
120config SCSI_SATA_VIA
121 tristate "VIA SATA support"
122 depends on ATA && PCI
123 help
124 This option enables support for VIA Serial ATA.
125
126 If unsure, say N.
127
128config SCSI_SATA_VITESSE
129 tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
130 depends on ATA && PCI
131 help
132 This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
133
134 If unsure, say N.
135
136config SCSI_SATA_INTEL_COMBINED
137 bool
138 depends on IDE=y && !BLK_DEV_IDE_SATA && (SCSI_SATA_AHCI || SCSI_ATA_PIIX)
139 default y
140
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
new file mode 100644
index 000000000000..60bdb7b2b5ca
--- /dev/null
+++ b/drivers/ata/Makefile
@@ -0,0 +1,19 @@
1
2obj-$(CONFIG_SCSI_SATA_AHCI) += libata.o ahci.o
3obj-$(CONFIG_SCSI_SATA_SVW) += libata.o sata_svw.o
4obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o
5obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o
6obj-$(CONFIG_SCSI_SATA_QSTOR) += libata.o sata_qstor.o
7obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o
8obj-$(CONFIG_SCSI_SATA_SIL24) += libata.o sata_sil24.o
9obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o
10obj-$(CONFIG_SCSI_SATA_VITESSE) += libata.o sata_vsc.o
11obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o
12obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o
13obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o
14obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o
15obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o
16obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o
17
18libata-objs := libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
19
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
new file mode 100644
index 000000000000..813031c01fba
--- /dev/null
+++ b/drivers/ata/ahci.c
@@ -0,0 +1,1684 @@
1/*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/dma-mapping.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_cmnd.h>
47#include <linux/libata.h>
48#include <asm/io.h>
49
50#define DRV_NAME "ahci"
51#define DRV_VERSION "2.0"
52
53
54enum {
55 AHCI_PCI_BAR = 5,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
59 AHCI_MAX_CMDS = 32,
60 AHCI_CMD_SZ = 32,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
62 AHCI_RX_FIS_SZ = 256,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
68 AHCI_RX_FIS_SZ,
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
75
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78
79 board_ahci = 0,
80 board_ahci_vt8251 = 1,
81
82 /* global controller registers */
83 HOST_CAP = 0x00, /* host capabilities */
84 HOST_CTL = 0x04, /* global host control */
85 HOST_IRQ_STAT = 0x08, /* interrupt status */
86 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
87 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
88
89 /* HOST_CTL bits */
90 HOST_RESET = (1 << 0), /* reset controller; self-clear */
91 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
92 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
93
94 /* HOST_CAP bits */
95 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
96 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
97 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
98 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
99 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
100
101 /* registers for each SATA port */
102 PORT_LST_ADDR = 0x00, /* command list DMA addr */
103 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
104 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
105 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
106 PORT_IRQ_STAT = 0x10, /* interrupt status */
107 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
108 PORT_CMD = 0x18, /* port command */
109 PORT_TFDATA = 0x20, /* taskfile data */
110 PORT_SIG = 0x24, /* device TF signature */
111 PORT_CMD_ISSUE = 0x38, /* command issue */
112 PORT_SCR = 0x28, /* SATA phy register block */
113 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
114 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
115 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
116 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
117
118 /* PORT_IRQ_{STAT,MASK} bits */
119 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
120 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
121 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
122 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
123 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
124 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
125 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
126 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
127
128 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
129 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
130 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
131 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
132 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
133 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
134 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
135 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
136 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
137
138 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
139 PORT_IRQ_IF_ERR |
140 PORT_IRQ_CONNECT |
141 PORT_IRQ_PHYRDY |
142 PORT_IRQ_UNK_FIS,
143 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
144 PORT_IRQ_TF_ERR |
145 PORT_IRQ_HBUS_DATA_ERR,
146 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
147 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
148 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
149
150 /* PORT_CMD bits */
151 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
152 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
153 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
154 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
155 PORT_CMD_CLO = (1 << 3), /* Command list override */
156 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
157 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
158 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
159
160 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
161 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
162 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
163 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
164
165 /* hpriv->flags bits */
166 AHCI_FLAG_MSI = (1 << 0),
167
168 /* ap->flags bits */
169 AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
170 AHCI_FLAG_NO_NCQ = (1 << 25),
171};
172
173struct ahci_cmd_hdr {
174 u32 opts;
175 u32 status;
176 u32 tbl_addr;
177 u32 tbl_addr_hi;
178 u32 reserved[4];
179};
180
181struct ahci_sg {
182 u32 addr;
183 u32 addr_hi;
184 u32 reserved;
185 u32 flags_size;
186};
187
188struct ahci_host_priv {
189 unsigned long flags;
190 u32 cap; /* cache of HOST_CAP register */
191 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
192};
193
194struct ahci_port_priv {
195 struct ahci_cmd_hdr *cmd_slot;
196 dma_addr_t cmd_slot_dma;
197 void *cmd_tbl;
198 dma_addr_t cmd_tbl_dma;
199 void *rx_fis;
200 dma_addr_t rx_fis_dma;
201};
202
203static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
204static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
205static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
206static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
207static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
208static void ahci_irq_clear(struct ata_port *ap);
209static int ahci_port_start(struct ata_port *ap);
210static void ahci_port_stop(struct ata_port *ap);
211static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
212static void ahci_qc_prep(struct ata_queued_cmd *qc);
213static u8 ahci_check_status(struct ata_port *ap);
214static void ahci_freeze(struct ata_port *ap);
215static void ahci_thaw(struct ata_port *ap);
216static void ahci_error_handler(struct ata_port *ap);
217static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
218static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
219static int ahci_port_resume(struct ata_port *ap);
220static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
221static int ahci_pci_device_resume(struct pci_dev *pdev);
222static void ahci_remove_one (struct pci_dev *pdev);
223
224static struct scsi_host_template ahci_sht = {
225 .module = THIS_MODULE,
226 .name = DRV_NAME,
227 .ioctl = ata_scsi_ioctl,
228 .queuecommand = ata_scsi_queuecmd,
229 .change_queue_depth = ata_scsi_change_queue_depth,
230 .can_queue = AHCI_MAX_CMDS - 1,
231 .this_id = ATA_SHT_THIS_ID,
232 .sg_tablesize = AHCI_MAX_SG,
233 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
234 .emulated = ATA_SHT_EMULATED,
235 .use_clustering = AHCI_USE_CLUSTERING,
236 .proc_name = DRV_NAME,
237 .dma_boundary = AHCI_DMA_BOUNDARY,
238 .slave_configure = ata_scsi_slave_config,
239 .slave_destroy = ata_scsi_slave_destroy,
240 .bios_param = ata_std_bios_param,
241 .suspend = ata_scsi_device_suspend,
242 .resume = ata_scsi_device_resume,
243};
244
245static const struct ata_port_operations ahci_ops = {
246 .port_disable = ata_port_disable,
247
248 .check_status = ahci_check_status,
249 .check_altstatus = ahci_check_status,
250 .dev_select = ata_noop_dev_select,
251
252 .tf_read = ahci_tf_read,
253
254 .qc_prep = ahci_qc_prep,
255 .qc_issue = ahci_qc_issue,
256
257 .irq_handler = ahci_interrupt,
258 .irq_clear = ahci_irq_clear,
259
260 .scr_read = ahci_scr_read,
261 .scr_write = ahci_scr_write,
262
263 .freeze = ahci_freeze,
264 .thaw = ahci_thaw,
265
266 .error_handler = ahci_error_handler,
267 .post_internal_cmd = ahci_post_internal_cmd,
268
269 .port_suspend = ahci_port_suspend,
270 .port_resume = ahci_port_resume,
271
272 .port_start = ahci_port_start,
273 .port_stop = ahci_port_stop,
274};
275
276static const struct ata_port_info ahci_port_info[] = {
277 /* board_ahci */
278 {
279 .sht = &ahci_sht,
280 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
281 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
282 ATA_FLAG_SKIP_D2H_BSY,
283 .pio_mask = 0x1f, /* pio0-4 */
284 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
285 .port_ops = &ahci_ops,
286 },
287 /* board_ahci_vt8251 */
288 {
289 .sht = &ahci_sht,
290 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
291 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
292 ATA_FLAG_SKIP_D2H_BSY |
293 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
294 .pio_mask = 0x1f, /* pio0-4 */
295 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
296 .port_ops = &ahci_ops,
297 },
298};
299
300static const struct pci_device_id ahci_pci_tbl[] = {
301 /* Intel */
302 { PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
303 board_ahci }, /* ICH6 */
304 { PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
305 board_ahci }, /* ICH6M */
306 { PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
307 board_ahci }, /* ICH7 */
308 { PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
309 board_ahci }, /* ICH7M */
310 { PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
311 board_ahci }, /* ICH7R */
312 { PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
313 board_ahci }, /* ULi M5288 */
314 { PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
315 board_ahci }, /* ESB2 */
316 { PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
317 board_ahci }, /* ESB2 */
318 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
319 board_ahci }, /* ESB2 */
320 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
321 board_ahci }, /* ICH7-M DH */
322 { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
323 board_ahci }, /* ICH8 */
324 { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
325 board_ahci }, /* ICH8 */
326 { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
327 board_ahci }, /* ICH8 */
328 { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
329 board_ahci }, /* ICH8M */
330 { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
331 board_ahci }, /* ICH8M */
332
333 /* JMicron */
334 { 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
335 board_ahci }, /* JMicron JMB360 */
336 { 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
337 board_ahci }, /* JMicron JMB361 */
338 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
339 board_ahci }, /* JMicron JMB363 */
340 { 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
341 board_ahci }, /* JMicron JMB365 */
342 { 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
343 board_ahci }, /* JMicron JMB366 */
344
345 /* ATI */
346 { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
347 board_ahci }, /* ATI SB600 non-raid */
348 { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
349 board_ahci }, /* ATI SB600 raid */
350
351 /* VIA */
352 { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
353 board_ahci_vt8251 }, /* VIA VT8251 */
354
355 /* NVIDIA */
356 { PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
357 board_ahci }, /* MCP65 */
358 { PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
359 board_ahci }, /* MCP65 */
360 { PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
361 board_ahci }, /* MCP65 */
362 { PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
363 board_ahci }, /* MCP65 */
364
365 /* SiS */
366 { PCI_VENDOR_ID_SI, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
367 board_ahci }, /* SiS 966 */
368 { PCI_VENDOR_ID_SI, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
369 board_ahci }, /* SiS 966 */
370 { PCI_VENDOR_ID_SI, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
371 board_ahci }, /* SiS 968 */
372
373 { } /* terminate list */
374};
375
376
377static struct pci_driver ahci_pci_driver = {
378 .name = DRV_NAME,
379 .id_table = ahci_pci_tbl,
380 .probe = ahci_init_one,
381 .suspend = ahci_pci_device_suspend,
382 .resume = ahci_pci_device_resume,
383 .remove = ahci_remove_one,
384};
385
386
/* Address of a port's register block: port registers start at offset
 * 0x100 from the HBA base and each port occupies 0x80 bytes.
 */
static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
{
	return base + 0x100 + (0x80 * port);
}
391
392static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
393{
394 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
395}
396
397static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
398{
399 unsigned int sc_reg;
400
401 switch (sc_reg_in) {
402 case SCR_STATUS: sc_reg = 0; break;
403 case SCR_CONTROL: sc_reg = 1; break;
404 case SCR_ERROR: sc_reg = 2; break;
405 case SCR_ACTIVE: sc_reg = 3; break;
406 default:
407 return 0xffffffffU;
408 }
409
410 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
411}
412
413
414static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
415 u32 val)
416{
417 unsigned int sc_reg;
418
419 switch (sc_reg_in) {
420 case SCR_STATUS: sc_reg = 0; break;
421 case SCR_CONTROL: sc_reg = 1; break;
422 case SCR_ERROR: sc_reg = 2; break;
423 case SCR_ACTIVE: sc_reg = 3; break;
424 default:
425 return;
426 }
427
428 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
429}
430
431static void ahci_start_engine(void __iomem *port_mmio)
432{
433 u32 tmp;
434
435 /* start DMA */
436 tmp = readl(port_mmio + PORT_CMD);
437 tmp |= PORT_CMD_START;
438 writel(tmp, port_mmio + PORT_CMD);
439 readl(port_mmio + PORT_CMD); /* flush */
440}
441
442static int ahci_stop_engine(void __iomem *port_mmio)
443{
444 u32 tmp;
445
446 tmp = readl(port_mmio + PORT_CMD);
447
448 /* check if the HBA is idle */
449 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
450 return 0;
451
452 /* setting HBA to idle */
453 tmp &= ~PORT_CMD_START;
454 writel(tmp, port_mmio + PORT_CMD);
455
456 /* wait for engine to stop. This could be as long as 500 msec */
457 tmp = ata_wait_register(port_mmio + PORT_CMD,
458 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
459 if (tmp & PORT_CMD_LIST_ON)
460 return -EIO;
461
462 return 0;
463}
464
465static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
466 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
467{
468 u32 tmp;
469
470 /* set FIS registers */
471 if (cap & HOST_CAP_64)
472 writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
473 writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
474
475 if (cap & HOST_CAP_64)
476 writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
477 writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
478
479 /* enable FIS reception */
480 tmp = readl(port_mmio + PORT_CMD);
481 tmp |= PORT_CMD_FIS_RX;
482 writel(tmp, port_mmio + PORT_CMD);
483
484 /* flush */
485 readl(port_mmio + PORT_CMD);
486}
487
488static int ahci_stop_fis_rx(void __iomem *port_mmio)
489{
490 u32 tmp;
491
492 /* disable FIS reception */
493 tmp = readl(port_mmio + PORT_CMD);
494 tmp &= ~PORT_CMD_FIS_RX;
495 writel(tmp, port_mmio + PORT_CMD);
496
497 /* wait for completion, spec says 500ms, give it 1000 */
498 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
499 PORT_CMD_FIS_ON, 10, 1000);
500 if (tmp & PORT_CMD_FIS_ON)
501 return -EBUSY;
502
503 return 0;
504}
505
506static void ahci_power_up(void __iomem *port_mmio, u32 cap)
507{
508 u32 cmd;
509
510 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
511
512 /* spin up device */
513 if (cap & HOST_CAP_SSS) {
514 cmd |= PORT_CMD_SPIN_UP;
515 writel(cmd, port_mmio + PORT_CMD);
516 }
517
518 /* wake up link */
519 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
520}
521
522static void ahci_power_down(void __iomem *port_mmio, u32 cap)
523{
524 u32 cmd, scontrol;
525
526 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
527
528 if (cap & HOST_CAP_SSC) {
529 /* enable transitions to slumber mode */
530 scontrol = readl(port_mmio + PORT_SCR_CTL);
531 if ((scontrol & 0x0f00) > 0x100) {
532 scontrol &= ~0xf00;
533 writel(scontrol, port_mmio + PORT_SCR_CTL);
534 }
535
536 /* put device into slumber mode */
537 writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
538
539 /* wait for the transition to complete */
540 ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
541 PORT_CMD_ICC_SLUMBER, 1, 50);
542 }
543
544 /* put device into listen mode */
545 if (cap & HOST_CAP_SSS) {
546 /* first set PxSCTL.DET to 0 */
547 scontrol = readl(port_mmio + PORT_SCR_CTL);
548 scontrol &= ~0xf;
549 writel(scontrol, port_mmio + PORT_SCR_CTL);
550
551 /* then set PxCMD.SUD to 0 */
552 cmd &= ~PORT_CMD_SPIN_UP;
553 writel(cmd, port_mmio + PORT_CMD);
554 }
555}
556
557static void ahci_init_port(void __iomem *port_mmio, u32 cap,
558 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
559{
560 /* power up */
561 ahci_power_up(port_mmio, cap);
562
563 /* enable FIS reception */
564 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
565
566 /* enable DMA */
567 ahci_start_engine(port_mmio);
568}
569
570static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
571{
572 int rc;
573
574 /* disable DMA */
575 rc = ahci_stop_engine(port_mmio);
576 if (rc) {
577 *emsg = "failed to stop engine";
578 return rc;
579 }
580
581 /* disable FIS reception */
582 rc = ahci_stop_fis_rx(port_mmio);
583 if (rc) {
584 *emsg = "failed stop FIS RX";
585 return rc;
586 }
587
588 /* put device into slumber mode */
589 ahci_power_down(port_mmio, cap);
590
591 return 0;
592}
593
594static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
595{
596 u32 cap_save, tmp;
597
598 cap_save = readl(mmio + HOST_CAP);
599 cap_save &= ( (1<<28) | (1<<17) );
600 cap_save |= (1 << 27);
601
602 /* global controller reset */
603 tmp = readl(mmio + HOST_CTL);
604 if ((tmp & HOST_RESET) == 0) {
605 writel(tmp | HOST_RESET, mmio + HOST_CTL);
606 readl(mmio + HOST_CTL); /* flush */
607 }
608
609 /* reset must complete within 1 second, or
610 * the hardware should be considered fried.
611 */
612 ssleep(1);
613
614 tmp = readl(mmio + HOST_CTL);
615 if (tmp & HOST_RESET) {
616 dev_printk(KERN_ERR, &pdev->dev,
617 "controller reset failed (0x%x)\n", tmp);
618 return -EIO;
619 }
620
621 writel(HOST_AHCI_EN, mmio + HOST_CTL);
622 (void) readl(mmio + HOST_CTL); /* flush */
623 writel(cap_save, mmio + HOST_CAP);
624 writel(0xf, mmio + HOST_PORTS_IMPL);
625 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
626
627 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
628 u16 tmp16;
629
630 /* configure PCS */
631 pci_read_config_word(pdev, 0x92, &tmp16);
632 tmp16 |= 0xf;
633 pci_write_config_word(pdev, 0x92, tmp16);
634 }
635
636 return 0;
637}
638
639static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
640 int n_ports, u32 cap)
641{
642 int i, rc;
643 u32 tmp;
644
645 for (i = 0; i < n_ports; i++) {
646 void __iomem *port_mmio = ahci_port_base(mmio, i);
647 const char *emsg = NULL;
648
649#if 0 /* BIOSen initialize this incorrectly */
650 if (!(hpriv->port_map & (1 << i)))
651 continue;
652#endif
653
654 /* make sure port is not active */
655 rc = ahci_deinit_port(port_mmio, cap, &emsg);
656 if (rc)
657 dev_printk(KERN_WARNING, &pdev->dev,
658 "%s (%d)\n", emsg, rc);
659
660 /* clear SError */
661 tmp = readl(port_mmio + PORT_SCR_ERR);
662 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
663 writel(tmp, port_mmio + PORT_SCR_ERR);
664
665 /* clear port IRQ */
666 tmp = readl(port_mmio + PORT_IRQ_STAT);
667 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
668 if (tmp)
669 writel(tmp, port_mmio + PORT_IRQ_STAT);
670
671 writel(1 << i, mmio + HOST_IRQ_STAT);
672 }
673
674 tmp = readl(mmio + HOST_CTL);
675 VPRINTK("HOST_CTL 0x%x\n", tmp);
676 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
677 tmp = readl(mmio + HOST_CTL);
678 VPRINTK("HOST_CTL 0x%x\n", tmp);
679}
680
681static unsigned int ahci_dev_classify(struct ata_port *ap)
682{
683 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
684 struct ata_taskfile tf;
685 u32 tmp;
686
687 tmp = readl(port_mmio + PORT_SIG);
688 tf.lbah = (tmp >> 24) & 0xff;
689 tf.lbam = (tmp >> 16) & 0xff;
690 tf.lbal = (tmp >> 8) & 0xff;
691 tf.nsect = (tmp) & 0xff;
692
693 return ata_dev_classify(&tf);
694}
695
696static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
697 u32 opts)
698{
699 dma_addr_t cmd_tbl_dma;
700
701 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
702
703 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
704 pp->cmd_slot[tag].status = 0;
705 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
706 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
707}
708
709static int ahci_clo(struct ata_port *ap)
710{
711 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
712 struct ahci_host_priv *hpriv = ap->host_set->private_data;
713 u32 tmp;
714
715 if (!(hpriv->cap & HOST_CAP_CLO))
716 return -EOPNOTSUPP;
717
718 tmp = readl(port_mmio + PORT_CMD);
719 tmp |= PORT_CMD_CLO;
720 writel(tmp, port_mmio + PORT_CMD);
721
722 tmp = ata_wait_register(port_mmio + PORT_CMD,
723 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
724 if (tmp & PORT_CMD_CLO)
725 return -EIO;
726
727 return 0;
728}
729
730static int ahci_prereset(struct ata_port *ap)
731{
732 if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
733 (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
734 /* ATA_BUSY hasn't cleared, so send a CLO */
735 ahci_clo(ap);
736 }
737
738 return ata_std_prereset(ap);
739}
740
741static int ahci_softreset(struct ata_port *ap, unsigned int *class)
742{
743 struct ahci_port_priv *pp = ap->private_data;
744 void __iomem *mmio = ap->host_set->mmio_base;
745 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
746 const u32 cmd_fis_len = 5; /* five dwords */
747 const char *reason = NULL;
748 struct ata_taskfile tf;
749 u32 tmp;
750 u8 *fis;
751 int rc;
752
753 DPRINTK("ENTER\n");
754
755 if (ata_port_offline(ap)) {
756 DPRINTK("PHY reports no device\n");
757 *class = ATA_DEV_NONE;
758 return 0;
759 }
760
761 /* prepare for SRST (AHCI-1.1 10.4.1) */
762 rc = ahci_stop_engine(port_mmio);
763 if (rc) {
764 reason = "failed to stop engine";
765 goto fail_restart;
766 }
767
768 /* check BUSY/DRQ, perform Command List Override if necessary */
769 ahci_tf_read(ap, &tf);
770 if (tf.command & (ATA_BUSY | ATA_DRQ)) {
771 rc = ahci_clo(ap);
772
773 if (rc == -EOPNOTSUPP) {
774 reason = "port busy but CLO unavailable";
775 goto fail_restart;
776 } else if (rc) {
777 reason = "port busy but CLO failed";
778 goto fail_restart;
779 }
780 }
781
782 /* restart engine */
783 ahci_start_engine(port_mmio);
784
785 ata_tf_init(ap->device, &tf);
786 fis = pp->cmd_tbl;
787
788 /* issue the first D2H Register FIS */
789 ahci_fill_cmd_slot(pp, 0,
790 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
791
792 tf.ctl |= ATA_SRST;
793 ata_tf_to_fis(&tf, fis, 0);
794 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
795
796 writel(1, port_mmio + PORT_CMD_ISSUE);
797
798 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
799 if (tmp & 0x1) {
800 rc = -EIO;
801 reason = "1st FIS failed";
802 goto fail;
803 }
804
805 /* spec says at least 5us, but be generous and sleep for 1ms */
806 msleep(1);
807
808 /* issue the second D2H Register FIS */
809 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
810
811 tf.ctl &= ~ATA_SRST;
812 ata_tf_to_fis(&tf, fis, 0);
813 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
814
815 writel(1, port_mmio + PORT_CMD_ISSUE);
816 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
817
818 /* spec mandates ">= 2ms" before checking status.
819 * We wait 150ms, because that was the magic delay used for
820 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
821 * between when the ATA command register is written, and then
822 * status is checked. Because waiting for "a while" before
823 * checking status is fine, post SRST, we perform this magic
824 * delay here as well.
825 */
826 msleep(150);
827
828 *class = ATA_DEV_NONE;
829 if (ata_port_online(ap)) {
830 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
831 rc = -EIO;
832 reason = "device not ready";
833 goto fail;
834 }
835 *class = ahci_dev_classify(ap);
836 }
837
838 DPRINTK("EXIT, class=%u\n", *class);
839 return 0;
840
841 fail_restart:
842 ahci_start_engine(port_mmio);
843 fail:
844 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
845 return rc;
846}
847
/* Hard-reset the port via the PHY (COMRESET) and classify the device.
 * The DMA engine is stopped across the reset so the HBA is not fetching
 * commands while the link reinitializes, and the D2H FIS reception area
 * is poisoned beforehand so sata_std_hardreset() waits for a *fresh*
 * signature FIS rather than reading a stale one.
 */
static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
{
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	void __iomem *mmio = ap->host_set->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(port_mmio);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(ap->device, &tf);
	tf.command = 0xff;	/* impossible status value, overwritten by real FIS */
	ata_tf_to_fis(&tf, d2h_fis, 0);

	rc = sata_std_hardreset(ap, class);

	ahci_start_engine(port_mmio);

	if (rc == 0 && ata_port_online(ap))
		*class = ahci_dev_classify(ap);
	/* normalize: report "nothing attached" rather than "unknown" */
	if (*class == ATA_DEV_UNKNOWN)
		*class = ATA_DEV_NONE;

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}
878
879static void ahci_postreset(struct ata_port *ap, unsigned int *class)
880{
881 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
882 u32 new_tmp, tmp;
883
884 ata_std_postreset(ap, class);
885
886 /* Make sure port's ATAPI bit is set appropriately */
887 new_tmp = tmp = readl(port_mmio + PORT_CMD);
888 if (*class == ATA_DEV_ATAPI)
889 new_tmp |= PORT_CMD_ATAPI;
890 else
891 new_tmp &= ~PORT_CMD_ATAPI;
892 if (new_tmp != tmp) {
893 writel(new_tmp, port_mmio + PORT_CMD);
894 readl(port_mmio + PORT_CMD); /* flush */
895 }
896}
897
898static u8 ahci_check_status(struct ata_port *ap)
899{
900 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
901
902 return readl(mmio + PORT_TFDATA) & 0xFF;
903}
904
905static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
906{
907 struct ahci_port_priv *pp = ap->private_data;
908 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
909
910 ata_tf_from_fis(d2h_fis, tf);
911}
912
913static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
914{
915 struct scatterlist *sg;
916 struct ahci_sg *ahci_sg;
917 unsigned int n_sg = 0;
918
919 VPRINTK("ENTER\n");
920
921 /*
922 * Next, the S/G list.
923 */
924 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
925 ata_for_each_sg(sg, qc) {
926 dma_addr_t addr = sg_dma_address(sg);
927 u32 sg_len = sg_dma_len(sg);
928
929 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
930 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
931 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
932
933 ahci_sg++;
934 n_sg++;
935 }
936
937 return n_sg;
938}
939
940static void ahci_qc_prep(struct ata_queued_cmd *qc)
941{
942 struct ata_port *ap = qc->ap;
943 struct ahci_port_priv *pp = ap->private_data;
944 int is_atapi = is_atapi_taskfile(&qc->tf);
945 void *cmd_tbl;
946 u32 opts;
947 const u32 cmd_fis_len = 5; /* five dwords */
948 unsigned int n_elem;
949
950 /*
951 * Fill in command table information. First, the header,
952 * a SATA Register - Host to Device command FIS.
953 */
954 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
955
956 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
957 if (is_atapi) {
958 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
959 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
960 }
961
962 n_elem = 0;
963 if (qc->flags & ATA_QCFLAG_DMAMAP)
964 n_elem = ahci_fill_sg(qc, cmd_tbl);
965
966 /*
967 * Fill in command slot information.
968 */
969 opts = cmd_fis_len | n_elem << 16;
970 if (qc->tf.flags & ATA_TFLAG_WRITE)
971 opts |= AHCI_CMD_WRITE;
972 if (is_atapi)
973 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
974
975 ahci_fill_cmd_slot(pp, qc->tag, opts);
976}
977
/* Handle an error interrupt: decode @irq_stat into libata error masks
 * and EH actions, attach them to the active command (or the port if no
 * command is active), and either freeze or abort the port.
 */
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	serror = ahci_scr_read(ap, SCR_ERROR);
	ahci_scr_write(ap, SCR_ERROR, serror);

	/* analyze @irq_stat */
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	if (irq_stat & PORT_IRQ_TF_ERR)
		err_mask |= AC_ERR_DEV;

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		err_mask |= AC_ERR_HOST_BUS;
		action |= ATA_EH_SOFTRESET;
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(ehi, ", interface fatal error");
	}

	/* hotplug events: either a connect-status or PHY-ready change */
	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		/* dump the first four dwords of the unrecognized FIS */
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	/* okay, let's hand over to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	/* blame the active command if there is one, else the port */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* fatal conditions freeze the port; others just abort commands */
	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1039
/* Per-port interrupt service: acknowledge the port's interrupt status,
 * route errors to ahci_error_intr(), complete finished commands, and
 * classify anything left over as a (possibly harmless) spurious IRQ.
 */
static void ahci_host_intr(struct ata_port *ap)
{
	void __iomem *mmio = ap->host_set->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	struct ata_eh_info *ehi = &ap->eh_info;
	u32 status, qc_active;
	int rc;

	/* read-then-writeback acks the port-level interrupt bits */
	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	/* NCQ commands complete via SActive, non-NCQ via CI */
	if (ap->sactive)
		qc_active = readl(port_mmio + PORT_SCR_ACT);
	else
		qc_active = readl(port_mmio + PORT_CMD_ISSUE);

	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
	if (rc > 0)
		return;
	if (rc < 0) {
		/* completion bookkeeping went inconsistent; force EH */
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_SOFTRESET;
		ata_port_freeze(ap);
		return;
	}

	/* hmmm... a spurious interupt */

	/* some devices send D2H reg with I bit set during NCQ command phase */
	if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
		return;

	/* ignore interim PIO setup fis interrupts */
	if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
		return;

	if (ata_ratelimit())
		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
				"(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
				status, ap->active_tag, ap->sactive);
}
1086
/* libata .irq_clear hook.  Intentionally empty: per-port interrupt
 * status is acknowledged in ahci_host_intr() by writing PORT_IRQ_STAT
 * back to itself, so nothing needs doing here.
 */
static void ahci_irq_clear(struct ata_port *ap)
{
	/* TODO */
}
1091
/* Top-level interrupt handler shared by all ports of the HBA.  Reads
 * the host-level interrupt status, dispatches each pending implemented
 * port to ahci_host_intr() under the host lock, then acks the handled
 * bits in HOST_IRQ_STAT.
 */
static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_ack = 0;

	VPRINTK("ENTER\n");

	hpriv = host_set->private_data;
	mmio = host_set->mmio_base;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	irq_stat &= hpriv->port_map;	/* only care about implemented ports */
	if (!irq_stat)
		return IRQ_NONE;

	spin_lock(&host_set->lock);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_stat & (1 << i)))
			continue;

		ap = host_set->ports[i];
		if (ap) {
			ahci_host_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			/* hardware raised an IRQ for a port we never set up */
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host_set->dev,
					"interrupt on disabled port %u\n", i);
		}

		irq_ack |= (1 << i);
	}

	if (irq_ack) {
		/* ack host-level status only after ports were serviced */
		writel(irq_ack, mmio + HOST_IRQ_STAT);
		handled = 1;
	}

	spin_unlock(&host_set->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
1144
1145static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1146{
1147 struct ata_port *ap = qc->ap;
1148 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1149
1150 if (qc->tf.protocol == ATA_PROT_NCQ)
1151 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1152 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1153 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1154
1155 return 0;
1156}
1157
1158static void ahci_freeze(struct ata_port *ap)
1159{
1160 void __iomem *mmio = ap->host_set->mmio_base;
1161 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1162
1163 /* turn IRQ off */
1164 writel(0, port_mmio + PORT_IRQ_MASK);
1165}
1166
/* Thaw a frozen port: clear any pending per-port and host-level
 * interrupt status first so stale events don't fire the moment the
 * interrupt mask is restored.
 */
static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host_set->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	u32 tmp;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	/* NOTE(review): uses ap->id as the HOST_IRQ_STAT bit index;
	 * presumably equal to ap->port_no here -- confirm. */
	writel(1 << ap->id, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
}
1181
/* libata error-handler hook.  If the port is not frozen, bounce the
 * DMA engine so EH can issue commands, then run the standard EH flow
 * with the AHCI-specific reset/postreset callbacks.
 */
static void ahci_error_handler(struct ata_port *ap)
{
	void __iomem *mmio = ap->host_set->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);

	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(port_mmio);
		ahci_start_engine(port_mmio);
	}

	/* perform recovery */
	ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
		  ahci_postreset);
}
1197
1198static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1199{
1200 struct ata_port *ap = qc->ap;
1201 void __iomem *mmio = ap->host_set->mmio_base;
1202 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1203
1204 if (qc->flags & ATA_QCFLAG_FAILED)
1205 qc->err_mask |= AC_ERR_OTHER;
1206
1207 if (qc->err_mask) {
1208 /* make DMA engine forget about the failed command */
1209 ahci_stop_engine(port_mmio);
1210 ahci_start_engine(port_mmio);
1211 }
1212}
1213
/* Suspend hook: quiesce the port via ahci_deinit_port().  If that
 * fails, the port is re-initialized so it remains usable and the error
 * is propagated to abort the suspend.
 */
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct ahci_host_priv *hpriv = ap->host_set->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host_set->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
	if (rc) {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		/* deinit failed -- put the port back into working state */
		ahci_init_port(port_mmio, hpriv->cap,
			       pp->cmd_slot_dma, pp->rx_fis_dma);
	}

	return rc;
}
1232
1233static int ahci_port_resume(struct ata_port *ap)
1234{
1235 struct ahci_port_priv *pp = ap->private_data;
1236 struct ahci_host_priv *hpriv = ap->host_set->private_data;
1237 void __iomem *mmio = ap->host_set->mmio_base;
1238 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1239
1240 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1241
1242 return 0;
1243}
1244
/* PCI driver suspend hook.  Disables HBA interrupt delivery before
 * handing off to the generic libata PCI suspend path.
 */
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
	void __iomem *mmio = host_set->mmio_base;
	u32 ctl;

	if (mesg.event == PM_EVENT_SUSPEND) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}
1264
/* PCI driver resume hook: restore PCI state, and when coming back from
 * a full suspend, reset and re-initialize the controller before
 * resuming the libata host.
 */
static int ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
	struct ahci_host_priv *hpriv = host_set->private_data;
	void __iomem *mmio = host_set->mmio_base;
	int rc;

	ata_pci_device_do_resume(pdev);

	/* only a real suspend (not e.g. freeze) loses controller state */
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(mmio, pdev);
		if (rc)
			return rc;

		ahci_init_controller(mmio, pdev, host_set->n_ports, hpriv->cap);
	}

	ata_host_set_resume(host_set);

	return 0;
}
1286
/* Allocate per-port state: the ahci_port_priv struct plus one coherent
 * DMA chunk that is carved, in order, into the 32-slot command list,
 * the received-FIS area and the command-table area.  Finishes by
 * programming those addresses into the port.  Returns 0 or -ENOMEM /
 * an ata_pad_alloc() error; unwinds all allocations on failure.
 */
static int ahci_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct ahci_host_priv *hpriv = ap->host_set->private_data;
	struct ahci_port_priv *pp;
	void __iomem *mmio = ap->host_set->mmio_base;
	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	memset(pp, 0, sizeof(*pp));

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		kfree(pp);
		return rc;
	}

	mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
	if (!mem) {
		ata_pad_free(ap, dev);
		kfree(pp);
		return -ENOMEM;
	}
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	ap->private_data = pp;

	/* initialize port */
	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);

	return 0;
}
1350
1351static void ahci_port_stop(struct ata_port *ap)
1352{
1353 struct device *dev = ap->host_set->dev;
1354 struct ahci_host_priv *hpriv = ap->host_set->private_data;
1355 struct ahci_port_priv *pp = ap->private_data;
1356 void __iomem *mmio = ap->host_set->mmio_base;
1357 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1358 const char *emsg = NULL;
1359 int rc;
1360
1361 /* de-initialize port */
1362 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1363 if (rc)
1364 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1365
1366 ap->private_data = NULL;
1367 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
1368 pp->cmd_slot, pp->cmd_slot_dma);
1369 ata_pad_free(ap, dev);
1370 kfree(pp);
1371}
1372
1373static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1374 unsigned int port_idx)
1375{
1376 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1377 base = ahci_port_base_ul(base, port_idx);
1378 VPRINTK("base now==0x%lx\n", base);
1379
1380 port->cmd_addr = base;
1381 port->scr_addr = base + PORT_SCR;
1382
1383 VPRINTK("EXIT\n");
1384}
1385
/* Controller bring-up at probe time: reset the HBA, cache its
 * capabilities and implemented-port map, configure the PCI DMA masks
 * (64-bit when HOST_CAP_64 is set, with fallback for the consistent
 * mask), set up each port's ioaddrs and initialize the controller.
 * Returns 0 or a negative errno.
 */
static int ahci_host_init(struct ata_probe_ent *probe_ent)
{
	struct ahci_host_priv *hpriv = probe_ent->private_data;
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	void __iomem *mmio = probe_ent->mmio_base;
	unsigned int i, using_dac;
	int rc;

	rc = ahci_reset_controller(mmio, pdev);
	if (rc)
		return rc;

	hpriv->cap = readl(mmio + HOST_CAP);
	hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
	/* CAP.NP is zero-based: low 5 bits + 1 = number of ports */
	probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;

	VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
		hpriv->cap, hpriv->port_map, probe_ent->n_ports);

	using_dac = hpriv->cap & HOST_CAP_64;
	if (using_dac &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			/* 64-bit consistent mask refused; fall back to 32-bit */
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	for (i = 0; i < probe_ent->n_ports; i++)
		ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);

	ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);

	pci_set_master(pdev);

	return 0;
}
1441
1442static void ahci_print_info(struct ata_probe_ent *probe_ent)
1443{
1444 struct ahci_host_priv *hpriv = probe_ent->private_data;
1445 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1446 void __iomem *mmio = probe_ent->mmio_base;
1447 u32 vers, cap, impl, speed;
1448 const char *speed_s;
1449 u16 cc;
1450 const char *scc_s;
1451
1452 vers = readl(mmio + HOST_VERSION);
1453 cap = hpriv->cap;
1454 impl = hpriv->port_map;
1455
1456 speed = (cap >> 20) & 0xf;
1457 if (speed == 1)
1458 speed_s = "1.5";
1459 else if (speed == 2)
1460 speed_s = "3";
1461 else
1462 speed_s = "?";
1463
1464 pci_read_config_word(pdev, 0x0a, &cc);
1465 if (cc == 0x0101)
1466 scc_s = "IDE";
1467 else if (cc == 0x0106)
1468 scc_s = "SATA";
1469 else if (cc == 0x0104)
1470 scc_s = "RAID";
1471 else
1472 scc_s = "unknown";
1473
1474 dev_printk(KERN_INFO, &pdev->dev,
1475 "AHCI %02x%02x.%02x%02x "
1476 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1477 ,
1478
1479 (vers >> 24) & 0xff,
1480 (vers >> 16) & 0xff,
1481 (vers >> 8) & 0xff,
1482 vers & 0xff,
1483
1484 ((cap >> 8) & 0x1f) + 1,
1485 (cap & 0x1f) + 1,
1486 speed_s,
1487 impl,
1488 scc_s);
1489
1490 dev_printk(KERN_INFO, &pdev->dev,
1491 "flags: "
1492 "%s%s%s%s%s%s"
1493 "%s%s%s%s%s%s%s\n"
1494 ,
1495
1496 cap & (1 << 31) ? "64bit " : "",
1497 cap & (1 << 30) ? "ncq " : "",
1498 cap & (1 << 28) ? "ilck " : "",
1499 cap & (1 << 27) ? "stag " : "",
1500 cap & (1 << 26) ? "pm " : "",
1501 cap & (1 << 25) ? "led " : "",
1502
1503 cap & (1 << 24) ? "clo " : "",
1504 cap & (1 << 19) ? "nz " : "",
1505 cap & (1 << 18) ? "only " : "",
1506 cap & (1 << 17) ? "pmp " : "",
1507 cap & (1 << 15) ? "pio " : "",
1508 cap & (1 << 14) ? "slum " : "",
1509 cap & (1 << 13) ? "part " : ""
1510 );
1511}
1512
/* PCI probe: apply the JMicron AHCI-mode fixup, enable the device,
 * map BAR 5, set up MSI (falling back to legacy INTx), initialize the
 * controller via ahci_host_init() and register the libata host.
 * Uses a goto-based unwind chain on failure.
 */
static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	struct ahci_host_priv *hpriv;
	unsigned long base;
	void __iomem *mmio_base;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	int have_msi, pci_dev_busy = 0;
	int rc;

	VPRINTK("ENTER\n");

	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* JMicron-specific fixup: make sure we're in AHCI mode */
	/* This is protected from races with ata_jmicron by the pci probe
	   locking */
	if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
		/* AHCI enable, AHCI on function 0 */
		pci_write_config_byte(pdev, 0x41, 0xa1);
		/* Function 1 is the PATA controller */
		if (PCI_FUNC(pdev->devfn))
			return -ENODEV;
	}

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		/* regions busy: skip pci_disable_device() in the unwind */
		pci_dev_busy = 1;
		goto err_out;
	}

	if (pci_enable_msi(pdev) == 0)
		have_msi = 1;
	else {
		/* no MSI -- make sure legacy INTx is enabled */
		pci_intx(pdev, 1);
		have_msi = 0;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_msi;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}
	base = (unsigned long) mmio_base;

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	/* copy board-specific parameters from the port-info table */
	probe_ent->sht		= ahci_port_info[board_idx].sht;
	probe_ent->host_flags	= ahci_port_info[board_idx].host_flags;
	probe_ent->pio_mask	= ahci_port_info[board_idx].pio_mask;
	probe_ent->udma_mask	= ahci_port_info[board_idx].udma_mask;
	probe_ent->port_ops	= ahci_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	if (have_msi)
		hpriv->flags |= AHCI_FLAG_MSI;

	/* initialize adapter */
	rc = ahci_host_init(probe_ent);
	if (rc)
		goto err_out_hpriv;

	/* advertise NCQ only if the board allows it and the HBA supports it */
	if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
	    (hpriv->cap & HOST_CAP_NCQ))
		probe_ent->host_flags |= ATA_FLAG_NCQ;

	ahci_print_info(probe_ent);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_msi:
	if (have_msi)
		pci_disable_msi(pdev);
	else
		pci_intx(pdev, 0);
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}
1631
/* PCI remove: detach every port, free the IRQ, release SCSI hosts,
 * then undo the MMIO mapping, MSI/INTx setup and PCI resources in
 * reverse order of ahci_init_one().
 */
static void ahci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host_set->private_data;
	unsigned int i;
	int have_msi;

	for (i = 0; i < host_set->n_ports; i++)
		ata_port_detach(host_set->ports[i]);

	/* remember MSI state before hpriv is freed below */
	have_msi = hpriv->flags & AHCI_FLAG_MSI;
	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_release(ap->host);
		scsi_host_put(ap->host);
	}

	kfree(hpriv);
	pci_iounmap(pdev, host_set->mmio_base);
	kfree(host_set);

	if (have_msi)
		pci_disable_msi(pdev);
	else
		pci_intx(pdev, 0);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
1665
/* Module entry point: register the PCI driver. */
static int __init ahci_init(void)
{
	return pci_register_driver(&ahci_pci_driver);
}
1670
/* Module exit point: unregister the PCI driver. */
static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}
1675
1676
/* module metadata and init/exit registration */
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
new file mode 100644
index 000000000000..46c34fd5af8f
--- /dev/null
+++ b/drivers/ata/ata_piix.c
@@ -0,0 +1,960 @@
1/*
2 * ata_piix.c - Intel PATA/SATA controllers
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 *
9 * Copyright 2003-2005 Red Hat Inc
10 * Copyright 2003-2005 Jeff Garzik
11 *
12 *
13 * Copyright header from piix.c:
14 *
15 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
16 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
17 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
18 *
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2, or (at your option)
23 * any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; see the file COPYING. If not, write to
32 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
33 *
34 *
35 * libata documentation is available via 'make {ps|pdf}docs',
36 * as Documentation/DocBook/libata.*
37 *
38 * Hardware documentation available at http://developer.intel.com/
39 *
40 * Documentation
 * Publicly available from the Intel web site. Errata documentation
 * is also publicly available. As an aid to anyone hacking on this
 * driver, the list of relevant errata is below, going back to the
 * PIIX4. Older device documentation is now a bit tricky to find.
45 *
 * The chipsets all follow very much the same design. The original Triton
 * series chipsets do _not_ support independent device timings, but this
 * is fixed in Triton II. With the odd mobile exception the chips then
 * change little except in gaining more modes until SATA arrives. This
 * driver supports only the chips with independent timing (that is those
 * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
 * for the early chip drivers.
53 *
54 * Errata of note:
55 *
56 * Unfixable
57 * PIIX4 errata #9 - Only on ultra obscure hw
58 * ICH3 errata #13 - Not observed to affect real hw
59 * by Intel
60 *
61 * Things we must deal with
62 * PIIX4 errata #10 - BM IDE hang with non UDMA
63 * (must stop/start dma to recover)
64 * 440MX errata #15 - As PIIX4 errata #10
65 * PIIX4 errata #15 - Must not read control registers
66 * during a PIO transfer
67 * 440MX errata #13 - As PIIX4 errata #15
68 * ICH2 errata #21 - DMA mode 0 doesn't work right
69 * ICH0/1 errata #55 - As ICH2 errata #21
70 * ICH2 spec c #9 - Extra operations needed to handle
71 * drive hotswap [NOT YET SUPPORTED]
72 * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
73 * and must be dword aligned
74 * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
75 *
76 * Should have been BIOS fixed:
77 * 450NX: errata #19 - DMA hangs on old 450NX
78 * 450NX: errata #20 - DMA hangs on old 450NX
79 * 450NX: errata #25 - Corruption with DMA on old 450NX
80 * ICH3 errata #15 - IDE deadlock under high load
81 * (BIOS must set dev 31 fn 0 bit 23)
82 * ICH3 errata #18 - Don't use native mode
83 */
84
85#include <linux/kernel.h>
86#include <linux/module.h>
87#include <linux/pci.h>
88#include <linux/init.h>
89#include <linux/blkdev.h>
90#include <linux/delay.h>
91#include <linux/device.h>
92#include <scsi/scsi_host.h>
93#include <linux/libata.h>
94
95#define DRV_NAME "ata_piix"
96#define DRV_VERSION "2.00"
97
/* register offsets, flag bits, controller IDs and map-table constants */
enum {
	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
	ICH5_PMR		= 0x90, /* port mapping register */
	ICH5_PCS		= 0x92, /* port control and status */
	PIIX_SCC		= 0x0A, /* sub-class code register */

	PIIX_FLAG_IGNORE_PCS	= (1 << 25), /* ignore PCS present bits */
	PIIX_FLAG_SCR		= (1 << 26), /* SCR available */
	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */

	/* combined mode.  if set, PATA is channel 0.
	 * if clear, PATA is channel 1.
	 */
	PIIX_PORT_ENABLED	= (1 << 0),
	PIIX_PORT_PRESENT	= (1 << 4),

	PIIX_80C_PRI		= (1 << 5) | (1 << 4),
	PIIX_80C_SEC		= (1 << 7) | (1 << 6),

	/* controller IDs -- indices into the board-info tables */
	piix4_pata		= 0,
	ich5_pata		= 1,
	ich5_sata		= 2,
	esb_sata		= 3,
	ich6_sata		= 4,
	ich6_sata_ahci		= 5,
	ich6m_sata_ahci		= 6,
	ich8_sata_ahci		= 7,

	/* constants for mapping table */
	P0			= 0,  /* port 0 */
	P1			= 1,  /* port 1 */
	P2			= 2,  /* port 2 */
	P3			= 3,  /* port 3 */
	IDE			= -1, /* IDE */
	NA			= -2, /* not available */
	RV			= -3, /* reserved */

	PIIX_AHCI_DEVICE	= 6,
};
139
/* Per-chipset description of how SATA/PATA ports map onto the two
 * legacy IDE channels, indexed by the (masked) MAP register value.
 */
struct piix_map_db {
	const u32 mask;			/* MAP register bits that select the layout */
	const u16 port_enable;		/* port-enable bits in PCS */
	const int present_shift;	/* shift of the present bits in PCS
					 * -- presumably pairs with
					 * PIIX_PORT_PRESENT; confirm */
	const int map[][4];		/* rows of {PM, PS, SM, SS}: Px/IDE/NA/RV */
};
146
/* Per-host private data: the selected port map and its source table. */
struct piix_host_priv {
	const int *map;			/* active row of map_db->map -- presumably
					 * chosen at probe time; confirm */
	const struct piix_map_db *map_db;
};
151
152static int piix_init_one (struct pci_dev *pdev,
153 const struct pci_device_id *ent);
154static void piix_host_stop(struct ata_host_set *host_set);
155static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
156static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
157static void piix_pata_error_handler(struct ata_port *ap);
158static void piix_sata_error_handler(struct ata_port *ap);
159
160static unsigned int in_module_init = 1;
161
/* PCI IDs claimed by this driver; driver_data selects the controller ID */
static const struct pci_device_id piix_pci_tbl[] = {
#ifdef ATA_ENABLE_PATA
	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
	{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
	{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
	{ 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
#endif

	/* NOTE: The following PCI ids must be kept in sync with the
	 * list in drivers/pci/quirks.c.
	 */

	/* 82801EB (ICH5) */
	{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 82801EB (ICH5) */
	{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 6300ESB (ICH5 variant with broken PCS present bits) */
	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
	/* 6300ESB pretending RAID */
	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
	/* 82801FB/FW (ICH6/ICH6W) */
	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
	/* 82801FR/FRW (ICH6R/ICH6RW) */
	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
	/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
	/* Enterprise Southbridge 2 (where's the datasheet?) */
	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
	/* SATA Controller 1 IDE (ICH8, no datasheet yet) */
	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
	/* SATA Controller 2 IDE (ICH8, ditto) */
	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
	/* Mobile SATA Controller IDE (ICH8M, ditto) */
	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },

	{ }	/* terminate list */
};
203
/* PCI glue: probe/remove plus the generic libata power-management hooks */
static struct pci_driver piix_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= piix_pci_tbl,
	.probe			= piix_init_one,
	.remove			= ata_pci_remove_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
};
212
/* SCSI host template shared by all PIIX/ICH ports; everything is
 * delegated to the generic libata SCSI glue.
 */
static struct scsi_host_template piix_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.resume			= ata_scsi_device_resume,
	.suspend		= ata_scsi_device_suspend,
};
232
/* PATA port operations: standard SFF taskfile/BMDMA helpers plus
 * PIIX-specific PIO/DMA timing setup and error handling.
 */
static const struct ata_port_operations piix_pata_ops = {
	.port_disable		= ata_port_disable,
	.set_piomode		= piix_set_piomode,
	.set_dmamode		= piix_set_dmamode,
	.mode_filter		= ata_pci_default_filter,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_pio_data_xfer,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= piix_pata_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,

	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= piix_host_stop,
};
265
/* SATA port operations: same as piix_pata_ops but without the PATA
 * timing hooks (SATA needs no PIO/DMA timing programming) and with a
 * PCS-aware prereset (see piix_sata_prereset). */
static const struct ata_port_operations piix_sata_ops = {
	.port_disable		= ata_port_disable,

	/* taskfile access */
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	/* bus-master DMA */
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_pio_data_xfer,

	/* error handling */
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= piix_sata_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,

	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= piix_host_stop,
};
295
/* ICH5 SATA port map.  .mask selects the relevant bits of the PMR map
 * value (see piix_init_sata_map), .port_enable are the PCS port-enable
 * bits to set (see piix_init_pcs), .present_shift is the bit offset of
 * the device-present bits within PCS (see piix_sata_prereset). */
static const struct piix_map_db ich5_map_db = {
	.mask = 0x7,
	.port_enable = 0x3,
	.present_shift = 4,
	.map = {
		/* PM   PS   SM   SS       MAP  */
		{  P0,  NA,  P1,  NA }, /* 000b */
		{  P1,  NA,  P0,  NA }, /* 001b */
		{  RV,  RV,  RV,  RV },
		{  RV,  RV,  RV,  RV },
		{  P0,  P1, IDE, IDE }, /* 100b */
		{  P1,  P0, IDE, IDE }, /* 101b */
		{ IDE, IDE,  P0,  P1 }, /* 110b */
		{ IDE, IDE,  P1,  P0 }, /* 111b */
	},
};
312
/* ICH6 SATA port map: four SATA ports, 2-bit map value.  Field meanings
 * as in ich5_map_db. */
static const struct piix_map_db ich6_map_db = {
	.mask = 0x3,
	.port_enable = 0xf,
	.present_shift = 4,
	.map = {
		/* PM   PS   SM   SS       MAP */
		{  P0,  P2,  P1,  P3 }, /* 00b */
		{ IDE, IDE,  P1,  P3 }, /* 01b */
		{  P0,  P2, IDE, IDE }, /* 10b */
		{  RV,  RV,  RV,  RV },
	},
};
325
/* ICH6M (mobile) SATA port map: only ports 0 and 2 exist, so only PCS
 * bits 0 and 2 are enabled.  Field meanings as in ich5_map_db. */
static const struct piix_map_db ich6m_map_db = {
	.mask = 0x3,
	.port_enable = 0x5,
	.present_shift = 4,
	.map = {
		/* PM   PS   SM   SS       MAP */
		{  P0,  P2,  RV,  RV }, /* 00b */
		{  RV,  RV,  RV,  RV },
		{  P0,  P2, IDE, IDE }, /* 10b */
		{  RV,  RV,  RV,  RV },
	},
};
338
/* ICH8 SATA port map: map value is hardwired to 00b; note the present
 * bits live at a different PCS offset (shift 8) than on ICH5/6. */
static const struct piix_map_db ich8_map_db = {
	.mask = 0x3,
	.port_enable = 0x3,
	.present_shift = 8,
	.map = {
		/* PM  PS  SM  SS       MAP */
		{ P0, NA, P1, NA }, /* 00b (hardwired) */
		{ RV, RV, RV, RV },
		{ RV, RV, RV, RV }, /* 10b (never) */
		{ RV, RV, RV, RV },
	},
};
351
/* Map-database lookup, indexed by the controller-type enum stored in
 * piix_pci_tbl's driver_data.  PATA-only entries have no slot here. */
static const struct piix_map_db *piix_map_db_table[] = {
	[ich5_sata]		= &ich5_map_db,
	[esb_sata]		= &ich5_map_db,		/* 6300ESB behaves like ICH5 */
	[ich6_sata]		= &ich6_map_db,
	[ich6_sata_ahci]	= &ich6_map_db,
	[ich6m_sata_ahci]	= &ich6m_map_db,
	[ich8_sata_ahci]	= &ich8_map_db,
};
360
361static struct ata_port_info piix_port_info[] = {
362 /* piix4_pata */
363 {
364 .sht = &piix_sht,
365 .host_flags = ATA_FLAG_SLAVE_POSS,
366 .pio_mask = 0x1f, /* pio0-4 */
367#if 0
368 .mwdma_mask = 0x06, /* mwdma1-2 */
369#else
370 .mwdma_mask = 0x00, /* mwdma broken */
371#endif
372 .udma_mask = ATA_UDMA_MASK_40C,
373 .port_ops = &piix_pata_ops,
374 },
375
376 /* ich5_pata */
377 {
378 .sht = &piix_sht,
379 .host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
380 .pio_mask = 0x1f, /* pio0-4 */
381#if 0
382 .mwdma_mask = 0x06, /* mwdma1-2 */
383#else
384 .mwdma_mask = 0x00, /* mwdma broken */
385#endif
386 .udma_mask = 0x3f, /* udma0-5 */
387 .port_ops = &piix_pata_ops,
388 },
389
390 /* ich5_sata */
391 {
392 .sht = &piix_sht,
393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
394 .pio_mask = 0x1f, /* pio0-4 */
395 .mwdma_mask = 0x07, /* mwdma0-2 */
396 .udma_mask = 0x7f, /* udma0-6 */
397 .port_ops = &piix_sata_ops,
398 },
399
400 /* i6300esb_sata */
401 {
402 .sht = &piix_sht,
403 .host_flags = ATA_FLAG_SATA |
404 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
405 .pio_mask = 0x1f, /* pio0-4 */
406 .mwdma_mask = 0x07, /* mwdma0-2 */
407 .udma_mask = 0x7f, /* udma0-6 */
408 .port_ops = &piix_sata_ops,
409 },
410
411 /* ich6_sata */
412 {
413 .sht = &piix_sht,
414 .host_flags = ATA_FLAG_SATA |
415 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
416 .pio_mask = 0x1f, /* pio0-4 */
417 .mwdma_mask = 0x07, /* mwdma0-2 */
418 .udma_mask = 0x7f, /* udma0-6 */
419 .port_ops = &piix_sata_ops,
420 },
421
422 /* ich6_sata_ahci */
423 {
424 .sht = &piix_sht,
425 .host_flags = ATA_FLAG_SATA |
426 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
427 PIIX_FLAG_AHCI,
428 .pio_mask = 0x1f, /* pio0-4 */
429 .mwdma_mask = 0x07, /* mwdma0-2 */
430 .udma_mask = 0x7f, /* udma0-6 */
431 .port_ops = &piix_sata_ops,
432 },
433
434 /* ich6m_sata_ahci */
435 {
436 .sht = &piix_sht,
437 .host_flags = ATA_FLAG_SATA |
438 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
439 PIIX_FLAG_AHCI,
440 .pio_mask = 0x1f, /* pio0-4 */
441 .mwdma_mask = 0x07, /* mwdma0-2 */
442 .udma_mask = 0x7f, /* udma0-6 */
443 .port_ops = &piix_sata_ops,
444 },
445
446 /* ich8_sata_ahci */
447 {
448 .sht = &piix_sht,
449 .host_flags = ATA_FLAG_SATA |
450 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
451 PIIX_FLAG_AHCI,
452 .pio_mask = 0x1f, /* pio0-4 */
453 .mwdma_mask = 0x07, /* mwdma0-2 */
454 .udma_mask = 0x7f, /* udma0-6 */
455 .port_ops = &piix_sata_ops,
456 },
457};
458
/* PCI config-space bits that tell whether each PATA channel is enabled;
 * consumed by pci_test_config_bits() in piix_pata_prereset(). */
static struct pci_bits piix_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
};
463
/* Module metadata and PCI hotplug table export. */
MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
MODULE_VERSION(DRV_VERSION);
469
470/**
471 * piix_pata_cbl_detect - Probe host controller cable detect info
472 * @ap: Port for which cable detect info is desired
473 *
474 * Read 80c cable indicator from ATA PCI device's PCI config
475 * register. This register is normally set by firmware (BIOS).
476 *
477 * LOCKING:
478 * None (inherited from caller).
479 */
480static void piix_pata_cbl_detect(struct ata_port *ap)
481{
482 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
483 u8 tmp, mask;
484
485 /* no 80c support in host controller? */
486 if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
487 goto cbl40;
488
489 /* check BIOS cable detect results */
490 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
491 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
492 if ((tmp & mask) == 0)
493 goto cbl40;
494
495 ap->cbl = ATA_CBL_PATA80;
496 return;
497
498cbl40:
499 ap->cbl = ATA_CBL_PATA40;
500 ap->udma_mask &= ATA_UDMA_MASK_40C;
501}
502
503/**
504 * piix_pata_prereset - prereset for PATA host controller
505 * @ap: Target port
506 *
507 * Prereset including cable detection.
508 *
509 * LOCKING:
510 * None (inherited from caller).
511 */
512static int piix_pata_prereset(struct ata_port *ap)
513{
514 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
515
516 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) {
517 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
518 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
519 return 0;
520 }
521
522 piix_pata_cbl_detect(ap);
523
524 return ata_std_prereset(ap);
525}
526
/* Standard BMDMA error handling with the PATA-specific prereset hook;
 * no hardreset method is provided (NULL). */
static void piix_pata_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL,
			   ata_std_postreset);
}
532
/**
 *	piix_sata_prereset - prereset for SATA host controller
 *	@ap: Target port
 *
 *	Reads and configures SATA PCI device's PCI config register
 *	Port Configuration and Status (PCS) to determine port and
 *	device availability.  Return -ENODEV to skip reset if no
 *	device is present.
 *
 *	LOCKING:
 *	None (inherited from caller).
 *
 *	RETURNS:
 *	0 if device is present, -ENODEV otherwise.
 */
static int piix_sata_prereset(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
	struct piix_host_priv *hpriv = ap->host_set->private_data;
	const unsigned int *map = hpriv->map;
	int base = 2 * ap->port_no;	/* each ATA port covers 2 map slots */
	unsigned int present = 0;
	int port, i;
	u16 pcs;

	pci_read_config_word(pdev, ICH5_PCS, &pcs);
	DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);

	/* Check the present bit in PCS for each SATA port mapped onto
	 * this ATA port; PIIX_FLAG_IGNORE_PCS (6300ESB) forces "present"
	 * because PCS is unreliable there. */
	for (i = 0; i < 2; i++) {
		port = map[base + i];
		if (port < 0)	/* RV/NA slot: nothing mapped here */
			continue;
		if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
		    (pcs & 1 << (hpriv->map_db->present_shift + port)))
			present = 1;
	}

	DPRINTK("ata%u: LEAVE, pcs=0x%x present=0x%x\n",
		ap->id, pcs, present);

	if (!present) {
		/* no device: suppress the pending reset and succeed */
		ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
		ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
		return 0;
	}

	return ata_std_prereset(ap);
}
581
/* Standard BMDMA error handling with the PCS-aware SATA prereset hook;
 * no hardreset method is provided (NULL). */
static void piix_sata_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL,
			   ata_std_postreset);
}
587
/**
 *	piix_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device whose PIO mode is being set
 *
 *	Set PIO mode for device, in host controller PCI config space.
 *	Master timings live in the 16-bit register at 0x40/0x42 (per
 *	channel); slave timings share the byte at 0x44.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
	struct pci_dev *dev	= to_pci_dev(ap->host_set->dev);
	unsigned int is_slave	= (adev->devno != 0);
	unsigned int master_port= ap->port_no ? 0x42 : 0x40;
	unsigned int slave_port	= 0x44;
	u16 master_data;
	u8 slave_data;

	/* ISP = IORDY sample point, RTC = recovery time, per PIO mode */
	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	pci_read_config_word(dev, master_port, &master_data);
	if (is_slave) {
		/* 0x4000 = SITRE (separate slave timing enable) */
		master_data |= 0x4000;
		/* enable PPE, IE and TIME */
		master_data |= 0x0070;
		pci_read_config_byte(dev, slave_port, &slave_data);
		/* slave byte holds both channels: keep the other nibble */
		slave_data &= (ap->port_no ? 0x0f : 0xf0);
		slave_data |=
			(timings[pio][0] << 2) |
			(timings[pio][1] << (ap->port_no ? 4 : 0));
	} else {
		master_data &= 0xccf8;
		/* enable PPE, IE and TIME */
		master_data |= 0x0007;
		master_data |=
			(timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	}
	pci_write_config_word(dev, master_port, master_data);
	if (is_slave)
		pci_write_config_byte(dev, slave_port, slave_data);
}
638
/**
 *	piix_set_dmamode - Initialize host controller PATA DMA timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device whose DMA mode is being set
 *
 *	Set UDMA mode for device, in host controller PCI config space.
 *	Registers used (per Intel ICH datasheets): 0x48 = UDMA enable
 *	bits, 0x4a = UDMA cycle timing, 0x54/0x55 = clock selection
 *	bits for UDMA3+ and UDMA5 respectively.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
	unsigned int udma	= adev->dma_mode; /* FIXME: MWDMA too */
	struct pci_dev *dev	= to_pci_dev(ap->host_set->dev);
	u8 maslave		= ap->port_no ? 0x42 : 0x40;
	u8 speed		= udma;
	/* drive number 0-3: 2 per channel */
	unsigned int drive_dn	= (ap->port_no ? 2 : 0) + adev->devno;
	int a_speed		= 3 << (drive_dn * 4);	/* timing field mask in 0x4a */
	int u_flag		= 1 << drive_dn;	/* UDMA enable bit in 0x48 */
	int v_flag		= 0x01 << drive_dn;	/* 66MHz clock bit in 0x54 */
	int w_flag		= 0x10 << drive_dn;	/* 100MHz clock bit in 0x55 */
	int u_speed		= 0;
	int			sitre;
	u16			reg4042, reg4a;
	u8			reg48, reg54, reg55;

	pci_read_config_word(dev, maslave, &reg4042);
	DPRINTK("reg4042 = 0x%04x\n", reg4042);
	sitre = (reg4042 & 0x4000) ? 1 : 0;	/* NOTE(review): read but unused */
	pci_read_config_byte(dev, 0x48, &reg48);
	pci_read_config_word(dev, 0x4a, &reg4a);
	pci_read_config_byte(dev, 0x54, &reg54);
	pci_read_config_byte(dev, 0x55, &reg55);

	/* map xfer mode to the 2-bit cycle-time field for this drive */
	switch(speed) {
		case XFER_UDMA_4:
		case XFER_UDMA_2:	u_speed = 2 << (drive_dn * 4); break;
		case XFER_UDMA_6:
		case XFER_UDMA_5:
		case XFER_UDMA_3:
		case XFER_UDMA_1:	u_speed = 1 << (drive_dn * 4); break;
		case XFER_UDMA_0:	u_speed = 0 << (drive_dn * 4); break;
		case XFER_MW_DMA_2:
		case XFER_MW_DMA_1:	break;
		default:
			BUG();
			return;
	}

	if (speed >= XFER_UDMA_0) {
		/* UDMA: enable the drive's UDMA bit, pick base clock,
		 * then program the cycle timing (read-modify-write each
		 * register only when the value actually changes). */
		if (!(reg48 & u_flag))
			pci_write_config_byte(dev, 0x48, reg48 | u_flag);
		if (speed == XFER_UDMA_5) {
			pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
		} else {
			pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
		}
		if ((reg4a & a_speed) != u_speed)
			pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
		if (speed > XFER_UDMA_2) {
			if (!(reg54 & v_flag))
				pci_write_config_byte(dev, 0x54, reg54 | v_flag);
		} else
			pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
	} else {
		/* MWDMA: clear all the UDMA-related bits for this drive */
		if (reg48 & u_flag)
			pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
		if (reg4a & a_speed)
			pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
		if (reg54 & v_flag)
			pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
		if (reg55 & w_flag)
			pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
	}
}
716
717#define AHCI_PCI_BAR 5
718#define AHCI_GLOBAL_CTL 0x04
719#define AHCI_ENABLE (1 << 31)
720static int piix_disable_ahci(struct pci_dev *pdev)
721{
722 void __iomem *mmio;
723 u32 tmp;
724 int rc = 0;
725
726 /* BUG: pci_enable_device has not yet been called. This
727 * works because this device is usually set up by BIOS.
728 */
729
730 if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
731 !pci_resource_len(pdev, AHCI_PCI_BAR))
732 return 0;
733
734 mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
735 if (!mmio)
736 return -ENOMEM;
737
738 tmp = readl(mmio + AHCI_GLOBAL_CTL);
739 if (tmp & AHCI_ENABLE) {
740 tmp &= ~AHCI_ENABLE;
741 writel(tmp, mmio + AHCI_GLOBAL_CTL);
742
743 tmp = readl(mmio + AHCI_GLOBAL_CTL);
744 if (tmp & AHCI_ENABLE)
745 rc = -EIO;
746 }
747
748 pci_iounmap(pdev, mmio);
749 return rc;
750}
751
752/**
753 * piix_check_450nx_errata - Check for problem 450NX setup
754 * @ata_dev: the PCI device to check
755 *
756 * Check for the present of 450NX errata #19 and errata #25. If
757 * they are found return an error code so we can turn off DMA
758 */
759
760static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
761{
762 struct pci_dev *pdev = NULL;
763 u16 cfg;
764 u8 rev;
765 int no_piix_dma = 0;
766
767 while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL)
768 {
769 /* Look for 450NX PXB. Check for problem configurations
770 A PCI quirk checks bit 6 already */
771 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
772 pci_read_config_word(pdev, 0x41, &cfg);
773 /* Only on the original revision: IDE DMA can hang */
774 if (rev == 0x00)
775 no_piix_dma = 1;
776 /* On all revisions below 5 PXB bus lock must be disabled for IDE */
777 else if (cfg & (1<<14) && rev < 5)
778 no_piix_dma = 2;
779 }
780 if (no_piix_dma)
781 dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
782 if (no_piix_dma == 2)
783 dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
784 return no_piix_dma;
785}
786
787static void __devinit piix_init_pcs(struct pci_dev *pdev,
788 const struct piix_map_db *map_db)
789{
790 u16 pcs, new_pcs;
791
792 pci_read_config_word(pdev, ICH5_PCS, &pcs);
793
794 new_pcs = pcs | map_db->port_enable;
795
796 if (new_pcs != pcs) {
797 DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
798 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
799 msleep(150);
800 }
801}
802
/* Read the Port Mapping Register (PMR), translate it through @map_db
 * into the per-slot port layout, print it, and stash the result in the
 * host's private data.  Slots marked IDE flip the corresponding
 * ata_port_info entry over to the PATA configuration. */
static void __devinit piix_init_sata_map(struct pci_dev *pdev,
					 struct ata_port_info *pinfo,
					 const struct piix_map_db *map_db)
{
	struct piix_host_priv *hpriv = pinfo[0].private_data;
	const unsigned int *map;
	int i, invalid_map = 0;
	u8 map_value;

	pci_read_config_byte(pdev, ICH5_PMR, &map_value);

	map = map_db->map[map_value & map_db->mask];

	dev_printk(KERN_INFO, &pdev->dev, "MAP [");
	for (i = 0; i < 4; i++) {
		switch (map[i]) {
		case RV:	/* reserved map value: flag and continue */
			invalid_map = 1;
			printk(" XX");
			break;

		case NA:	/* slot not available on this chip */
			printk(" --");
			break;

		case IDE:
			/* IDE slots come in even/odd pairs; switch this
			 * ATA port to PATA mode and skip the twin slot */
			WARN_ON((i & 1) || map[i + 1] != IDE);
			pinfo[i / 2] = piix_port_info[ich5_pata];
			pinfo[i / 2].private_data = hpriv;
			i++;
			printk(" IDE IDE");
			break;

		default:
			printk(" P%d", map[i]);
			/* odd slot mapped means the port has a "slave" */
			if (i & 1)
				pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
			break;
		}
	}
	printk(" ]\n");

	if (invalid_map)
		dev_printk(KERN_ERR, &pdev->dev,
			   "invalid MAP value %u\n", map_value);

	hpriv->map = map;
	hpriv->map_db = map_db;
}
852
/**
 *	piix_init_one - Register PIIX ATA PCI device with kernel services
 *	@pdev: PCI device to register
 *	@ent: Entry in piix_pci_tbl matching with @pdev
 *
 *	Called from kernel PCI layer.  We probe for combined mode (sigh),
 *	and then hand over control to libata, for it to do the rest.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, or -ERRNO value.
 */

static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_port_info port_info[2];
	struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
	struct piix_host_priv *hpriv;
	unsigned long host_flags;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "version " DRV_VERSION "\n");

	/* no hotplugging support (FIXME) */
	if (!in_module_init)
		return -ENODEV;

	/* NOTE(review): hpriv is never freed on the error paths below —
	 * presumably acceptable here as it lives for the module lifetime;
	 * verify against the host teardown path. */
	hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	/* start from the template for this controller type; both ports
	 * share the same private data */
	port_info[0] = piix_port_info[ent->driver_data];
	port_info[1] = piix_port_info[ent->driver_data];
	port_info[0].private_data = hpriv;
	port_info[1].private_data = hpriv;

	host_flags = port_info[0].host_flags;

	/* if the BIOS left the controller in AHCI mode, switch it back
	 * to legacy IDE mode before libata touches it */
	if (host_flags & PIIX_FLAG_AHCI) {
		u8 tmp;
		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
		if (tmp == PIIX_AHCI_DEVICE) {
			int rc = piix_disable_ahci(pdev);
			if (rc)
				return rc;
		}
	}

	/* Initialize SATA map */
	if (host_flags & ATA_FLAG_SATA) {
		piix_init_sata_map(pdev, port_info,
				   piix_map_db_table[ent->driver_data]);
		piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]);
	}

	/* On ICH5, some BIOSen disable the interrupt using the
	 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
	 * On ICH6, this bit has the same effect, but only when
	 * MSI is disabled (and it is disabled, as we don't use
	 * message-signalled interrupts currently).
	 */
	if (host_flags & PIIX_FLAG_CHECKINTR)
		pci_intx(pdev, 1);

	if (piix_check_450nx_errata(pdev)) {
		/* This writes into the master table but it does not
		   really matter for this errata as we will apply it to
		   all the PIIX devices on the board */
		port_info[0].mwdma_mask = 0;
		port_info[0].udma_mask = 0;
		port_info[1].mwdma_mask = 0;
		port_info[1].udma_mask = 0;
	}
	return ata_pci_init_one(pdev, ppinfo, 2);
}
932
/* Host teardown: nothing PIIX-specific to undo, just the generic stop. */
static void piix_host_stop(struct ata_host_set *host_set)
{
	ata_host_stop(host_set);
}
937
/* Module entry point: register the PCI driver, then clear
 * in_module_init so later hot-add probes are refused (no hotplug
 * support yet — see piix_init_one). */
static int __init piix_init(void)
{
	int rc;

	DPRINTK("pci_register_driver\n");
	rc = pci_register_driver(&piix_pci_driver);
	if (rc)
		return rc;

	in_module_init = 0;

	DPRINTK("done\n");
	return 0;
}
952
/* Module exit point: unregister the PCI driver, detaching all ports. */
static void __exit piix_exit(void)
{
	pci_unregister_driver(&piix_pci_driver);
}

module_init(piix_init);
module_exit(piix_exit);
960
diff --git a/drivers/ata/libata-bmdma.c b/drivers/ata/libata-bmdma.c
new file mode 100644
index 000000000000..158f62dbf21b
--- /dev/null
+++ b/drivers/ata/libata-bmdma.c
@@ -0,0 +1,1109 @@
1/*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/pci.h>
37#include <linux/libata.h>
38
39#include "libata.h"
40
/**
 *	ata_tf_load_pio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using
 *	port I/O.  Control is written first (with an idle wait) when it
 *	changed; for LBA48 the HOB bytes must go out before the low
 *	bytes, since each register is a two-deep FIFO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* write device control only when it changed, then wait for idle */
	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* high-order bytes first for LBA48 commands */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
98
/**
 *	ata_tf_load_mmio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO.
 *	Same register ordering rules as ata_tf_load_pio().
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* write device control only when it changed, then wait for idle */
	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* high-order bytes first for LBA48 commands */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
156
157
158/**
159 * ata_tf_load - send taskfile registers to host controller
160 * @ap: Port to which output is sent
161 * @tf: ATA taskfile register set
162 *
163 * Outputs ATA taskfile to standard ATA host controller using MMIO
164 * or PIO as indicated by the ATA_FLAG_MMIO flag.
165 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
166 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
167 * hob_lbal, hob_lbam, and hob_lbah.
168 *
169 * This function waits for idle (!BUSY and !DRQ) after writing
170 * registers. If the control register has a new value, this
171 * function also waits for idle after writing control and before
172 * writing the remaining registers.
173 *
174 * May be used as the tf_load() entry in ata_port_operations.
175 *
176 * LOCKING:
177 * Inherited from caller.
178 */
179void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
180{
181 if (ap->flags & ATA_FLAG_MMIO)
182 ata_tf_load_mmio(ap, tf);
183 else
184 ata_tf_load_pio(ap, tf);
185}
186
/**
 *	ata_exec_command_pio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *	Pauses afterwards to satisfy the 400nS post-command delay.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}
206
207
/**
 *	ata_exec_command_mmio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	FIXME: missing write posting for 400nS delay enforcement
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}
229
230
231/**
232 * ata_exec_command - issue ATA command to host controller
233 * @ap: port to which command is being issued
234 * @tf: ATA taskfile register set
235 *
236 * Issues PIO/MMIO write to ATA command register, with proper
237 * synchronization with interrupt handler / other threads.
238 *
239 * LOCKING:
240 * spin_lock_irqsave(host_set lock)
241 */
242void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
243{
244 if (ap->flags & ATA_FLAG_MMIO)
245 ata_exec_command_mmio(ap, tf);
246 else
247 ata_exec_command_pio(ap, tf);
248}
249
/**
 *	ata_tf_read_pio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.  For LBA48, the HOB bit is set in device control so a
 *	second pass over the same registers yields the high-order bytes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);	/* status doubles as last command result */
	tf->feature = inb(ioaddr->error_addr);	/* error register shares the feature address */
	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* set HOB to expose the high-order register bytes */
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}
283
/**
 *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf via MMIO.  Same HOB handling as ata_tf_read_pio().
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);	/* status doubles as last command result */
	tf->feature = readb((void __iomem *)ioaddr->error_addr);
	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* set HOB to expose the high-order register bytes */
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}
317
318
319/**
320 * ata_tf_read - input device's ATA taskfile shadow registers
321 * @ap: Port from which input is read
322 * @tf: ATA taskfile register set for storing input
323 *
324 * Reads ATA taskfile registers for currently-selected device
325 * into @tf.
326 *
327 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
328 * is set, also reads the hob registers.
329 *
330 * May be used as the tf_read() entry in ata_port_operations.
331 *
332 * LOCKING:
333 * Inherited from caller.
334 */
335void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
336{
337 if (ap->flags & ATA_FLAG_MMIO)
338 ata_tf_read_mmio(ap, tf);
339 else
340 ata_tf_read_pio(ap, tf);
341}
342
/**
 *	ata_check_status_pio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and return its value. This also clears pending interrupts
 *	from this device
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	return inb(ap->ioaddr.status_addr);
}
358
/**
 *	ata_check_status_mmio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	via MMIO and return its value. This also clears pending interrupts
 *	from this device
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	return readb((void __iomem *) ap->ioaddr.status_addr);
}
374
375
376/**
377 * ata_check_status - Read device status reg & clear interrupt
378 * @ap: port where the device is
379 *
380 * Reads ATA taskfile status register for currently-selected device
381 * and return its value. This also clears pending interrupts
382 * from this device
383 *
384 * May be used as the check_status() entry in ata_port_operations.
385 *
386 * LOCKING:
387 * Inherited from caller.
388 */
389u8 ata_check_status(struct ata_port *ap)
390{
391 if (ap->flags & ATA_FLAG_MMIO)
392 return ata_check_status_mmio(ap);
393 return ata_check_status_pio(ap);
394}
395
396
397/**
398 * ata_altstatus - Read device alternate status reg
399 * @ap: port where the device is
400 *
401 * Reads ATA taskfile alternate status register for
402 * currently-selected device and return its value.
403 *
404 * Note: may NOT be used as the check_altstatus() entry in
405 * ata_port_operations.
406 *
407 * LOCKING:
408 * Inherited from caller.
409 */
410u8 ata_altstatus(struct ata_port *ap)
411{
412 if (ap->ops->check_altstatus)
413 return ap->ops->check_altstatus(ap);
414
415 if (ap->flags & ATA_FLAG_MMIO)
416 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
417 return inb(ap->ioaddr.altstatus_addr);
418}
419
420/**
421 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
422 * @qc: Info associated with this ATA transaction.
423 *
424 * LOCKING:
425 * spin_lock_irqsave(host_set lock)
426 */
427
428static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
429{
430 struct ata_port *ap = qc->ap;
431 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
432 u8 dmactl;
433 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
434
435 /* load PRD table addr. */
436 mb(); /* make sure PRD table writes are visible to controller */
437 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
438
439 /* specify data direction, triple-check start bit is clear */
440 dmactl = readb(mmio + ATA_DMA_CMD);
441 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
442 if (!rw)
443 dmactl |= ATA_DMA_WR;
444 writeb(dmactl, mmio + ATA_DMA_CMD);
445
446 /* issue r/w command */
447 ap->ops->exec_command(ap, &qc->tf);
448}
449
/**
 *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	Sets ATA_DMA_START in the BMDMA command register via MMIO,
 *	handing the transfer over to the controller.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * expects, so I think it is best to not add a readb()
	 * without first auditing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}
480
481/**
482 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
483 * @qc: Info associated with this ATA transaction.
484 *
485 * LOCKING:
486 * spin_lock_irqsave(host_set lock)
487 */
488
489static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
490{
491 struct ata_port *ap = qc->ap;
492 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
493 u8 dmactl;
494
495 /* load PRD table addr. */
496 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
497
498 /* specify data direction, triple-check start bit is clear */
499 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
500 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
501 if (!rw)
502 dmactl |= ATA_DMA_WR;
503 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
504
505 /* issue r/w command */
506 ap->ops->exec_command(ap, &qc->tf);
507}
508
509/**
510 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
511 * @qc: Info associated with this ATA transaction.
512 *
513 * LOCKING:
514 * spin_lock_irqsave(host_set lock)
515 */
516
517static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
518{
519 struct ata_port *ap = qc->ap;
520 u8 dmactl;
521
522 /* start host DMA transaction */
523 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
524 outb(dmactl | ATA_DMA_START,
525 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
526}
527
528
529/**
530 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
531 * @qc: Info associated with this ATA transaction.
532 *
533 * Writes the ATA_DMA_START flag to the DMA command register.
534 *
535 * May be used as the bmdma_start() entry in ata_port_operations.
536 *
537 * LOCKING:
538 * spin_lock_irqsave(host_set lock)
539 */
540void ata_bmdma_start(struct ata_queued_cmd *qc)
541{
542 if (qc->ap->flags & ATA_FLAG_MMIO)
543 ata_bmdma_start_mmio(qc);
544 else
545 ata_bmdma_start_pio(qc);
546}
547
548
549/**
550 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
551 * @qc: Info associated with this ATA transaction.
552 *
553 * Writes address of PRD table to device's PRD Table Address
554 * register, sets the DMA control register, and calls
555 * ops->exec_command() to start the transfer.
556 *
557 * May be used as the bmdma_setup() entry in ata_port_operations.
558 *
559 * LOCKING:
560 * spin_lock_irqsave(host_set lock)
561 */
562void ata_bmdma_setup(struct ata_queued_cmd *qc)
563{
564 if (qc->ap->flags & ATA_FLAG_MMIO)
565 ata_bmdma_setup_mmio(qc);
566 else
567 ata_bmdma_setup_pio(qc);
568}
569
570
571/**
572 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
573 * @ap: Port associated with this ATA transaction.
574 *
575 * Clear interrupt and error flags in DMA status register.
576 *
577 * May be used as the irq_clear() entry in ata_port_operations.
578 *
579 * LOCKING:
580 * spin_lock_irqsave(host_set lock)
581 */
582
583void ata_bmdma_irq_clear(struct ata_port *ap)
584{
585 if (!ap->ioaddr.bmdma_addr)
586 return;
587
588 if (ap->flags & ATA_FLAG_MMIO) {
589 void __iomem *mmio =
590 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
591 writeb(readb(mmio), mmio);
592 } else {
593 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
594 outb(inb(addr), addr);
595 }
596}
597
598
599/**
600 * ata_bmdma_status - Read PCI IDE BMDMA status
601 * @ap: Port associated with this ATA transaction.
602 *
603 * Read and return BMDMA status register.
604 *
605 * May be used as the bmdma_status() entry in ata_port_operations.
606 *
607 * LOCKING:
608 * spin_lock_irqsave(host_set lock)
609 */
610
611u8 ata_bmdma_status(struct ata_port *ap)
612{
613 u8 host_stat;
614 if (ap->flags & ATA_FLAG_MMIO) {
615 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
616 host_stat = readb(mmio + ATA_DMA_STATUS);
617 } else
618 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
619 return host_stat;
620}
621
622
623/**
624 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
625 * @qc: Command we are ending DMA for
626 *
627 * Clears the ATA_DMA_START flag in the dma control register
628 *
629 * May be used as the bmdma_stop() entry in ata_port_operations.
630 *
631 * LOCKING:
632 * spin_lock_irqsave(host_set lock)
633 */
634
635void ata_bmdma_stop(struct ata_queued_cmd *qc)
636{
637 struct ata_port *ap = qc->ap;
638 if (ap->flags & ATA_FLAG_MMIO) {
639 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
640
641 /* clear start/stop bit */
642 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
643 mmio + ATA_DMA_CMD);
644 } else {
645 /* clear start/stop bit */
646 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
647 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
648 }
649
650 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
651 ata_altstatus(ap); /* dummy read */
652}
653
654/**
655 * ata_bmdma_freeze - Freeze BMDMA controller port
656 * @ap: port to freeze
657 *
658 * Freeze BMDMA controller port.
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663void ata_bmdma_freeze(struct ata_port *ap)
664{
665 struct ata_ioports *ioaddr = &ap->ioaddr;
666
667 ap->ctl |= ATA_NIEN;
668 ap->last_ctl = ap->ctl;
669
670 if (ap->flags & ATA_FLAG_MMIO)
671 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
672 else
673 outb(ap->ctl, ioaddr->ctl_addr);
674}
675
/**
 *	ata_bmdma_thaw - Thaw BMDMA controller port
 *	@ap: port to thaw
 *
 *	Thaw BMDMA controller port: drain any pending device interrupt
 *	by reading the status register, ack the host-side interrupt via
 *	ops->irq_clear(), and only then re-enable interrupts.  The
 *	order matters — interrupts must be clear before re-enabling.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);
}
693
/**
 *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 *	@ap: port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Handle error for ATA BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	This function is intended to be used for constructing
 *	->error_handler callback by low level drivers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	/* only look at the active command if it actually failed */
	qc = __ata_qc_from_tag(ap, ap->active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
		u8 host_stat;

		/* snapshot BMDMA status for the EH report */
		host_stat = ata_bmdma_status(ap);

		ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	/* drain device status and ack any pending host interrupt */
	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* thaw outside the lock; ata_eh_thaw_port takes it itself */
	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}
764
765/**
766 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
767 * @ap: port to handle error for
768 *
769 * Stock error handler for BMDMA controller.
770 *
771 * LOCKING:
772 * Kernel thread context (may sleep)
773 */
774void ata_bmdma_error_handler(struct ata_port *ap)
775{
776 ata_reset_fn_t hardreset;
777
778 hardreset = NULL;
779 if (sata_scr_valid(ap))
780 hardreset = sata_std_hardreset;
781
782 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
783 ata_std_postreset);
784}
785
/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *				      BMDMA controller
 *	@qc: internal command to clean up
 *
 *	Simply stops the BMDMA engine for @qc; no further cleanup is
 *	needed for a stock BMDMA controller.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	ata_bmdma_stop(qc);
}
798
799#ifdef CONFIG_PCI
/**
 *	ata_pci_init_native_mode - Initialize native-mode driver
 *	@pdev: pci device to be initialized
 *	@port: array[2] of pointers to port info structures.
 *	@ports: bitmap of ports present (ATA_PORT_PRIMARY/ATA_PORT_SECONDARY)
 *
 *	Utility function which allocates and initializes an
 *	ata_probe_ent structure for a standard dual-port
 *	PIO-based IDE controller.  The returned ata_probe_ent
 *	structure can be passed to ata_device_add().  The returned
 *	ata_probe_ent structure should then be freed with kfree().
 *
 *	The caller need only pass the address of the primary port, the
 *	secondary will be deduced automatically. If the device has non
 *	standard secondary port mappings this function can be called twice,
 *	once for each interface.
 */

struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
{
	struct ata_probe_ent *probe_ent =
		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	int p = 0;
	unsigned long bmdma;

	if (!probe_ent)
		return NULL;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->private_data = port[0]->private_data;

	if (ports & ATA_PORT_PRIMARY) {
		/* BAR0 = primary taskfile, BAR1 = primary ctl/altstatus */
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
		/* BAR4 = bus-master DMA block; primary channel at offset 0 */
		bmdma = pci_resource_start(pdev, 4);
		if (bmdma) {
			/* bit 7 of BMDMA status (+2) flags simplex-only */
			if (inb(bmdma + 2) & 0x80)
				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
			probe_ent->port[p].bmdma_addr = bmdma;
		}
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	if (ports & ATA_PORT_SECONDARY) {
		/* BAR2 = secondary taskfile, BAR3 = secondary ctl */
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
		bmdma = pci_resource_start(pdev, 4);
		if (bmdma) {
			bmdma += 8;	/* secondary channel registers */
			if(inb(bmdma + 2) & 0x80)
				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
			probe_ent->port[p].bmdma_addr = bmdma;
		}
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	probe_ent->n_ports = p;
	return probe_ent;
}
867
868
/**
 *	ata_pci_init_legacy_port - Initialize legacy-mode probe entry
 *	@pdev: pci device the ports belong to
 *	@port: array[2] of pointers to port info structures.
 *	@port_mask: bitmap of ports to enable
 *		    (ATA_PORT_PRIMARY/ATA_PORT_SECONDARY)
 *
 *	Allocates and fills an ata_probe_ent for a controller running
 *	in legacy (compatibility) mode: fixed ISA taskfile addresses
 *	and the classic IRQ 14 (primary) / IRQ 15 (secondary) wiring.
 *	Ports missing from @port_mask are marked dummy.  Returns NULL
 *	on allocation failure.
 */
static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
				struct ata_port_info **port, int port_mask)
{
	struct ata_probe_ent *probe_ent;
	/* BAR4 still supplies the bus-master DMA block in legacy mode */
	unsigned long bmdma = pci_resource_start(pdev, 4);

	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	if (!probe_ent)
		return NULL;

	probe_ent->n_ports = 2;
	probe_ent->private_data = port[0]->private_data;

	if (port_mask & ATA_PORT_PRIMARY) {
		probe_ent->irq = 14;
		probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD;
		probe_ent->port[0].altstatus_addr =
		probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL;
		if (bmdma) {
			probe_ent->port[0].bmdma_addr = bmdma;
			/* bit 7 of BMDMA status (+2) flags simplex-only */
			if (inb(bmdma + 2) & 0x80)
				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
		}
		ata_std_ports(&probe_ent->port[0]);
	} else
		probe_ent->dummy_port_mask |= ATA_PORT_PRIMARY;

	if (port_mask & ATA_PORT_SECONDARY) {
		/* use irq2 when the primary already claimed irq */
		if (probe_ent->irq)
			probe_ent->irq2 = 15;
		else
			probe_ent->irq = 15;
		probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD;
		probe_ent->port[1].altstatus_addr =
		probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL;
		if (bmdma) {
			/* secondary channel registers live 8 bytes in */
			probe_ent->port[1].bmdma_addr = bmdma + 8;
			if (inb(bmdma + 10) & 0x80)
				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
		}
		ata_std_ports(&probe_ent->port[1]);
	} else
		probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY;

	return probe_ent;
}
915
916
/**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@port_info: Information from low-level host driver
 *	@n_ports: Number of ports attached to host controller
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function calls pci_enable_device(), reserves its register
 *	regions, sets the dma mask, enables bus master mode, and calls
 *	ata_device_add()
 *
 *	ASSUMPTION:
 *	Nobody makes a single channel controller that appears solely as
 *	the secondary legacy port on PCI.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative on errno-based value on error.
 */

int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
		      unsigned int n_ports)
{
	struct ata_probe_ent *probe_ent = NULL;
	struct ata_port_info *port[2];
	u8 tmp8, mask;
	unsigned int legacy_mode = 0;
	int disable_dev_on_err = 1;
	int rc;

	DPRINTK("ENTER\n");

	port[0] = port_info[0];
	if (n_ports > 1)
		port[1] = port_info[1];
	else
		port[1] = port[0];	/* single-port: reuse the same info */

	/* Detect legacy (compatibility) mode from the PCI programming
	 * interface byte: both channels native sets bits 0 and 2.
	 */
	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = (1 << 3);	/* provisional marker bit */
	}

	/* FIXME... */
	if ((!legacy_mode) && (n_ports > 2)) {
		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
		n_ports = 2;
		/* For now */
	}

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled. Secondly for shared use as Arjan says we want refcounting

	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled
	 */

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		/* someone else owns the regions; don't disable on exit */
		disable_dev_on_err = 0;
		goto err_out;
	}

	if (legacy_mode) {
		/* Claim the fixed ISA I/O ranges.  If request_region
		 * fails, probe the conflicting resource: a conflict
		 * named "libata" is one of our own reservations, so
		 * the port is still usable; anything else means the
		 * port belongs to another driver and must be skipped.
		 */
		if (!request_region(ATA_PRIMARY_CMD, 8, "libata")) {
			struct resource *conflict, res;
			res.start = ATA_PRIMARY_CMD;
			res.end = ATA_PRIMARY_CMD + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= ATA_PORT_PRIMARY;
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x%0X IDE port busy\n", ATA_PRIMARY_CMD);
			}
		} else
			legacy_mode |= ATA_PORT_PRIMARY;

		if (!request_region(ATA_SECONDARY_CMD, 8, "libata")) {
			struct resource *conflict, res;
			res.start = ATA_SECONDARY_CMD;
			res.end = ATA_SECONDARY_CMD + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= ATA_PORT_SECONDARY;
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x%X IDE port busy\n", ATA_SECONDARY_CMD);
			}
		} else
			legacy_mode |= ATA_PORT_SECONDARY;
	}

	/* we have legacy mode, but all ports are unavailable */
	if (legacy_mode == (1 << 3)) {
		rc = -EBUSY;
		goto err_out_regions;
	}

	/* FIXME: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	if (legacy_mode) {
		probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode);
	} else {
		if (n_ports == 2)
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
		else
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
	}
	if (!probe_ent) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return */
	ata_device_add(probe_ent);

	/* probe_ent is a throw-away; ata_device_add copied what it needs */
	kfree(probe_ent);

	return 0;

err_out_regions:
	if (legacy_mode & ATA_PORT_PRIMARY)
		release_region(ATA_PRIMARY_CMD, 8);
	if (legacy_mode & ATA_PORT_SECONDARY)
		release_region(ATA_SECONDARY_CMD, 8);
	pci_release_regions(pdev);
err_out:
	if (disable_dev_on_err)
		pci_disable_device(pdev);
	return rc;
}
1071
/**
 *	ata_pci_clear_simplex - attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non simplex mode. This implements the necessary logic to
 *	perform the task on such devices. Calling it on other devices will
 *	have -undefined- behaviour.
 *
 *	RETURNS:
 *	0 if the simplex bit was cleared, -ENOENT if there is no BMDMA
 *	register block, -EOPNOTSUPP if the bit stuck.
 */

int ata_pci_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	/* write the status register back with bit 7 (simplex) masked
	 * off, keeping only bits 5-6; then re-read to see if it stuck */
	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}
1097
1098unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
1099{
1100 /* Filter out DMA modes if the device has been configured by
1101 the BIOS as PIO only */
1102
1103 if (ap->ioaddr.bmdma_addr == 0)
1104 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
1105 return xfer_mask;
1106}
1107
1108#endif /* CONFIG_PCI */
1109
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
new file mode 100644
index 000000000000..7d786fba4d82
--- /dev/null
+++ b/drivers/ata/libata-core.c
@@ -0,0 +1,6097 @@
1/*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
50#include <linux/jiffies.h>
51#include <linux/scatterlist.h>
52#include <scsi/scsi.h>
53#include <scsi/scsi_cmnd.h>
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

/* monotonically increasing id handed out to registered ports */
static unsigned int ata_unique_id = 1;
/* workqueue private to this file */
static struct workqueue_struct *ata_wq;

/* auxiliary workqueue, exported for use elsewhere in libata */
struct workqueue_struct *ata_aux_wq;

/* module parameters; all read-only after load (perm 0444) */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
97
98
99/**
100 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
101 * @tf: Taskfile to convert
102 * @fis: Buffer into which data will output
103 * @pmp: Port multiplier port
104 *
105 * Converts a standard ATA taskfile to a Serial ATA
106 * FIS structure (Register - Host to Device).
107 *
108 * LOCKING:
109 * Inherited from caller.
110 */
111
112void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
113{
114 fis[0] = 0x27; /* Register - Host to Device FIS */
115 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
116 bit 7 indicates Command FIS */
117 fis[2] = tf->command;
118 fis[3] = tf->feature;
119
120 fis[4] = tf->lbal;
121 fis[5] = tf->lbam;
122 fis[6] = tf->lbah;
123 fis[7] = tf->device;
124
125 fis[8] = tf->hob_lbal;
126 fis[9] = tf->hob_lbam;
127 fis[10] = tf->hob_lbah;
128 fis[11] = tf->hob_feature;
129
130 fis[12] = tf->nsect;
131 fis[13] = tf->hob_nsect;
132 fis[14] = 0;
133 fis[15] = tf->ctl;
134
135 fis[16] = 0;
136 fis[17] = 0;
137 fis[18] = 0;
138 fis[19] = 0;
139}
140
141/**
142 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
143 * @fis: Buffer from which data will be input
144 * @tf: Taskfile to output
145 *
146 * Converts a serial ATA FIS structure to a standard ATA taskfile.
147 *
148 * LOCKING:
149 * Inherited from caller.
150 */
151
152void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
153{
154 tf->command = fis[2]; /* status */
155 tf->feature = fis[3]; /* error */
156
157 tf->lbal = fis[4];
158 tf->lbam = fis[5];
159 tf->lbah = fis[6];
160 tf->device = fis[7];
161
162 tf->hob_lbal = fis[8];
163 tf->hob_lbam = fis[9];
164 tf->hob_lbah = fis[10];
165
166 tf->nsect = fis[12];
167 tf->hob_nsect = fis[13];
168}
169
/*
 * Read/write opcode lookup table used by ata_rwcmd_protocol().
 * Indexed as base + fua + lba48 + write where:
 *   base:  0 = PIO multi-sector, 8 = PIO single-sector, 16 = DMA
 *   +4 if FUA, +2 if LBA48, +1 if write.
 * Zero entries are unsupported combinations.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
199
200/**
201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
202 * @qc: command to examine and configure
203 *
204 * Examine the device configuration and tf->flags to calculate
205 * the proper read/write commands and protocol to use.
206 *
207 * LOCKING:
208 * caller.
209 */
210int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
211{
212 struct ata_taskfile *tf = &qc->tf;
213 struct ata_device *dev = qc->dev;
214 u8 cmd;
215
216 int index, fua, lba48, write;
217
218 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
219 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
220 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
221
222 if (dev->flags & ATA_DFLAG_PIO) {
223 tf->protocol = ATA_PROT_PIO;
224 index = dev->multi_count ? 0 : 8;
225 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
226 /* Unable to use DMA due to host limitation */
227 tf->protocol = ATA_PROT_PIO;
228 index = dev->multi_count ? 0 : 8;
229 } else {
230 tf->protocol = ATA_PROT_DMA;
231 index = 16;
232 }
233
234 cmd = ata_rw_cmds[index + fua + lba48 + write];
235 if (cmd) {
236 tf->command = cmd;
237 return 0;
238 }
239 return -1;
240}
241
242/**
243 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
244 * @pio_mask: pio_mask
245 * @mwdma_mask: mwdma_mask
246 * @udma_mask: udma_mask
247 *
248 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
249 * unsigned int xfer_mask.
250 *
251 * LOCKING:
252 * None.
253 *
254 * RETURNS:
255 * Packed xfer_mask.
256 */
257static unsigned int ata_pack_xfermask(unsigned int pio_mask,
258 unsigned int mwdma_mask,
259 unsigned int udma_mask)
260{
261 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
262 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
263 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
264}
265
266/**
267 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
268 * @xfer_mask: xfer_mask to unpack
269 * @pio_mask: resulting pio_mask
270 * @mwdma_mask: resulting mwdma_mask
271 * @udma_mask: resulting udma_mask
272 *
273 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
274 * Any NULL distination masks will be ignored.
275 */
276static void ata_unpack_xfermask(unsigned int xfer_mask,
277 unsigned int *pio_mask,
278 unsigned int *mwdma_mask,
279 unsigned int *udma_mask)
280{
281 if (pio_mask)
282 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
283 if (mwdma_mask)
284 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
285 if (udma_mask)
286 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
287}
288
/* Maps each xfer_mask bit group to its first XFER_* mode value */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and width within xfer_mask */
	u8 base;		/* XFER_* value for the group's lowest bit */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },	/* sentinel: negative shift terminates scans */
};
298
299/**
300 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
301 * @xfer_mask: xfer_mask of interest
302 *
303 * Return matching XFER_* value for @xfer_mask. Only the highest
304 * bit of @xfer_mask is considered.
305 *
306 * LOCKING:
307 * None.
308 *
309 * RETURNS:
310 * Matching XFER_* value, 0 if no match found.
311 */
312static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
313{
314 int highbit = fls(xfer_mask) - 1;
315 const struct ata_xfer_ent *ent;
316
317 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
318 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
319 return ent->base + highbit - ent->shift;
320 return 0;
321}
322
323/**
324 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
325 * @xfer_mode: XFER_* of interest
326 *
327 * Return matching xfer_mask for @xfer_mode.
328 *
329 * LOCKING:
330 * None.
331 *
332 * RETURNS:
333 * Matching xfer_mask, 0 if no match found.
334 */
335static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
336{
337 const struct ata_xfer_ent *ent;
338
339 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
340 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
341 return 1 << (ent->shift + xfer_mode - ent->base);
342 return 0;
343}
344
345/**
346 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
347 * @xfer_mode: XFER_* of interest
348 *
349 * Return matching xfer_shift for @xfer_mode.
350 *
351 * LOCKING:
352 * None.
353 *
354 * RETURNS:
355 * Matching xfer_shift, -1 if no match found.
356 */
357static int ata_xfer_mode2shift(unsigned int xfer_mode)
358{
359 const struct ata_xfer_ent *ent;
360
361 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
362 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
363 return ent->shift;
364 return -1;
365}
366
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* table index == bit position within xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4",
		"MWDMA0", "MWDMA1", "MWDMA2",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
408
/* Map a SATA speed number (SStatus SPD field, 1-based) to a name. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;

	/* spd is 1-based; 0 and out-of-table values are unknown */
	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
420
421void ata_dev_disable(struct ata_device *dev)
422{
423 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
424 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
425 dev->class++;
426 }
427}
428
429/**
430 * ata_pio_devchk - PATA device presence detection
431 * @ap: ATA channel to examine
432 * @device: Device to examine (starting at zero)
433 *
434 * This technique was originally described in
435 * Hale Landis's ATADRVR (www.ata-atapi.com), and
436 * later found its way into the ATA/ATAPI spec.
437 *
438 * Write a pattern to the ATA shadow registers,
439 * and if a device is present, it will respond by
440 * correctly storing and echoing back the
441 * ATA shadow register contents.
442 *
443 * LOCKING:
444 * caller.
445 */
446
447static unsigned int ata_pio_devchk(struct ata_port *ap,
448 unsigned int device)
449{
450 struct ata_ioports *ioaddr = &ap->ioaddr;
451 u8 nsect, lbal;
452
453 ap->ops->dev_select(ap, device);
454
455 outb(0x55, ioaddr->nsect_addr);
456 outb(0xaa, ioaddr->lbal_addr);
457
458 outb(0xaa, ioaddr->nsect_addr);
459 outb(0x55, ioaddr->lbal_addr);
460
461 outb(0x55, ioaddr->nsect_addr);
462 outb(0xaa, ioaddr->lbal_addr);
463
464 nsect = inb(ioaddr->nsect_addr);
465 lbal = inb(ioaddr->lbal_addr);
466
467 if ((nsect == 0x55) && (lbal == 0xaa))
468 return 1; /* we found a device */
469
470 return 0; /* nothing found */
471}
472
473/**
474 * ata_mmio_devchk - PATA device presence detection
475 * @ap: ATA channel to examine
476 * @device: Device to examine (starting at zero)
477 *
478 * This technique was originally described in
479 * Hale Landis's ATADRVR (www.ata-atapi.com), and
480 * later found its way into the ATA/ATAPI spec.
481 *
482 * Write a pattern to the ATA shadow registers,
483 * and if a device is present, it will respond by
484 * correctly storing and echoing back the
485 * ATA shadow register contents.
486 *
487 * LOCKING:
488 * caller.
489 */
490
491static unsigned int ata_mmio_devchk(struct ata_port *ap,
492 unsigned int device)
493{
494 struct ata_ioports *ioaddr = &ap->ioaddr;
495 u8 nsect, lbal;
496
497 ap->ops->dev_select(ap, device);
498
499 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
500 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
501
502 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
503 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
504
505 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
506 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
507
508 nsect = readb((void __iomem *) ioaddr->nsect_addr);
509 lbal = readb((void __iomem *) ioaddr->lbal_addr);
510
511 if ((nsect == 0x55) && (lbal == 0xaa))
512 return 1; /* we found a device */
513
514 return 0; /* nothing found */
515}
516
517/**
518 * ata_devchk - PATA device presence detection
519 * @ap: ATA channel to examine
520 * @device: Device to examine (starting at zero)
521 *
522 * Dispatch ATA device presence detection, depending
523 * on whether we are using PIO or MMIO to talk to the
524 * ATA shadow registers.
525 *
526 * LOCKING:
527 * caller.
528 */
529
530static unsigned int ata_devchk(struct ata_port *ap,
531 unsigned int device)
532{
533 if (ap->flags & ATA_FLAG_MMIO)
534 return ata_mmio_devchk(ap, device);
535 return ata_pio_devchk(ap, device);
536}
537
538/**
539 * ata_dev_classify - determine device type based on ATA-spec signature
540 * @tf: ATA taskfile register set for device to be identified
541 *
542 * Determine from taskfile register contents whether a device is
543 * ATA or ATAPI, as per "Signature and persistence" section
544 * of ATA/PI spec (volume 1, sect 5.14).
545 *
546 * LOCKING:
547 * None.
548 *
549 * RETURNS:
550 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
551 * the event of failure.
552 */
553
554unsigned int ata_dev_classify(const struct ata_taskfile *tf)
555{
556 /* Apple's open source Darwin code hints that some devices only
557 * put a proper signature into the LBA mid/high registers,
558 * So, we only check those. It's sufficient for uniqueness.
559 */
560
561 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
562 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
563 DPRINTK("found ATA device by sig\n");
564 return ATA_DEV_ATA;
565 }
566
567 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
568 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
569 DPRINTK("found ATAPI device by sig\n");
570 return ATA_DEV_ATAPI;
571 }
572
573 DPRINTK("unknown device\n");
574 return ATA_DEV_UNKNOWN;
575}
576
577/**
578 * ata_dev_try_classify - Parse returned ATA device signature
579 * @ap: ATA channel to examine
580 * @device: Device to examine (starting at zero)
581 * @r_err: Value of error register on completion
582 *
583 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
584 * an ATA/ATAPI-defined set of values is placed in the ATA
585 * shadow registers, indicating the results of device detection
586 * and diagnostics.
587 *
588 * Select the ATA device, and read the values from the ATA shadow
589 * registers. Then parse according to the Error register value,
590 * and the spec-defined values examined by ata_dev_classify().
591 *
592 * LOCKING:
593 * caller.
594 *
595 * RETURNS:
596 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
597 */
598
599static unsigned int
600ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
601{
602 struct ata_taskfile tf;
603 unsigned int class;
604 u8 err;
605
606 ap->ops->dev_select(ap, device);
607
608 memset(&tf, 0, sizeof(tf));
609
610 ap->ops->tf_read(ap, &tf);
611 err = tf.feature;
612 if (r_err)
613 *r_err = err;
614
615 /* see if device passed diags */
616 if (err == 1)
617 /* do nothing */ ;
618 else if ((device == 0) && (err == 0x81))
619 /* do nothing */ ;
620 else
621 return ATA_DEV_NONE;
622
623 /* determine if device is ATA or ATAPI */
624 class = ata_dev_classify(&tf);
625
626 if (class == ATA_DEV_UNKNOWN)
627 return ATA_DEV_NONE;
628 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
629 return ATA_DEV_NONE;
630 return class;
631}
632
633/**
634 * ata_id_string - Convert IDENTIFY DEVICE page into string
635 * @id: IDENTIFY DEVICE results we will examine
636 * @s: string into which data is output
637 * @ofs: offset into identify device page
638 * @len: length of string to return. must be an even number.
639 *
640 * The strings in the IDENTIFY DEVICE page are broken up into
641 * 16-bit chunks. Run through the string, and output each
642 * 8-bit chunk linearly, regardless of platform.
643 *
644 * LOCKING:
645 * caller.
646 */
647
648void ata_id_string(const u16 *id, unsigned char *s,
649 unsigned int ofs, unsigned int len)
650{
651 unsigned int c;
652
653 while (len > 0) {
654 c = id[ofs] >> 8;
655 *s = c;
656 s++;
657
658 c = id[ofs] & 0xff;
659 *s = c;
660 s++;
661
662 ofs++;
663 len -= 2;
664 }
665}
666
667/**
668 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
669 * @id: IDENTIFY DEVICE results we will examine
670 * @s: string into which data is output
671 * @ofs: offset into identify device page
672 * @len: length of string to return. must be an odd number.
673 *
674 * This function is identical to ata_id_string except that it
675 * trims trailing spaces and terminates the resulting string with
676 * null. @len must be actual maximum length (even number) + 1.
677 *
678 * LOCKING:
679 * caller.
680 */
681void ata_id_c_string(const u16 *id, unsigned char *s,
682 unsigned int ofs, unsigned int len)
683{
684 unsigned char *p;
685
686 WARN_ON(!(len & 1));
687
688 ata_id_string(id, s, ofs, len - 1);
689
690 p = s + strnlen(s, len - 1);
691 while (p > s && p[-1] == ' ')
692 p--;
693 *p = '\0';
694}
695
696static u64 ata_id_n_sectors(const u16 *id)
697{
698 if (ata_id_has_lba(id)) {
699 if (ata_id_has_lba48(id))
700 return ata_id_u64(id, 100);
701 else
702 return ata_id_u32(id, 60);
703 } else {
704 if (ata_id_current_chs_valid(id))
705 return ata_id_u32(id, 57);
706 else
707 return id[1] * id[3] * id[6];
708 }
709}
710
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
726
727
728/**
729 * ata_std_dev_select - Select device 0/1 on ATA bus
730 * @ap: ATA channel to manipulate
731 * @device: ATA device (numbered from zero) to select
732 *
733 * Use the method defined in the ATA specification to
734 * make either device 0, or device 1, active on the
735 * ATA channel. Works with both PIO and MMIO.
736 *
737 * May be used as the dev_select() entry in ata_port_operations.
738 *
739 * LOCKING:
740 * caller.
741 */
742
743void ata_std_dev_select (struct ata_port *ap, unsigned int device)
744{
745 u8 tmp;
746
747 if (device == 0)
748 tmp = ATA_DEVICE_OBS;
749 else
750 tmp = ATA_DEVICE_OBS | ATA_DEV1;
751
752 if (ap->flags & ATA_FLAG_MMIO) {
753 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
754 } else {
755 outb(tmp, ap->ioaddr.device_addr);
756 }
757 ata_pause(ap); /* needed; also flushes, for mmio */
758}
759
760/**
761 * ata_dev_select - Select device 0/1 on ATA bus
762 * @ap: ATA channel to manipulate
763 * @device: ATA device (numbered from zero) to select
764 * @wait: non-zero to wait for Status register BSY bit to clear
765 * @can_sleep: non-zero if context allows sleeping
766 *
767 * Use the method defined in the ATA specification to
768 * make either device 0, or device 1, active on the
769 * ATA channel.
770 *
771 * This is a high-level version of ata_std_dev_select(),
772 * which additionally provides the services of inserting
773 * the proper pauses and status polling, where needed.
774 *
775 * LOCKING:
776 * caller.
777 */
778
779void ata_dev_select(struct ata_port *ap, unsigned int device,
780 unsigned int wait, unsigned int can_sleep)
781{
782 if (ata_msg_probe(ap))
783 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
784 "device %u, wait %u\n", ap->id, device, wait);
785
786 if (wait)
787 ata_wait_idle(ap);
788
789 ap->ops->dev_select(ap, device);
790
791 if (wait) {
792 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
793 msleep(150);
794 ata_wait_idle(ap);
795 }
796}
797
798/**
799 * ata_dump_id - IDENTIFY DEVICE info debugging output
800 * @id: IDENTIFY DEVICE page to dump
801 *
802 * Dump selected 16-bit words from the given IDENTIFY DEVICE
803 * page.
804 *
805 * LOCKING:
806 * caller.
807 */
808
809static inline void ata_dump_id(const u16 *id)
810{
811 DPRINTK("49==0x%04x "
812 "53==0x%04x "
813 "63==0x%04x "
814 "64==0x%04x "
815 "75==0x%04x \n",
816 id[49],
817 id[53],
818 id[63],
819 id[64],
820 id[75]);
821 DPRINTK("80==0x%04x "
822 "81==0x%04x "
823 "82==0x%04x "
824 "83==0x%04x "
825 "84==0x%04x \n",
826 id[80],
827 id[81],
828 id[82],
829 id[83],
830 id[84]);
831 DPRINTK("88==0x%04x "
832 "93==0x%04x\n",
833 id[88],
834 id[93]);
835}
836
837/**
838 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
839 * @id: IDENTIFY data to compute xfer mask from
840 *
841 * Compute the xfermask for this device. This is not as trivial
842 * as it seems if we must consider early devices correctly.
843 *
844 * FIXME: pre IDE drive timing (do we care ?).
845 *
846 * LOCKING:
847 * None.
848 *
849 * RETURNS:
850 * Computed xfermask
851 */
852static unsigned int ata_id_xfermask(const u16 *id)
853{
854 unsigned int pio_mask, mwdma_mask, udma_mask;
855
856 /* Usual case. Word 53 indicates word 64 is valid */
857 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
858 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
859 pio_mask <<= 3;
860 pio_mask |= 0x7;
861 } else {
862 /* If word 64 isn't valid then Word 51 high byte holds
863 * the PIO timing number for the maximum. Turn it into
864 * a mask.
865 */
866 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
867
868 /* But wait.. there's more. Design your standards by
869 * committee and you too can get a free iordy field to
870 * process. However its the speeds not the modes that
871 * are supported... Note drivers using the timing API
872 * will get this right anyway
873 */
874 }
875
876 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
877
878 udma_mask = 0;
879 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
880 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
881
882 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
883}
884
885/**
886 * ata_port_queue_task - Queue port_task
887 * @ap: The ata_port to queue port_task for
888 * @fn: workqueue function to be scheduled
889 * @data: data value to pass to workqueue function
890 * @delay: delay time for workqueue function
891 *
892 * Schedule @fn(@data) for execution after @delay jiffies using
893 * port_task. There is one port_task per port and it's the
894 * user(low level driver)'s responsibility to make sure that only
895 * one task is active at any given time.
896 *
897 * libata core layer takes care of synchronization between
898 * port_task and EH. ata_port_queue_task() may be ignored for EH
899 * synchronization.
900 *
901 * LOCKING:
902 * Inherited from caller.
903 */
904void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
905 unsigned long delay)
906{
907 int rc;
908
909 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
910 return;
911
912 PREPARE_WORK(&ap->port_task, fn, data);
913
914 if (!delay)
915 rc = queue_work(ata_wq, &ap->port_task);
916 else
917 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
918
919 /* rc == 0 means that another user is using port task */
920 WARN_ON(rc == 0);
921}
922
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* Raise the FLUSH flag under the port lock so that a running
	 * task observes it and refrains from requeueing itself
	 * (ata_port_queue_task checks this flag).
	 */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* cancel failed: the work may already be executing,
		 * so flush once more to wait for it to finish */
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	/* allow future queueing again */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
965
966void ata_qc_complete_internal(struct ata_queued_cmd *qc)
967{
968 struct completion *waiting = qc->private_data;
969
970 complete(waiting);
971}
972
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save the port's current qc bookkeeping and take it over;
	 * restored under the lock in the finish-up section below */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		/* timed out */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* a failed qc must report some error to the caller */
	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				"zero err_mask for failed "
				"internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1134
1135/**
1136 * ata_do_simple_cmd - execute simple internal command
1137 * @dev: Device to which the command is sent
1138 * @cmd: Opcode to execute
1139 *
1140 * Execute a 'simple' command, that only consists of the opcode
1141 * 'cmd' itself, without filling any other registers
1142 *
1143 * LOCKING:
1144 * Kernel thread context (may sleep).
1145 *
1146 * RETURNS:
1147 * Zero on success, AC_ERR_* mask on failure
1148 */
1149unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1150{
1151 struct ata_taskfile tf;
1152
1153 ata_tf_init(dev, &tf);
1154
1155 tf.command = cmd;
1156 tf.flags |= ATA_TFLAG_DEVICE;
1157 tf.protocol = ATA_PROT_NODATA;
1158
1159 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1160}
1161
1162/**
1163 * ata_pio_need_iordy - check if iordy needed
1164 * @adev: ATA device
1165 *
1166 * Check if the current speed of the device requires IORDY. Used
1167 * by various controllers for chip configuration.
1168 */
1169
1170unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1171{
1172 int pio;
1173 int speed = adev->pio_mode - XFER_PIO_0;
1174
1175 if (speed < 2)
1176 return 0;
1177 if (speed > 2)
1178 return 1;
1179
1180 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1181
1182 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1183 pio = adev->id[ATA_ID_EIDE_PIO];
1184 /* Is the speed faster than the drive allows non IORDY ? */
1185 if (pio) {
1186 /* This is cycle times not frequency - watch the logic! */
1187 if (pio > 240) /* PIO2 is 240nS per cycle */
1188 return 1;
1189 return 0;
1190 }
1191 }
1192 return 0;
1193}
1194
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    int post_reset, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY flavor matching the (assumed) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* IDENTIFY data is little-endian 16-bit words on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the class we probed with must agree with what
	 * the device reports about itself */
	if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1299
1300static inline u8 ata_dev_knobble(struct ata_device *dev)
1301{
1302 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1303}
1304
1305static void ata_dev_config_ncq(struct ata_device *dev,
1306 char *desc, size_t desc_sz)
1307{
1308 struct ata_port *ap = dev->ap;
1309 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1310
1311 if (!ata_id_has_ncq(dev->id)) {
1312 desc[0] = '\0';
1313 return;
1314 }
1315
1316 if (ap->flags & ATA_FLAG_NCQ) {
1317 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1318 dev->flags |= ATA_DFLAG_NCQ;
1319 }
1320
1321 if (hdepth >= ddepth)
1322 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1323 else
1324 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1325}
1326
1327static void ata_set_port_max_cmd_len(struct ata_port *ap)
1328{
1329 int i;
1330
1331 if (ap->host) {
1332 ap->host->max_cmd_len = 0;
1333 for (i = 0; i < ATA_MAX_DEVICES; i++)
1334 ap->host->max_cmd_len = max_t(unsigned int,
1335 ap->host->max_cmd_len,
1336 ap->device[i].cdb_len);
1337 }
1338}
1339
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev, int print_info)
{
	struct ata_port *ap = dev->ap;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
					"max %s, %Lu sectors: %s %s\n",
					ata_id_major_version(id),
					ata_mode_string(xfer_mask),
					(unsigned long long)dev->n_sectors,
					lba_desc, ncq_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
					"max %s, %Lu sectors: CHS %u/%u/%u\n",
					ata_id_major_version(id),
					ata_mode_string(xfer_mask),
					(unsigned long long)dev->n_sectors,
					dev->cylinders, dev->heads,
					dev->sectors);
		}

		/* word 59: multiple-sector setting valid + count */
		if (dev->id[59] & 0x100) {
			dev->multi_count = dev->id[59] & 0xff;
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO,
					"ata%u: dev %u multi count %u\n",
					ap->id, dev->devno, dev->multi_count);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* give the LLDD a chance to apply chip-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
1514
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];	/* per-device retry budget */
	int i, rc, down_xfermask;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	down_xfermask = 0;

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* re-enable the port; phy_reset may have marked it disabled */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
		if (rc)
			goto fail;

		rc = ata_dev_configure(dev, 1);
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc) {
		/* mode setting failed; next retry may lower xfer limit */
		down_xfermask = 1;
		goto fail;
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	/* NOTE: @dev points at the device that caused the failure */
	switch (rc) {
	case -EINVAL:
	case -ENODEV:
		/* hopeless; don't retry this device */
		tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through */
	default:
		tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
			tries[dev->devno] = 0;
	}

	if (!tries[dev->devno]) {
		ata_down_xfermask_limit(dev, 1);
		ata_dev_disable(dev);
	}

	goto retry;
}
1626
1627/**
1628 * ata_port_probe - Mark port as enabled
1629 * @ap: Port for which we indicate enablement
1630 *
1631 * Modify @ap data structure such that the system
1632 * thinks that the entire port is enabled.
1633 *
1634 * LOCKING: host_set lock, or some other form of
1635 * serialization.
1636 */
1637
1638void ata_port_probe(struct ata_port *ap)
1639{
1640 ap->flags &= ~ATA_FLAG_DISABLED;
1641}
1642
1643/**
1644 * sata_print_link_status - Print SATA link status
1645 * @ap: SATA port to printk link status about
1646 *
1647 * This function prints link speed and status of a SATA link.
1648 *
1649 * LOCKING:
1650 * None.
1651 */
1652static void sata_print_link_status(struct ata_port *ap)
1653{
1654 u32 sstatus, scontrol, tmp;
1655
1656 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1657 return;
1658 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1659
1660 if (ata_port_online(ap)) {
1661 tmp = (sstatus >> 4) & 0xf;
1662 ata_port_printk(ap, KERN_INFO,
1663 "SATA link up %s (SStatus %X SControl %X)\n",
1664 sata_spd_string(tmp), sstatus, scontrol);
1665 } else {
1666 ata_port_printk(ap, KERN_INFO,
1667 "SATA link down (SStatus %X SControl %X)\n",
1668 sstatus, scontrol);
1669 }
1670}
1671
1672/**
1673 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1674 * @ap: SATA port associated with target SATA PHY.
1675 *
1676 * This function issues commands to standard SATA Sxxx
1677 * PHY registers, to wake up the phy (and device), and
1678 * clear any reset condition.
1679 *
1680 * LOCKING:
1681 * PCI/etc. bus probe sem.
1682 *
1683 */
1684void __sata_phy_reset(struct ata_port *ap)
1685{
1686 u32 sstatus;
1687 unsigned long timeout = jiffies + (HZ * 5);
1688
1689 if (ap->flags & ATA_FLAG_SATA_RESET) {
1690 /* issue phy wake/reset */
1691 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1692 /* Couldn't find anything in SATA I/II specs, but
1693 * AHCI-1.1 10.4.2 says at least 1 ms. */
1694 mdelay(1);
1695 }
1696 /* phy wake/clear reset */
1697 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1698
1699 /* wait for phy to become ready, if necessary */
1700 do {
1701 msleep(200);
1702 sata_scr_read(ap, SCR_STATUS, &sstatus);
1703 if ((sstatus & 0xf) != 1)
1704 break;
1705 } while (time_before(jiffies, timeout));
1706
1707 /* print link status */
1708 sata_print_link_status(ap);
1709
1710 /* TODO: phy layer with polling, timeouts, etc. */
1711 if (!ata_port_offline(ap))
1712 ata_port_probe(ap);
1713 else
1714 ata_port_disable(ap);
1715
1716 if (ap->flags & ATA_FLAG_DISABLED)
1717 return;
1718
1719 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1720 ata_port_disable(ap);
1721 return;
1722 }
1723
1724 ap->cbl = ATA_CBL_SATA;
1725}
1726
1727/**
1728 * sata_phy_reset - Reset SATA bus.
1729 * @ap: SATA port associated with target SATA PHY.
1730 *
1731 * This function resets the SATA bus, and then probes
1732 * the bus for devices.
1733 *
1734 * LOCKING:
1735 * PCI/etc. bus probe sem.
1736 *
1737 */
1738void sata_phy_reset(struct ata_port *ap)
1739{
1740 __sata_phy_reset(ap);
1741 if (ap->flags & ATA_FLAG_DISABLED)
1742 return;
1743 ata_bus_reset(ap);
1744}
1745
1746/**
1747 * ata_dev_pair - return other device on cable
1748 * @adev: device
1749 *
1750 * Obtain the other device on the same cable, or if none is
1751 * present NULL is returned
1752 */
1753
1754struct ata_device *ata_dev_pair(struct ata_device *adev)
1755{
1756 struct ata_port *ap = adev->ap;
1757 struct ata_device *pair = &ap->device[1 - adev->devno];
1758 if (!ata_dev_enabled(pair))
1759 return NULL;
1760 return pair;
1761}
1762
1763/**
1764 * ata_port_disable - Disable port.
1765 * @ap: Port to be disabled.
1766 *
1767 * Modify @ap data structure such that the system
1768 * thinks that the entire port is disabled, and should
1769 * never attempt to probe or communicate with devices
1770 * on this port.
1771 *
1772 * LOCKING: host_set lock, or some other form of
1773 * serialization.
1774 */
1775
1776void ata_port_disable(struct ata_port *ap)
1777{
1778 ap->device[0].class = ATA_DEV_NONE;
1779 ap->device[1].class = ATA_DEV_NONE;
1780 ap->flags |= ATA_FLAG_DISABLED;
1781}
1782
1783/**
1784 * sata_down_spd_limit - adjust SATA spd limit downward
1785 * @ap: Port to adjust SATA spd limit for
1786 *
1787 * Adjust SATA spd limit of @ap downward. Note that this
1788 * function only adjusts the limit. The change must be applied
1789 * using sata_set_spd().
1790 *
1791 * LOCKING:
1792 * Inherited from caller.
1793 *
1794 * RETURNS:
1795 * 0 on success, negative errno on failure
1796 */
1797int sata_down_spd_limit(struct ata_port *ap)
1798{
1799 u32 sstatus, spd, mask;
1800 int rc, highbit;
1801
1802 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1803 if (rc)
1804 return rc;
1805
1806 mask = ap->sata_spd_limit;
1807 if (mask <= 1)
1808 return -EINVAL;
1809 highbit = fls(mask) - 1;
1810 mask &= ~(1 << highbit);
1811
1812 spd = (sstatus >> 4) & 0xf;
1813 if (spd <= 1)
1814 return -EINVAL;
1815 spd--;
1816 mask &= (1 << spd) - 1;
1817 if (!mask)
1818 return -EINVAL;
1819
1820 ap->sata_spd_limit = mask;
1821
1822 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1823 sata_spd_string(fls(mask)));
1824
1825 return 0;
1826}
1827
1828static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1829{
1830 u32 spd, limit;
1831
1832 if (ap->sata_spd_limit == UINT_MAX)
1833 limit = 0;
1834 else
1835 limit = fls(ap->sata_spd_limit);
1836
1837 spd = (*scontrol >> 4) & 0xf;
1838 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1839
1840 return spd != limit;
1841}
1842
1843/**
1844 * sata_set_spd_needed - is SATA spd configuration needed
1845 * @ap: Port in question
1846 *
1847 * Test whether the spd limit in SControl matches
1848 * @ap->sata_spd_limit. This function is used to determine
1849 * whether hardreset is necessary to apply SATA spd
1850 * configuration.
1851 *
1852 * LOCKING:
1853 * Inherited from caller.
1854 *
1855 * RETURNS:
1856 * 1 if SATA spd configuration is needed, 0 otherwise.
1857 */
1858int sata_set_spd_needed(struct ata_port *ap)
1859{
1860 u32 scontrol;
1861
1862 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1863 return 0;
1864
1865 return __sata_set_spd_needed(ap, &scontrol);
1866}
1867
1868/**
1869 * sata_set_spd - set SATA spd according to spd limit
1870 * @ap: Port to set SATA spd for
1871 *
1872 * Set SATA spd of @ap according to sata_spd_limit.
1873 *
1874 * LOCKING:
1875 * Inherited from caller.
1876 *
1877 * RETURNS:
1878 * 0 if spd doesn't need to be changed, 1 if spd has been
1879 * changed. Negative errno if SCR registers are inaccessible.
1880 */
1881int sata_set_spd(struct ata_port *ap)
1882{
1883 u32 scontrol;
1884 int rc;
1885
1886 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1887 return rc;
1888
1889 if (!__sata_set_spd_needed(ap, &scontrol))
1890 return 0;
1891
1892 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1893 return rc;
1894
1895 return 1;
1896}
1897
1898/*
1899 * This mode timing computation functionality is ported over from
1900 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1901 */
1902/*
1903 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1904 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1905 * for PIO 5, which is a nonstandard extension and UDMA6, which
1906 * is currently supported only by Maxtor drives.
1907 */
1908
1909static const struct ata_timing ata_timing[] = {
1910
1911 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1912 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1913 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1914 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1915
1916 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1917 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1918 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1919
1920/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1921
1922 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1923 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1924 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1925
1926 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1927 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1928 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1929
1930/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1931 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1932 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1933
1934 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1935 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1936 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1937
1938/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1939
1940 { 0xFF }
1941};
1942
1943#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1944#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1945
1946static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1947{
1948 q->setup = EZ(t->setup * 1000, T);
1949 q->act8b = EZ(t->act8b * 1000, T);
1950 q->rec8b = EZ(t->rec8b * 1000, T);
1951 q->cyc8b = EZ(t->cyc8b * 1000, T);
1952 q->active = EZ(t->active * 1000, T);
1953 q->recover = EZ(t->recover * 1000, T);
1954 q->cycle = EZ(t->cycle * 1000, T);
1955 q->udma = EZ(t->udma * 1000, UT);
1956}
1957
1958void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1959 struct ata_timing *m, unsigned int what)
1960{
1961 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1962 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1963 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1964 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1965 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1966 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1967 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1968 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1969}
1970
1971static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1972{
1973 const struct ata_timing *t;
1974
1975 for (t = ata_timing; t->mode != speed; t++)
1976 if (t->mode == 0xFF)
1977 return NULL;
1978 return t;
1979}
1980
1981int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1982 struct ata_timing *t, int T, int UT)
1983{
1984 const struct ata_timing *s;
1985 struct ata_timing p;
1986
1987 /*
1988 * Find the mode.
1989 */
1990
1991 if (!(s = ata_timing_find_mode(speed)))
1992 return -EINVAL;
1993
1994 memcpy(t, s, sizeof(*s));
1995
1996 /*
1997 * If the drive is an EIDE drive, it can tell us it needs extended
1998 * PIO/MW_DMA cycle timing.
1999 */
2000
2001 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2002 memset(&p, 0, sizeof(p));
2003 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2004 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2005 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2006 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2007 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2008 }
2009 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2010 }
2011
2012 /*
2013 * Convert the timing to bus clock counts.
2014 */
2015
2016 ata_timing_quantize(t, t, T, UT);
2017
2018 /*
2019 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2020 * S.M.A.R.T * and some other commands. We have to ensure that the
2021 * DMA cycle timing is slower/equal than the fastest PIO timing.
2022 */
2023
2024 if (speed > XFER_PIO_4) {
2025 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2026 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2027 }
2028
2029 /*
2030 * Lengthen active & recovery time so that cycle time is correct.
2031 */
2032
2033 if (t->act8b + t->rec8b < t->cyc8b) {
2034 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2035 t->rec8b = t->cyc8b - t->act8b;
2036 }
2037
2038 if (t->active + t->recover < t->cycle) {
2039 t->active += (t->cycle - (t->active + t->recover)) / 2;
2040 t->recover = t->cycle - t->active;
2041 }
2042
2043 return 0;
2044}
2045
2046/**
2047 * ata_down_xfermask_limit - adjust dev xfer masks downward
2048 * @dev: Device to adjust xfer masks
2049 * @force_pio0: Force PIO0
2050 *
2051 * Adjust xfer masks of @dev downward. Note that this function
2052 * does not apply the change. Invoking ata_set_mode() afterwards
2053 * will apply the limit.
2054 *
2055 * LOCKING:
2056 * Inherited from caller.
2057 *
2058 * RETURNS:
2059 * 0 on success, negative errno on failure
2060 */
2061int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2062{
2063 unsigned long xfer_mask;
2064 int highbit;
2065
2066 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2067 dev->udma_mask);
2068
2069 if (!xfer_mask)
2070 goto fail;
2071 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2072 if (xfer_mask & ATA_MASK_UDMA)
2073 xfer_mask &= ~ATA_MASK_MWDMA;
2074
2075 highbit = fls(xfer_mask) - 1;
2076 xfer_mask &= ~(1 << highbit);
2077 if (force_pio0)
2078 xfer_mask &= 1 << ATA_SHIFT_PIO;
2079 if (!xfer_mask)
2080 goto fail;
2081
2082 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2083 &dev->udma_mask);
2084
2085 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2086 ata_mode_string(xfer_mask));
2087
2088 return 0;
2089
2090 fail:
2091 return -EINVAL;
2092}
2093
2094static int ata_dev_set_mode(struct ata_device *dev)
2095{
2096 unsigned int err_mask;
2097 int rc;
2098
2099 dev->flags &= ~ATA_DFLAG_PIO;
2100 if (dev->xfer_shift == ATA_SHIFT_PIO)
2101 dev->flags |= ATA_DFLAG_PIO;
2102
2103 err_mask = ata_dev_set_xfermode(dev);
2104 if (err_mask) {
2105 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2106 "(err_mask=0x%x)\n", err_mask);
2107 return -EIO;
2108 }
2109
2110 rc = ata_dev_revalidate(dev, 0);
2111 if (rc)
2112 return rc;
2113
2114 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2115 dev->xfer_shift, (int)dev->xfer_mode);
2116
2117 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2118 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2119 return 0;
2120}
2121
2122/**
2123 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2124 * @ap: port on which timings will be programmed
2125 * @r_failed_dev: out paramter for failed device
2126 *
2127 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2128 * ata_set_mode() fails, pointer to the failing device is
2129 * returned in @r_failed_dev.
2130 *
2131 * LOCKING:
2132 * PCI/etc. bus probe sem.
2133 *
2134 * RETURNS:
2135 * 0 on success, negative errno otherwise
2136 */
2137int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2138{
2139 struct ata_device *dev;
2140 int i, rc = 0, used_dma = 0, found = 0;
2141
2142 /* has private set_mode? */
2143 if (ap->ops->set_mode) {
2144 /* FIXME: make ->set_mode handle no device case and
2145 * return error code and failing device on failure.
2146 */
2147 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2148 if (ata_dev_ready(&ap->device[i])) {
2149 ap->ops->set_mode(ap);
2150 break;
2151 }
2152 }
2153 return 0;
2154 }
2155
2156 /* step 1: calculate xfer_mask */
2157 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2158 unsigned int pio_mask, dma_mask;
2159
2160 dev = &ap->device[i];
2161
2162 if (!ata_dev_enabled(dev))
2163 continue;
2164
2165 ata_dev_xfermask(dev);
2166
2167 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2168 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2169 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2170 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2171
2172 found = 1;
2173 if (dev->dma_mode)
2174 used_dma = 1;
2175 }
2176 if (!found)
2177 goto out;
2178
2179 /* step 2: always set host PIO timings */
2180 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2181 dev = &ap->device[i];
2182 if (!ata_dev_enabled(dev))
2183 continue;
2184
2185 if (!dev->pio_mode) {
2186 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2187 rc = -EINVAL;
2188 goto out;
2189 }
2190
2191 dev->xfer_mode = dev->pio_mode;
2192 dev->xfer_shift = ATA_SHIFT_PIO;
2193 if (ap->ops->set_piomode)
2194 ap->ops->set_piomode(ap, dev);
2195 }
2196
2197 /* step 3: set host DMA timings */
2198 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2199 dev = &ap->device[i];
2200
2201 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2202 continue;
2203
2204 dev->xfer_mode = dev->dma_mode;
2205 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2206 if (ap->ops->set_dmamode)
2207 ap->ops->set_dmamode(ap, dev);
2208 }
2209
2210 /* step 4: update devices' xfer mode */
2211 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2212 dev = &ap->device[i];
2213
2214 /* don't udpate suspended devices' xfer mode */
2215 if (!ata_dev_ready(dev))
2216 continue;
2217
2218 rc = ata_dev_set_mode(dev);
2219 if (rc)
2220 goto out;
2221 }
2222
2223 /* Record simplex status. If we selected DMA then the other
2224 * host channels are not permitted to do so.
2225 */
2226 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2227 ap->host_set->simplex_claimed = 1;
2228
2229 /* step5: chip specific finalisation */
2230 if (ap->ops->post_set_mode)
2231 ap->ops->post_set_mode(ap);
2232
2233 out:
2234 if (rc)
2235 *r_failed_dev = dev;
2236 return rc;
2237}
2238
2239/**
2240 * ata_tf_to_host - issue ATA taskfile to host controller
2241 * @ap: port to which command is being issued
2242 * @tf: ATA taskfile register set
2243 *
2244 * Issues ATA taskfile register set to ATA host controller,
2245 * with proper synchronization with interrupt handler and
2246 * other threads.
2247 *
2248 * LOCKING:
2249 * spin_lock_irqsave(host_set lock)
2250 */
2251
2252static inline void ata_tf_to_host(struct ata_port *ap,
2253 const struct ata_taskfile *tf)
2254{
2255 ap->ops->tf_load(ap, tf);
2256 ap->ops->exec_command(ap, tf);
2257}
2258
2259/**
2260 * ata_busy_sleep - sleep until BSY clears, or timeout
2261 * @ap: port containing status register to be polled
2262 * @tmout_pat: impatience timeout
2263 * @tmout: overall timeout
2264 *
2265 * Sleep until ATA Status register bit BSY clears,
2266 * or a timeout occurs.
2267 *
2268 * LOCKING: None.
2269 */
2270
2271unsigned int ata_busy_sleep (struct ata_port *ap,
2272 unsigned long tmout_pat, unsigned long tmout)
2273{
2274 unsigned long timer_start, timeout;
2275 u8 status;
2276
2277 status = ata_busy_wait(ap, ATA_BUSY, 300);
2278 timer_start = jiffies;
2279 timeout = timer_start + tmout_pat;
2280 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2281 msleep(50);
2282 status = ata_busy_wait(ap, ATA_BUSY, 3);
2283 }
2284
2285 if (status & ATA_BUSY)
2286 ata_port_printk(ap, KERN_WARNING,
2287 "port is slow to respond, please be patient\n");
2288
2289 timeout = timer_start + tmout;
2290 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2291 msleep(50);
2292 status = ata_chk_status(ap);
2293 }
2294
2295 if (status & ATA_BUSY) {
2296 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2297 "(%lu secs)\n", tmout / HZ);
2298 return 1;
2299 }
2300
2301 return 0;
2302}
2303
2304static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2305{
2306 struct ata_ioports *ioaddr = &ap->ioaddr;
2307 unsigned int dev0 = devmask & (1 << 0);
2308 unsigned int dev1 = devmask & (1 << 1);
2309 unsigned long timeout;
2310
2311 /* if device 0 was found in ata_devchk, wait for its
2312 * BSY bit to clear
2313 */
2314 if (dev0)
2315 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2316
2317 /* if device 1 was found in ata_devchk, wait for
2318 * register access, then wait for BSY to clear
2319 */
2320 timeout = jiffies + ATA_TMOUT_BOOT;
2321 while (dev1) {
2322 u8 nsect, lbal;
2323
2324 ap->ops->dev_select(ap, 1);
2325 if (ap->flags & ATA_FLAG_MMIO) {
2326 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2327 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2328 } else {
2329 nsect = inb(ioaddr->nsect_addr);
2330 lbal = inb(ioaddr->lbal_addr);
2331 }
2332 if ((nsect == 1) && (lbal == 1))
2333 break;
2334 if (time_after(jiffies, timeout)) {
2335 dev1 = 0;
2336 break;
2337 }
2338 msleep(50); /* give drive a breather */
2339 }
2340 if (dev1)
2341 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2342
2343 /* is all this really necessary? */
2344 ap->ops->dev_select(ap, 0);
2345 if (dev1)
2346 ap->ops->dev_select(ap, 1);
2347 if (dev0)
2348 ap->ops->dev_select(ap, 0);
2349}
2350
2351static unsigned int ata_bus_softreset(struct ata_port *ap,
2352 unsigned int devmask)
2353{
2354 struct ata_ioports *ioaddr = &ap->ioaddr;
2355
2356 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2357
2358 /* software reset. causes dev0 to be selected */
2359 if (ap->flags & ATA_FLAG_MMIO) {
2360 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2361 udelay(20); /* FIXME: flush */
2362 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2363 udelay(20); /* FIXME: flush */
2364 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2365 } else {
2366 outb(ap->ctl, ioaddr->ctl_addr);
2367 udelay(10);
2368 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2369 udelay(10);
2370 outb(ap->ctl, ioaddr->ctl_addr);
2371 }
2372
2373 /* spec mandates ">= 2ms" before checking status.
2374 * We wait 150ms, because that was the magic delay used for
2375 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2376 * between when the ATA command register is written, and then
2377 * status is checked. Because waiting for "a while" before
2378 * checking status is fine, post SRST, we perform this magic
2379 * delay here as well.
2380 *
2381 * Old drivers/ide uses the 2mS rule and then waits for ready
2382 */
2383 msleep(150);
2384
2385 /* Before we perform post reset processing we want to see if
2386 * the bus shows 0xFF because the odd clown forgets the D7
2387 * pulldown resistor.
2388 */
2389 if (ata_check_status(ap) == 0xFF) {
2390 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2391 return AC_ERR_OTHER;
2392 }
2393
2394 ata_bus_post_reset(ap, devmask);
2395
2396 return 0;
2397}
2398
2399/**
2400 * ata_bus_reset - reset host port and associated ATA channel
2401 * @ap: port to reset
2402 *
2403 * This is typically the first time we actually start issuing
2404 * commands to the ATA channel. We wait for BSY to clear, then
2405 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2406 * result. Determine what devices, if any, are on the channel
2407 * by looking at the device 0/1 error register. Look at the signature
2408 * stored in each device's taskfile registers, to determine if
2409 * the device is ATA or ATAPI.
2410 *
2411 * LOCKING:
2412 * PCI/etc. bus probe sem.
2413 * Obtains host_set lock.
2414 *
2415 * SIDE EFFECTS:
2416 * Sets ATA_FLAG_DISABLED if bus reset fails.
2417 */
2418
2419void ata_bus_reset(struct ata_port *ap)
2420{
2421 struct ata_ioports *ioaddr = &ap->ioaddr;
2422 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2423 u8 err;
2424 unsigned int dev0, dev1 = 0, devmask = 0;
2425
2426 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2427
2428 /* determine if device 0/1 are present */
2429 if (ap->flags & ATA_FLAG_SATA_RESET)
2430 dev0 = 1;
2431 else {
2432 dev0 = ata_devchk(ap, 0);
2433 if (slave_possible)
2434 dev1 = ata_devchk(ap, 1);
2435 }
2436
2437 if (dev0)
2438 devmask |= (1 << 0);
2439 if (dev1)
2440 devmask |= (1 << 1);
2441
2442 /* select device 0 again */
2443 ap->ops->dev_select(ap, 0);
2444
2445 /* issue bus reset */
2446 if (ap->flags & ATA_FLAG_SRST)
2447 if (ata_bus_softreset(ap, devmask))
2448 goto err_out;
2449
2450 /*
2451 * determine by signature whether we have ATA or ATAPI devices
2452 */
2453 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2454 if ((slave_possible) && (err != 0x81))
2455 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2456
2457 /* re-enable interrupts */
2458 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2459 ata_irq_on(ap);
2460
2461 /* is double-select really necessary? */
2462 if (ap->device[1].class != ATA_DEV_NONE)
2463 ap->ops->dev_select(ap, 1);
2464 if (ap->device[0].class != ATA_DEV_NONE)
2465 ap->ops->dev_select(ap, 0);
2466
2467 /* if no devices were detected, disable this port */
2468 if ((ap->device[0].class == ATA_DEV_NONE) &&
2469 (ap->device[1].class == ATA_DEV_NONE))
2470 goto err_out;
2471
2472 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2473 /* set up device control for ATA_FLAG_SATA_RESET */
2474 if (ap->flags & ATA_FLAG_MMIO)
2475 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2476 else
2477 outb(ap->ctl, ioaddr->ctl_addr);
2478 }
2479
2480 DPRINTK("EXIT\n");
2481 return;
2482
2483err_out:
2484 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2485 ap->ops->port_disable(ap);
2486
2487 DPRINTK("EXIT\n");
2488}
2489
2490/**
2491 * sata_phy_debounce - debounce SATA phy status
2492 * @ap: ATA port to debounce SATA phy status for
2493 * @params: timing parameters { interval, duratinon, timeout } in msec
2494 *
2495 * Make sure SStatus of @ap reaches stable state, determined by
2496 * holding the same value where DET is not 1 for @duration polled
2497 * every @interval, before @timeout. Timeout constraints the
2498 * beginning of the stable state. Because, after hot unplugging,
2499 * DET gets stuck at 1 on some controllers, this functions waits
2500 * until timeout then returns 0 if DET is stable at 1.
2501 *
2502 * LOCKING:
2503 * Kernel thread context (may sleep)
2504 *
2505 * RETURNS:
2506 * 0 on success, -errno on failure.
2507 */
2508int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2509{
2510 unsigned long interval_msec = params[0];
2511 unsigned long duration = params[1] * HZ / 1000;
2512 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2513 unsigned long last_jiffies;
2514 u32 last, cur;
2515 int rc;
2516
2517 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2518 return rc;
2519 cur &= 0xf;
2520
2521 last = cur;
2522 last_jiffies = jiffies;
2523
2524 while (1) {
2525 msleep(interval_msec);
2526 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2527 return rc;
2528 cur &= 0xf;
2529
2530 /* DET stable? */
2531 if (cur == last) {
2532 if (cur == 1 && time_before(jiffies, timeout))
2533 continue;
2534 if (time_after(jiffies, last_jiffies + duration))
2535 return 0;
2536 continue;
2537 }
2538
2539 /* unstable, start over */
2540 last = cur;
2541 last_jiffies = jiffies;
2542
2543 /* check timeout */
2544 if (time_after(jiffies, timeout))
2545 return -EBUSY;
2546 }
2547}
2548
2549/**
2550 * sata_phy_resume - resume SATA phy
2551 * @ap: ATA port to resume SATA phy for
2552 * @params: timing parameters { interval, duratinon, timeout } in msec
2553 *
2554 * Resume SATA phy of @ap and debounce it.
2555 *
2556 * LOCKING:
2557 * Kernel thread context (may sleep)
2558 *
2559 * RETURNS:
2560 * 0 on success, -errno on failure.
2561 */
2562int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2563{
2564 u32 scontrol;
2565 int rc;
2566
2567 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2568 return rc;
2569
2570 scontrol = (scontrol & 0x0f0) | 0x300;
2571
2572 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2573 return rc;
2574
2575 /* Some PHYs react badly if SStatus is pounded immediately
2576 * after resuming. Delay 200ms before debouncing.
2577 */
2578 msleep(200);
2579
2580 return sata_phy_debounce(ap, params);
2581}
2582
2583static void ata_wait_spinup(struct ata_port *ap)
2584{
2585 struct ata_eh_context *ehc = &ap->eh_context;
2586 unsigned long end, secs;
2587 int rc;
2588
2589 /* first, debounce phy if SATA */
2590 if (ap->cbl == ATA_CBL_SATA) {
2591 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2592
2593 /* if debounced successfully and offline, no need to wait */
2594 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2595 return;
2596 }
2597
2598 /* okay, let's give the drive time to spin up */
2599 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2600 secs = ((end - jiffies) + HZ - 1) / HZ;
2601
2602 if (time_after(jiffies, end))
2603 return;
2604
2605 if (secs > 5)
2606 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2607 "(%lu secs)\n", secs);
2608
2609 schedule_timeout_uninterruptible(end - jiffies);
2610}
2611
2612/**
2613 * ata_std_prereset - prepare for reset
2614 * @ap: ATA port to be reset
2615 *
2616 * @ap is about to be reset. Initialize it.
2617 *
2618 * LOCKING:
2619 * Kernel thread context (may sleep)
2620 *
2621 * RETURNS:
2622 * 0 on success, -errno otherwise.
2623 */
2624int ata_std_prereset(struct ata_port *ap)
2625{
2626 struct ata_eh_context *ehc = &ap->eh_context;
2627 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2628 int rc;
2629
2630 /* handle link resume & hotplug spinup */
2631 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2632 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2633 ehc->i.action |= ATA_EH_HARDRESET;
2634
2635 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2636 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2637 ata_wait_spinup(ap);
2638
2639 /* if we're about to do hardreset, nothing more to do */
2640 if (ehc->i.action & ATA_EH_HARDRESET)
2641 return 0;
2642
2643 /* if SATA, resume phy */
2644 if (ap->cbl == ATA_CBL_SATA) {
2645 rc = sata_phy_resume(ap, timing);
2646 if (rc && rc != -EOPNOTSUPP) {
2647 /* phy resume failed */
2648 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2649 "link for reset (errno=%d)\n", rc);
2650 return rc;
2651 }
2652 }
2653
2654 /* Wait for !BSY if the controller can wait for the first D2H
2655 * Reg FIS and we don't know that no device is attached.
2656 */
2657 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2658 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2659
2660 return 0;
2661}
2662
2663/**
2664 * ata_std_softreset - reset host port via ATA SRST
2665 * @ap: port to reset
2666 * @classes: resulting classes of attached devices
2667 *
2668 * Reset host port using ATA SRST.
2669 *
2670 * LOCKING:
2671 * Kernel thread context (may sleep)
2672 *
2673 * RETURNS:
2674 * 0 on success, -errno otherwise.
2675 */
2676int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2677{
2678 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2679 unsigned int devmask = 0, err_mask;
2680 u8 err;
2681
2682 DPRINTK("ENTER\n");
2683
2684 if (ata_port_offline(ap)) {
2685 classes[0] = ATA_DEV_NONE;
2686 goto out;
2687 }
2688
2689 /* determine if device 0/1 are present */
2690 if (ata_devchk(ap, 0))
2691 devmask |= (1 << 0);
2692 if (slave_possible && ata_devchk(ap, 1))
2693 devmask |= (1 << 1);
2694
2695 /* select device 0 again */
2696 ap->ops->dev_select(ap, 0);
2697
2698 /* issue bus reset */
2699 DPRINTK("about to softreset, devmask=%x\n", devmask);
2700 err_mask = ata_bus_softreset(ap, devmask);
2701 if (err_mask) {
2702 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2703 err_mask);
2704 return -EIO;
2705 }
2706
2707 /* determine by signature whether we have ATA or ATAPI devices */
2708 classes[0] = ata_dev_try_classify(ap, 0, &err);
2709 if (slave_possible && err != 0x81)
2710 classes[1] = ata_dev_try_classify(ap, 1, &err);
2711
2712 out:
2713 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2714 return 0;
2715}
2716
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			return rc;

		/* keep the SPD field (bits 7:4), rewrite DET/IPM to
		 * take the phy down for the speed change */
		scontrol = (scontrol & 0x0f0) | 0x302;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			return rc;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	/* DET = 1 requests COMRESET; SPD field is preserved */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	sata_phy_resume(ap, timing);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait for the device to come out of BSY after COMRESET */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
2793
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError: writing back the value just read clears the
	 * accumulated error bits */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts (only for ports using the legacy,
	 * non-EH completion path) */
	if (!ap->ops->error_handler) {
		/* FIXME: hack. create a hook instead */
		if (ap->ioaddr.ctl_addr)
			ata_irq_on(ap);
	}

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control: restore the cached control register
	 * value via MMIO or port I/O depending on the port type */
	if (ap->ioaddr.ctl_addr) {
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		else
			outb(ap->ctl, ap->ioaddr.ctl_addr);
	}

	DPRINTK("EXIT\n");
}
2848
2849/**
2850 * ata_dev_same_device - Determine whether new ID matches configured device
2851 * @dev: device to compare against
2852 * @new_class: class of the new device
2853 * @new_id: IDENTIFY page of the new device
2854 *
2855 * Compare @new_class and @new_id against @dev and determine
2856 * whether @dev is the device indicated by @new_class and
2857 * @new_id.
2858 *
2859 * LOCKING:
2860 * None.
2861 *
2862 * RETURNS:
2863 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2864 */
2865static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2866 const u16 *new_id)
2867{
2868 const u16 *old_id = dev->id;
2869 unsigned char model[2][41], serial[2][21];
2870 u64 new_n_sectors;
2871
2872 if (dev->class != new_class) {
2873 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2874 dev->class, new_class);
2875 return 0;
2876 }
2877
2878 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2879 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2880 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2881 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2882 new_n_sectors = ata_id_n_sectors(new_id);
2883
2884 if (strcmp(model[0], model[1])) {
2885 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2886 "'%s' != '%s'\n", model[0], model[1]);
2887 return 0;
2888 }
2889
2890 if (strcmp(serial[0], serial[1])) {
2891 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2892 "'%s' != '%s'\n", serial[0], serial[1]);
2893 return 0;
2894 }
2895
2896 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2897 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2898 "%llu != %llu\n",
2899 (unsigned long long)dev->n_sectors,
2900 (unsigned long long)new_n_sectors);
2901 return 0;
2902 }
2903
2904 return 1;
2905}
2906
2907/**
2908 * ata_dev_revalidate - Revalidate ATA device
2909 * @dev: device to revalidate
2910 * @post_reset: is this revalidation after reset?
2911 *
2912 * Re-read IDENTIFY page and make sure @dev is still attached to
2913 * the port.
2914 *
2915 * LOCKING:
2916 * Kernel thread context (may sleep)
2917 *
2918 * RETURNS:
2919 * 0 on success, negative errno otherwise
2920 */
2921int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2922{
2923 unsigned int class = dev->class;
2924 u16 *id = (void *)dev->ap->sector_buf;
2925 int rc;
2926
2927 if (!ata_dev_enabled(dev)) {
2928 rc = -ENODEV;
2929 goto fail;
2930 }
2931
2932 /* read ID data */
2933 rc = ata_dev_read_id(dev, &class, post_reset, id);
2934 if (rc)
2935 goto fail;
2936
2937 /* is the device still there? */
2938 if (!ata_dev_same_device(dev, class, id)) {
2939 rc = -ENODEV;
2940 goto fail;
2941 }
2942
2943 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2944
2945 /* configure device according to the new ID */
2946 rc = ata_dev_configure(dev, 0);
2947 if (rc == 0)
2948 return 0;
2949
2950 fail:
2951 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2952 return rc;
2953}
2954
/* Devices with broken DMA.  Entries are {model, firmware-revision}
 * pairs: a NULL revision blacklists every revision of that model,
 * while a non-NULL revision blacklists only that firmware.  Walked
 * two entries at a time by ata_dma_blacklisted().
 * NOTE(review): "SanDisk SDP3B-64" appears twice -- harmless, but
 * one entry could be dropped.
 */
static const char * const ata_dma_blacklist [] = {
	"WDC AC11000H", NULL,
	"WDC AC22100H", NULL,
	"WDC AC32500H", NULL,
	"WDC AC33100H", NULL,
	"WDC AC31600H", NULL,
	"WDC AC32100H", "24.09P07",
	"WDC AC23200L", "21.10N21",
	"Compaq CRD-8241B",  NULL,
	"CRD-8400B", NULL,
	"CRD-8480B", NULL,
	"CRD-8482B", NULL,
	"CRD-84", NULL,
	"SanDisk SDP3B", NULL,
	"SanDisk SDP3B-64", NULL,
	"SANYO CD-ROM CRD", NULL,
	"HITACHI CDR-8", NULL,
	"HITACHI CDR-8335", NULL,
	"HITACHI CDR-8435", NULL,
	"Toshiba CD-ROM XM-6202B", NULL,
	"TOSHIBA CD-ROM XM-1702BC", NULL,
	"CD-532E-A", NULL,
	"E-IDE CD-ROM CR-840", NULL,
	"CD-ROM Drive/F5A", NULL,
	"WPI CDD-820", NULL,
	"SAMSUNG CD-ROM SC-148C", NULL,
	"SAMSUNG CD-ROM SC", NULL,
	"SanDisk SDP3B-64", NULL,
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
	"_NEC DV5800A", NULL,
	"SAMSUNG CD-ROM SN-124", "N001"
};
2987
/* Trim trailing blanks from an ATA ID string in place and return the
 * resulting length.  ATAPI specifies that empty space is blank-filled,
 * so the padding carries no information. */
static int ata_strim(char *s, size_t len)
{
	size_t n = strnlen(s, len);

	while (n > 0 && s[n - 1] == ' ')
		s[--n] = '\0';

	return n;
}
2999
3000static int ata_dma_blacklisted(const struct ata_device *dev)
3001{
3002 unsigned char model_num[40];
3003 unsigned char model_rev[16];
3004 unsigned int nlen, rlen;
3005 int i;
3006
3007 /* We don't support polling DMA.
3008 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3009 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3010 */
3011 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3012 (dev->flags & ATA_DFLAG_CDB_INTR))
3013 return 1;
3014
3015 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3016 sizeof(model_num));
3017 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3018 sizeof(model_rev));
3019 nlen = ata_strim(model_num, sizeof(model_num));
3020 rlen = ata_strim(model_rev, sizeof(model_rev));
3021
3022 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3023 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3024 if (ata_dma_blacklist[i+1] == NULL)
3025 return 1;
3026 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
3027 return 1;
3028 }
3029 }
3030 return 0;
3031}
3032
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host_set *hs = ap->host_set;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	/* 40-wire cable: 0xF8 clears UDMA mode bits 3 and above,
	 * leaving at most UDMA2 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	/* intersect with the modes the device itself reports */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/* drop all DMA modes for blacklisted devices */
	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on simplex hosts only one device may use DMA at a time */
	if ((hs->flags & ATA_HOST_SIMPLEX) && hs->simplex_claimed) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	/* let the LLDD apply controller-specific mode filtering */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	/* store the final masks back into the device */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3083
3084/**
3085 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3086 * @dev: Device to which command will be sent
3087 *
3088 * Issue SET FEATURES - XFER MODE command to device @dev
3089 * on port @ap.
3090 *
3091 * LOCKING:
3092 * PCI/etc. bus probe sem.
3093 *
3094 * RETURNS:
3095 * 0 on success, AC_ERR_* mask otherwise.
3096 */
3097
3098static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3099{
3100 struct ata_taskfile tf;
3101 unsigned int err_mask;
3102
3103 /* set up set-features taskfile */
3104 DPRINTK("set features - xfer mode\n");
3105
3106 ata_tf_init(dev, &tf);
3107 tf.command = ATA_CMD_SET_FEATURES;
3108 tf.feature = SETFEATURES_XFER;
3109 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3110 tf.protocol = ATA_PROT_NODATA;
3111 tf.nsect = dev->xfer_mode;
3112
3113 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3114
3115 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3116 return err_mask;
3117}
3118
3119/**
3120 * ata_dev_init_params - Issue INIT DEV PARAMS command
3121 * @dev: Device to which command will be sent
3122 * @heads: Number of heads (taskfile parameter)
3123 * @sectors: Number of sectors (taskfile parameter)
3124 *
3125 * LOCKING:
3126 * Kernel thread context (may sleep)
3127 *
3128 * RETURNS:
3129 * 0 on success, AC_ERR_* mask otherwise.
3130 */
3131static unsigned int ata_dev_init_params(struct ata_device *dev,
3132 u16 heads, u16 sectors)
3133{
3134 struct ata_taskfile tf;
3135 unsigned int err_mask;
3136
3137 /* Number of sectors per track 1-255. Number of heads 1-16 */
3138 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3139 return AC_ERR_INVALID;
3140
3141 /* set up init dev params taskfile */
3142 DPRINTK("init dev params \n");
3143
3144 ata_tf_init(dev, &tf);
3145 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3146 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3147 tf.protocol = ATA_PROT_NODATA;
3148 tf.nsect = sectors;
3149 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3150
3151 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3152
3153 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3154 return err_mask;
3155}
3156
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command,
 *	undo the pad-buffer length trim applied at setup time and,
 *	for reads, copy the padded tail bytes back to the caller's
 *	buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* scatter-gather case */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg: undo the setup-time length trim */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			/* copy read tail bytes back into the last sg
			 * page; kmap_atomic because it may be highmem */
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		/* single-buffer case */
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
3215
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split the segment so no PRD entry crosses a 64K
		 * boundary; a full 0x10000-byte piece is encoded as a
		 * zero length field (len & 0xffff), which the BMDMA
		 * PRD format interprets as 64K */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
3267/**
3268 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3269 * @qc: Metadata associated with taskfile to check
3270 *
3271 * Allow low-level driver to filter ATA PACKET commands, returning
3272 * a status indicating whether or not it is OK to use DMA for the
3273 * supplied PACKET command.
3274 *
3275 * LOCKING:
3276 * spin_lock_irqsave(host_set lock)
3277 *
3278 * RETURNS: 0 when ATAPI DMA can be used
3279 * nonzero otherwise
3280 */
3281int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3282{
3283 struct ata_port *ap = qc->ap;
3284 int rc = 0; /* Assume ATAPI DMA is OK by default */
3285
3286 if (ap->ops->check_atapi_dma)
3287 rc = ap->ops->check_atapi_dma(qc);
3288
3289 return rc;
3290}
3291/**
3292 * ata_qc_prep - Prepare taskfile for submission
3293 * @qc: Metadata associated with taskfile to be prepared
3294 *
3295 * Prepare ATA taskfile for submission.
3296 *
3297 * LOCKING:
3298 * spin_lock_irqsave(host_set lock)
3299 */
3300void ata_qc_prep(struct ata_queued_cmd *qc)
3301{
3302 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3303 return;
3304
3305 ata_fill_sg(qc);
3306}
3307
/* no-op ->qc_prep for controllers that need no per-command setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3309
3310/**
3311 * ata_sg_init_one - Associate command with memory buffer
3312 * @qc: Command to be associated
3313 * @buf: Memory buffer
3314 * @buflen: Length of memory buffer, in bytes.
3315 *
3316 * Initialize the data-related elements of queued_cmd @qc
3317 * to point to a single memory buffer, @buf of byte length @buflen.
3318 *
3319 * LOCKING:
3320 * spin_lock_irqsave(host_set lock)
3321 */
3322
3323void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3324{
3325 struct scatterlist *sg;
3326
3327 qc->flags |= ATA_QCFLAG_SINGLE;
3328
3329 memset(&qc->sgent, 0, sizeof(qc->sgent));
3330 qc->__sg = &qc->sgent;
3331 qc->n_elem = 1;
3332 qc->orig_n_elem = 1;
3333 qc->buf_virt = buf;
3334 qc->nbytes = buflen;
3335
3336 sg = qc->__sg;
3337 sg_init_one(sg, buf, buflen);
3338}
3339
3340/**
3341 * ata_sg_init - Associate command with scatter-gather table.
3342 * @qc: Command to be associated
3343 * @sg: Scatter-gather table.
3344 * @n_elem: Number of elements in s/g table.
3345 *
3346 * Initialize the data-related elements of queued_cmd @qc
3347 * to point to a scatter-gather table @sg, containing @n_elem
3348 * elements.
3349 *
3350 * LOCKING:
3351 * spin_lock_irqsave(host_set lock)
3352 */
3353
3354void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3355 unsigned int n_elem)
3356{
3357 qc->flags |= ATA_QCFLAG_SG;
3358 qc->__sg = sg;
3359 qc->n_elem = n_elem;
3360 qc->orig_n_elem = n_elem;
3361}
3362
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc,
 *	padding the transfer length to a 32-bit boundary via the
 *	port's per-tag pad buffer when needed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's DMA-coherent pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* padding is only expected for ATAPI transfers */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, stage the trailing bytes in the pad
		 * buffer; for reads they are copied back to the caller
		 * in ata_sg_clean() */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* whole transfer fit into the pad buffer; nothing to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3431
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc,
 *	padding the last segment to a 32-bit boundary via the port's
 *	per-tag pad buffer when needed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's DMA-coherent pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* padding is only expected for ATAPI transfers */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for writes, stage the tail bytes in the pad buffer;
		 * kmap_atomic because the sg page may be highmem */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last sg from the mapping if it was fully consumed
	 * by the pad buffer */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
3516
3517/**
3518 * swap_buf_le16 - swap halves of 16-bit words in place
3519 * @buf: Buffer to swap
3520 * @buf_words: Number of 16-bit words in buffer.
3521 *
3522 * Swap halves of 16-bit words if needed to convert from
3523 * little-endian byte order to native cpu byte order, or
3524 * vice-versa.
3525 *
3526 * LOCKING:
3527 * Inherited from caller.
3528 */
3529void swap_buf_le16(u16 *buf, unsigned int buf_words)
3530{
3531#ifdef __BIG_ENDIAN
3532 unsigned int i;
3533
3534 for (i = 0; i < buf_words; i++)
3535 buf[i] = le16_to_cpu(buf[i]);
3536#endif /* __BIG_ENDIAN */
3537}
3538
/**
 *	ata_mmio_data_xfer - Transfer data by MMIO
 *	@adev: device for this I/O
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
			unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	unsigned int i;
	unsigned int words = buflen >> 1;	/* whole 16-bit words */
	u16 *buf16 = (u16 *) buf;
	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

	/* Transfer multiple of 2 bytes */
	if (write_data) {
		for (i = 0; i < words; i++)
			writew(le16_to_cpu(buf16[i]), mmio);
	} else {
		for (i = 0; i < words; i++)
			buf16[i] = cpu_to_le16(readw(mmio));
	}

	/* Transfer trailing 1 byte, if any.  The data register always
	 * moves full 16-bit words, so bounce the odd byte through a
	 * zeroed, aligned temporary. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			writew(le16_to_cpu(align_buf[0]), mmio);
		} else {
			align_buf[0] = cpu_to_le16(readw(mmio));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3584
/**
 *	ata_pio_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO
 *	(port I/O string instructions).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
		       unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	unsigned int words = buflen >> 1;	/* whole 16-bit words */

	/* Transfer multiple of 2 bytes */
	if (write_data)
		outsw(ap->ioaddr.data_addr, buf, words);
	else
		insw(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any.  The data register always
	 * moves full 16-bit words, so bounce the odd byte through a
	 * zeroed, aligned temporary. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3624
/**
 *	ata_pio_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			     unsigned int buflen, int write_data)
{
	unsigned long flags;
	/* mask local interrupts for the duration of the PIO transfer */
	local_irq_save(flags);
	ata_pio_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
3647
3648
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device and
 *	advance the command's sector/sg cursors.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> next HSM state is LAST */
	if (qc->cursect == (qc->nsect - 1))
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	/* advance cursors; step to the next sg entry once the current
	 * one has been fully consumed */
	qc->cursect++;
	qc->cursg_ofs++;

	if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
3705
3706/**
3707 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3708 * @qc: Command on going
3709 *
3710 * Transfer one or many ATA_SECT_SIZE of data from/to the
3711 * ATA device for the DRQ request.
3712 *
3713 * LOCKING:
3714 * Inherited from caller.
3715 */
3716
3717static void ata_pio_sectors(struct ata_queued_cmd *qc)
3718{
3719 if (is_multi_taskfile(&qc->tf)) {
3720 /* READ/WRITE MULTIPLE */
3721 unsigned int nsect;
3722
3723 WARN_ON(qc->dev->multi_count == 0);
3724
3725 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3726 while (nsect--)
3727 ata_pio_sector(qc);
3728 } else
3729 ata_pio_sector(qc);
3730}
3731
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB, then set
 *	the next HSM state according to the ATAPI protocol variant.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; wait for completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
3767
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the command's
 *	sg list and draining/padding if the device requests more data
 *	than the sg list provides.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk completes the expected transfer length */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain (read) or pad (write) one word at a time */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* advance cursors; step to the next sg entry when exhausted */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
3862
3863/**
3864 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3865 * @qc: Command on going
3866 *
3867 * Transfer Transfer data from/to the ATAPI device.
3868 *
3869 * LOCKING:
3870 * Inherited from caller.
3871 */
3872
3873static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3874{
3875 struct ata_port *ap = qc->ap;
3876 struct ata_device *dev = qc->dev;
3877 unsigned int ireason, bc_lo, bc_hi, bytes;
3878 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3879
3880 /* Abuse qc->result_tf for temp storage of intermediate TF
3881 * here to save some kernel stack usage.
3882 * For normal completion, qc->result_tf is not relevant. For
3883 * error, qc->result_tf is later overwritten by ata_qc_complete().
3884 * So, the correctness of qc->result_tf is not affected.
3885 */
3886 ap->ops->tf_read(ap, &qc->result_tf);
3887 ireason = qc->result_tf.nsect;
3888 bc_lo = qc->result_tf.lbam;
3889 bc_hi = qc->result_tf.lbah;
3890 bytes = (bc_hi << 8) | bc_lo;
3891
3892 /* shall be cleared to zero, indicating xfer of data */
3893 if (ireason & (1 << 0))
3894 goto err_out;
3895
3896 /* make sure transfer direction matches expected */
3897 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3898 if (do_write != i_write)
3899 goto err_out;
3900
3901 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3902
3903 __atapi_pio_bytes(qc, bytes);
3904
3905 return;
3906
3907err_out:
3908 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3909 qc->err_mask |= AC_ERR_HSM;
3910 ap->hsm_task_state = HSM_ST_ERR;
3911}
3912
3913/**
3914 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3915 * @ap: the target ata_port
3916 * @qc: qc on going
3917 *
3918 * RETURNS:
3919 * 1 if ok in workqueue, 0 otherwise.
3920 */
3921
3922static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3923{
3924 if (qc->tf.flags & ATA_TFLAG_POLLING)
3925 return 1;
3926
3927 if (ap->hsm_task_state == HSM_ST_FIRST) {
3928 if (qc->tf.protocol == ATA_PROT_PIO &&
3929 (qc->tf.flags & ATA_TFLAG_WRITE))
3930 return 1;
3931
3932 if (is_atapi_taskfile(&qc->tf) &&
3933 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3934 return 1;
3935 }
3936
3937 return 0;
3938}
3939
3940/**
3941 * ata_hsm_qc_complete - finish a qc running on standard HSM
3942 * @qc: Command to complete
3943 * @in_wq: 1 if called from workqueue, 0 otherwise
3944 *
3945 * Finish @qc which is running on standard HSM.
3946 *
3947 * LOCKING:
3948 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3949 * Otherwise, none on entry and grabs host lock.
3950 */
3951static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3952{
3953 struct ata_port *ap = qc->ap;
3954 unsigned long flags;
3955
3956 if (ap->ops->error_handler) {
3957 if (in_wq) {
3958 spin_lock_irqsave(ap->lock, flags);
3959
3960 /* EH might have kicked in while host_set lock
3961 * is released.
3962 */
3963 qc = ata_qc_from_tag(ap, qc->tag);
3964 if (qc) {
3965 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3966 ata_irq_on(ap);
3967 ata_qc_complete(qc);
3968 } else
3969 ata_port_freeze(ap);
3970 }
3971
3972 spin_unlock_irqrestore(ap->lock, flags);
3973 } else {
3974 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3975 ata_qc_complete(qc);
3976 else
3977 ata_port_freeze(ap);
3978 }
3979 } else {
3980 if (in_wq) {
3981 spin_lock_irqsave(ap->lock, flags);
3982 ata_irq_on(ap);
3983 ata_qc_complete(qc);
3984 spin_unlock_irqrestore(ap->lock, flags);
3985 } else
3986 ata_qc_complete(qc);
3987 }
3988
3989 ata_altstatus(ap); /* flush */
3990}
3991
3992/**
3993 * ata_hsm_move - move the HSM to the next state.
3994 * @ap: the target ata_port
3995 * @qc: qc on going
3996 * @status: current device status
3997 * @in_wq: 1 if called from workqueue, 0 otherwise
3998 *
3999 * RETURNS:
4000 * 1 when poll next status needed, 0 otherwise.
4001 */
4002int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4003 u8 status, int in_wq)
4004{
4005 unsigned long flags = 0;
4006 int poll_next;
4007
4008 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4009
4010 /* Make sure ata_qc_issue_prot() does not throw things
4011 * like DMA polling into the workqueue. Notice that
4012 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4013 */
4014 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4015
4016fsm_start:
4017 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4018 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4019
4020 switch (ap->hsm_task_state) {
4021 case HSM_ST_FIRST:
4022 /* Send first data block or PACKET CDB */
4023
4024 /* If polling, we will stay in the work queue after
4025 * sending the data. Otherwise, interrupt handler
4026 * takes over after sending the data.
4027 */
4028 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4029
4030 /* check device status */
4031 if (unlikely((status & ATA_DRQ) == 0)) {
4032 /* handle BSY=0, DRQ=0 as error */
4033 if (likely(status & (ATA_ERR | ATA_DF)))
4034 /* device stops HSM for abort/error */
4035 qc->err_mask |= AC_ERR_DEV;
4036 else
4037 /* HSM violation. Let EH handle this */
4038 qc->err_mask |= AC_ERR_HSM;
4039
4040 ap->hsm_task_state = HSM_ST_ERR;
4041 goto fsm_start;
4042 }
4043
4044 /* Device should not ask for data transfer (DRQ=1)
4045 * when it finds something wrong.
4046 * We ignore DRQ here and stop the HSM by
4047 * changing hsm_task_state to HSM_ST_ERR and
4048 * let the EH abort the command or reset the device.
4049 */
4050 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4051 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4052 ap->id, status);
4053 qc->err_mask |= AC_ERR_HSM;
4054 ap->hsm_task_state = HSM_ST_ERR;
4055 goto fsm_start;
4056 }
4057
4058 /* Send the CDB (atapi) or the first data block (ata pio out).
4059 * During the state transition, interrupt handler shouldn't
4060 * be invoked before the data transfer is complete and
4061 * hsm_task_state is changed. Hence, the following locking.
4062 */
4063 if (in_wq)
4064 spin_lock_irqsave(ap->lock, flags);
4065
4066 if (qc->tf.protocol == ATA_PROT_PIO) {
4067 /* PIO data out protocol.
4068 * send first data block.
4069 */
4070
4071 /* ata_pio_sectors() might change the state
4072 * to HSM_ST_LAST. so, the state is changed here
4073 * before ata_pio_sectors().
4074 */
4075 ap->hsm_task_state = HSM_ST;
4076 ata_pio_sectors(qc);
4077 ata_altstatus(ap); /* flush */
4078 } else
4079 /* send CDB */
4080 atapi_send_cdb(ap, qc);
4081
4082 if (in_wq)
4083 spin_unlock_irqrestore(ap->lock, flags);
4084
4085 /* if polling, ata_pio_task() handles the rest.
4086 * otherwise, interrupt handler takes over from here.
4087 */
4088 break;
4089
4090 case HSM_ST:
4091 /* complete command or read/write the data register */
4092 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4093 /* ATAPI PIO protocol */
4094 if ((status & ATA_DRQ) == 0) {
4095 /* No more data to transfer or device error.
4096 * Device error will be tagged in HSM_ST_LAST.
4097 */
4098 ap->hsm_task_state = HSM_ST_LAST;
4099 goto fsm_start;
4100 }
4101
4102 /* Device should not ask for data transfer (DRQ=1)
4103 * when it finds something wrong.
4104 * We ignore DRQ here and stop the HSM by
4105 * changing hsm_task_state to HSM_ST_ERR and
4106 * let the EH abort the command or reset the device.
4107 */
4108 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4109 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4110 ap->id, status);
4111 qc->err_mask |= AC_ERR_HSM;
4112 ap->hsm_task_state = HSM_ST_ERR;
4113 goto fsm_start;
4114 }
4115
4116 atapi_pio_bytes(qc);
4117
4118 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4119 /* bad ireason reported by device */
4120 goto fsm_start;
4121
4122 } else {
4123 /* ATA PIO protocol */
4124 if (unlikely((status & ATA_DRQ) == 0)) {
4125 /* handle BSY=0, DRQ=0 as error */
4126 if (likely(status & (ATA_ERR | ATA_DF)))
4127 /* device stops HSM for abort/error */
4128 qc->err_mask |= AC_ERR_DEV;
4129 else
4130 /* HSM violation. Let EH handle this */
4131 qc->err_mask |= AC_ERR_HSM;
4132
4133 ap->hsm_task_state = HSM_ST_ERR;
4134 goto fsm_start;
4135 }
4136
4137 /* For PIO reads, some devices may ask for
4138 * data transfer (DRQ=1) alone with ERR=1.
4139 * We respect DRQ here and transfer one
4140 * block of junk data before changing the
4141 * hsm_task_state to HSM_ST_ERR.
4142 *
4143 * For PIO writes, ERR=1 DRQ=1 doesn't make
4144 * sense since the data block has been
4145 * transferred to the device.
4146 */
4147 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4148 /* data might be corrputed */
4149 qc->err_mask |= AC_ERR_DEV;
4150
4151 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4152 ata_pio_sectors(qc);
4153 ata_altstatus(ap);
4154 status = ata_wait_idle(ap);
4155 }
4156
4157 if (status & (ATA_BUSY | ATA_DRQ))
4158 qc->err_mask |= AC_ERR_HSM;
4159
4160 /* ata_pio_sectors() might change the
4161 * state to HSM_ST_LAST. so, the state
4162 * is changed after ata_pio_sectors().
4163 */
4164 ap->hsm_task_state = HSM_ST_ERR;
4165 goto fsm_start;
4166 }
4167
4168 ata_pio_sectors(qc);
4169
4170 if (ap->hsm_task_state == HSM_ST_LAST &&
4171 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4172 /* all data read */
4173 ata_altstatus(ap);
4174 status = ata_wait_idle(ap);
4175 goto fsm_start;
4176 }
4177 }
4178
4179 ata_altstatus(ap); /* flush */
4180 poll_next = 1;
4181 break;
4182
4183 case HSM_ST_LAST:
4184 if (unlikely(!ata_ok(status))) {
4185 qc->err_mask |= __ac_err_mask(status);
4186 ap->hsm_task_state = HSM_ST_ERR;
4187 goto fsm_start;
4188 }
4189
4190 /* no more data to transfer */
4191 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4192 ap->id, qc->dev->devno, status);
4193
4194 WARN_ON(qc->err_mask);
4195
4196 ap->hsm_task_state = HSM_ST_IDLE;
4197
4198 /* complete taskfile transaction */
4199 ata_hsm_qc_complete(qc, in_wq);
4200
4201 poll_next = 0;
4202 break;
4203
4204 case HSM_ST_ERR:
4205 /* make sure qc->err_mask is available to
4206 * know what's wrong and recover
4207 */
4208 WARN_ON(qc->err_mask == 0);
4209
4210 ap->hsm_task_state = HSM_ST_IDLE;
4211
4212 /* complete taskfile transaction */
4213 ata_hsm_qc_complete(qc, in_wq);
4214
4215 poll_next = 0;
4216 break;
4217 default:
4218 poll_next = 0;
4219 BUG();
4220 }
4221
4222 return poll_next;
4223}
4224
4225static void ata_pio_task(void *_data)
4226{
4227 struct ata_queued_cmd *qc = _data;
4228 struct ata_port *ap = qc->ap;
4229 u8 status;
4230 int poll_next;
4231
4232fsm_start:
4233 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4234
4235 /*
4236 * This is purely heuristic. This is a fast path.
4237 * Sometimes when we enter, BSY will be cleared in
4238 * a chk-status or two. If not, the drive is probably seeking
4239 * or something. Snooze for a couple msecs, then
4240 * chk-status again. If still busy, queue delayed work.
4241 */
4242 status = ata_busy_wait(ap, ATA_BUSY, 5);
4243 if (status & ATA_BUSY) {
4244 msleep(2);
4245 status = ata_busy_wait(ap, ATA_BUSY, 10);
4246 if (status & ATA_BUSY) {
4247 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4248 return;
4249 }
4250 }
4251
4252 /* move the HSM */
4253 poll_next = ata_hsm_move(ap, qc, status, 1);
4254
4255 /* another command or interrupt handler
4256 * may be running at this point.
4257 */
4258 if (poll_next)
4259 goto fsm_start;
4260}
4261
4262/**
4263 * ata_qc_new - Request an available ATA command, for queueing
4264 * @ap: Port associated with device @dev
4265 * @dev: Device from whom we request an available command structure
4266 *
4267 * LOCKING:
4268 * None.
4269 */
4270
4271static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4272{
4273 struct ata_queued_cmd *qc = NULL;
4274 unsigned int i;
4275
4276 /* no command while frozen */
4277 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4278 return NULL;
4279
4280 /* the last tag is reserved for internal command. */
4281 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4282 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4283 qc = __ata_qc_from_tag(ap, i);
4284 break;
4285 }
4286
4287 if (qc)
4288 qc->tag = i;
4289
4290 return qc;
4291}
4292
4293/**
4294 * ata_qc_new_init - Request an available ATA command, and initialize it
4295 * @dev: Device from whom we request an available command structure
4296 *
4297 * LOCKING:
4298 * None.
4299 */
4300
4301struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4302{
4303 struct ata_port *ap = dev->ap;
4304 struct ata_queued_cmd *qc;
4305
4306 qc = ata_qc_new(ap);
4307 if (qc) {
4308 qc->scsicmd = NULL;
4309 qc->ap = ap;
4310 qc->dev = dev;
4311
4312 ata_qc_reinit(qc);
4313 }
4314
4315 return qc;
4316}
4317
4318/**
4319 * ata_qc_free - free unused ata_queued_cmd
4320 * @qc: Command to complete
4321 *
4322 * Designed to free unused ata_queued_cmd object
4323 * in case something prevents using it.
4324 *
4325 * LOCKING:
4326 * spin_lock_irqsave(host_set lock)
4327 */
4328void ata_qc_free(struct ata_queued_cmd *qc)
4329{
4330 struct ata_port *ap = qc->ap;
4331 unsigned int tag;
4332
4333 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4334
4335 qc->flags = 0;
4336 tag = qc->tag;
4337 if (likely(ata_tag_valid(tag))) {
4338 qc->tag = ATA_TAG_POISON;
4339 clear_bit(tag, &ap->qc_allocated);
4340 }
4341}
4342
/**
 *	__ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Unmap DMA if needed, clear the active-tag bookkeeping and
 *	invoke @qc's completion callback.  Does not read the result
 *	taskfile; callers wanting that use ata_qc_complete().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4369
4370/**
4371 * ata_qc_complete - Complete an active ATA command
4372 * @qc: Command to complete
4373 * @err_mask: ATA Status register contents
4374 *
4375 * Indicate to the mid and upper layers that an ATA
4376 * command has completed, with either an ok or not-ok status.
4377 *
4378 * LOCKING:
4379 * spin_lock_irqsave(host_set lock)
4380 */
4381void ata_qc_complete(struct ata_queued_cmd *qc)
4382{
4383 struct ata_port *ap = qc->ap;
4384
4385 /* XXX: New EH and old EH use different mechanisms to
4386 * synchronize EH with regular execution path.
4387 *
4388 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4389 * Normal execution path is responsible for not accessing a
4390 * failed qc. libata core enforces the rule by returning NULL
4391 * from ata_qc_from_tag() for failed qcs.
4392 *
4393 * Old EH depends on ata_qc_complete() nullifying completion
4394 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4395 * not synchronize with interrupt handler. Only PIO task is
4396 * taken care of.
4397 */
4398 if (ap->ops->error_handler) {
4399 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4400
4401 if (unlikely(qc->err_mask))
4402 qc->flags |= ATA_QCFLAG_FAILED;
4403
4404 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4405 if (!ata_tag_internal(qc->tag)) {
4406 /* always fill result TF for failed qc */
4407 ap->ops->tf_read(ap, &qc->result_tf);
4408 ata_qc_schedule_eh(qc);
4409 return;
4410 }
4411 }
4412
4413 /* read result TF if requested */
4414 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4415 ap->ops->tf_read(ap, &qc->result_tf);
4416
4417 __ata_qc_complete(qc);
4418 } else {
4419 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4420 return;
4421
4422 /* read result TF if failed or requested */
4423 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4424 ap->ops->tf_read(ap, &qc->result_tf);
4425
4426 __ata_qc_complete(qc);
4427 }
4428}
4429
4430/**
4431 * ata_qc_complete_multiple - Complete multiple qcs successfully
4432 * @ap: port in question
4433 * @qc_active: new qc_active mask
4434 * @finish_qc: LLDD callback invoked before completing a qc
4435 *
4436 * Complete in-flight commands. This functions is meant to be
4437 * called from low-level driver's interrupt routine to complete
4438 * requests normally. ap->qc_active and @qc_active is compared
4439 * and commands are completed accordingly.
4440 *
4441 * LOCKING:
4442 * spin_lock_irqsave(host_set lock)
4443 *
4444 * RETURNS:
4445 * Number of completed commands on success, -errno otherwise.
4446 */
4447int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4448 void (*finish_qc)(struct ata_queued_cmd *))
4449{
4450 int nr_done = 0;
4451 u32 done_mask;
4452 int i;
4453
4454 done_mask = ap->qc_active ^ qc_active;
4455
4456 if (unlikely(done_mask & qc_active)) {
4457 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4458 "(%08x->%08x)\n", ap->qc_active, qc_active);
4459 return -EINVAL;
4460 }
4461
4462 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4463 struct ata_queued_cmd *qc;
4464
4465 if (!(done_mask & (1 << i)))
4466 continue;
4467
4468 if ((qc = ata_qc_from_tag(ap, i))) {
4469 if (finish_qc)
4470 finish_qc(qc);
4471 ata_qc_complete(qc);
4472 nr_done++;
4473 }
4474 }
4475
4476 return nr_done;
4477}
4478
4479static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4480{
4481 struct ata_port *ap = qc->ap;
4482
4483 switch (qc->tf.protocol) {
4484 case ATA_PROT_NCQ:
4485 case ATA_PROT_DMA:
4486 case ATA_PROT_ATAPI_DMA:
4487 return 1;
4488
4489 case ATA_PROT_ATAPI:
4490 case ATA_PROT_PIO:
4491 if (ap->flags & ATA_FLAG_PIO_DMA)
4492 return 1;
4493
4494 /* fall through */
4495
4496 default:
4497 return 0;
4498 }
4499
4500 /* never reached */
4501}
4502
4503/**
4504 * ata_qc_issue - issue taskfile to device
4505 * @qc: command to issue to device
4506 *
4507 * Prepare an ATA command to submission to device.
4508 * This includes mapping the data into a DMA-able
4509 * area, filling in the S/G table, and finally
4510 * writing the taskfile to hardware, starting the command.
4511 *
4512 * LOCKING:
4513 * spin_lock_irqsave(host_set lock)
4514 */
4515void ata_qc_issue(struct ata_queued_cmd *qc)
4516{
4517 struct ata_port *ap = qc->ap;
4518
4519 /* Make sure only one non-NCQ command is outstanding. The
4520 * check is skipped for old EH because it reuses active qc to
4521 * request ATAPI sense.
4522 */
4523 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4524
4525 if (qc->tf.protocol == ATA_PROT_NCQ) {
4526 WARN_ON(ap->sactive & (1 << qc->tag));
4527 ap->sactive |= 1 << qc->tag;
4528 } else {
4529 WARN_ON(ap->sactive);
4530 ap->active_tag = qc->tag;
4531 }
4532
4533 qc->flags |= ATA_QCFLAG_ACTIVE;
4534 ap->qc_active |= 1 << qc->tag;
4535
4536 if (ata_should_dma_map(qc)) {
4537 if (qc->flags & ATA_QCFLAG_SG) {
4538 if (ata_sg_setup(qc))
4539 goto sg_err;
4540 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4541 if (ata_sg_setup_one(qc))
4542 goto sg_err;
4543 }
4544 } else {
4545 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4546 }
4547
4548 ap->ops->qc_prep(qc);
4549
4550 qc->err_mask |= ap->ops->qc_issue(qc);
4551 if (unlikely(qc->err_mask))
4552 goto err;
4553 return;
4554
4555sg_err:
4556 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4557 qc->err_mask |= AC_ERR_SYSTEM;
4558err:
4559 ata_qc_complete(qc);
4560}
4561
4562/**
4563 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4564 * @qc: command to issue to device
4565 *
4566 * Using various libata functions and hooks, this function
4567 * starts an ATA command. ATA commands are grouped into
4568 * classes called "protocols", and issuing each type of protocol
4569 * is slightly different.
4570 *
4571 * May be used as the qc_issue() entry in ata_port_operations.
4572 *
4573 * LOCKING:
4574 * spin_lock_irqsave(host_set lock)
4575 *
4576 * RETURNS:
4577 * Zero on success, AC_ERR_* mask on failure
4578 */
4579
4580unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4581{
4582 struct ata_port *ap = qc->ap;
4583
4584 /* Use polling pio if the LLD doesn't handle
4585 * interrupt driven pio and atapi CDB interrupt.
4586 */
4587 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4588 switch (qc->tf.protocol) {
4589 case ATA_PROT_PIO:
4590 case ATA_PROT_ATAPI:
4591 case ATA_PROT_ATAPI_NODATA:
4592 qc->tf.flags |= ATA_TFLAG_POLLING;
4593 break;
4594 case ATA_PROT_ATAPI_DMA:
4595 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4596 /* see ata_dma_blacklisted() */
4597 BUG();
4598 break;
4599 default:
4600 break;
4601 }
4602 }
4603
4604 /* select the device */
4605 ata_dev_select(ap, qc->dev->devno, 1, 0);
4606
4607 /* start the command */
4608 switch (qc->tf.protocol) {
4609 case ATA_PROT_NODATA:
4610 if (qc->tf.flags & ATA_TFLAG_POLLING)
4611 ata_qc_set_polling(qc);
4612
4613 ata_tf_to_host(ap, &qc->tf);
4614 ap->hsm_task_state = HSM_ST_LAST;
4615
4616 if (qc->tf.flags & ATA_TFLAG_POLLING)
4617 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4618
4619 break;
4620
4621 case ATA_PROT_DMA:
4622 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4623
4624 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4625 ap->ops->bmdma_setup(qc); /* set up bmdma */
4626 ap->ops->bmdma_start(qc); /* initiate bmdma */
4627 ap->hsm_task_state = HSM_ST_LAST;
4628 break;
4629
4630 case ATA_PROT_PIO:
4631 if (qc->tf.flags & ATA_TFLAG_POLLING)
4632 ata_qc_set_polling(qc);
4633
4634 ata_tf_to_host(ap, &qc->tf);
4635
4636 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4637 /* PIO data out protocol */
4638 ap->hsm_task_state = HSM_ST_FIRST;
4639 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4640
4641 /* always send first data block using
4642 * the ata_pio_task() codepath.
4643 */
4644 } else {
4645 /* PIO data in protocol */
4646 ap->hsm_task_state = HSM_ST;
4647
4648 if (qc->tf.flags & ATA_TFLAG_POLLING)
4649 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4650
4651 /* if polling, ata_pio_task() handles the rest.
4652 * otherwise, interrupt handler takes over from here.
4653 */
4654 }
4655
4656 break;
4657
4658 case ATA_PROT_ATAPI:
4659 case ATA_PROT_ATAPI_NODATA:
4660 if (qc->tf.flags & ATA_TFLAG_POLLING)
4661 ata_qc_set_polling(qc);
4662
4663 ata_tf_to_host(ap, &qc->tf);
4664
4665 ap->hsm_task_state = HSM_ST_FIRST;
4666
4667 /* send cdb by polling if no cdb interrupt */
4668 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4669 (qc->tf.flags & ATA_TFLAG_POLLING))
4670 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4671 break;
4672
4673 case ATA_PROT_ATAPI_DMA:
4674 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4675
4676 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4677 ap->ops->bmdma_setup(qc); /* set up bmdma */
4678 ap->hsm_task_state = HSM_ST_FIRST;
4679
4680 /* send cdb by polling if no cdb interrupt */
4681 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4682 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4683 break;
4684
4685 default:
4686 WARN_ON(1);
4687 return AC_ERR_SYSTEM;
4688 }
4689
4690 return 0;
4691}
4692
4693/**
4694 * ata_host_intr - Handle host interrupt for given (port, task)
4695 * @ap: Port on which interrupt arrived (possibly...)
4696 * @qc: Taskfile currently active in engine
4697 *
4698 * Handle host interrupt for given queued command. Currently,
4699 * only DMA interrupts are handled. All other commands are
4700 * handled via polling with interrupts disabled (nIEN bit).
4701 *
4702 * LOCKING:
4703 * spin_lock_irqsave(host_set lock)
4704 *
4705 * RETURNS:
4706 * One if interrupt was handled, zero if not (shared irq).
4707 */
4708
4709inline unsigned int ata_host_intr (struct ata_port *ap,
4710 struct ata_queued_cmd *qc)
4711{
4712 u8 status, host_stat = 0;
4713
4714 VPRINTK("ata%u: protocol %d task_state %d\n",
4715 ap->id, qc->tf.protocol, ap->hsm_task_state);
4716
4717 /* Check whether we are expecting interrupt in this state */
4718 switch (ap->hsm_task_state) {
4719 case HSM_ST_FIRST:
4720 /* Some pre-ATAPI-4 devices assert INTRQ
4721 * at this state when ready to receive CDB.
4722 */
4723
4724 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
4725 * The flag was turned on only for atapi devices.
4726 * No need to check is_atapi_taskfile(&qc->tf) again.
4727 */
4728 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4729 goto idle_irq;
4730 break;
4731 case HSM_ST_LAST:
4732 if (qc->tf.protocol == ATA_PROT_DMA ||
4733 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4734 /* check status of DMA engine */
4735 host_stat = ap->ops->bmdma_status(ap);
4736 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4737
4738 /* if it's not our irq... */
4739 if (!(host_stat & ATA_DMA_INTR))
4740 goto idle_irq;
4741
4742 /* before we do anything else, clear DMA-Start bit */
4743 ap->ops->bmdma_stop(qc);
4744
4745 if (unlikely(host_stat & ATA_DMA_ERR)) {
4746 /* error when transfering data to/from memory */
4747 qc->err_mask |= AC_ERR_HOST_BUS;
4748 ap->hsm_task_state = HSM_ST_ERR;
4749 }
4750 }
4751 break;
4752 case HSM_ST:
4753 break;
4754 default:
4755 goto idle_irq;
4756 }
4757
4758 /* check altstatus */
4759 status = ata_altstatus(ap);
4760 if (status & ATA_BUSY)
4761 goto idle_irq;
4762
4763 /* check main status, clearing INTRQ */
4764 status = ata_chk_status(ap);
4765 if (unlikely(status & ATA_BUSY))
4766 goto idle_irq;
4767
4768 /* ack bmdma irq events */
4769 ap->ops->irq_clear(ap);
4770
4771 ata_hsm_move(ap, qc, status, 0);
4772 return 1; /* irq handled */
4773
4774idle_irq:
4775 ap->stats.idle_irq++;
4776
4777#ifdef ATA_IRQ_TRAP
4778 if ((ap->stats.idle_irq % 1000) == 0) {
4779 ata_irq_ack(ap, 0); /* debug trap */
4780 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4781 return 1;
4782 }
4783#endif
4784 return 0; /* irq not handled */
4785}
4786
4787/**
4788 * ata_interrupt - Default ATA host interrupt handler
4789 * @irq: irq line (unused)
4790 * @dev_instance: pointer to our ata_host_set information structure
4791 * @regs: unused
4792 *
4793 * Default interrupt handler for PCI IDE devices. Calls
4794 * ata_host_intr() for each port that is not disabled.
4795 *
4796 * LOCKING:
4797 * Obtains host_set lock during operation.
4798 *
4799 * RETURNS:
4800 * IRQ_NONE or IRQ_HANDLED.
4801 */
4802
4803irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4804{
4805 struct ata_host_set *host_set = dev_instance;
4806 unsigned int i;
4807 unsigned int handled = 0;
4808 unsigned long flags;
4809
4810 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4811 spin_lock_irqsave(&host_set->lock, flags);
4812
4813 for (i = 0; i < host_set->n_ports; i++) {
4814 struct ata_port *ap;
4815
4816 ap = host_set->ports[i];
4817 if (ap &&
4818 !(ap->flags & ATA_FLAG_DISABLED)) {
4819 struct ata_queued_cmd *qc;
4820
4821 qc = ata_qc_from_tag(ap, ap->active_tag);
4822 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4823 (qc->flags & ATA_QCFLAG_ACTIVE))
4824 handled |= ata_host_intr(ap, qc);
4825 }
4826 }
4827
4828 spin_unlock_irqrestore(&host_set->lock, flags);
4829
4830 return IRQ_RETVAL(handled);
4831}
4832
4833/**
4834 * sata_scr_valid - test whether SCRs are accessible
4835 * @ap: ATA port to test SCR accessibility for
4836 *
4837 * Test whether SCRs are accessible for @ap.
4838 *
4839 * LOCKING:
4840 * None.
4841 *
4842 * RETURNS:
4843 * 1 if SCRs are accessible, 0 otherwise.
4844 */
4845int sata_scr_valid(struct ata_port *ap)
4846{
4847 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4848}
4849
4850/**
4851 * sata_scr_read - read SCR register of the specified port
4852 * @ap: ATA port to read SCR for
4853 * @reg: SCR to read
4854 * @val: Place to store read value
4855 *
4856 * Read SCR register @reg of @ap into *@val. This function is
4857 * guaranteed to succeed if the cable type of the port is SATA
4858 * and the port implements ->scr_read.
4859 *
4860 * LOCKING:
4861 * None.
4862 *
4863 * RETURNS:
4864 * 0 on success, negative errno on failure.
4865 */
4866int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4867{
4868 if (sata_scr_valid(ap)) {
4869 *val = ap->ops->scr_read(ap, reg);
4870 return 0;
4871 }
4872 return -EOPNOTSUPP;
4873}
4874
4875/**
4876 * sata_scr_write - write SCR register of the specified port
4877 * @ap: ATA port to write SCR for
4878 * @reg: SCR to write
4879 * @val: value to write
4880 *
4881 * Write @val to SCR register @reg of @ap. This function is
4882 * guaranteed to succeed if the cable type of the port is SATA
4883 * and the port implements ->scr_read.
4884 *
4885 * LOCKING:
4886 * None.
4887 *
4888 * RETURNS:
4889 * 0 on success, negative errno on failure.
4890 */
4891int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4892{
4893 if (sata_scr_valid(ap)) {
4894 ap->ops->scr_write(ap, reg, val);
4895 return 0;
4896 }
4897 return -EOPNOTSUPP;
4898}
4899
4900/**
4901 * sata_scr_write_flush - write SCR register of the specified port and flush
4902 * @ap: ATA port to write SCR for
4903 * @reg: SCR to write
4904 * @val: value to write
4905 *
4906 * This function is identical to sata_scr_write() except that this
4907 * function performs flush after writing to the register.
4908 *
4909 * LOCKING:
4910 * None.
4911 *
4912 * RETURNS:
4913 * 0 on success, negative errno on failure.
4914 */
4915int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4916{
4917 if (sata_scr_valid(ap)) {
4918 ap->ops->scr_write(ap, reg, val);
4919 ap->ops->scr_read(ap, reg);
4920 return 0;
4921 }
4922 return -EOPNOTSUPP;
4923}
4924
4925/**
4926 * ata_port_online - test whether the given port is online
4927 * @ap: ATA port to test
4928 *
4929 * Test whether @ap is online. Note that this function returns 0
4930 * if online status of @ap cannot be obtained, so
4931 * ata_port_online(ap) != !ata_port_offline(ap).
4932 *
4933 * LOCKING:
4934 * None.
4935 *
4936 * RETURNS:
4937 * 1 if the port online status is available and online.
4938 */
4939int ata_port_online(struct ata_port *ap)
4940{
4941 u32 sstatus;
4942
4943 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4944 return 1;
4945 return 0;
4946}
4947
4948/**
4949 * ata_port_offline - test whether the given port is offline
4950 * @ap: ATA port to test
4951 *
4952 * Test whether @ap is offline. Note that this function returns
4953 * 0 if offline status of @ap cannot be obtained, so
4954 * ata_port_online(ap) != !ata_port_offline(ap).
4955 *
4956 * LOCKING:
4957 * None.
4958 *
4959 * RETURNS:
4960 * 1 if the port offline status is available and offline.
4961 */
4962int ata_port_offline(struct ata_port *ap)
4963{
4964 u32 sstatus;
4965
4966 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4967 return 1;
4968 return 0;
4969}
4970
4971int ata_flush_cache(struct ata_device *dev)
4972{
4973 unsigned int err_mask;
4974 u8 cmd;
4975
4976 if (!ata_try_flush_cache(dev))
4977 return 0;
4978
4979 if (ata_id_has_flush_ext(dev->id))
4980 cmd = ATA_CMD_FLUSH_EXT;
4981 else
4982 cmd = ATA_CMD_FLUSH;
4983
4984 err_mask = ata_do_simple_cmd(dev, cmd);
4985 if (err_mask) {
4986 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
4987 return -EIO;
4988 }
4989
4990 return 0;
4991}
4992
/* Request PM operations from each port's EH.
 * @mesg is the PM message, @action/@ehi_flags are merged into each
 * port's eh_info.  When @wait is non-zero, wait for each port's EH
 * to finish and return the first non-zero per-port result; returns
 * 0 otherwise.
 *
 * LOCKING: Kernel thread context (may sleep).
 */
static int ata_host_set_request_pm(struct ata_host_set *host_set,
				   pm_message_t mesg, unsigned int action,
				   unsigned int ehi_flags, int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5039
5040/**
5041 * ata_host_set_suspend - suspend host_set
5042 * @host_set: host_set to suspend
5043 * @mesg: PM message
5044 *
5045 * Suspend @host_set. Actual operation is performed by EH. This
5046 * function requests EH to perform PM operations and waits for EH
5047 * to finish.
5048 *
5049 * LOCKING:
5050 * Kernel thread context (may sleep).
5051 *
5052 * RETURNS:
5053 * 0 on success, -errno on failure.
5054 */
5055int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
5056{
5057 int i, j, rc;
5058
5059 rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
5060 if (rc)
5061 goto fail;
5062
5063 /* EH is quiescent now. Fail if we have any ready device.
5064 * This happens if hotplug occurs between completion of device
5065 * suspension and here.
5066 */
5067 for (i = 0; i < host_set->n_ports; i++) {
5068 struct ata_port *ap = host_set->ports[i];
5069
5070 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5071 struct ata_device *dev = &ap->device[j];
5072
5073 if (ata_dev_ready(dev)) {
5074 ata_port_printk(ap, KERN_WARNING,
5075 "suspend failed, device %d "
5076 "still active\n", dev->devno);
5077 rc = -EBUSY;
5078 goto fail;
5079 }
5080 }
5081 }
5082
5083 host_set->dev->power.power_state = mesg;
5084 return 0;
5085
5086 fail:
5087 ata_host_set_resume(host_set);
5088 return rc;
5089}
5090
5091/**
5092 * ata_host_set_resume - resume host_set
5093 * @host_set: host_set to resume
5094 *
5095 * Resume @host_set. Actual operation is performed by EH. This
5096 * function requests EH to perform PM operations and returns.
5097 * Note that all resume operations are performed parallely.
5098 *
5099 * LOCKING:
5100 * Kernel thread context (may sleep).
5101 */
5102void ata_host_set_resume(struct ata_host_set *host_set)
5103{
5104 ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
5105 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5106 host_set->dev->power.power_state = PMSG_ON;
5107}
5108
5109/**
5110 * ata_port_start - Set port up for dma.
5111 * @ap: Port to initialize
5112 *
5113 * Called just after data structures for each port are
5114 * initialized. Allocates space for PRD table.
5115 *
5116 * May be used as the port_start() entry in ata_port_operations.
5117 *
5118 * LOCKING:
5119 * Inherited from caller.
5120 */
5121
5122int ata_port_start (struct ata_port *ap)
5123{
5124 struct device *dev = ap->dev;
5125 int rc;
5126
5127 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5128 if (!ap->prd)
5129 return -ENOMEM;
5130
5131 rc = ata_pad_alloc(ap, dev);
5132 if (rc) {
5133 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5134 return rc;
5135 }
5136
5137 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5138
5139 return 0;
5140}
5141
5142
5143/**
5144 * ata_port_stop - Undo ata_port_start()
5145 * @ap: Port to shut down
5146 *
5147 * Frees the PRD table.
5148 *
5149 * May be used as the port_stop() entry in ata_port_operations.
5150 *
5151 * LOCKING:
5152 * Inherited from caller.
5153 */
5154
5155void ata_port_stop (struct ata_port *ap)
5156{
5157 struct device *dev = ap->dev;
5158
5159 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5160 ata_pad_free(ap, dev);
5161}
5162
/* Default host_stop: unmap the host_set-wide MMIO region, if one
 * was set up.  May be used as the ->host_stop() entry in
 * ata_port_operations for non-PCI-iomapped controllers.
 */
void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
5168
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.  Resets the
 *	port's SATA speed limit, clears the async-set flag bits under
 *	the host_set lock, and wipes all probe-volatile fields.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host_set lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Wipe everything past ATA_DEVICE_CLEAR_OFFSET; fields
	 * placed before that offset survive re-initialization.
	 * NOTE(review): done outside the lock -- assumes nothing
	 * touches the cleared region concurrently here; confirm.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* no transfer-mode restrictions until probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5200
/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host_set: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure: copy transfer masks and
 *	flags from the probe entry, set up work items and EH state,
 *	detect cable type, and initialize every ata_device slot.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host_set *host_set,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	/* the port shares the host_set-wide lock */
	ap->lock = &host_set->lock;
	ap->flags = ATA_FLAG_DISABLED;	/* stays disabled until probed */
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	/* narrowed later from SControl in ata_device_add() */
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

	/* message verbosity depends on the build's debug level */
#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	/* port_task's handler is filled in when work is queued */
	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
5268
5269/**
5270 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5271 * @ap: ATA port to initialize SCSI host for
5272 * @shost: SCSI host associated with @ap
5273 *
5274 * Initialize SCSI host @shost associated with ATA port @ap.
5275 *
5276 * LOCKING:
5277 * Inherited from caller.
5278 */
5279static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5280{
5281 ap->host = shost;
5282
5283 shost->unique_id = ap->id;
5284 shost->max_id = 16;
5285 shost->max_lun = 1;
5286 shost->max_channel = 1;
5287 shost->max_cmd_len = 12;
5288}
5289
5290/**
5291 * ata_port_add - Attach low-level ATA driver to system
5292 * @ent: Information provided by low-level driver
5293 * @host_set: Collections of ports to which we add
5294 * @port_no: Port number associated with this host
5295 *
5296 * Attach low-level ATA driver to system.
5297 *
5298 * LOCKING:
5299 * PCI/etc. bus probe sem.
5300 *
5301 * RETURNS:
5302 * New ata_port on success, for NULL on error.
5303 */
5304static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5305 struct ata_host_set *host_set,
5306 unsigned int port_no)
5307{
5308 struct Scsi_Host *shost;
5309 struct ata_port *ap;
5310
5311 DPRINTK("ENTER\n");
5312
5313 if (!ent->port_ops->error_handler &&
5314 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5315 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5316 port_no);
5317 return NULL;
5318 }
5319
5320 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5321 if (!shost)
5322 return NULL;
5323
5324 shost->transportt = &ata_scsi_transport_template;
5325
5326 ap = ata_shost_to_port(shost);
5327
5328 ata_port_init(ap, host_set, ent, port_no);
5329 ata_port_init_shost(ap, shost);
5330
5331 return ap;
5332}
5333
5334/**
5335 * ata_sas_host_init - Initialize a host_set struct
5336 * @host_set: host_set to initialize
5337 * @dev: device host_set is attached to
5338 * @flags: host_set flags
5339 * @ops: port_ops
5340 *
5341 * LOCKING:
5342 * PCI/etc. bus probe sem.
5343 *
5344 */
5345
5346void ata_host_set_init(struct ata_host_set *host_set,
5347 struct device *dev, unsigned long flags,
5348 const struct ata_port_operations *ops)
5349{
5350 spin_lock_init(&host_set->lock);
5351 host_set->dev = dev;
5352 host_set->flags = flags;
5353 host_set->ops = ops;
5354}
5355
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;
	int rc;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses);
	 * ports[] is a flexible tail sized by n_ports
	 */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;	/* 0 ports registered == failure */

	ata_host_set_init(host_set, dev, ent->host_set_flags, ent->port_ops);
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->irq2 = ent->irq2;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;

		/* dummy?  dummy ports get no-op ops and skip port_start */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			/* NULL the slot so the err_out loop doesn't
			 * port_stop() a port that never started
			 */
			host_set->ports[i] = NULL;
			scsi_host_put(ap->host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
				"ctl 0x%lX bmdma 0x%lX irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		/* clear any stale status/interrupt before the IRQ is live */
		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
	}

	/* obtain irq, that may be shared between channels */
	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			 DRV_NAME, host_set);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
				 DRV_NAME, host_set);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out_free_irq;
		}
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			/* new-style EH: hand boot probing to the EH thread */
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style drivers probe synchronously here */
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out_free_irq:
	free_irq(ent->irq, host_set);
err_out:
	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];
		if (ap) {
			ap->ops->port_stop(ap);
			scsi_host_put(ap->host);
		}
	}

	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
5557
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* old-EH drivers have no EH thread to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 * (flush, cancel anything re-queued meanwhile, flush again)
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->host);
}
5615
5616/**
5617 * ata_host_set_remove - PCI layer callback for device removal
5618 * @host_set: ATA host set that was removed
5619 *
5620 * Unregister all objects associated with this host set. Free those
5621 * objects.
5622 *
5623 * LOCKING:
5624 * Inherited from calling layer (may sleep).
5625 */
5626
5627void ata_host_set_remove(struct ata_host_set *host_set)
5628{
5629 unsigned int i;
5630
5631 for (i = 0; i < host_set->n_ports; i++)
5632 ata_port_detach(host_set->ports[i]);
5633
5634 free_irq(host_set->irq, host_set);
5635 if (host_set->irq2)
5636 free_irq(host_set->irq2, host_set);
5637
5638 for (i = 0; i < host_set->n_ports; i++) {
5639 struct ata_port *ap = host_set->ports[i];
5640
5641 ata_scsi_release(ap->host);
5642
5643 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5644 struct ata_ioports *ioaddr = &ap->ioaddr;
5645
5646 /* FIXME: Add -ac IDE pci mods to remove these special cases */
5647 if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
5648 release_region(ATA_PRIMARY_CMD, 8);
5649 else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
5650 release_region(ATA_SECONDARY_CMD, 8);
5651 }
5652
5653 scsi_host_put(ap->host);
5654 }
5655
5656 if (host_set->ops->host_stop)
5657 host_set->ops->host_stop(host_set);
5658
5659 kfree(host_set);
5660}
5661
5662/**
5663 * ata_scsi_release - SCSI layer callback hook for host unload
5664 * @host: libata host to be unloaded
5665 *
5666 * Performs all duties necessary to shut down a libata port...
5667 * Kill port kthread, disable port, and release resources.
5668 *
5669 * LOCKING:
5670 * Inherited from SCSI layer.
5671 *
5672 * RETURNS:
5673 * One.
5674 */
5675
5676int ata_scsi_release(struct Scsi_Host *host)
5677{
5678 struct ata_port *ap = ata_shost_to_port(host);
5679
5680 DPRINTK("ENTER\n");
5681
5682 ap->ops->port_disable(ap);
5683 ap->ops->port_stop(ap);
5684
5685 DPRINTK("EXIT\n");
5686 return 1;
5687}
5688
5689struct ata_probe_ent *
5690ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5691{
5692 struct ata_probe_ent *probe_ent;
5693
5694 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
5695 if (!probe_ent) {
5696 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5697 kobject_name(&(dev->kobj)));
5698 return NULL;
5699 }
5700
5701 INIT_LIST_HEAD(&probe_ent->node);
5702 probe_ent->dev = dev;
5703
5704 probe_ent->sht = port->sht;
5705 probe_ent->host_flags = port->host_flags;
5706 probe_ent->pio_mask = port->pio_mask;
5707 probe_ent->mwdma_mask = port->mwdma_mask;
5708 probe_ent->udma_mask = port->udma_mask;
5709 probe_ent->port_ops = port->port_ops;
5710
5711 return probe_ent;
5712}
5713
5714/**
5715 * ata_std_ports - initialize ioaddr with standard port offsets.
5716 * @ioaddr: IO address structure to be initialized
5717 *
5718 * Utility function which initializes data_addr, error_addr,
5719 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5720 * device_addr, status_addr, and command_addr to standard offsets
5721 * relative to cmd_addr.
5722 *
5723 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5724 */
5725
5726void ata_std_ports(struct ata_ioports *ioaddr)
5727{
5728 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5729 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5730 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5731 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5732 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5733 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5734 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5735 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5736 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5737 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5738}
5739
5740
5741#ifdef CONFIG_PCI
5742
/* PCI variant of host_stop: undo the pci_iomap() of mmio_base.
 * May be used as the ->host_stop() entry for PCI-iomapped
 * controllers.
 */
void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}
5749
5750/**
5751 * ata_pci_remove_one - PCI layer callback for device removal
5752 * @pdev: PCI device that was removed
5753 *
5754 * PCI layer indicates to libata via this hook that
5755 * hot-unplug or module unload event has occurred.
5756 * Handle this by unregistering all objects associated
5757 * with this PCI device. Free those objects. Then finally
5758 * release PCI resources and disable device.
5759 *
5760 * LOCKING:
5761 * Inherited from PCI layer (may sleep).
5762 */
5763
5764void ata_pci_remove_one (struct pci_dev *pdev)
5765{
5766 struct device *dev = pci_dev_to_dev(pdev);
5767 struct ata_host_set *host_set = dev_get_drvdata(dev);
5768
5769 ata_host_set_remove(host_set);
5770
5771 pci_release_regions(pdev);
5772 pci_disable_device(pdev);
5773 dev_set_drvdata(dev, NULL);
5774}
5775
5776/* move to PCI subsystem */
5777int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5778{
5779 unsigned long tmp = 0;
5780
5781 switch (bits->width) {
5782 case 1: {
5783 u8 tmp8 = 0;
5784 pci_read_config_byte(pdev, bits->reg, &tmp8);
5785 tmp = tmp8;
5786 break;
5787 }
5788 case 2: {
5789 u16 tmp16 = 0;
5790 pci_read_config_word(pdev, bits->reg, &tmp16);
5791 tmp = tmp16;
5792 break;
5793 }
5794 case 4: {
5795 u32 tmp32 = 0;
5796 pci_read_config_dword(pdev, bits->reg, &tmp32);
5797 tmp = tmp32;
5798 break;
5799 }
5800
5801 default:
5802 return -EINVAL;
5803 }
5804
5805 tmp &= bits->mask;
5806
5807 return (tmp == bits->val) ? 1 : 0;
5808}
5809
/* Save PCI config state and, on a real suspend, power the device
 * down to D3hot.  A freeze (mesg.event != PM_EVENT_SUSPEND) keeps
 * the device enabled and powered.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);

	if (mesg.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
5819
/* Reverse of ata_pci_device_do_suspend(): restore power state and
 * config space, re-enable the device and bus mastering.
 * NOTE(review): pci_enable_device()'s return value is ignored here;
 * a resume failure goes unreported -- consider at least warning.
 */
void ata_pci_device_do_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
}
5827
5828int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5829{
5830 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
5831 int rc = 0;
5832
5833 rc = ata_host_set_suspend(host_set, mesg);
5834 if (rc)
5835 return rc;
5836
5837 ata_pci_device_do_suspend(pdev, mesg);
5838
5839 return 0;
5840}
5841
/* Driver-level PCI resume hook: bring the PCI function back up,
 * then let EH resume all ports (asynchronously).  Always returns 0.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);
	ata_host_set_resume(host_set);
	return 0;
}
5850#endif /* CONFIG_PCI */
5851
5852
5853static int __init ata_init(void)
5854{
5855 ata_probe_timeout *= HZ;
5856 ata_wq = create_workqueue("ata");
5857 if (!ata_wq)
5858 return -ENOMEM;
5859
5860 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5861 if (!ata_aux_wq) {
5862 destroy_workqueue(ata_wq);
5863 return -ENOMEM;
5864 }
5865
5866 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5867 return 0;
5868}
5869
/* Module exit: tear down both workqueues created in ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

module_init(ata_init);
module_exit(ata_exit);
5878
5879static unsigned long ratelimit_time;
5880static DEFINE_SPINLOCK(ata_ratelimit_lock);
5881
5882int ata_ratelimit(void)
5883{
5884 int rc;
5885 unsigned long flags;
5886
5887 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5888
5889 if (time_after(jiffies, ratelimit_time)) {
5890 rc = 1;
5891 ratelimit_time = jiffies + (HZ/5);
5892 } else
5893 rc = 0;
5894
5895 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5896
5897 return rc;
5898}
5899
5900/**
5901 * ata_wait_register - wait until register value changes
5902 * @reg: IO-mapped register
5903 * @mask: Mask to apply to read register value
5904 * @val: Wait condition
5905 * @interval_msec: polling interval in milliseconds
5906 * @timeout_msec: timeout in milliseconds
5907 *
5908 * Waiting for some bits of register to change is a common
5909 * operation for ATA controllers. This function reads 32bit LE
5910 * IO-mapped register @reg and tests for the following condition.
5911 *
5912 * (*@reg & mask) != val
5913 *
5914 * If the condition is met, it returns; otherwise, the process is
5915 * repeated after @interval_msec until timeout.
5916 *
5917 * LOCKING:
5918 * Kernel thread context (may sleep)
5919 *
5920 * RETURNS:
5921 * The final register value.
5922 */
5923u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5924 unsigned long interval_msec,
5925 unsigned long timeout_msec)
5926{
5927 unsigned long timeout;
5928 u32 tmp;
5929
5930 tmp = ioread32(reg);
5931
5932 /* Calculate timeout _after_ the first read to make sure
5933 * preceding writes reach the controller before starting to
5934 * eat away the timeout.
5935 */
5936 timeout = jiffies + (timeout_msec * HZ) / 1000;
5937
5938 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5939 msleep(interval_msec);
5940 tmp = ioread32(reg);
5941 }
5942
5943 return tmp;
5944}
5945
/*
 * Dummy port_ops -- installed on ports the LLD marked as dummy
 * (see ata_device_add()).  Every operation is either a no-op or
 * fails the request immediately.
 */
/* no-op for void(struct ata_port *) slots */
static void ata_dummy_noret(struct ata_port *ap) { }
/* always-successful no-op for int(struct ata_port *) slots */
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
/* no-op for void(struct ata_queued_cmd *) slots */
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* report the device as ready so nothing ever waits on a dummy port */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* immediately fail any command issued to a dummy port */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
5978
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* core registration / teardown and SATA timing tables */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_set_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
/* command setup, taskfile and data transfer helpers */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/* BMDMA engine helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
/* probing, reset and PHY control */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
/* SCSI midlayer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
/* SCR access and link state */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_set_suspend);
EXPORT_SYMBOL_GPL(ata_host_set_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

/* error handling entry points */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
new file mode 100644
index 000000000000..2c476eee463f
--- /dev/null
+++ b/drivers/ata/libata-eh.c
@@ -0,0 +1,2246 @@
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_eh.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_cmnd.h>
42#include "../scsi/scsi_transport_api.h"
43
44#include <linux/libata.h>
45
46#include "libata.h"
47
48static void __ata_port_freeze(struct ata_port *ap);
49static void ata_eh_finish(struct ata_port *ap);
50static void ata_eh_handle_port_suspend(struct ata_port *ap);
51static void ata_eh_handle_port_resume(struct ata_port *ap);
52
53static void ata_ering_record(struct ata_ering *ering, int is_io,
54 unsigned int err_mask)
55{
56 struct ata_ering_entry *ent;
57
58 WARN_ON(!err_mask);
59
60 ering->cursor++;
61 ering->cursor %= ATA_ERING_SIZE;
62
63 ent = &ering->ring[ering->cursor];
64 ent->is_io = is_io;
65 ent->err_mask = err_mask;
66 ent->timestamp = get_jiffies_64();
67}
68
69static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
70{
71 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
72 if (!ent->err_mask)
73 return NULL;
74 return ent;
75}
76
77static int ata_ering_map(struct ata_ering *ering,
78 int (*map_fn)(struct ata_ering_entry *, void *),
79 void *arg)
80{
81 int idx, rc = 0;
82 struct ata_ering_entry *ent;
83
84 idx = ering->cursor;
85 do {
86 ent = &ering->ring[idx];
87 if (!ent->err_mask)
88 break;
89 rc = map_fn(ent, arg);
90 if (rc)
91 break;
92 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
93 } while (idx != ering->cursor);
94
95 return rc;
96}
97
98static unsigned int ata_eh_dev_action(struct ata_device *dev)
99{
100 struct ata_eh_context *ehc = &dev->ap->eh_context;
101
102 return ehc->i.action | ehc->i.dev_action[dev->devno];
103}
104
105static void ata_eh_clear_action(struct ata_device *dev,
106 struct ata_eh_info *ehi, unsigned int action)
107{
108 int i;
109
110 if (!dev) {
111 ehi->action &= ~action;
112 for (i = 0; i < ATA_MAX_DEVICES; i++)
113 ehi->dev_action[i] &= ~action;
114 } else {
115 /* doesn't make sense for port-wide EH actions */
116 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
117
118 /* break ehi->action into ehi->dev_action */
119 if (ehi->action & action) {
120 for (i = 0; i < ATA_MAX_DEVICES; i++)
121 ehi->dev_action[i] |= ehi->action & action;
122 ehi->action &= ~action;
123 }
124
125 /* turn off the specified per-dev action */
126 ehi->dev_action[dev->devno] &= ~action;
127 }
128}
129
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret;

	DPRINTK("ENTER\n");

	/* new EH handles timeouts in ata_scsi_error(); just let SCSI
	 * invoke EH
	 */
	if (ap->ops->error_handler) {
		ret = EH_NOT_HANDLED;
		goto out;
	}

	/* old EH: claim the active qc under ap->lock so that normal
	 * completion cannot race us
	 */
	ret = EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		/* EH_SCHEDULED keeps ata_qc_complete() from finishing it */
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
179
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host_set lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			/* find the active qc, if any, owning this scmd */
			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs.  They belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);
	} else
		/* old EH: just wait for in-flight lock holders to drain */
		spin_unlock_wait(ap->lock);

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info: eh_info (producer side, written
		 * under lock by completion/irq paths) is transferred into
		 * eh_context (consumer side, owned by this thread)
		 */
		spin_lock_irqsave(ap->lock, flags);

		memset(&ap->eh_context, 0, sizeof(ap->eh_context));
		ap->eh_context.i = ap->eh_info;
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else
			ata_eh_finish(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--repeat_cnt) {
				ata_port_printk(ap, KERN_INFO,
					"EH pending after completion, "
					"repeating EH (cnt=%d)\n", repeat_cnt);
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_REPEAT);
		}

		/* this run is complete, make sure EH info is clear */
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		/* old EH: timeout handling only, one active command */
		WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_work(ata_aux_wq, &ap->hotplug_task);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}
351
/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until neither pending nor in-progress EH remains;
	 * the flags are re-checked under ap->lock after every wakeup
	 */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->host)) {
		msleep(10);
		goto retry;
	}
}
385
/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* stop the PIO state machine before touching the hardware */
	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(ap->lock, flags);

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
			       "stat 0x%x host_stat 0x%x\n",
			       qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= AC_ERR_TIMEOUT;
		break;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}
451
/**
 *	ata_eng_timeout - Handle timeout of queued command
 *	@ap: Port on which timed-out command is active
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* old EH has exactly one active command, tracked by active_tag */
	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));

	DPRINTK("EXIT\n");
}
480
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	qc->ap->pflags |= ATA_PFLAG_EH_PENDING;

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	scsi_req_abort_cmd(qc->scsicmd);
}
507
508/**
509 * ata_port_schedule_eh - schedule error handling without a qc
510 * @ap: ATA port to schedule EH for
511 *
512 * Schedule error handling for @ap. EH will kick in as soon as
513 * all commands are drained.
514 *
515 * LOCKING:
516 * spin_lock_irqsave(host_set lock)
517 */
518void ata_port_schedule_eh(struct ata_port *ap)
519{
520 WARN_ON(!ap->ops->error_handler);
521
522 ap->pflags |= ATA_PFLAG_EH_PENDING;
523 scsi_schedule_eh(ap->host);
524
525 DPRINTK("port EH scheduled\n");
526}
527
528/**
529 * ata_port_abort - abort all qc's on the port
530 * @ap: ATA port to abort qc's for
531 *
532 * Abort all active qc's of @ap and schedule EH.
533 *
534 * LOCKING:
535 * spin_lock_irqsave(host_set lock)
536 *
537 * RETURNS:
538 * Number of aborted qc's.
539 */
540int ata_port_abort(struct ata_port *ap)
541{
542 int tag, nr_aborted = 0;
543
544 WARN_ON(!ap->ops->error_handler);
545
546 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
547 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
548
549 if (qc) {
550 qc->flags |= ATA_QCFLAG_FAILED;
551 ata_qc_complete(qc);
552 nr_aborted++;
553 }
554 }
555
556 if (!nr_aborted)
557 ata_port_schedule_eh(ap);
558
559 return nr_aborted;
560}
561
562/**
563 * __ata_port_freeze - freeze port
564 * @ap: ATA port to freeze
565 *
566 * This function is called when HSM violation or some other
567 * condition disrupts normal operation of the port. Frozen port
568 * is not allowed to perform any operation until the port is
569 * thawed, which usually follows a successful reset.
570 *
571 * ap->ops->freeze() callback can be used for freezing the port
572 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
573 * port cannot be frozen hardware-wise, the interrupt handler
574 * must ack and clear interrupts unconditionally while the port
575 * is frozen.
576 *
577 * LOCKING:
578 * spin_lock_irqsave(host_set lock)
579 */
580static void __ata_port_freeze(struct ata_port *ap)
581{
582 WARN_ON(!ap->ops->error_handler);
583
584 if (ap->ops->freeze)
585 ap->ops->freeze(ap);
586
587 ap->pflags |= ATA_PFLAG_FROZEN;
588
589 DPRINTK("ata%u port frozen\n", ap->id);
590}
591
592/**
593 * ata_port_freeze - abort & freeze port
594 * @ap: ATA port to freeze
595 *
596 * Abort and freeze @ap.
597 *
598 * LOCKING:
599 * spin_lock_irqsave(host_set lock)
600 *
601 * RETURNS:
602 * Number of aborted commands.
603 */
604int ata_port_freeze(struct ata_port *ap)
605{
606 int nr_aborted;
607
608 WARN_ON(!ap->ops->error_handler);
609
610 nr_aborted = ata_port_abort(ap);
611 __ata_port_freeze(ap);
612
613 return nr_aborted;
614}
615
616/**
617 * ata_eh_freeze_port - EH helper to freeze port
618 * @ap: ATA port to freeze
619 *
620 * Freeze @ap.
621 *
622 * LOCKING:
623 * None.
624 */
625void ata_eh_freeze_port(struct ata_port *ap)
626{
627 unsigned long flags;
628
629 if (!ap->ops->error_handler)
630 return;
631
632 spin_lock_irqsave(ap->lock, flags);
633 __ata_port_freeze(ap);
634 spin_unlock_irqrestore(ap->lock, flags);
635}
636
637/**
638 * ata_port_thaw_port - EH helper to thaw port
639 * @ap: ATA port to thaw
640 *
641 * Thaw frozen port @ap.
642 *
643 * LOCKING:
644 * None.
645 */
646void ata_eh_thaw_port(struct ata_port *ap)
647{
648 unsigned long flags;
649
650 if (!ap->ops->error_handler)
651 return;
652
653 spin_lock_irqsave(ap->lock, flags);
654
655 ap->pflags &= ~ATA_PFLAG_FROZEN;
656
657 if (ap->ops->thaw)
658 ap->ops->thaw(ap);
659
660 spin_unlock_irqrestore(ap->lock, flags);
661
662 DPRINTK("ata%u port thawed\n", ap->id);
663}
664
/* Dummy ->scsidone callback installed while completing a qc from EH;
 * the scmd is actually finished via ap->eh_done_q instead.
 */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
669
/* Complete @qc from EH context.  The qc is completed under ap->lock
 * with a dummy scsidone so the scmd is not finished by the normal
 * path; it is then handed to the SCSI EH done queue.
 */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	/* the tag must have been released by __ata_qc_complete() */
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
684
685/**
686 * ata_eh_qc_complete - Complete an active ATA command from EH
687 * @qc: Command to complete
688 *
689 * Indicate to the mid and upper layers that an ATA command has
690 * completed. To be used from EH.
691 */
692void ata_eh_qc_complete(struct ata_queued_cmd *qc)
693{
694 struct scsi_cmnd *scmd = qc->scsicmd;
695 scmd->retries = scmd->allowed;
696 __ata_eh_qc_complete(qc);
697}
698
699/**
700 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
701 * @qc: Command to retry
702 *
703 * Indicate to the mid and upper layers that an ATA command
704 * should be retried. To be used from EH.
705 *
706 * SCSI midlayer limits the number of retries to scmd->allowed.
707 * scmd->retries is decremented for commands which get retried
708 * due to unrelated failures (qc->err_mask is zero).
709 */
710void ata_eh_qc_retry(struct ata_queued_cmd *qc)
711{
712 struct scsi_cmnd *scmd = qc->scsicmd;
713 if (!qc->err_mask && scmd->retries)
714 scmd->retries--;
715 __ata_eh_qc_complete(qc);
716}
717
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	/* the detach request is being serviced now */
	dev->flags &= ~ATA_DFLAG_DETACH;

	/* if the SCSI device could be taken offline, mark the device
	 * detached and ask the hotplug task to unregister it
	 */
	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH actions */
	ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);

	spin_unlock_irqrestore(ap->lock, flags);
}
749
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@ap: target ATA port
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @ap->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
			       unsigned int action)
{
	unsigned long flags;
	struct ata_eh_info *ehi = &ap->eh_info;
	struct ata_eh_context *ehc = &ap->eh_context;

	spin_lock_irqsave(ap->lock, flags);

	/* Reset is represented by combination of actions and EHI
	 * flags.  Suck in all related bits before clearing eh_info to
	 * avoid losing requested action.
	 */
	if (action & ATA_EH_RESET_MASK) {
		ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
		ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;

		/* make sure all reset actions are cleared & clear EHI flags */
		action |= ATA_EH_RESET_MASK;
		ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	ata_eh_clear_action(dev, ehi, action);

	/* unless EH was asked to stay quiet, this port counts as recovered */
	if (!(ehc->i.flags & ATA_EHI_QUIET))
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
792
/**
 *	ata_eh_done - EH action complete
 *	@ap: target ATA port
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @ap->eh_context.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
			unsigned int action)
{
	/* if reset is complete, clear all reset actions & reset modifier;
	 * completing any one reset type completes them all
	 */
	if (action & ATA_EH_RESET_MASK) {
		action |= ATA_EH_RESET_MASK;
		ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	ata_eh_clear_action(dev, &ap->eh_context.i, action);
}
816
817/**
818 * ata_err_string - convert err_mask to descriptive string
819 * @err_mask: error mask to convert to string
820 *
821 * Convert @err_mask to descriptive string. Errors are
822 * prioritized according to severity and only the most severe
823 * error is reported.
824 *
825 * LOCKING:
826 * None.
827 *
828 * RETURNS:
829 * Descriptive string for @err_mask
830 */
831static const char * ata_err_string(unsigned int err_mask)
832{
833 if (err_mask & AC_ERR_HOST_BUS)
834 return "host bus error";
835 if (err_mask & AC_ERR_ATA_BUS)
836 return "ATA bus error";
837 if (err_mask & AC_ERR_TIMEOUT)
838 return "timeout";
839 if (err_mask & AC_ERR_HSM)
840 return "HSM violation";
841 if (err_mask & AC_ERR_SYSTEM)
842 return "internal error";
843 if (err_mask & AC_ERR_MEDIA)
844 return "media error";
845 if (err_mask & AC_ERR_INVALID)
846 return "invalid argument";
847 if (err_mask & AC_ERR_DEV)
848 return "device error";
849 return "unknown error";
850}
851
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	/* sector count is 16 bits wide for LBA48 commands */
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
889
/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	/* the page bytes must sum to zero; warn but keep going on
	 * mismatch as the data may still be usable
	 */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	/* NQ bit set means the error is not tag-specific */
	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	/* decode the error taskfile image from the fixed page layout */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
943
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   unsigned char *sense_buf)
{
	struct ata_port *ap = dev->ap;
	struct ata_taskfile tf;
	u8 cdb[ATAPI_CDB_LEN];

	DPRINTK("ATAPI request sense\n");

	ata_tf_init(dev, &tf);

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* XXX: why tf_read here? */
	ap->ops->tf_read(ap, &tf);

	/* fill these in, for the case where they are -not- overwritten */
	sense_buf[0] = 0x70;
	sense_buf[2] = tf.feature >> 4;

	/* build the REQUEST SENSE CDB */
	memset(cdb, 0, ATAPI_CDB_LEN);
	cdb[0] = REQUEST_SENSE;
	cdb[4] = SCSI_SENSE_BUFFERSIZE;

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATA_PROT_ATAPI_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATA_PROT_ATAPI;
		/* byte-count limit for PIO transfers */
		tf.lbam = (8 * 1024) & 0xff;
		tf.lbah = (8 * 1024) >> 8;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE);
}
999
1000/**
1001 * ata_eh_analyze_serror - analyze SError for a failed port
1002 * @ap: ATA port to analyze SError for
1003 *
1004 * Analyze SError if available and further determine cause of
1005 * failure.
1006 *
1007 * LOCKING:
1008 * None.
1009 */
1010static void ata_eh_analyze_serror(struct ata_port *ap)
1011{
1012 struct ata_eh_context *ehc = &ap->eh_context;
1013 u32 serror = ehc->i.serror;
1014 unsigned int err_mask = 0, action = 0;
1015
1016 if (serror & SERR_PERSISTENT) {
1017 err_mask |= AC_ERR_ATA_BUS;
1018 action |= ATA_EH_HARDRESET;
1019 }
1020 if (serror &
1021 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
1022 err_mask |= AC_ERR_ATA_BUS;
1023 action |= ATA_EH_SOFTRESET;
1024 }
1025 if (serror & SERR_PROTOCOL) {
1026 err_mask |= AC_ERR_HSM;
1027 action |= ATA_EH_SOFTRESET;
1028 }
1029 if (serror & SERR_INTERNAL) {
1030 err_mask |= AC_ERR_SYSTEM;
1031 action |= ATA_EH_SOFTRESET;
1032 }
1033 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
1034 ata_ehi_hotplugged(&ehc->i);
1035
1036 ehc->i.err_mask |= err_mask;
1037 ehc->i.action |= action;
1038}
1039
/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@ap: ATA port to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_analyze_ncq_error(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev = ap->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already?  if any failed qc already carries
	 * an err_mask, the LLDD did our job - nothing to do
	 */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	/* sanity check: the reported tag must be among active NCQ tags */
	if (!(ap->sactive & (1 << tag))) {
		ata_port_printk(ap, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->err_mask |= AC_ERR_DEV;
	/* the device error now belongs to the qc, not the port */
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
1099
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* anything other than a clean DRDY status is an HSM violation */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_SOFTRESET;
	}

	/* no device error reported - nothing more to analyze */
	if (!(qc->err_mask & AC_ERR_DEV))
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		/* decode the ERROR register bits */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		tmp = atapi_eh_request_sense(qc->dev,
					     qc->scsicmd->sense_buffer);
		if (!tmp) {
			/* ATA_QCFLAG_SENSE_VALID is used to tell
			 * atapi_qc_complete() that sense data is
			 * already valid.
			 *
			 * TODO: interpret sense data and set
			 * appropriate err_mask.
			 */
			qc->flags |= ATA_QCFLAG_SENSE_VALID;
		} else
			qc->err_mask |= tmp;
	}

	/* bus-level or state-machine errors require a reset to recover */
	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_SOFTRESET;

	return action;
}
1160
1161static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1162{
1163 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1164 return 1;
1165
1166 if (ent->is_io) {
1167 if (ent->err_mask & AC_ERR_HSM)
1168 return 1;
1169 if ((ent->err_mask &
1170 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1171 return 2;
1172 }
1173
1174 return 0;
1175}
1176
/* argument block for speed_down_needed_cb() */
struct speed_down_needed_arg {
	u64 since;		/* ignore ering entries older than this timestamp */
	int nr_errors[3];	/* error counts indexed by category (0/1/2) */
};
1181
1182static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1183{
1184 struct speed_down_needed_arg *arg = void_arg;
1185
1186 if (ent->timestamp < arg->since)
1187 return -1;
1188
1189 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1190 return 0;
1191}
1192
/**
 *	ata_eh_speed_down_needed - Determine whether speed down is necessary
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether speed down is necessary.  Speed down is necessary if
 *	there have been more than 3 of Cat-1 errors or 10 of Cat-2
 *	errors during last 15 minutes.
 *
 *	Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM
 *	violation for known supported commands.
 *
 *	Cat-2 errors are unclassified DEV error for known supported
 *	command.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if speed down is necessary, 0 otherwise
 */
static int ata_eh_speed_down_needed(struct ata_device *dev)
{
	const u64 interval = 15LLU * 60 * HZ;	/* look back 15 minutes */
	/* per-category limits; category 0 never reaches the comparison */
	static const int err_limits[3] = { -1, 3, 10 };
	struct speed_down_needed_arg arg;
	struct ata_ering_entry *ent;
	int err_cat;
	u64 j64;

	/* no error history, nothing to do */
	ent = ata_ering_top(&dev->ering);
	if (!ent)
		return 0;

	/* only the category of the most recent error is checked
	 * against its limit below
	 */
	err_cat = ata_eh_categorize_ering_entry(ent);
	if (err_cat == 0)
		return 0;

	memset(&arg, 0, sizeof(arg));

	/* window start, clamped so it can't underflow near boot */
	j64 = get_jiffies_64();
	if (j64 >= interval)
		arg.since = j64 - interval;
	else
		arg.since = 0;

	/* count recent errors per category */
	ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);

	return arg.nr_errors[err_cat] > err_limits[err_cat];
}
1243
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@is_io: Did the device fail during normal IO?
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Recovery action mask to be ORed into ehc->i.action
 *	(ATA_EH_HARDRESET after a link speed down, ATA_EH_SOFTRESET
 *	after an xfermask limit), or 0 if no speed down was applied.
 */
static int ata_eh_speed_down(struct ata_device *dev, int is_io,
			     unsigned int err_mask)
{
	/* nothing to record */
	if (!err_mask)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, is_io, err_mask);

	if (!ata_eh_speed_down_needed(dev))
		return 0;

	/* speed down SATA link speed if possible */
	if (sata_down_spd_limit(dev->ap) == 0)
		return ATA_EH_HARDRESET;

	/* lower transfer mode */
	if (ata_down_xfermask_limit(dev, 0) == 0)
		return ATA_EH_SOFTRESET;

	/* both limiters exhausted - report and give up */
	ata_dev_printk(dev, KERN_ERR,
		       "speed down requested but no transfer mode left\n");
	return 0;
}
1285
/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: ATA port to perform autopsy on
 *
 *	Analyze why @ap failed and determine which recovery action is
 *	needed.  This function also sets more detailed AC_ERR_* values
 *	and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned int all_err_mask = 0;
	int tag, is_io = 0;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(ap, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(ap);
	} else if (rc != -EOPNOTSUPP)
		/* SCR should be readable but isn't - force hardreset */
		ehc->i.action |= ATA_EH_HARDRESET;

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(ap);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
			ehc->i.action &= ~ATA_EH_REVALIDATE;
		}

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			is_io = 1;
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_SOFTRESET;
	else if (all_err_mask)
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* if we have offending qcs and the associated failed device */
	if (ehc->i.dev) {
		/* speed down */
		ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
						   all_err_mask);

		/* perform per-dev EH action only on the offending device */
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	DPRINTK("EXIT\n");
}
1382
/**
 *	ata_eh_report - report error handling to user
 *	@ap: ATA port EH is going on
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_report(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const char *frozen, *desc;
	int tag, nr_failed = 0;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	/* count qcs still worth reporting; SENSE_VALID with a clear
	 * err_mask means the failure was already fully resolved
	 */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	/* nothing to report */
	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	/* port-level summary, attributed to the offending device if known */
	if (ehc->i.dev) {
		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
			       "SAct 0x%x SErr 0x%x action 0x%x%s\n",
			       ehc->i.err_mask, ap->sactive, ehc->i.serror,
			       ehc->i.action, frozen);
		if (desc)
			ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
	} else {
		ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
				"SAct 0x%x SErr 0x%x action 0x%x%s\n",
				ehc->i.err_mask, ap->sactive, ehc->i.serror,
				ehc->i.action, frozen);
		if (desc)
			ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
	}

	/* one line per failed command with the result TF status/error */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
			continue;

		ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
			       "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
			       qc->tag, qc->tf.command, qc->err_mask,
			       qc->result_tf.command, qc->result_tf.feature,
			       ata_err_string(qc->err_mask));
	}
}
1449
1450static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1451 unsigned int *classes)
1452{
1453 int i, rc;
1454
1455 for (i = 0; i < ATA_MAX_DEVICES; i++)
1456 classes[i] = ATA_DEV_UNKNOWN;
1457
1458 rc = reset(ap, classes);
1459 if (rc)
1460 return rc;
1461
1462 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
1463 * is complete and convert all ATA_DEV_UNKNOWN to
1464 * ATA_DEV_NONE.
1465 */
1466 for (i = 0; i < ATA_MAX_DEVICES; i++)
1467 if (classes[i] != ATA_DEV_UNKNOWN)
1468 break;
1469
1470 if (i < ATA_MAX_DEVICES)
1471 for (i = 0; i < ATA_MAX_DEVICES; i++)
1472 if (classes[i] == ATA_DEV_UNKNOWN)
1473 classes[i] = ATA_DEV_NONE;
1474
1475 return 0;
1476}
1477
1478static int ata_eh_followup_srst_needed(int rc, int classify,
1479 const unsigned int *classes)
1480{
1481 if (rc == -EAGAIN)
1482 return 1;
1483 if (rc != 0)
1484 return 0;
1485 if (classify && classes[0] == ATA_DEV_UNKNOWN)
1486 return 1;
1487 return 0;
1488}
1489
1490static int ata_eh_reset(struct ata_port *ap, int classify,
1491 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1492 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1493{
1494 struct ata_eh_context *ehc = &ap->eh_context;
1495 unsigned int *classes = ehc->classes;
1496 int tries = ATA_EH_RESET_TRIES;
1497 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
1498 unsigned int action;
1499 ata_reset_fn_t reset;
1500 int i, did_followup_srst, rc;
1501
1502 /* about to reset */
1503 ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1504
1505 /* Determine which reset to use and record in ehc->i.action.
1506 * prereset() may examine and modify it.
1507 */
1508 action = ehc->i.action;
1509 ehc->i.action &= ~ATA_EH_RESET_MASK;
1510 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1511 !(action & ATA_EH_HARDRESET))))
1512 ehc->i.action |= ATA_EH_SOFTRESET;
1513 else
1514 ehc->i.action |= ATA_EH_HARDRESET;
1515
1516 if (prereset) {
1517 rc = prereset(ap);
1518 if (rc) {
1519 ata_port_printk(ap, KERN_ERR,
1520 "prereset failed (errno=%d)\n", rc);
1521 return rc;
1522 }
1523 }
1524
1525 /* prereset() might have modified ehc->i.action */
1526 if (ehc->i.action & ATA_EH_HARDRESET)
1527 reset = hardreset;
1528 else if (ehc->i.action & ATA_EH_SOFTRESET)
1529 reset = softreset;
1530 else {
1531 /* prereset told us not to reset, bang classes and return */
1532 for (i = 0; i < ATA_MAX_DEVICES; i++)
1533 classes[i] = ATA_DEV_NONE;
1534 return 0;
1535 }
1536
1537 /* did prereset() screw up? if so, fix up to avoid oopsing */
1538 if (!reset) {
1539 ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1540 "invalid reset type\n");
1541 if (softreset)
1542 reset = softreset;
1543 else
1544 reset = hardreset;
1545 }
1546
1547 retry:
1548 /* shut up during boot probing */
1549 if (verbose)
1550 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1551 reset == softreset ? "soft" : "hard");
1552
1553 /* mark that this EH session started with reset */
1554 ehc->i.flags |= ATA_EHI_DID_RESET;
1555
1556 rc = ata_do_reset(ap, reset, classes);
1557
1558 did_followup_srst = 0;
1559 if (reset == hardreset &&
1560 ata_eh_followup_srst_needed(rc, classify, classes)) {
1561 /* okay, let's do follow-up softreset */
1562 did_followup_srst = 1;
1563 reset = softreset;
1564
1565 if (!reset) {
1566 ata_port_printk(ap, KERN_ERR,
1567 "follow-up softreset required "
1568 "but no softreset avaliable\n");
1569 return -EINVAL;
1570 }
1571
1572 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1573 rc = ata_do_reset(ap, reset, classes);
1574
1575 if (rc == 0 && classify &&
1576 classes[0] == ATA_DEV_UNKNOWN) {
1577 ata_port_printk(ap, KERN_ERR,
1578 "classification failed\n");
1579 return -EINVAL;
1580 }
1581 }
1582
1583 if (rc && --tries) {
1584 const char *type;
1585
1586 if (reset == softreset) {
1587 if (did_followup_srst)
1588 type = "follow-up soft";
1589 else
1590 type = "soft";
1591 } else
1592 type = "hard";
1593
1594 ata_port_printk(ap, KERN_WARNING,
1595 "%sreset failed, retrying in 5 secs\n", type);
1596 ssleep(5);
1597
1598 if (reset == hardreset)
1599 sata_down_spd_limit(ap);
1600 if (hardreset)
1601 reset = hardreset;
1602 goto retry;
1603 }
1604
1605 if (rc == 0) {
1606 /* After the reset, the device state is PIO 0 and the
1607 * controller state is undefined. Record the mode.
1608 */
1609 for (i = 0; i < ATA_MAX_DEVICES; i++)
1610 ap->device[i].pio_mode = XFER_PIO_0;
1611
1612 if (postreset)
1613 postreset(ap, classes);
1614
1615 /* reset successful, schedule revalidation */
1616 ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1617 ehc->i.action |= ATA_EH_REVALIDATE;
1618 }
1619
1620 return rc;
1621}
1622
/**
 *	ata_eh_revalidate_and_attach - revalidate and attach devices
 *	@ap: host port to operate on
 *	@r_failed_dev: result parameter to indicate failing device
 *
 *	Revalidate devices scheduled for ATA_EH_REVALIDATE and attach
 *	devices which were classified by reset but not yet configured.
 *	On failure, the offending device is returned in @r_failed_dev.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
static int ata_eh_revalidate_and_attach(struct ata_port *ap,
					struct ata_device **r_failed_dev)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev;
	unsigned long flags;
	int i, rc = 0;

	DPRINTK("ENTER\n");

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int action;

		dev = &ap->device[i];
		action = ata_eh_dev_action(dev);

		if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
			/* can't revalidate a device behind a dead link */
			if (ata_port_offline(ap)) {
				rc = -EIO;
				break;
			}

			ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev,
					ehc->i.flags & ATA_EHI_DID_RESET);
			if (rc)
				break;

			ata_eh_done(ap, dev, ATA_EH_REVALIDATE);

			/* schedule the scsi_rescan_device() here */
			queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* new device found by reset - adopt the class the
			 * reset determined, then identify and configure it
			 */
			dev->class = ehc->classes[dev->devno];

			rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
			if (rc == 0)
				rc = ata_dev_configure(dev, 1);

			if (rc) {
				/* roll back so a retry re-attempts attach */
				dev->class = ATA_DEV_UNKNOWN;
				break;
			}

			/* tell SCSI layer a new device appeared */
			spin_lock_irqsave(ap->lock, flags);
			ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
			spin_unlock_irqrestore(ap->lock, flags);
		}
	}

	if (rc)
		*r_failed_dev = dev;

	DPRINTK("EXIT\n");
	return rc;
}
1681
1682/**
1683 * ata_eh_suspend - handle suspend EH action
1684 * @ap: target host port
1685 * @r_failed_dev: result parameter to indicate failing device
1686 *
1687 * Handle suspend EH action. Disk devices are spinned down and
1688 * other types of devices are just marked suspended. Once
1689 * suspended, no EH action to the device is allowed until it is
1690 * resumed.
1691 *
1692 * LOCKING:
1693 * Kernel thread context (may sleep).
1694 *
1695 * RETURNS:
1696 * 0 on success, -errno otherwise
1697 */
1698static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
1699{
1700 struct ata_device *dev;
1701 int i, rc = 0;
1702
1703 DPRINTK("ENTER\n");
1704
1705 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1706 unsigned long flags;
1707 unsigned int action, err_mask;
1708
1709 dev = &ap->device[i];
1710 action = ata_eh_dev_action(dev);
1711
1712 if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
1713 continue;
1714
1715 WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
1716
1717 ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
1718
1719 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1720 /* flush cache */
1721 rc = ata_flush_cache(dev);
1722 if (rc)
1723 break;
1724
1725 /* spin down */
1726 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
1727 if (err_mask) {
1728 ata_dev_printk(dev, KERN_ERR, "failed to "
1729 "spin down (err_mask=0x%x)\n",
1730 err_mask);
1731 rc = -EIO;
1732 break;
1733 }
1734 }
1735
1736 spin_lock_irqsave(ap->lock, flags);
1737 dev->flags |= ATA_DFLAG_SUSPENDED;
1738 spin_unlock_irqrestore(ap->lock, flags);
1739
1740 ata_eh_done(ap, dev, ATA_EH_SUSPEND);
1741 }
1742
1743 if (rc)
1744 *r_failed_dev = dev;
1745
1746 DPRINTK("EXIT\n");
1747 return 0;
1748}
1749
1750/**
1751 * ata_eh_prep_resume - prep for resume EH action
1752 * @ap: target host port
1753 *
1754 * Clear SUSPENDED in preparation for scheduled resume actions.
1755 * This allows other parts of EH to access the devices being
1756 * resumed.
1757 *
1758 * LOCKING:
1759 * Kernel thread context (may sleep).
1760 */
1761static void ata_eh_prep_resume(struct ata_port *ap)
1762{
1763 struct ata_device *dev;
1764 unsigned long flags;
1765 int i;
1766
1767 DPRINTK("ENTER\n");
1768
1769 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1770 unsigned int action;
1771
1772 dev = &ap->device[i];
1773 action = ata_eh_dev_action(dev);
1774
1775 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1776 continue;
1777
1778 spin_lock_irqsave(ap->lock, flags);
1779 dev->flags &= ~ATA_DFLAG_SUSPENDED;
1780 spin_unlock_irqrestore(ap->lock, flags);
1781 }
1782
1783 DPRINTK("EXIT\n");
1784}
1785
1786/**
1787 * ata_eh_resume - handle resume EH action
1788 * @ap: target host port
1789 * @r_failed_dev: result parameter to indicate failing device
1790 *
1791 * Handle resume EH action. Target devices are already reset and
1792 * revalidated. Spinning up is the only operation left.
1793 *
1794 * LOCKING:
1795 * Kernel thread context (may sleep).
1796 *
1797 * RETURNS:
1798 * 0 on success, -errno otherwise
1799 */
1800static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
1801{
1802 struct ata_device *dev;
1803 int i, rc = 0;
1804
1805 DPRINTK("ENTER\n");
1806
1807 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1808 unsigned int action, err_mask;
1809
1810 dev = &ap->device[i];
1811 action = ata_eh_dev_action(dev);
1812
1813 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1814 continue;
1815
1816 ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
1817
1818 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1819 err_mask = ata_do_simple_cmd(dev,
1820 ATA_CMD_IDLEIMMEDIATE);
1821 if (err_mask) {
1822 ata_dev_printk(dev, KERN_ERR, "failed to "
1823 "spin up (err_mask=0x%x)\n",
1824 err_mask);
1825 rc = -EIO;
1826 break;
1827 }
1828 }
1829
1830 ata_eh_done(ap, dev, ATA_EH_RESUME);
1831 }
1832
1833 if (rc)
1834 *r_failed_dev = dev;
1835
1836 DPRINTK("EXIT\n");
1837 return 0;
1838}
1839
1840static int ata_port_nr_enabled(struct ata_port *ap)
1841{
1842 int i, cnt = 0;
1843
1844 for (i = 0; i < ATA_MAX_DEVICES; i++)
1845 if (ata_dev_enabled(&ap->device[i]))
1846 cnt++;
1847 return cnt;
1848}
1849
1850static int ata_port_nr_vacant(struct ata_port *ap)
1851{
1852 int i, cnt = 0;
1853
1854 for (i = 0; i < ATA_MAX_DEVICES; i++)
1855 if (ap->device[i].class == ATA_DEV_UNKNOWN)
1856 cnt++;
1857 return cnt;
1858}
1859
1860static int ata_eh_skip_recovery(struct ata_port *ap)
1861{
1862 struct ata_eh_context *ehc = &ap->eh_context;
1863 int i;
1864
1865 /* skip if all possible devices are suspended */
1866 for (i = 0; i < ata_port_max_devices(ap); i++) {
1867 struct ata_device *dev = &ap->device[i];
1868
1869 if (!(dev->flags & ATA_DFLAG_SUSPENDED))
1870 break;
1871 }
1872
1873 if (i == ata_port_max_devices(ap))
1874 return 1;
1875
1876 /* thaw frozen port, resume link and recover failed devices */
1877 if ((ap->pflags & ATA_PFLAG_FROZEN) ||
1878 (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
1879 return 0;
1880
1881 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1882 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1883 struct ata_device *dev = &ap->device[i];
1884
1885 if (dev->class == ATA_DEV_UNKNOWN &&
1886 ehc->classes[dev->devno] != ATA_DEV_NONE)
1887 return 0;
1888 }
1889
1890 return 1;
1891}
1892
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover the port and hotplug requests are recorded in
 *	eh_context.  This function executes all the operations with
 *	appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			  ata_postreset_fn_t postreset)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev;
	int down_xfermask, i, rc;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

		/* process hotplug request */
		if (dev->flags & ATA_DFLAG_DETACH)
			ata_eh_detach_dev(dev);

		/* schedule probing for disabled devices which were
		 * requested to be probed but haven't been yet
		 */
		if (!ata_dev_enabled(dev) &&
		    ((ehc->i.probe_mask & (1 << dev->devno)) &&
		     !(ehc->did_probe_mask & (1 << dev->devno)))) {
			ata_eh_detach_dev(dev);
			ata_dev_init(dev);
			ehc->did_probe_mask |= (1 << dev->devno);
			ehc->i.action |= ATA_EH_SOFTRESET;
		}
	}

 retry:
	down_xfermask = 0;
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for resume */
	ata_eh_prep_resume(ap);

	/* skip EH if possible. */
	if (ata_eh_skip_recovery(ap))
		ehc->i.action = 0;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ehc->classes[i] = ATA_DEV_UNKNOWN;

	/* reset */
	if (ehc->i.action & ATA_EH_RESET_MASK) {
		ata_eh_freeze_port(ap);

		rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
				  softreset, hardreset, postreset);
		if (rc) {
			ata_port_printk(ap, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}

		ata_eh_thaw_port(ap);
	}

	/* revalidate existing devices and attach new ones */
	rc = ata_eh_revalidate_and_attach(ap, &dev);
	if (rc)
		goto dev_fail;

	/* resume devices */
	rc = ata_eh_resume(ap, &dev);
	if (rc)
		goto dev_fail;

	/* configure transfer mode if the port has been reset */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		rc = ata_set_mode(ap, &dev);
		if (rc) {
			down_xfermask = 1;
			goto dev_fail;
		}
	}

	/* suspend devices */
	rc = ata_eh_suspend(ap, &dev);
	if (rc)
		goto dev_fail;

	goto out;

 dev_fail:
	switch (rc) {
	case -ENODEV:
		/* device missing, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fallthrough - probing also zaps the try counter */
	case -EINVAL:
		ehc->tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fallthrough */
	default:
		ehc->tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
			ehc->tries[dev->devno] = 0;
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_port_offline(ap))
			ata_eh_detach_dev(dev);

		/* probe if requested */
		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
		    !(ehc->did_probe_mask & (1 << dev->devno))) {
			ata_eh_detach_dev(dev);
			ata_dev_init(dev);

			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			ehc->did_probe_mask |= (1 << dev->devno);
			ehc->i.action |= ATA_EH_SOFTRESET;
		}
	} else {
		/* soft didn't work?  be haaaaard */
		if (ehc->i.flags & ATA_EHI_DID_RESET)
			ehc->i.action |= ATA_EH_HARDRESET;
		else
			ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (ata_port_nr_enabled(ap)) {
		ata_port_printk(ap, KERN_WARNING, "failed to recover some "
				"devices, retrying in 5 secs\n");
		ssleep(5);
	} else {
		/* no device left, repeat fast */
		msleep(500);
	}

	goto retry;

 out:
	if (rc) {
		/* recovery failed for good - take every device down */
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			ata_dev_disable(&ap->device[i]);
	}

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
2066
2067/**
2068 * ata_eh_finish - finish up EH
2069 * @ap: host port to finish EH for
2070 *
2071 * Recovery is complete. Clean up EH states and retry or finish
2072 * failed qcs.
2073 *
2074 * LOCKING:
2075 * None.
2076 */
2077static void ata_eh_finish(struct ata_port *ap)
2078{
2079 int tag;
2080
2081 /* retry or finish qcs */
2082 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2083 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2084
2085 if (!(qc->flags & ATA_QCFLAG_FAILED))
2086 continue;
2087
2088 if (qc->err_mask) {
2089 /* FIXME: Once EH migration is complete,
2090 * generate sense data in this function,
2091 * considering both err_mask and tf.
2092 */
2093 if (qc->err_mask & AC_ERR_INVALID)
2094 ata_eh_qc_complete(qc);
2095 else
2096 ata_eh_qc_retry(qc);
2097 } else {
2098 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
2099 ata_eh_qc_complete(qc);
2100 } else {
2101 /* feed zero TF to sense generation */
2102 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
2103 ata_eh_qc_retry(qc);
2104 }
2105 }
2106 }
2107}
2108
/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	/* determine what went wrong and which recovery actions are needed */
	ata_eh_autopsy(ap);
	/* tell the user about it */
	ata_eh_report(ap);
	/* execute reset / revalidate / resume / suspend actions */
	ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
	/* retry or complete the failed qcs */
	ata_eh_finish(ap);
}
2131
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* a pending suspend request implies we aren't suspended yet */
	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else
		/* suspend failed - schedule EH to sort things out */
		ata_port_schedule_eh(ap);

	/* hand the result back to the waiting PM requester, if any */
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}
2181
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	This function also waits up to one second until all devices
 *	hanging off this port request resume EH action.  This is to
 *	prevent invoking EH and thus reset multiple times on resume.
 *
 *	On DPM resume, where some of devices might not be resumed
 *	together, this may delay port resume up to one second, but such
 *	DPM resumes are rare and 1 sec delay isn't too bad.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	unsigned long timeout;
	unsigned long flags;
	int i, rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* spurious? */
	if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
		goto done;

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* give devices time to request EH */
	timeout = jiffies + HZ; /* 1s max */
	while (1) {
		/* look for a device which is still marked suspended but
		 * hasn't requested resume EH action yet
		 */
		for (i = 0; i < ATA_MAX_DEVICES; i++) {
			struct ata_device *dev = &ap->device[i];
			unsigned int action = ata_eh_dev_action(dev);

			if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
			    !(action & ATA_EH_RESUME))
				break;
		}

		/* all devices have requested resume, or we timed out */
		if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
			break;
		msleep(10);
	}

 done:
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	/* hand the result back to the waiting PM requester, if any */
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
new file mode 100644
index 000000000000..d168e3413661
--- /dev/null
+++ b/drivers/ata/libata-scsi.c
@@ -0,0 +1,3322 @@
1/*
2 * libata-scsi.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from
31 * - http://www.t10.org/
32 * - http://www.t13.org/
33 *
34 */
35
36#include <linux/kernel.h>
37#include <linux/blkdev.h>
38#include <linux/spinlock.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_eh.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_tcq.h>
45#include <scsi/scsi_transport.h>
46#include <linux/libata.h>
47#include <linux/hdreg.h>
48#include <asm/uaccess.h>
49
50#include "libata.h"
51
52#define SECTOR_SIZE 512
53
54typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
55
56static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
57 const struct scsi_device *scsidev);
58static struct ata_device * ata_scsi_find_dev(struct ata_port *ap,
59 const struct scsi_device *scsidev);
60static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
61 unsigned int id, unsigned int lun);
62
63
64#define RW_RECOVERY_MPAGE 0x1
65#define RW_RECOVERY_MPAGE_LEN 12
66#define CACHE_MPAGE 0x8
67#define CACHE_MPAGE_LEN 20
68#define CONTROL_MPAGE 0xa
69#define CONTROL_MPAGE_LEN 12
70#define ALL_MPAGES 0x3f
71#define ALL_SUB_MPAGES 0xff
72
73
74static const u8 def_rw_recovery_mpage[] = {
75 RW_RECOVERY_MPAGE,
76 RW_RECOVERY_MPAGE_LEN - 2,
77 (1 << 7) | /* AWRE, sat-r06 say it shall be 0 */
78 (1 << 6), /* ARRE (auto read reallocation) */
79 0, /* read retry count */
80 0, 0, 0, 0,
81 0, /* write retry count */
82 0, 0, 0
83};
84
/* Default caching mode page (0x8) returned by MODE SENSE emulation. */
static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
        CACHE_MPAGE,
        CACHE_MPAGE_LEN - 2,
        0,              /* contains WCE, needs to be 0 for logic */
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        0,              /* contains DRA, needs to be 0 for logic */
        0, 0, 0, 0, 0, 0, 0
};
93
/* Default control mode page (0xa) returned by MODE SENSE emulation. */
static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
        CONTROL_MPAGE,
        CONTROL_MPAGE_LEN - 2,
        2,      /* DSENSE=0, GLTSD=1 */
        0,      /* [QAM+QERR may be 1, see 05-359r1] */
        0, 0, 0, 0, 0xff, 0xff,
        0, 30   /* extended self test time, see 05-359r1 */
};
102
/*
 * libata transport template.  libata doesn't do real transport stuff.
 * It just needs the eh_timed_out hook; error recovery and user-initiated
 * scanning are routed into libata's own handlers below.
 */
struct scsi_transport_template ata_scsi_transport_template = {
        .eh_strategy_handler    = ata_scsi_error,
        .eh_timed_out           = ata_scsi_timed_out,
        .user_scan              = ata_scsi_user_scan,
};
112
113
114static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
115 void (*done)(struct scsi_cmnd *))
116{
117 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
118 /* "Invalid field in cbd" */
119 done(cmd);
120}
121
122/**
123 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
124 * @sdev: SCSI device for which BIOS geometry is to be determined
125 * @bdev: block device associated with @sdev
126 * @capacity: capacity of SCSI device
127 * @geom: location to which geometry will be output
128 *
129 * Generic bios head/sector/cylinder calculator
130 * used by sd. Most BIOSes nowadays expect a XXX/255/16 (CHS)
131 * mapping. Some situations may arise where the disk is not
132 * bootable if this is not used.
133 *
134 * LOCKING:
135 * Defined by the SCSI layer. We don't really care.
136 *
137 * RETURNS:
138 * Zero.
139 */
140int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
141 sector_t capacity, int geom[])
142{
143 geom[0] = 255;
144 geom[1] = 63;
145 sector_div(capacity, 255*63);
146 geom[2] = capacity;
147
148 return 0;
149}
150
151/**
152 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
153 * @scsidev: Device to which we are issuing command
154 * @arg: User provided data for issuing command
155 *
156 * LOCKING:
157 * Defined by the SCSI layer. We don't really care.
158 *
159 * RETURNS:
160 * Zero on success, negative errno on error.
161 */
162
163int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
164{
165 int rc = 0;
166 u8 scsi_cmd[MAX_COMMAND_SIZE];
167 u8 args[4], *argbuf = NULL;
168 int argsize = 0;
169 struct scsi_sense_hdr sshdr;
170 enum dma_data_direction data_dir;
171
172 if (arg == NULL)
173 return -EINVAL;
174
175 if (copy_from_user(args, arg, sizeof(args)))
176 return -EFAULT;
177
178 memset(scsi_cmd, 0, sizeof(scsi_cmd));
179
180 if (args[3]) {
181 argsize = SECTOR_SIZE * args[3];
182 argbuf = kmalloc(argsize, GFP_KERNEL);
183 if (argbuf == NULL) {
184 rc = -ENOMEM;
185 goto error;
186 }
187
188 scsi_cmd[1] = (4 << 1); /* PIO Data-in */
189 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
190 block count in sector count field */
191 data_dir = DMA_FROM_DEVICE;
192 } else {
193 scsi_cmd[1] = (3 << 1); /* Non-data */
194 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
195 data_dir = DMA_NONE;
196 }
197
198 scsi_cmd[0] = ATA_16;
199
200 scsi_cmd[4] = args[2];
201 if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */
202 scsi_cmd[6] = args[3];
203 scsi_cmd[8] = args[1];
204 scsi_cmd[10] = 0x4f;
205 scsi_cmd[12] = 0xc2;
206 } else {
207 scsi_cmd[6] = args[1];
208 }
209 scsi_cmd[14] = args[0];
210
211 /* Good values for timeout and retries? Values below
212 from scsi_ioctl_send_command() for default case... */
213 if (scsi_execute_req(scsidev, scsi_cmd, data_dir, argbuf, argsize,
214 &sshdr, (10*HZ), 5)) {
215 rc = -EIO;
216 goto error;
217 }
218
219 /* Need code to retrieve data from check condition? */
220
221 if ((argbuf)
222 && copy_to_user(arg + sizeof(args), argbuf, argsize))
223 rc = -EFAULT;
224error:
225 kfree(argbuf);
226 return rc;
227}
228
229/**
230 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
231 * @scsidev: Device to which we are issuing command
232 * @arg: User provided data for issuing command
233 *
234 * LOCKING:
235 * Defined by the SCSI layer. We don't really care.
236 *
237 * RETURNS:
238 * Zero on success, negative errno on error.
239 */
240int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
241{
242 int rc = 0;
243 u8 scsi_cmd[MAX_COMMAND_SIZE];
244 u8 args[7];
245 struct scsi_sense_hdr sshdr;
246
247 if (arg == NULL)
248 return -EINVAL;
249
250 if (copy_from_user(args, arg, sizeof(args)))
251 return -EFAULT;
252
253 memset(scsi_cmd, 0, sizeof(scsi_cmd));
254 scsi_cmd[0] = ATA_16;
255 scsi_cmd[1] = (3 << 1); /* Non-data */
256 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
257 scsi_cmd[4] = args[1];
258 scsi_cmd[6] = args[2];
259 scsi_cmd[8] = args[3];
260 scsi_cmd[10] = args[4];
261 scsi_cmd[12] = args[5];
262 scsi_cmd[14] = args[0];
263
264 /* Good values for timeout and retries? Values below
265 from scsi_ioctl_send_command() for default case... */
266 if (scsi_execute_req(scsidev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
267 (10*HZ), 5))
268 rc = -EIO;
269
270 /* Need code to retrieve data from check condition? */
271 return rc;
272}
273
274int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
275{
276 int val = -EINVAL, rc = -EINVAL;
277
278 switch (cmd) {
279 case ATA_IOC_GET_IO32:
280 val = 0;
281 if (copy_to_user(arg, &val, 1))
282 return -EFAULT;
283 return 0;
284
285 case ATA_IOC_SET_IO32:
286 val = (unsigned long) arg;
287 if (val != 0)
288 return -EINVAL;
289 return 0;
290
291 case HDIO_DRIVE_CMD:
292 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
293 return -EACCES;
294 return ata_cmd_ioctl(scsidev, arg);
295
296 case HDIO_DRIVE_TASK:
297 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
298 return -EACCES;
299 return ata_task_ioctl(scsidev, arg);
300
301 default:
302 rc = -ENOTTY;
303 break;
304 }
305
306 return rc;
307}
308
309/**
310 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
311 * @dev: ATA device to which the new command is attached
312 * @cmd: SCSI command that originated this ATA command
313 * @done: SCSI command completion function
314 *
315 * Obtain a reference to an unused ata_queued_cmd structure,
316 * which is the basic libata structure representing a single
317 * ATA command sent to the hardware.
318 *
319 * If a command was available, fill in the SCSI-specific
320 * portions of the structure with information on the
321 * current command.
322 *
323 * LOCKING:
324 * spin_lock_irqsave(host_set lock)
325 *
326 * RETURNS:
327 * Command allocated, or %NULL if none available.
328 */
329struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
330 struct scsi_cmnd *cmd,
331 void (*done)(struct scsi_cmnd *))
332{
333 struct ata_queued_cmd *qc;
334
335 qc = ata_qc_new_init(dev);
336 if (qc) {
337 qc->scsicmd = cmd;
338 qc->scsidone = done;
339
340 if (cmd->use_sg) {
341 qc->__sg = (struct scatterlist *) cmd->request_buffer;
342 qc->n_elem = cmd->use_sg;
343 } else {
344 qc->__sg = &qc->sgent;
345 qc->n_elem = 1;
346 }
347 } else {
348 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
349 done(cmd);
350 }
351
352 return qc;
353}
354
355/**
356 * ata_dump_status - user friendly display of error info
357 * @id: id of the port in question
358 * @tf: ptr to filled out taskfile
359 *
360 * Decode and dump the ATA error/status registers for the user so
361 * that they have some idea what really happened at the non
362 * make-believe layer.
363 *
364 * LOCKING:
365 * inherited from caller
366 */
367void ata_dump_status(unsigned id, struct ata_taskfile *tf)
368{
369 u8 stat = tf->command, err = tf->feature;
370
371 printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
372 if (stat & ATA_BUSY) {
373 printk("Busy }\n"); /* Data is not valid in this case */
374 } else {
375 if (stat & 0x40) printk("DriveReady ");
376 if (stat & 0x20) printk("DeviceFault ");
377 if (stat & 0x10) printk("SeekComplete ");
378 if (stat & 0x08) printk("DataRequest ");
379 if (stat & 0x04) printk("CorrectedError ");
380 if (stat & 0x02) printk("Index ");
381 if (stat & 0x01) printk("Error ");
382 printk("}\n");
383
384 if (err) {
385 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
386 if (err & 0x04) printk("DriveStatusError ");
387 if (err & 0x80) {
388 if (err & 0x04) printk("BadCRC ");
389 else printk("Sector ");
390 }
391 if (err & 0x40) printk("UncorrectableError ");
392 if (err & 0x10) printk("SectorIdNotFound ");
393 if (err & 0x02) printk("TrackZeroNotFound ");
394 if (err & 0x01) printk("AddrMarkNotFound ");
395 printk("}\n");
396 }
397 }
398}
399
/**
 *      ata_scsi_device_suspend - suspend ATA device associated with sdev
 *      @sdev: the SCSI device to suspend
 *      @mesg: target power management message
 *
 *      Request suspend EH action on the ATA device associated with
 *      @sdev and wait for the operation to complete.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      0 on success, -errno otherwise.
 */
int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t mesg)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
        unsigned long flags;
        unsigned int action;
        int rc = 0;

        if (!dev)
                goto out;

        spin_lock_irqsave(ap->lock, flags);

        /* wait for the previous resume to complete; the lock is dropped
         * around ata_port_wait_eh() and the flag re-checked after
         * re-acquiring it
         */
        while (dev->flags & ATA_DFLAG_SUSPENDED) {
                spin_unlock_irqrestore(ap->lock, flags);
                ata_port_wait_eh(ap);
                spin_lock_irqsave(ap->lock, flags);
        }

        /* if @sdev is already detached, nothing to do */
        if (sdev->sdev_state == SDEV_OFFLINE ||
            sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
                goto out_unlock;

        /* request suspend; also freeze the port unless this is a plain
         * PM_EVENT_SUSPEND
         */
        action = ATA_EH_SUSPEND;
        if (mesg.event != PM_EVENT_SUSPEND)
                action |= ATA_EH_PM_FREEZE;
        ap->eh_info.dev_action[dev->devno] |= action;
        ap->eh_info.flags |= ATA_EHI_QUIET;
        ata_port_schedule_eh(ap);

        spin_unlock_irqrestore(ap->lock, flags);

        /* wait for EH to do the job */
        ata_port_wait_eh(ap);

        spin_lock_irqsave(ap->lock, flags);

        /* If @sdev is still attached but the associated ATA device
         * isn't suspended, the operation failed.
         */
        if (sdev->sdev_state != SDEV_OFFLINE &&
            sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL &&
            !(dev->flags & ATA_DFLAG_SUSPENDED))
                rc = -EIO;

 out_unlock:
        spin_unlock_irqrestore(ap->lock, flags);
 out:
        /* record the new power state only on success */
        if (rc == 0)
                sdev->sdev_gendev.power.power_state = mesg;
        return rc;
}
469
/**
 *      ata_scsi_device_resume - resume ATA device associated with sdev
 *      @sdev: the SCSI device to resume
 *
 *      Request resume EH action on the ATA device associated with
 *      @sdev and return immediately.  This enables parallel
 *      wakeup/spinup of devices.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      0.
 */
int ata_scsi_device_resume(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
        struct ata_eh_info *ehi = &ap->eh_info;
        unsigned long flags;
        unsigned int action;

        if (!dev)
                goto out;

        spin_lock_irqsave(ap->lock, flags);

        /* if @sdev is already detached, nothing to do */
        if (sdev->sdev_state == SDEV_OFFLINE ||
            sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
                goto out_unlock;

        /* request resume; a device coming back from PM_EVENT_SUSPEND is
         * treated like a hotplug event, otherwise freeze + softreset
         */
        action = ATA_EH_RESUME;
        if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND)
                __ata_ehi_hotplugged(ehi);
        else
                action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET;
        ehi->dev_action[dev->devno] |= action;

        /* We don't want autopsy and verbose EH messages.  Disable
         * those if we're the only device on this link.
         */
        if (ata_port_max_devices(ap) == 1)
                ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

        ata_port_schedule_eh(ap);

 out_unlock:
        spin_unlock_irqrestore(ap->lock, flags);
 out:
        /* unconditionally mark the SCSI device powered on */
        sdev->sdev_gendev.power.power_state = PMSG_ON;
        return 0;
}
524
/**
 *      ata_to_sense_error - convert ATA error to SCSI error
 *      @id: ATA device number
 *      @drv_stat: value contained in ATA status register
 *      @drv_err: value contained in ATA error register
 *      @sk: the sense key we'll fill out
 *      @asc: the additional sense code we'll fill out
 *      @ascq: the additional sense code qualifier we'll fill out
 *      @verbose: be verbose
 *
 *      Converts an ATA error into a SCSI error.  Fill out pointers to
 *      SK, ASC, and ASCQ bytes for later use in fixed or descriptor
 *      format sense blocks.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host_set lock)
 */
void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
                        u8 *ascq, int verbose)
{
        int i;

        /* Based on the 3ware driver translation table.
         *
         * NOTE: entries are matched first-fit -- the first entry whose
         * mask bits are all set in drv_err wins -- so multi-bit (more
         * specific) masks must stay ahead of single-bit ones.
         */
        static const unsigned char sense_table[][4] = {
                /* BBD|ECC|ID|MAR */
                {0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
                /* BBD|ECC|ID */
                {0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
                /* ECC|MC|MARK */
                {0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error
                /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */
                {0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error
                /* MC|ID|ABRT|TRK0|MARK */
                {0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready
                /* MCR|MARK */
                {0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready
                /* Bad address mark */
                {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field
                /* TRK0 */
                {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error
                /* Abort & !ICRC */
                {0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command
                /* Media change request */
                {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline
                /* SRV */
                {0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found
                /* Media change */
                {0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline
                /* ECC */
                {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error
                /* BBD - block marked bad */
                {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error
                {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
        };
        /* Fallback table keyed on status-register bits; scanned only when
         * no error-register entry matched.
         */
        static const unsigned char stat_table[][4] = {
                /* Must be first because BUSY means no other bits valid */
                {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now
                {0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault
                {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now
                {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered
                {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
        };

        /*
         * Is this an error we can process/parse
         */
        if (drv_stat & ATA_BUSY) {
                drv_err = 0; /* Ignore the err bits, they're invalid */
        }

        if (drv_err) {
                /* Look for drv_err */
                for (i = 0; sense_table[i][0] != 0xFF; i++) {
                        /* Look for best matches first */
                        if ((sense_table[i][0] & drv_err) ==
                            sense_table[i][0]) {
                                *sk = sense_table[i][1];
                                *asc = sense_table[i][2];
                                *ascq = sense_table[i][3];
                                goto translate_done;
                        }
                }
                /* No immediate match */
                if (verbose)
                        printk(KERN_WARNING "ata%u: no sense translation for "
                               "error 0x%02x\n", id, drv_err);
        }

        /* Fall back to interpreting status bits */
        for (i = 0; stat_table[i][0] != 0xFF; i++) {
                if (stat_table[i][0] & drv_stat) {
                        *sk = stat_table[i][1];
                        *asc = stat_table[i][2];
                        *ascq = stat_table[i][3];
                        goto translate_done;
                }
        }
        /* No error?  Undecoded? */
        if (verbose)
                printk(KERN_WARNING "ata%u: no sense translation for "
                       "status: 0x%02x\n", id, drv_stat);

        /* We need a sensible error return here, which is tricky, and one
           that won't cause people to do things like return a disk wrongly */
        *sk = ABORTED_COMMAND;
        *asc = 0x00;
        *ascq = 0x00;

 translate_done:
        if (verbose)
                printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
                       "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
                       id, drv_stat, drv_err, *sk, *asc, *ascq);
        return;
}
640
/**
 *      ata_gen_ata_desc_sense - Generate check condition sense block.
 *      @qc: Command that completed.
 *
 *      This function is specific to the ATA descriptor format sense
 *      block specified for the ATA pass through commands.  Regardless
 *      of whether the command errored or not, return a sense
 *      block.  Copy all controller registers into the sense
 *      block.  Clear sense key, ASC & ASCQ if there is no error.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host_set lock)
 */
void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *cmd = qc->scsicmd;
        struct ata_taskfile *tf = &qc->result_tf;
        unsigned char *sb = cmd->sense_buffer;
        unsigned char *desc = sb + 8;   /* descriptor 0 follows the 8-byte header */
        /* be verbose only for old-style (non-EH) drivers */
        int verbose = qc->ap->ops->error_handler == NULL;

        memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

        cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

        /*
         * Use ata_to_sense_error() to map status register bits
         * onto sense key, asc & ascq.
         */
        if (qc->err_mask ||
            tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
                ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
                                   &sb[1], &sb[2], &sb[3], verbose);
                sb[1] &= 0x0f;  /* sense key occupies only the low nibble */
        }

        /*
         * Sense data is current and format is descriptor.
         */
        sb[0] = 0x72;

        desc[0] = 0x09;         /* descriptor type for ATA register data */

        /*
         * Set length of additional sense data.
         * Since we only populate descriptor 0, the total
         * length is the same (fixed) length as descriptor 0.
         */
        desc[1] = sb[7] = 14;

        /*
         * Copy registers into sense buffer.
         */
        desc[2] = 0x00;
        desc[3] = tf->feature;  /* == error reg */
        desc[5] = tf->nsect;
        desc[7] = tf->lbal;
        desc[9] = tf->lbam;
        desc[11] = tf->lbah;
        desc[12] = tf->device;
        desc[13] = tf->command; /* == status reg */

        /*
         * Fill in Extend bit, and the high order bytes
         * if applicable.
         */
        if (tf->flags & ATA_TFLAG_LBA48) {
                desc[2] |= 0x01;
                desc[4] = tf->hob_nsect;
                desc[6] = tf->hob_lbal;
                desc[8] = tf->hob_lbam;
                desc[10] = tf->hob_lbah;
        }
}
715
/**
 *      ata_gen_fixed_sense - generate a SCSI fixed sense block
 *      @qc: Command that we are erroring out
 *
 *      Leverage ata_to_sense_error() to give us the codes.  Fit our
 *      LBA in here if there's room.
 *
 *      LOCKING:
 *      inherited from caller
 */
void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *cmd = qc->scsicmd;
        struct ata_taskfile *tf = &qc->result_tf;
        unsigned char *sb = cmd->sense_buffer;
        /* be verbose only for old-style (non-EH) drivers */
        int verbose = qc->ap->ops->error_handler == NULL;

        memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

        cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

        /*
         * Use ata_to_sense_error() to map status register bits
         * onto sense key, asc & ascq.
         */
        if (qc->err_mask ||
            tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
                ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
                                   &sb[2], &sb[12], &sb[13], verbose);
                sb[2] &= 0x0f;  /* sense key occupies only the low nibble */
        }

        sb[0] = 0x70;   /* current error, fixed format */
        sb[7] = 0x0a;   /* additional sense length */

        if (tf->flags & ATA_TFLAG_LBA48) {
                /* TODO: find solution for LBA48 descriptors */
        }

        else if (tf->flags & ATA_TFLAG_LBA) {
                /* A small (28b) LBA will fit in the 32b info field */
                sb[0] |= 0x80;          /* set valid bit */
                sb[3] = tf->device & 0x0f;
                sb[4] = tf->lbah;
                sb[5] = tf->lbam;
                sb[6] = tf->lbal;
        }

        else {
                /* TODO: C/H/S */
        }
}
768
/* Apply settings common to every ATA-backed SCSI device. */
static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
        sdev->use_10_for_rw = 1;        /* use READ/WRITE(10), not (6) */
        sdev->use_10_for_ms = 1;        /* use MODE SENSE(10), not (6) */
}
774
775static void ata_scsi_dev_config(struct scsi_device *sdev,
776 struct ata_device *dev)
777{
778 unsigned int max_sectors;
779
780 /* TODO: 2048 is an arbitrary number, not the
781 * hardware maximum. This should be increased to
782 * 65534 when Jens Axboe's patch for dynamically
783 * determining max_sectors is merged.
784 */
785 max_sectors = ATA_MAX_SECTORS;
786 if (dev->flags & ATA_DFLAG_LBA48)
787 max_sectors = ATA_MAX_SECTORS_LBA48;
788 if (dev->max_sectors)
789 max_sectors = dev->max_sectors;
790
791 blk_queue_max_sectors(sdev->request_queue, max_sectors);
792
793 /*
794 * SATA DMA transfers must be multiples of 4 byte, so
795 * we need to pad ATAPI transfers using an extra sg.
796 * Decrement max hw segments accordingly.
797 */
798 if (dev->class == ATA_DEV_ATAPI) {
799 request_queue_t *q = sdev->request_queue;
800 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
801 }
802
803 if (dev->flags & ATA_DFLAG_NCQ) {
804 int depth;
805
806 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
807 depth = min(ATA_MAX_QUEUE - 1, depth);
808 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
809 }
810}
811
812/**
813 * ata_scsi_slave_config - Set SCSI device attributes
814 * @sdev: SCSI device to examine
815 *
816 * This is called before we actually start reading
817 * and writing to the device, to configure certain
818 * SCSI mid-layer behaviors.
819 *
820 * LOCKING:
821 * Defined by SCSI layer. We don't really care.
822 */
823
824int ata_scsi_slave_config(struct scsi_device *sdev)
825{
826 struct ata_port *ap = ata_shost_to_port(sdev->host);
827 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
828
829 ata_scsi_sdev_config(sdev);
830
831 blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
832
833 if (dev)
834 ata_scsi_dev_config(sdev, dev);
835
836 return 0; /* scsi layer doesn't check return value, sigh */
837}
838
/**
 *      ata_scsi_slave_destroy - SCSI device is about to be destroyed
 *      @sdev: SCSI device to be destroyed
 *
 *      @sdev is about to be destroyed for hot/warm unplugging.  If
 *      this unplugging was initiated by libata as indicated by NULL
 *      dev->sdev, this function doesn't have to do anything.
 *      Otherwise, SCSI layer initiated warm-unplug is in progress.
 *      Clear dev->sdev, schedule the device for ATA detach and invoke
 *      EH.
 *
 *      LOCKING:
 *      Defined by SCSI layer.  We don't really care.
 */
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        unsigned long flags;
        struct ata_device *dev;

        /* old-style drivers without ->error_handler: nothing to schedule */
        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);
        dev = __ata_scsi_find_dev(ap, sdev);
        if (dev && dev->sdev) {
                /* SCSI device already in CANCEL state, no need to offline it */
                dev->sdev = NULL;
                dev->flags |= ATA_DFLAG_DETACH;
                ata_port_schedule_eh(ap);
        }
        spin_unlock_irqrestore(ap->lock, flags);
}
872
873/**
874 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
875 * @sdev: SCSI device to configure queue depth for
876 * @queue_depth: new queue depth
877 *
878 * This is libata standard hostt->change_queue_depth callback.
879 * SCSI will call into this callback when user tries to set queue
880 * depth via sysfs.
881 *
882 * LOCKING:
883 * SCSI layer (we don't care)
884 *
885 * RETURNS:
886 * Newly configured queue depth.
887 */
888int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
889{
890 struct ata_port *ap = ata_shost_to_port(sdev->host);
891 struct ata_device *dev;
892 int max_depth;
893
894 if (queue_depth < 1)
895 return sdev->queue_depth;
896
897 dev = ata_scsi_find_dev(ap, sdev);
898 if (!dev || !ata_dev_enabled(dev))
899 return sdev->queue_depth;
900
901 max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
902 max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
903 if (queue_depth > max_depth)
904 queue_depth = max_depth;
905
906 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
907 return queue_depth;
908}
909
910/**
911 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
912 * @qc: Storage for translated ATA taskfile
913 * @scsicmd: SCSI command to translate
914 *
915 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
916 * (to start). Perhaps these commands should be preceded by
917 * CHECK POWER MODE to see what power mode the device is already in.
918 * [See SAT revision 5 at www.t10.org]
919 *
920 * LOCKING:
921 * spin_lock_irqsave(host_set lock)
922 *
923 * RETURNS:
924 * Zero on success, non-zero on error.
925 */
926
927static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
928 const u8 *scsicmd)
929{
930 struct ata_taskfile *tf = &qc->tf;
931
932 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
933 tf->protocol = ATA_PROT_NODATA;
934 if (scsicmd[1] & 0x1) {
935 ; /* ignore IMMED bit, violates sat-r05 */
936 }
937 if (scsicmd[4] & 0x2)
938 goto invalid_fld; /* LOEJ bit set not supported */
939 if (((scsicmd[4] >> 4) & 0xf) != 0)
940 goto invalid_fld; /* power conditions not supported */
941 if (scsicmd[4] & 0x1) {
942 tf->nsect = 1; /* 1 sector, lba=0 */
943
944 if (qc->dev->flags & ATA_DFLAG_LBA) {
945 tf->flags |= ATA_TFLAG_LBA;
946
947 tf->lbah = 0x0;
948 tf->lbam = 0x0;
949 tf->lbal = 0x0;
950 tf->device |= ATA_LBA;
951 } else {
952 /* CHS */
953 tf->lbal = 0x1; /* sect */
954 tf->lbam = 0x0; /* cyl low */
955 tf->lbah = 0x0; /* cyl high */
956 }
957
958 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
959 } else {
960 tf->nsect = 0; /* time period value (0 implies now) */
961 tf->command = ATA_CMD_STANDBY;
962 /* Consider: ATA STANDBY IMMEDIATE command */
963 }
964 /*
965 * Standby and Idle condition timers could be implemented but that
966 * would require libata to implement the Power condition mode page
967 * and allow the user to change it. Changing mode pages requires
968 * MODE SELECT to be implemented.
969 */
970
971 return 0;
972
973invalid_fld:
974 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
975 /* "Invalid field in cbd" */
976 return 1;
977}
978
979
980/**
981 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
982 * @qc: Storage for translated ATA taskfile
983 * @scsicmd: SCSI command to translate (ignored)
984 *
985 * Sets up an ATA taskfile to issue FLUSH CACHE or
986 * FLUSH CACHE EXT.
987 *
988 * LOCKING:
989 * spin_lock_irqsave(host_set lock)
990 *
991 * RETURNS:
992 * Zero on success, non-zero on error.
993 */
994
995static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
996{
997 struct ata_taskfile *tf = &qc->tf;
998
999 tf->flags |= ATA_TFLAG_DEVICE;
1000 tf->protocol = ATA_PROT_NODATA;
1001
1002 if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
1003 (ata_id_has_flush_ext(qc->dev->id)))
1004 tf->command = ATA_CMD_FLUSH_EXT;
1005 else
1006 tf->command = ATA_CMD_FLUSH;
1007
1008 return 0;
1009}
1010
1011/**
1012 * scsi_6_lba_len - Get LBA and transfer length
1013 * @scsicmd: SCSI command to translate
1014 *
1015 * Calculate LBA and transfer length for 6-byte commands.
1016 *
1017 * RETURNS:
1018 * @plba: the LBA
1019 * @plen: the transfer length
1020 */
1021
1022static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1023{
1024 u64 lba = 0;
1025 u32 len = 0;
1026
1027 VPRINTK("six-byte command\n");
1028
1029 lba |= ((u64)scsicmd[2]) << 8;
1030 lba |= ((u64)scsicmd[3]);
1031
1032 len |= ((u32)scsicmd[4]);
1033
1034 *plba = lba;
1035 *plen = len;
1036}
1037
1038/**
1039 * scsi_10_lba_len - Get LBA and transfer length
1040 * @scsicmd: SCSI command to translate
1041 *
1042 * Calculate LBA and transfer length for 10-byte commands.
1043 *
1044 * RETURNS:
1045 * @plba: the LBA
1046 * @plen: the transfer length
1047 */
1048
1049static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1050{
1051 u64 lba = 0;
1052 u32 len = 0;
1053
1054 VPRINTK("ten-byte command\n");
1055
1056 lba |= ((u64)scsicmd[2]) << 24;
1057 lba |= ((u64)scsicmd[3]) << 16;
1058 lba |= ((u64)scsicmd[4]) << 8;
1059 lba |= ((u64)scsicmd[5]);
1060
1061 len |= ((u32)scsicmd[7]) << 8;
1062 len |= ((u32)scsicmd[8]);
1063
1064 *plba = lba;
1065 *plen = len;
1066}
1067
1068/**
1069 * scsi_16_lba_len - Get LBA and transfer length
1070 * @scsicmd: SCSI command to translate
1071 *
1072 * Calculate LBA and transfer length for 16-byte commands.
1073 *
1074 * RETURNS:
1075 * @plba: the LBA
1076 * @plen: the transfer length
1077 */
1078
1079static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1080{
1081 u64 lba = 0;
1082 u32 len = 0;
1083
1084 VPRINTK("sixteen-byte command\n");
1085
1086 lba |= ((u64)scsicmd[2]) << 56;
1087 lba |= ((u64)scsicmd[3]) << 48;
1088 lba |= ((u64)scsicmd[4]) << 40;
1089 lba |= ((u64)scsicmd[5]) << 32;
1090 lba |= ((u64)scsicmd[6]) << 24;
1091 lba |= ((u64)scsicmd[7]) << 16;
1092 lba |= ((u64)scsicmd[8]) << 8;
1093 lba |= ((u64)scsicmd[9]);
1094
1095 len |= ((u32)scsicmd[10]) << 24;
1096 len |= ((u32)scsicmd[11]) << 16;
1097 len |= ((u32)scsicmd[12]) << 8;
1098 len |= ((u32)scsicmd[13]);
1099
1100 *plba = lba;
1101 *plen = len;
1102}
1103
1104/**
1105 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
1106 * @qc: Storage for translated ATA taskfile
1107 * @scsicmd: SCSI command to translate
1108 *
1109 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
1110 *
1111 * LOCKING:
1112 * spin_lock_irqsave(host_set lock)
1113 *
1114 * RETURNS:
1115 * Zero on success, non-zero on error.
1116 */
1117
1118static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1119{
1120 struct ata_taskfile *tf = &qc->tf;
1121 struct ata_device *dev = qc->dev;
1122 u64 dev_sectors = qc->dev->n_sectors;
1123 u64 block;
1124 u32 n_block;
1125
1126 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1127 tf->protocol = ATA_PROT_NODATA;
1128
1129 if (scsicmd[0] == VERIFY)
1130 scsi_10_lba_len(scsicmd, &block, &n_block);
1131 else if (scsicmd[0] == VERIFY_16)
1132 scsi_16_lba_len(scsicmd, &block, &n_block);
1133 else
1134 goto invalid_fld;
1135
1136 if (!n_block)
1137 goto nothing_to_do;
1138 if (block >= dev_sectors)
1139 goto out_of_range;
1140 if ((block + n_block) > dev_sectors)
1141 goto out_of_range;
1142
1143 if (dev->flags & ATA_DFLAG_LBA) {
1144 tf->flags |= ATA_TFLAG_LBA;
1145
1146 if (lba_28_ok(block, n_block)) {
1147 /* use LBA28 */
1148 tf->command = ATA_CMD_VERIFY;
1149 tf->device |= (block >> 24) & 0xf;
1150 } else if (lba_48_ok(block, n_block)) {
1151 if (!(dev->flags & ATA_DFLAG_LBA48))
1152 goto out_of_range;
1153
1154 /* use LBA48 */
1155 tf->flags |= ATA_TFLAG_LBA48;
1156 tf->command = ATA_CMD_VERIFY_EXT;
1157
1158 tf->hob_nsect = (n_block >> 8) & 0xff;
1159
1160 tf->hob_lbah = (block >> 40) & 0xff;
1161 tf->hob_lbam = (block >> 32) & 0xff;
1162 tf->hob_lbal = (block >> 24) & 0xff;
1163 } else
1164 /* request too large even for LBA48 */
1165 goto out_of_range;
1166
1167 tf->nsect = n_block & 0xff;
1168
1169 tf->lbah = (block >> 16) & 0xff;
1170 tf->lbam = (block >> 8) & 0xff;
1171 tf->lbal = block & 0xff;
1172
1173 tf->device |= ATA_LBA;
1174 } else {
1175 /* CHS */
1176 u32 sect, head, cyl, track;
1177
1178 if (!lba_28_ok(block, n_block))
1179 goto out_of_range;
1180
1181 /* Convert LBA to CHS */
1182 track = (u32)block / dev->sectors;
1183 cyl = track / dev->heads;
1184 head = track % dev->heads;
1185 sect = (u32)block % dev->sectors + 1;
1186
1187 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1188 (u32)block, track, cyl, head, sect);
1189
1190 /* Check whether the converted CHS can fit.
1191 Cylinder: 0-65535
1192 Head: 0-15
1193 Sector: 1-255*/
1194 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1195 goto out_of_range;
1196
1197 tf->command = ATA_CMD_VERIFY;
1198 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1199 tf->lbal = sect;
1200 tf->lbam = cyl;
1201 tf->lbah = cyl >> 8;
1202 tf->device |= head;
1203 }
1204
1205 return 0;
1206
1207invalid_fld:
1208 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1209 /* "Invalid field in cbd" */
1210 return 1;
1211
1212out_of_range:
1213 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1214 /* "Logical Block Address out of range" */
1215 return 1;
1216
1217nothing_to_do:
1218 qc->scsicmd->result = SAM_STAT_GOOD;
1219 return 1;
1220}
1221
1222/**
1223 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1224 * @qc: Storage for translated ATA taskfile
1225 * @scsicmd: SCSI command to translate
1226 *
1227 * Converts any of six SCSI read/write commands into the
1228 * ATA counterpart, including starting sector (LBA),
1229 * sector count, and taking into account the device's LBA48
1230 * support.
1231 *
1232 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
1233 * %WRITE_16 are currently supported.
1234 *
1235 * LOCKING:
1236 * spin_lock_irqsave(host_set lock)
1237 *
1238 * RETURNS:
1239 * Zero on success, non-zero on error.
1240 */
1241
1242static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1243{
1244 struct ata_taskfile *tf = &qc->tf;
1245 struct ata_device *dev = qc->dev;
1246 u64 block;
1247 u32 n_block;
1248
1249 qc->flags |= ATA_QCFLAG_IO;
1250 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1251
1252 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
1253 scsicmd[0] == WRITE_16)
1254 tf->flags |= ATA_TFLAG_WRITE;
1255
1256 /* Calculate the SCSI LBA, transfer length and FUA. */
1257 switch (scsicmd[0]) {
1258 case READ_10:
1259 case WRITE_10:
1260 scsi_10_lba_len(scsicmd, &block, &n_block);
1261 if (unlikely(scsicmd[1] & (1 << 3)))
1262 tf->flags |= ATA_TFLAG_FUA;
1263 break;
1264 case READ_6:
1265 case WRITE_6:
1266 scsi_6_lba_len(scsicmd, &block, &n_block);
1267
1268 /* for 6-byte r/w commands, transfer length 0
1269 * means 256 blocks of data, not 0 block.
1270 */
1271 if (!n_block)
1272 n_block = 256;
1273 break;
1274 case READ_16:
1275 case WRITE_16:
1276 scsi_16_lba_len(scsicmd, &block, &n_block);
1277 if (unlikely(scsicmd[1] & (1 << 3)))
1278 tf->flags |= ATA_TFLAG_FUA;
1279 break;
1280 default:
1281 DPRINTK("no-byte command\n");
1282 goto invalid_fld;
1283 }
1284
1285 /* Check and compose ATA command */
1286 if (!n_block)
1287 /* For 10-byte and 16-byte SCSI R/W commands, transfer
1288 * length 0 means transfer 0 block of data.
1289 * However, for ATA R/W commands, sector count 0 means
1290 * 256 or 65536 sectors, not 0 sectors as in SCSI.
1291 *
1292 * WARNING: one or two older ATA drives treat 0 as 0...
1293 */
1294 goto nothing_to_do;
1295
1296 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
1297 /* yay, NCQ */
1298 if (!lba_48_ok(block, n_block))
1299 goto out_of_range;
1300
1301 tf->protocol = ATA_PROT_NCQ;
1302 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1303
1304 if (tf->flags & ATA_TFLAG_WRITE)
1305 tf->command = ATA_CMD_FPDMA_WRITE;
1306 else
1307 tf->command = ATA_CMD_FPDMA_READ;
1308
1309 qc->nsect = n_block;
1310
1311 tf->nsect = qc->tag << 3;
1312 tf->hob_feature = (n_block >> 8) & 0xff;
1313 tf->feature = n_block & 0xff;
1314
1315 tf->hob_lbah = (block >> 40) & 0xff;
1316 tf->hob_lbam = (block >> 32) & 0xff;
1317 tf->hob_lbal = (block >> 24) & 0xff;
1318 tf->lbah = (block >> 16) & 0xff;
1319 tf->lbam = (block >> 8) & 0xff;
1320 tf->lbal = block & 0xff;
1321
1322 tf->device = 1 << 6;
1323 if (tf->flags & ATA_TFLAG_FUA)
1324 tf->device |= 1 << 7;
1325 } else if (dev->flags & ATA_DFLAG_LBA) {
1326 tf->flags |= ATA_TFLAG_LBA;
1327
1328 if (lba_28_ok(block, n_block)) {
1329 /* use LBA28 */
1330 tf->device |= (block >> 24) & 0xf;
1331 } else if (lba_48_ok(block, n_block)) {
1332 if (!(dev->flags & ATA_DFLAG_LBA48))
1333 goto out_of_range;
1334
1335 /* use LBA48 */
1336 tf->flags |= ATA_TFLAG_LBA48;
1337
1338 tf->hob_nsect = (n_block >> 8) & 0xff;
1339
1340 tf->hob_lbah = (block >> 40) & 0xff;
1341 tf->hob_lbam = (block >> 32) & 0xff;
1342 tf->hob_lbal = (block >> 24) & 0xff;
1343 } else
1344 /* request too large even for LBA48 */
1345 goto out_of_range;
1346
1347 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1348 goto invalid_fld;
1349
1350 qc->nsect = n_block;
1351 tf->nsect = n_block & 0xff;
1352
1353 tf->lbah = (block >> 16) & 0xff;
1354 tf->lbam = (block >> 8) & 0xff;
1355 tf->lbal = block & 0xff;
1356
1357 tf->device |= ATA_LBA;
1358 } else {
1359 /* CHS */
1360 u32 sect, head, cyl, track;
1361
1362 /* The request -may- be too large for CHS addressing. */
1363 if (!lba_28_ok(block, n_block))
1364 goto out_of_range;
1365
1366 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1367 goto invalid_fld;
1368
1369 /* Convert LBA to CHS */
1370 track = (u32)block / dev->sectors;
1371 cyl = track / dev->heads;
1372 head = track % dev->heads;
1373 sect = (u32)block % dev->sectors + 1;
1374
1375 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1376 (u32)block, track, cyl, head, sect);
1377
1378 /* Check whether the converted CHS can fit.
1379 Cylinder: 0-65535
1380 Head: 0-15
1381 Sector: 1-255*/
1382 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1383 goto out_of_range;
1384
1385 qc->nsect = n_block;
1386 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1387 tf->lbal = sect;
1388 tf->lbam = cyl;
1389 tf->lbah = cyl >> 8;
1390 tf->device |= head;
1391 }
1392
1393 return 0;
1394
1395invalid_fld:
1396 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1397 /* "Invalid field in cbd" */
1398 return 1;
1399
1400out_of_range:
1401 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1402 /* "Logical Block Address out of range" */
1403 return 1;
1404
1405nothing_to_do:
1406 qc->scsicmd->result = SAM_STAT_GOOD;
1407 return 1;
1408}
1409
/**
 *	ata_scsi_qc_complete - Complete a translated SCSI command
 *	@qc: Command to complete
 *
 *	Generates sense data (or GOOD status) for the SCSI command
 *	attached to @qc, invokes the SCSI done callback, and frees the
 *	qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	u8 *cdb = cmd->cmnd;
	int need_sense = (qc->err_mask != 0);

	/* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
	 * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
	 * cache
	 */
	if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
	    ((qc->tf.feature == SETFEATURES_WC_ON) ||
	     (qc->tf.feature == SETFEATURES_WC_OFF))) {
		qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
		ata_port_schedule_eh(qc->ap);
	}

	/* For ATA pass thru (SAT) commands, generate a sense block if
	 * user mandated it or if there's an error.  Note that if we
	 * generate because the user forced us to, a check condition
	 * is generated and the ATA register values are returned
	 * whether the command completed successfully or not. If there
	 * was no error, SK, ASC and ASCQ will all be zero.
	 */
	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
 	    ((cdb[2] & 0x20) || need_sense)) {
		/* CDB byte 2 bit 0x20 is the CK_COND field */
 		ata_gen_ata_desc_sense(qc);
	} else {
		if (!need_sense) {
			cmd->result = SAM_STAT_GOOD;
		} else {
			/* TODO: decide which descriptor format to use
			 * for 48b LBA devices and call that here
			 * instead of the fixed desc, which is only
			 * good for smaller LBA (and maybe CHS?)
			 * devices.
			 */
			ata_gen_fixed_sense(qc);
		}
	}

	/* without new-style EH there is nobody else to report the
	 * failure, so dump the status registers here */
	if (need_sense && !qc->ap->ops->error_handler)
		ata_dump_status(qc->ap->id, &qc->result_tf);

	qc->scsidone(cmd);

	ata_qc_free(qc);
}
1458
1459/**
1460 * ata_scmd_need_defer - Check whether we need to defer scmd
1461 * @dev: ATA device to which the command is addressed
1462 * @is_io: Is the command IO (and thus possibly NCQ)?
1463 *
1464 * NCQ and non-NCQ commands cannot run together. As upper layer
1465 * only knows the queue depth, we are responsible for maintaining
1466 * exclusion. This function checks whether a new command can be
1467 * issued to @dev.
1468 *
1469 * LOCKING:
1470 * spin_lock_irqsave(host_set lock)
1471 *
1472 * RETURNS:
1473 * 1 if deferring is needed, 0 otherwise.
1474 */
1475static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1476{
1477 struct ata_port *ap = dev->ap;
1478
1479 if (!(dev->flags & ATA_DFLAG_NCQ))
1480 return 0;
1481
1482 if (is_io) {
1483 if (!ata_tag_valid(ap->active_tag))
1484 return 0;
1485 } else {
1486 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1487 return 0;
1488 }
1489 return 1;
1490}
1491
1492/**
1493 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1494 * @dev: ATA device to which the command is addressed
1495 * @cmd: SCSI command to execute
1496 * @done: SCSI command completion function
1497 * @xlat_func: Actor which translates @cmd to an ATA taskfile
1498 *
1499 * Our ->queuecommand() function has decided that the SCSI
1500 * command issued can be directly translated into an ATA
1501 * command, rather than handled internally.
1502 *
1503 * This function sets up an ata_queued_cmd structure for the
1504 * SCSI command, and sends that ata_queued_cmd to the hardware.
1505 *
1506 * The xlat_func argument (actor) returns 0 if ready to execute
1507 * ATA command, else 1 to finish translation. If 1 is returned
1508 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1509 * to be set reflecting an error condition or clean (early)
1510 * termination.
1511 *
1512 * LOCKING:
1513 * spin_lock_irqsave(host_set lock)
1514 *
1515 * RETURNS:
1516 * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
1517 * needs to be deferred.
1518 */
1519static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1520 void (*done)(struct scsi_cmnd *),
1521 ata_xlat_func_t xlat_func)
1522{
1523 struct ata_queued_cmd *qc;
1524 u8 *scsicmd = cmd->cmnd;
1525 int is_io = xlat_func == ata_scsi_rw_xlat;
1526
1527 VPRINTK("ENTER\n");
1528
1529 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1530 goto defer;
1531
1532 qc = ata_scsi_qc_new(dev, cmd, done);
1533 if (!qc)
1534 goto err_mem;
1535
1536 /* data is present; dma-map it */
1537 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1538 cmd->sc_data_direction == DMA_TO_DEVICE) {
1539 if (unlikely(cmd->request_bufflen < 1)) {
1540 ata_dev_printk(dev, KERN_WARNING,
1541 "WARNING: zero len r/w req\n");
1542 goto err_did;
1543 }
1544
1545 if (cmd->use_sg)
1546 ata_sg_init(qc, cmd->request_buffer, cmd->use_sg);
1547 else
1548 ata_sg_init_one(qc, cmd->request_buffer,
1549 cmd->request_bufflen);
1550
1551 qc->dma_dir = cmd->sc_data_direction;
1552 }
1553
1554 qc->complete_fn = ata_scsi_qc_complete;
1555
1556 if (xlat_func(qc, scsicmd))
1557 goto early_finish;
1558
1559 /* select device, send command to hardware */
1560 ata_qc_issue(qc);
1561
1562 VPRINTK("EXIT\n");
1563 return 0;
1564
1565early_finish:
1566 ata_qc_free(qc);
1567 done(cmd);
1568 DPRINTK("EXIT - early finish (good or error)\n");
1569 return 0;
1570
1571err_did:
1572 ata_qc_free(qc);
1573err_mem:
1574 cmd->result = (DID_ERROR << 16);
1575 done(cmd);
1576 DPRINTK("EXIT - internal\n");
1577 return 0;
1578
1579defer:
1580 DPRINTK("EXIT - defer\n");
1581 return SCSI_MLQUEUE_DEVICE_BUSY;
1582}
1583
1584/**
1585 * ata_scsi_rbuf_get - Map response buffer.
1586 * @cmd: SCSI command containing buffer to be mapped.
1587 * @buf_out: Pointer to mapped area.
1588 *
1589 * Maps buffer contained within SCSI command @cmd.
1590 *
1591 * LOCKING:
1592 * spin_lock_irqsave(host_set lock)
1593 *
1594 * RETURNS:
1595 * Length of response buffer.
1596 */
1597
1598static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
1599{
1600 u8 *buf;
1601 unsigned int buflen;
1602
1603 if (cmd->use_sg) {
1604 struct scatterlist *sg;
1605
1606 sg = (struct scatterlist *) cmd->request_buffer;
1607 buf = kmap_atomic(sg->page, KM_USER0) + sg->offset;
1608 buflen = sg->length;
1609 } else {
1610 buf = cmd->request_buffer;
1611 buflen = cmd->request_bufflen;
1612 }
1613
1614 *buf_out = buf;
1615 return buflen;
1616}
1617
1618/**
1619 * ata_scsi_rbuf_put - Unmap response buffer.
1620 * @cmd: SCSI command containing buffer to be unmapped.
1621 * @buf: buffer to unmap
1622 *
1623 * Unmaps response buffer contained within @cmd.
1624 *
1625 * LOCKING:
1626 * spin_lock_irqsave(host_set lock)
1627 */
1628
1629static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
1630{
1631 if (cmd->use_sg) {
1632 struct scatterlist *sg;
1633
1634 sg = (struct scatterlist *) cmd->request_buffer;
1635 kunmap_atomic(buf - sg->offset, KM_USER0);
1636 }
1637}
1638
1639/**
1640 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
1641 * @args: device IDENTIFY data / SCSI command of interest.
1642 * @actor: Callback hook for desired SCSI command simulator
1643 *
1644 * Takes care of the hard work of simulating a SCSI command...
1645 * Mapping the response buffer, calling the command's handler,
1646 * and handling the handler's return value. This return value
1647 * indicates whether the handler wishes the SCSI command to be
1648 * completed successfully (0), or not (in which case cmd->result
1649 * and sense buffer are assumed to be set).
1650 *
1651 * LOCKING:
1652 * spin_lock_irqsave(host_set lock)
1653 */
1654
1655void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1656 unsigned int (*actor) (struct ata_scsi_args *args,
1657 u8 *rbuf, unsigned int buflen))
1658{
1659 u8 *rbuf;
1660 unsigned int buflen, rc;
1661 struct scsi_cmnd *cmd = args->cmd;
1662
1663 buflen = ata_scsi_rbuf_get(cmd, &rbuf);
1664 memset(rbuf, 0, buflen);
1665 rc = actor(args, rbuf, buflen);
1666 ata_scsi_rbuf_put(cmd, rbuf);
1667
1668 if (rc == 0)
1669 cmd->result = SAM_STAT_GOOD;
1670 args->done(cmd);
1671}
1672
1673/**
1674 * ata_scsiop_inq_std - Simulate INQUIRY command
1675 * @args: device IDENTIFY data / SCSI command of interest.
1676 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1677 * @buflen: Response buffer length.
1678 *
1679 * Returns standard device identification data associated
1680 * with non-VPD INQUIRY command output.
1681 *
1682 * LOCKING:
1683 * spin_lock_irqsave(host_set lock)
1684 */
1685
1686unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1687 unsigned int buflen)
1688{
1689 u8 hdr[] = {
1690 TYPE_DISK,
1691 0,
1692 0x5, /* claim SPC-3 version compatibility */
1693 2,
1694 95 - 4
1695 };
1696
1697 /* set scsi removeable (RMB) bit per ata bit */
1698 if (ata_id_removeable(args->id))
1699 hdr[1] |= (1 << 7);
1700
1701 VPRINTK("ENTER\n");
1702
1703 memcpy(rbuf, hdr, sizeof(hdr));
1704
1705 if (buflen > 35) {
1706 memcpy(&rbuf[8], "ATA ", 8);
1707 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1708 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1709 if (rbuf[32] == 0 || rbuf[32] == ' ')
1710 memcpy(&rbuf[32], "n/a ", 4);
1711 }
1712
1713 if (buflen > 63) {
1714 const u8 versions[] = {
1715 0x60, /* SAM-3 (no version claimed) */
1716
1717 0x03,
1718 0x20, /* SBC-2 (no version claimed) */
1719
1720 0x02,
1721 0x60 /* SPC-3 (no version claimed) */
1722 };
1723
1724 memcpy(rbuf + 59, versions, sizeof(versions));
1725 }
1726
1727 return 0;
1728}
1729
1730/**
1731 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1732 * @args: device IDENTIFY data / SCSI command of interest.
1733 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1734 * @buflen: Response buffer length.
1735 *
1736 * Returns list of inquiry VPD pages available.
1737 *
1738 * LOCKING:
1739 * spin_lock_irqsave(host_set lock)
1740 */
1741
1742unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
1743 unsigned int buflen)
1744{
1745 const u8 pages[] = {
1746 0x00, /* page 0x00, this page */
1747 0x80, /* page 0x80, unit serial no page */
1748 0x83 /* page 0x83, device ident page */
1749 };
1750 rbuf[3] = sizeof(pages); /* number of supported VPD pages */
1751
1752 if (buflen > 6)
1753 memcpy(rbuf + 4, pages, sizeof(pages));
1754
1755 return 0;
1756}
1757
1758/**
1759 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1760 * @args: device IDENTIFY data / SCSI command of interest.
1761 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1762 * @buflen: Response buffer length.
1763 *
1764 * Returns ATA device serial number.
1765 *
1766 * LOCKING:
1767 * spin_lock_irqsave(host_set lock)
1768 */
1769
1770unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1771 unsigned int buflen)
1772{
1773 const u8 hdr[] = {
1774 0,
1775 0x80, /* this page code */
1776 0,
1777 ATA_SERNO_LEN, /* page len */
1778 };
1779 memcpy(rbuf, hdr, sizeof(hdr));
1780
1781 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1782 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1783 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1784
1785 return 0;
1786}
1787
1788/**
1789 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1790 * @args: device IDENTIFY data / SCSI command of interest.
1791 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1792 * @buflen: Response buffer length.
1793 *
1794 * Yields two logical unit device identification designators:
1795 * - vendor specific ASCII containing the ATA serial number
1796 * - SAT defined "t10 vendor id based" containing ASCII vendor
1797 * name ("ATA "), model and serial numbers.
1798 *
1799 * LOCKING:
1800 * spin_lock_irqsave(host_set lock)
1801 */
1802
1803unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1804 unsigned int buflen)
1805{
1806 int num;
1807 const int sat_model_serial_desc_len = 68;
1808 const int ata_model_byte_len = 40;
1809
1810 rbuf[1] = 0x83; /* this page code */
1811 num = 4;
1812
1813 if (buflen > (ATA_SERNO_LEN + num + 3)) {
1814 /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */
1815 rbuf[num + 0] = 2;
1816 rbuf[num + 3] = ATA_SERNO_LEN;
1817 num += 4;
1818 ata_id_string(args->id, (unsigned char *) rbuf + num,
1819 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1820 num += ATA_SERNO_LEN;
1821 }
1822 if (buflen > (sat_model_serial_desc_len + num + 3)) {
1823 /* SAT defined lu model and serial numbers descriptor */
1824 /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */
1825 rbuf[num + 0] = 2;
1826 rbuf[num + 1] = 1;
1827 rbuf[num + 3] = sat_model_serial_desc_len;
1828 num += 4;
1829 memcpy(rbuf + num, "ATA ", 8);
1830 num += 8;
1831 ata_id_string(args->id, (unsigned char *) rbuf + num,
1832 ATA_ID_PROD_OFS, ata_model_byte_len);
1833 num += ata_model_byte_len;
1834 ata_id_string(args->id, (unsigned char *) rbuf + num,
1835 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1836 num += ATA_SERNO_LEN;
1837 }
1838 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
1839 return 0;
1840}
1841
1842/**
1843 * ata_scsiop_noop - Command handler that simply returns success.
1844 * @args: device IDENTIFY data / SCSI command of interest.
1845 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1846 * @buflen: Response buffer length.
1847 *
1848 * No operation. Simply returns success to caller, to indicate
1849 * that the caller should successfully complete this SCSI command.
1850 *
1851 * LOCKING:
1852 * spin_lock_irqsave(host_set lock)
1853 */
1854
1855unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
1856 unsigned int buflen)
1857{
1858 VPRINTK("ENTER\n");
1859 return 0;
1860}
1861
1862/**
1863 * ata_msense_push - Push data onto MODE SENSE data output buffer
1864 * @ptr_io: (input/output) Location to store more output data
1865 * @last: End of output data buffer
1866 * @buf: Pointer to BLOB being added to output buffer
1867 * @buflen: Length of BLOB
1868 *
1869 * Store MODE SENSE data on an output buffer.
1870 *
1871 * LOCKING:
1872 * None.
1873 */
1874
1875static void ata_msense_push(u8 **ptr_io, const u8 *last,
1876 const u8 *buf, unsigned int buflen)
1877{
1878 u8 *ptr = *ptr_io;
1879
1880 if ((ptr + buflen - 1) > last)
1881 return;
1882
1883 memcpy(ptr, buf, buflen);
1884
1885 ptr += buflen;
1886
1887 *ptr_io = ptr;
1888}
1889
1890/**
1891 * ata_msense_caching - Simulate MODE SENSE caching info page
1892 * @id: device IDENTIFY data
1893 * @ptr_io: (input/output) Location to store more output data
1894 * @last: End of output data buffer
1895 *
1896 * Generate a caching info page, which conditionally indicates
1897 * write caching to the SCSI layer, depending on device
1898 * capabilities.
1899 *
1900 * LOCKING:
1901 * None.
1902 */
1903
1904static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
1905 const u8 *last)
1906{
1907 u8 page[CACHE_MPAGE_LEN];
1908
1909 memcpy(page, def_cache_mpage, sizeof(page));
1910 if (ata_id_wcache_enabled(id))
1911 page[2] |= (1 << 2); /* write cache enable */
1912 if (!ata_id_rahead_enabled(id))
1913 page[12] |= (1 << 5); /* disable read ahead */
1914
1915 ata_msense_push(ptr_io, last, page, sizeof(page));
1916 return sizeof(page);
1917}
1918
1919/**
1920 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
1921 * @dev: Device associated with this MODE SENSE command
1922 * @ptr_io: (input/output) Location to store more output data
1923 * @last: End of output data buffer
1924 *
1925 * Generate a generic MODE SENSE control mode page.
1926 *
1927 * LOCKING:
1928 * None.
1929 */
1930
1931static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
1932{
1933 ata_msense_push(ptr_io, last, def_control_mpage,
1934 sizeof(def_control_mpage));
1935 return sizeof(def_control_mpage);
1936}
1937
1938/**
1939 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
1940 * @dev: Device associated with this MODE SENSE command
1941 * @ptr_io: (input/output) Location to store more output data
1942 * @last: End of output data buffer
1943 *
1944 * Generate a generic MODE SENSE r/w error recovery page.
1945 *
1946 * LOCKING:
1947 * None.
1948 */
1949
1950static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1951{
1952
1953 ata_msense_push(ptr_io, last, def_rw_recovery_mpage,
1954 sizeof(def_rw_recovery_mpage));
1955 return sizeof(def_rw_recovery_mpage);
1956}
1957
1958/*
1959 * We can turn this into a real blacklist if it's needed, for now just
1960 * blacklist any Maxtor BANC1G10 revision firmware
1961 */
1962static int ata_dev_supports_fua(u16 *id)
1963{
1964 unsigned char model[41], fw[9];
1965
1966 if (!libata_fua)
1967 return 0;
1968 if (!ata_id_has_fua(id))
1969 return 0;
1970
1971 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1972 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1973
1974 if (strcmp(model, "Maxtor"))
1975 return 1;
1976 if (strcmp(fw, "BANC1G10"))
1977 return 1;
1978
1979 return 0; /* blacklisted */
1980}
1981
1982/**
1983 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
1984 * @args: device IDENTIFY data / SCSI command of interest.
1985 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1986 * @buflen: Response buffer length.
1987 *
1988 * Simulate MODE SENSE commands. Assume this is invoked for direct
1989 * access devices (e.g. disks) only. There should be no block
1990 * descriptor for other device types.
1991 *
1992 * LOCKING:
1993 * spin_lock_irqsave(host_set lock)
1994 */
1995
1996unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1997 unsigned int buflen)
1998{
1999 struct ata_device *dev = args->dev;
2000 u8 *scsicmd = args->cmd->cmnd, *p, *last;
2001 const u8 sat_blk_desc[] = {
2002 0, 0, 0, 0, /* number of blocks: sat unspecified */
2003 0,
2004 0, 0x2, 0x0 /* block length: 512 bytes */
2005 };
2006 u8 pg, spg;
2007 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
2008 u8 dpofua;
2009
2010 VPRINTK("ENTER\n");
2011
2012 six_byte = (scsicmd[0] == MODE_SENSE);
2013 ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
2014 /*
2015 * LLBA bit in msense(10) ignored (compliant)
2016 */
2017
2018 page_control = scsicmd[2] >> 6;
2019 switch (page_control) {
2020 case 0: /* current */
2021 break; /* supported */
2022 case 3: /* saved */
2023 goto saving_not_supp;
2024 case 1: /* changeable */
2025 case 2: /* defaults */
2026 default:
2027 goto invalid_fld;
2028 }
2029
2030 if (six_byte) {
2031 output_len = 4 + (ebd ? 8 : 0);
2032 alloc_len = scsicmd[4];
2033 } else {
2034 output_len = 8 + (ebd ? 8 : 0);
2035 alloc_len = (scsicmd[7] << 8) + scsicmd[8];
2036 }
2037 minlen = (alloc_len < buflen) ? alloc_len : buflen;
2038
2039 p = rbuf + output_len;
2040 last = rbuf + minlen - 1;
2041
2042 pg = scsicmd[2] & 0x3f;
2043 spg = scsicmd[3];
2044 /*
2045 * No mode subpages supported (yet) but asking for _all_
2046 * subpages may be valid
2047 */
2048 if (spg && (spg != ALL_SUB_MPAGES))
2049 goto invalid_fld;
2050
2051 switch(pg) {
2052 case RW_RECOVERY_MPAGE:
2053 output_len += ata_msense_rw_recovery(&p, last);
2054 break;
2055
2056 case CACHE_MPAGE:
2057 output_len += ata_msense_caching(args->id, &p, last);
2058 break;
2059
2060 case CONTROL_MPAGE: {
2061 output_len += ata_msense_ctl_mode(&p, last);
2062 break;
2063 }
2064
2065 case ALL_MPAGES:
2066 output_len += ata_msense_rw_recovery(&p, last);
2067 output_len += ata_msense_caching(args->id, &p, last);
2068 output_len += ata_msense_ctl_mode(&p, last);
2069 break;
2070
2071 default: /* invalid page code */
2072 goto invalid_fld;
2073 }
2074
2075 if (minlen < 1)
2076 return 0;
2077
2078 dpofua = 0;
2079 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
2080 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
2081 dpofua = 1 << 4;
2082
2083 if (six_byte) {
2084 output_len--;
2085 rbuf[0] = output_len;
2086 if (minlen > 2)
2087 rbuf[2] |= dpofua;
2088 if (ebd) {
2089 if (minlen > 3)
2090 rbuf[3] = sizeof(sat_blk_desc);
2091 if (minlen > 11)
2092 memcpy(rbuf + 4, sat_blk_desc,
2093 sizeof(sat_blk_desc));
2094 }
2095 } else {
2096 output_len -= 2;
2097 rbuf[0] = output_len >> 8;
2098 if (minlen > 1)
2099 rbuf[1] = output_len;
2100 if (minlen > 3)
2101 rbuf[3] |= dpofua;
2102 if (ebd) {
2103 if (minlen > 7)
2104 rbuf[7] = sizeof(sat_blk_desc);
2105 if (minlen > 15)
2106 memcpy(rbuf + 8, sat_blk_desc,
2107 sizeof(sat_blk_desc));
2108 }
2109 }
2110 return 0;
2111
2112invalid_fld:
2113 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
2114 /* "Invalid field in cbd" */
2115 return 1;
2116
2117saving_not_supp:
2118 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
2119 /* "Saving parameters not supported" */
2120 return 1;
2121}
2122
2123/**
2124 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2125 * @args: device IDENTIFY data / SCSI command of interest.
2126 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2127 * @buflen: Response buffer length.
2128 *
2129 * Simulate READ CAPACITY commands.
2130 *
2131 * LOCKING:
2132 * spin_lock_irqsave(host_set lock)
2133 */
2134
2135unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2136 unsigned int buflen)
2137{
2138 u64 n_sectors;
2139 u32 tmp;
2140
2141 VPRINTK("ENTER\n");
2142
2143 if (ata_id_has_lba(args->id)) {
2144 if (ata_id_has_lba48(args->id))
2145 n_sectors = ata_id_u64(args->id, 100);
2146 else
2147 n_sectors = ata_id_u32(args->id, 60);
2148 } else {
2149 /* CHS default translation */
2150 n_sectors = args->id[1] * args->id[3] * args->id[6];
2151
2152 if (ata_id_current_chs_valid(args->id))
2153 /* CHS current translation */
2154 n_sectors = ata_id_u32(args->id, 57);
2155 }
2156
2157 n_sectors--; /* ATA TotalUserSectors - 1 */
2158
2159 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2160 if( n_sectors >= 0xffffffffULL )
2161 tmp = 0xffffffff ; /* Return max count on overflow */
2162 else
2163 tmp = n_sectors ;
2164
2165 /* sector count, 32-bit */
2166 rbuf[0] = tmp >> (8 * 3);
2167 rbuf[1] = tmp >> (8 * 2);
2168 rbuf[2] = tmp >> (8 * 1);
2169 rbuf[3] = tmp;
2170
2171 /* sector size */
2172 tmp = ATA_SECT_SIZE;
2173 rbuf[6] = tmp >> 8;
2174 rbuf[7] = tmp;
2175
2176 } else {
2177 /* sector count, 64-bit */
2178 tmp = n_sectors >> (8 * 4);
2179 rbuf[2] = tmp >> (8 * 3);
2180 rbuf[3] = tmp >> (8 * 2);
2181 rbuf[4] = tmp >> (8 * 1);
2182 rbuf[5] = tmp;
2183 tmp = n_sectors;
2184 rbuf[6] = tmp >> (8 * 3);
2185 rbuf[7] = tmp >> (8 * 2);
2186 rbuf[8] = tmp >> (8 * 1);
2187 rbuf[9] = tmp;
2188
2189 /* sector size */
2190 tmp = ATA_SECT_SIZE;
2191 rbuf[12] = tmp >> 8;
2192 rbuf[13] = tmp;
2193 }
2194
2195 return 0;
2196}
2197
2198/**
2199 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2200 * @args: device IDENTIFY data / SCSI command of interest.
2201 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2202 * @buflen: Response buffer length.
2203 *
2204 * Simulate REPORT LUNS command.
2205 *
2206 * LOCKING:
2207 * spin_lock_irqsave(host_set lock)
2208 */
2209
2210unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
2211 unsigned int buflen)
2212{
2213 VPRINTK("ENTER\n");
2214 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
2215
2216 return 0;
2217}
2218
2219/**
2220 * ata_scsi_set_sense - Set SCSI sense data and status
2221 * @cmd: SCSI request to be handled
2222 * @sk: SCSI-defined sense key
2223 * @asc: SCSI-defined additional sense code
2224 * @ascq: SCSI-defined additional sense code qualifier
2225 *
2226 * Helper function that builds a valid fixed format, current
2227 * response code and the given sense key (sk), additional sense
2228 * code (asc) and additional sense code qualifier (ascq) with
2229 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
2230 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
2231 *
2232 * LOCKING:
2233 * Not required
2234 */
2235
2236void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
2237{
2238 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
2239
2240 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
2241 cmd->sense_buffer[2] = sk;
2242 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
2243 cmd->sense_buffer[12] = asc;
2244 cmd->sense_buffer[13] = ascq;
2245}
2246
/**
 *	ata_scsi_badcmd - End a SCSI request with an error
 *	@cmd: SCSI request to be handled
 *	@done: SCSI command completion function
 *	@asc: SCSI-defined additional sense code
 *	@ascq: SCSI-defined additional sense code qualifier
 *
 *	Helper function that completes a SCSI command with
 *	%SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST
 *	and the specified additional sense codes, then invokes the
 *	midlayer completion callback.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
{
	DPRINTK("ENTER\n");
	/* ILLEGAL_REQUEST sense key; @asc/@ascq identify the exact reason */
	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);

	done(cmd);
}
2269
2270static void atapi_sense_complete(struct ata_queued_cmd *qc)
2271{
2272 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2273 /* FIXME: not quite right; we don't want the
2274 * translation of taskfile registers into
2275 * a sense descriptors, since that's only
2276 * correct for ATA, not ATAPI
2277 */
2278 ata_gen_ata_desc_sense(qc);
2279 }
2280
2281 qc->scsidone(qc->scsicmd);
2282 ata_qc_free(qc);
2283}
2284
/* is it pointless to prefer PIO for "safety reasons"? */
/* Decides whether atapi_request_sense() issues its REQUEST SENSE via
 * DMA (port advertises ATA_FLAG_PIO_DMA) or PIO.  Note the return
 * value is the raw masked flag bits, not 0/1 — callers use it only
 * as a boolean.
 */
static inline int ata_pio_use_silly(struct ata_port *ap)
{
	return (ap->flags & ATA_FLAG_PIO_DMA);
}
2290
/* Re-purpose a failed ATAPI qc into an internally generated REQUEST
 * SENSE command so that real sense data replaces the synthesized
 * taskfile-based sense.  Completion is routed to atapi_sense_complete().
 *
 * NOTE: the statement order below is significant — the old taskfile
 * must be snapshotted (tf_read) and the fallback sense bytes written
 * before ata_qc_reinit() wipes the qc for reuse.
 */
static void atapi_request_sense(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

	/* snapshot device registers from the failed command */
	ap->ops->tf_read(ap, &qc->tf);

	/* fill these in, for the case where they are -not- overwritten */
	cmd->sense_buffer[0] = 0x70;	/* fixed format, current error */
	/* ATAPI reports the sense key in the high nibble of the error
	 * (feature) register */
	cmd->sense_buffer[2] = qc->tf.feature >> 4;

	/* reset the qc so it can carry the REQUEST SENSE command */
	ata_qc_reinit(qc);

	/* DMA/PIO the sense data straight into the SCSI sense buffer */
	ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
	qc->dma_dir = DMA_FROM_DEVICE;

	memset(&qc->cdb, 0, qc->dev->cdb_len);
	qc->cdb[0] = REQUEST_SENSE;
	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;	/* allocation length */

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.command = ATA_CMD_PACKET;

	if (ata_pio_use_silly(ap)) {
		qc->tf.protocol = ATA_PROT_ATAPI_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;
	} else {
		qc->tf.protocol = ATA_PROT_ATAPI;
		/* byte count limit for PIO data-in: 8 KiB */
		qc->tf.lbam = (8 * 1024) & 0xff;
		qc->tf.lbah = (8 * 1024) >> 8;
	}
	qc->nbytes = SCSI_SENSE_BUFFERSIZE;

	qc->complete_fn = atapi_sense_complete;

	ata_qc_issue(qc);

	DPRINTK("EXIT\n");
}
2335
/* Completion callback for ATAPI commands translated by atapi_xlat().
 *
 * Three paths:
 *  - new-EH ports with an error or valid sense: finish with
 *    CHECK CONDITION (synthesizing sense if EH did not supply any);
 *  - old-EH device errors: re-issue the qc as a REQUEST SENSE via
 *    atapi_request_sense(), which completes the command later;
 *  - success: optionally patch up INQUIRY data, complete with GOOD.
 */
static void atapi_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	unsigned int err_mask = qc->err_mask;

	VPRINTK("ENTER, err_mask 0x%X\n", err_mask);

	/* handle completion from new EH */
	if (unlikely(qc->ap->ops->error_handler &&
		     (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {

		if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
			/* FIXME: not quite right; we don't want the
			 * translation of taskfile registers into a
			 * sense descriptors, since that's only
			 * correct for ATA, not ATAPI
			 */
			ata_gen_ata_desc_sense(qc);
		}

		/* SCSI EH automatically locks door if sdev->locked is
		 * set.  Sometimes door lock request continues to
		 * fail, for example, when no media is present.  This
		 * creates a loop - SCSI EH issues door lock which
		 * fails and gets invoked again to acquire sense data
		 * for the failed command.
		 *
		 * If door lock fails, always clear sdev->locked to
		 * avoid this infinite loop.
		 */
		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
			qc->dev->sdev->locked = 0;

		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
		qc->scsidone(cmd);
		ata_qc_free(qc);
		return;
	}

	/* successful completion or old EH failure path */
	if (unlikely(err_mask & AC_ERR_DEV)) {
		/* device fault: fetch real sense data; the qc is reused
		 * and completed by atapi_sense_complete(), so do not
		 * free it here */
		cmd->result = SAM_STAT_CHECK_CONDITION;
		atapi_request_sense(qc);
		return;
	} else if (unlikely(err_mask)) {
		/* FIXME: not quite right; we don't want the
		 * translation of taskfile registers into
		 * a sense descriptors, since that's only
		 * correct for ATA, not ATAPI
		 */
		ata_gen_ata_desc_sense(qc);
	} else {
		u8 *scsicmd = cmd->cmnd;

		if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
			u8 *buf = NULL;
			unsigned int buflen;

			buflen = ata_scsi_rbuf_get(cmd, &buf);

			/* ATAPI devices typically report zero for their SCSI version,
			 * and sometimes deviate from the spec WRT response data
			 * format.  If SCSI version is reported as zero like normal,
			 * then we make the following fixups:  1) Fake MMC-5 version,
			 * to indicate to the Linux scsi midlayer this is a modern
			 * device.  2) Ensure response data format / ATAPI information
			 * are always correct.
			 */
			if (buf[2] == 0) {
				buf[2] = 0x5;	/* claim SPC-3/MMC-5 */
				buf[3] = 0x32;	/* response data format 2 */
			}

			ata_scsi_rbuf_put(cmd, buf);
		}

		cmd->result = SAM_STAT_GOOD;
	}

	qc->scsidone(cmd);
	ata_qc_free(qc);
}
/**
 *	atapi_xlat - Initialize PACKET taskfile
 *	@qc: command structure to be initialized
 *	@scsicmd: SCSI CDB associated with this PACKET command
 *
 *	Copies the SCSI CDB into the qc and selects the ATAPI transfer
 *	protocol (no-data, PIO or DMA) based on the device's PIO flag,
 *	the data direction and whether DMA is safe for this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure.
 */

static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	int using_pio = (dev->flags & ATA_DFLAG_PIO);
	int nodata = (cmd->sc_data_direction == DMA_NONE);

	if (!using_pio)
		/* Check whether ATAPI DMA is safe */
		if (ata_check_atapi_dma(qc))
			using_pio = 1;

	memcpy(&qc->cdb, scsicmd, dev->cdb_len);

	qc->complete_fn = atapi_qc_complete;

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		qc->tf.flags |= ATA_TFLAG_WRITE;
		DPRINTK("direction: write\n");
	}

	qc->tf.command = ATA_CMD_PACKET;

	/* no data, or PIO data xfer */
	if (using_pio || nodata) {
		if (nodata)
			qc->tf.protocol = ATA_PROT_ATAPI_NODATA;
		else
			qc->tf.protocol = ATA_PROT_ATAPI;
		/* byte count limit for PIO transfers: 8 KiB */
		qc->tf.lbam = (8 * 1024) & 0xff;
		qc->tf.lbah = (8 * 1024) >> 8;
	}

	/* DMA data xfer */
	else {
		qc->tf.protocol = ATA_PROT_ATAPI_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;

		if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE))
			/* some SATA bridges need us to indicate data xfer direction */
			qc->tf.feature |= ATAPI_DMADIR;
	}

	qc->nbytes = cmd->request_bufflen;

	return 0;
}
2478
2479static struct ata_device * ata_find_dev(struct ata_port *ap, int id)
2480{
2481 if (likely(id < ATA_MAX_DEVICES))
2482 return &ap->device[id];
2483 return NULL;
2484}
2485
2486static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
2487 const struct scsi_device *scsidev)
2488{
2489 /* skip commands not addressed to targets we simulate */
2490 if (unlikely(scsidev->channel || scsidev->lun))
2491 return NULL;
2492
2493 return ata_find_dev(ap, scsidev->id);
2494}
2495
2496/**
2497 * ata_scsi_dev_enabled - determine if device is enabled
2498 * @dev: ATA device
2499 *
2500 * Determine if commands should be sent to the specified device.
2501 *
2502 * LOCKING:
2503 * spin_lock_irqsave(host_set lock)
2504 *
2505 * RETURNS:
2506 * 0 if commands are not allowed / 1 if commands are allowed
2507 */
2508
2509static int ata_scsi_dev_enabled(struct ata_device *dev)
2510{
2511 if (unlikely(!ata_dev_enabled(dev)))
2512 return 0;
2513
2514 if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) {
2515 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2516 ata_dev_printk(dev, KERN_WARNING,
2517 "WARNING: ATAPI is %s, device ignored.\n",
2518 atapi_enabled ? "not supported with this driver" : "disabled");
2519 return 0;
2520 }
2521 }
2522
2523 return 1;
2524}
2525
2526/**
2527 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2528 * @ap: ATA port to which the device is attached
2529 * @scsidev: SCSI device from which we derive the ATA device
2530 *
2531 * Given various information provided in struct scsi_cmnd,
2532 * map that onto an ATA bus, and using that mapping
2533 * determine which ata_device is associated with the
2534 * SCSI command to be sent.
2535 *
2536 * LOCKING:
2537 * spin_lock_irqsave(host_set lock)
2538 *
2539 * RETURNS:
2540 * Associated ATA device, or %NULL if not found.
2541 */
2542static struct ata_device *
2543ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2544{
2545 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2546
2547 if (unlikely(!dev || !ata_scsi_dev_enabled(dev)))
2548 return NULL;
2549
2550 return dev;
2551}
2552
2553/*
2554 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2555 * @byte1: Byte 1 from pass-thru CDB.
2556 *
2557 * RETURNS:
2558 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
2559 */
2560static u8
2561ata_scsi_map_proto(u8 byte1)
2562{
2563 switch((byte1 & 0x1e) >> 1) {
2564 case 3: /* Non-data */
2565 return ATA_PROT_NODATA;
2566
2567 case 6: /* DMA */
2568 return ATA_PROT_DMA;
2569
2570 case 4: /* PIO Data-in */
2571 case 5: /* PIO Data-out */
2572 return ATA_PROT_PIO;
2573
2574 case 10: /* Device Reset */
2575 case 0: /* Hard Reset */
2576 case 1: /* SRST */
2577 case 2: /* Bus Idle */
2578 case 7: /* Packet */
2579 case 8: /* DMA Queued */
2580 case 9: /* Device Diagnostic */
2581 case 11: /* UDMA Data-in */
2582 case 12: /* UDMA Data-Out */
2583 case 13: /* FPDMA */
2584 default: /* Reserved */
2585 break;
2586 }
2587
2588 return ATA_PROT_UNKNOWN;
2589}
2590
/**
 *	ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
 *	@qc: command structure to be initialized
 *	@scsicmd: SCSI command to convert
 *
 *	Handles either 12 or 16-byte versions of the CDB.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure (sense data already set
 *	to "Invalid field in CDB" on the failure paths).
 */
static unsigned int
ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
{
	struct ata_taskfile *tf = &(qc->tf);
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;

	/* reject protocols we cannot map onto a taskfile */
	if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
		goto invalid_fld;

	/* We may not issue DMA commands if no DMA mode is set */
	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
		goto invalid_fld;

	if (scsicmd[1] & 0xe0)
		/* PIO multi not supported yet */
		goto invalid_fld;

	/*
	 * 12 and 16 byte CDBs use different offsets to
	 * provide the various register values.
	 */
	if (scsicmd[0] == ATA_16) {
		/*
		 * 16-byte CDB - may contain extended commands.
		 *
		 * If that is the case, copy the upper byte register values.
		 */
		if (scsicmd[1] & 0x01) {	/* EXTEND bit */
			tf->hob_feature = scsicmd[3];
			tf->hob_nsect = scsicmd[5];
			tf->hob_lbal = scsicmd[7];
			tf->hob_lbam = scsicmd[9];
			tf->hob_lbah = scsicmd[11];
			tf->flags |= ATA_TFLAG_LBA48;
		} else
			tf->flags &= ~ATA_TFLAG_LBA48;

		/*
		 * Always copy low byte, device and command registers.
		 */
		tf->feature = scsicmd[4];
		tf->nsect = scsicmd[6];
		tf->lbal = scsicmd[8];
		tf->lbam = scsicmd[10];
		tf->lbah = scsicmd[12];
		tf->device = scsicmd[13];
		tf->command = scsicmd[14];
	} else {
		/*
		 * 12-byte CDB - incapable of extended commands.
		 */
		tf->flags &= ~ATA_TFLAG_LBA48;

		tf->feature = scsicmd[3];
		tf->nsect = scsicmd[4];
		tf->lbal = scsicmd[5];
		tf->lbam = scsicmd[6];
		tf->lbah = scsicmd[7];
		tf->device = scsicmd[8];
		tf->command = scsicmd[9];
	}
	/*
	 * If slave is possible, enforce correct master/slave bit
	 */
	if (qc->ap->flags & ATA_FLAG_SLAVE_POSS)
		tf->device = qc->dev->devno ?
			tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;

	/*
	 * Filter SET_FEATURES - XFER MODE command -- otherwise,
	 * SET_FEATURES - XFER MODE must be preceded/succeeded
	 * by an update to hardware-specific registers for each
	 * controller (i.e. the reason for ->set_piomode(),
	 * ->set_dmamode(), and ->post_set_mode() hooks).
	 */
	if ((tf->command == ATA_CMD_SET_FEATURES)
	 && (tf->feature == SETFEATURES_XFER))
		goto invalid_fld;

	/*
	 * Set flags so that all registers will be written,
	 * and pass on write indication (used for PIO/DMA
	 * setup.)
	 */
	tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);

	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		tf->flags |= ATA_TFLAG_WRITE;

	/*
	 * Set transfer length.
	 *
	 * TODO: find out if we need to do more here to
	 *       cover scatter/gather case.
	 */
	qc->nsect = cmd->request_bufflen / ATA_SECT_SIZE;

	/* request result TF */
	qc->flags |= ATA_QCFLAG_RESULT_TF;

	return 0;

 invalid_fld:
	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x00);
	/* "Invalid field in cdb" */
	return 1;
}
2709
/**
 *	ata_get_xlat_func - check if SCSI to ATA translation is possible
 *	@dev: ATA device
 *	@cmd: SCSI command opcode to consider
 *
 *	Look up the SCSI command given, and determine whether the
 *	SCSI command is to be translated or simulated.
 *
 *	RETURNS:
 *	Pointer to translation function if possible, %NULL if not
 *	(in which case the caller falls back to ata_scsi_simulate()).
 */

static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
{
	switch (cmd) {
	/* reads and writes of all sizes share one translator */
	case READ_6:
	case READ_10:
	case READ_16:

	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		return ata_scsi_rw_xlat;

	case SYNCHRONIZE_CACHE:
		/* only translate when the device supports FLUSH CACHE;
		 * otherwise fall through to simulation (no-op) */
		if (ata_try_flush_cache(dev))
			return ata_scsi_flush_xlat;
		break;

	case VERIFY:
	case VERIFY_16:
		return ata_scsi_verify_xlat;

	case ATA_12:
	case ATA_16:
		return ata_scsi_pass_thru;

	case START_STOP:
		return ata_scsi_start_stop_xlat;
	}

	return NULL;
}
2753
/**
 *	ata_scsi_dump_cdb - dump SCSI command contents to dmesg
 *	@ap: ATA port to which the command was being sent
 *	@cmd: SCSI command to dump
 *
 *	Prints the contents of a SCSI command via printk().
 *	Compiled away to nothing unless ATA_DEBUG is defined.
 *	Only the first 9 CDB bytes are printed, which covers
 *	10-byte CDBs; longer CDBs are truncated in the output.
 */

static inline void ata_scsi_dump_cdb(struct ata_port *ap,
				     struct scsi_cmnd *cmd)
{
#ifdef ATA_DEBUG
	struct scsi_device *scsidev = cmd->device;
	u8 *scsicmd = cmd->cmnd;

	DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		ap->id,
		scsidev->channel, scsidev->id, scsidev->lun,
		scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
		scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
		scsicmd[8]);
#endif
}
2777
2778static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2779 void (*done)(struct scsi_cmnd *),
2780 struct ata_device *dev)
2781{
2782 int rc = 0;
2783
2784 if (dev->class == ATA_DEV_ATA) {
2785 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2786 cmd->cmnd[0]);
2787
2788 if (xlat_func)
2789 rc = ata_scsi_translate(dev, cmd, done, xlat_func);
2790 else
2791 ata_scsi_simulate(dev, cmd, done);
2792 } else
2793 rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
2794
2795 return rc;
2796}
2797
/**
 *	ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
 *	@cmd: SCSI command to be sent
 *	@done: Completion function, called when command is complete
 *
 *	In some cases, this function translates SCSI commands into
 *	ATA taskfiles, and queues the taskfiles to be sent to
 *	hardware.  In other cases, this function simulates a
 *	SCSI device by evaluating and responding to certain
 *	SCSI commands.  This creates the overall effect of
 *	ATA and ATAPI devices appearing as SCSI devices.
 *
 *	LOCKING:
 *	Releases scsi-layer-held lock, and obtains host_set lock.
 *
 *	RETURNS:
 *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
 *	0 otherwise.
 */
int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct ata_port *ap;
	struct ata_device *dev;
	struct scsi_device *scsidev = cmd->device;
	struct Scsi_Host *shost = scsidev->host;
	int rc = 0;

	ap = ata_shost_to_port(shost);

	/* trade the SCSI midlayer's host lock for the port lock;
	 * the swap is undone before returning to the midlayer */
	spin_unlock(shost->host_lock);
	spin_lock(ap->lock);

	ata_scsi_dump_cdb(ap, cmd);

	dev = ata_scsi_find_dev(ap, scsidev);
	if (likely(dev))
		rc = __ata_scsi_queuecmd(cmd, done, dev);
	else {
		/* no such (enabled) device: fail the command immediately */
		cmd->result = (DID_BAD_TARGET << 16);
		done(cmd);
	}

	spin_unlock(ap->lock);
	spin_lock(shost->host_lock);
	return rc;
}
2844
/**
 *	ata_scsi_simulate - simulate SCSI command on ATA device
 *	@dev: the target device
 *	@cmd: SCSI command being sent to device.
 *	@done: SCSI command completion function.
 *
 *	Interprets and directly executes a select list of SCSI commands
 *	that can be handled internally.  Commands are either answered
 *	from IDENTIFY data via the ata_scsiop_* helpers, completed as
 *	no-ops, or rejected with ILLEGAL_REQUEST sense data.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
		       void (*done)(struct scsi_cmnd *))
{
	struct ata_scsi_args args;
	const u8 *scsicmd = cmd->cmnd;

	/* bundle everything the ata_scsiop_* helpers need */
	args.dev = dev;
	args.id = dev->id;
	args.cmd = cmd;
	args.done = done;

	switch(scsicmd[0]) {
	/* no-op's, complete with success */
	case SYNCHRONIZE_CACHE:
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
	case TEST_UNIT_READY:
	case FORMAT_UNIT:		/* FIXME: correct? */
	case SEND_DIAGNOSTIC:		/* FIXME: correct? */
		ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
		break;

	case INQUIRY:
		if (scsicmd[1] & 2)		   /* is CmdDt set?  */
			ata_scsi_invalid_field(cmd, done);
		else if ((scsicmd[1] & 1) == 0)	   /* is EVPD clear? */
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
		else if (scsicmd[2] == 0x00)	   /* supported VPD pages */
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
		else if (scsicmd[2] == 0x80)	   /* unit serial number */
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
		else if (scsicmd[2] == 0x83)	   /* device identification */
			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
		else
			ata_scsi_invalid_field(cmd, done);
		break;

	case MODE_SENSE:
	case MODE_SENSE_10:
		ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
		break;

	case MODE_SELECT:	/* unconditionally return */
	case MODE_SELECT_10:	/* bad-field-in-cdb */
		ata_scsi_invalid_field(cmd, done);
		break;

	case READ_CAPACITY:
		ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		break;

	case SERVICE_ACTION_IN:
		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
		else
			ata_scsi_invalid_field(cmd, done);
		break;

	case REPORT_LUNS:
		ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
		break;

	/* mandatory commands we haven't implemented yet */
	case REQUEST_SENSE:

	/* all other commands */
	default:
		ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
		/* "Invalid command operation code" */
		done(cmd);
		break;
	}
}
2932
/* Attach a SCSI device for every enabled ATA device on @ap that does
 * not yet have one.  No-op while the port is disabled.  The sdev
 * reference taken by __scsi_add_device() is dropped immediately;
 * dev->sdev is the only record kept.
 */
void ata_scsi_scan_host(struct ata_port *ap)
{
	unsigned int i;

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		struct scsi_device *sdev;

		/* skip empty slots and devices already attached */
		if (!ata_dev_enabled(dev) || dev->sdev)
			continue;

		/* channel 0, id == device index, LUN 0 */
		sdev = __scsi_add_device(ap->host, 0, i, 0, NULL);
		if (!IS_ERR(sdev)) {
			dev->sdev = sdev;
			scsi_device_put(sdev);
		}
	}
}
2954
2955/**
2956 * ata_scsi_offline_dev - offline attached SCSI device
2957 * @dev: ATA device to offline attached SCSI device for
2958 *
2959 * This function is called from ata_eh_hotplug() and responsible
2960 * for taking the SCSI device attached to @dev offline. This
2961 * function is called with host_set lock which protects dev->sdev
2962 * against clearing.
2963 *
2964 * LOCKING:
2965 * spin_lock_irqsave(host_set lock)
2966 *
2967 * RETURNS:
2968 * 1 if attached SCSI device exists, 0 otherwise.
2969 */
2970int ata_scsi_offline_dev(struct ata_device *dev)
2971{
2972 if (dev->sdev) {
2973 scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
2974 return 1;
2975 }
2976 return 0;
2977}
2978
/**
 *	ata_scsi_remove_dev - remove attached SCSI device
 *	@dev: ATA device to remove attached SCSI device for
 *
 *	This function is called from ata_eh_scsi_hotplug() and
 *	responsible for removing the SCSI device attached to @dev.
 *	dev->sdev is cleared under the port lock; the actual
 *	scsi_remove_device() happens after both locks are dropped.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_scsi_remove_dev(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct scsi_device *sdev;
	unsigned long flags;

	/* Alas, we need to grab scan_mutex to ensure SCSI device
	 * state doesn't change underneath us and thus
	 * scsi_device_get() always succeeds.  The mutex locking can
	 * be removed if there is __scsi_device_get() interface which
	 * increments reference counts regardless of device state.
	 */
	mutex_lock(&ap->host->scan_mutex);
	spin_lock_irqsave(ap->lock, flags);

	/* clearing dev->sdev is protected by host_set lock */
	sdev = dev->sdev;
	dev->sdev = NULL;

	if (sdev) {
		/* If user initiated unplug races with us, sdev can go
		 * away underneath us after the host_set lock and
		 * scan_mutex are released.  Hold onto it.
		 */
		if (scsi_device_get(sdev) == 0) {
			/* The following ensures the attached sdev is
			 * offline on return from ata_scsi_offline_dev()
			 * regardless it wins or loses the race
			 * against this function.
			 */
			scsi_device_set_state(sdev, SDEV_OFFLINE);
		} else {
			/* scsi_device_get() should not fail while we
			 * hold scan_mutex; flag and skip removal */
			WARN_ON(1);
			sdev = NULL;
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);
	mutex_unlock(&ap->host->scan_mutex);

	/* sleeping removal, done only with our own reference held */
	if (sdev) {
		ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
			       sdev->sdev_gendev.bus_id);

		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
}
3037
/**
 *	ata_scsi_hotplug - SCSI part of hotplug
 *	@data: Pointer to ATA port to perform SCSI hotplug on
 *
 *	Perform SCSI part of hotplug.  It's executed from a separate
 *	workqueue after EH completes.  This is necessary because SCSI
 *	hot plugging requires working EH and hot unplugging is
 *	synchronized with hot plugging with a mutex.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_scsi_hotplug(void *data)
{
	struct ata_port *ap = data;
	int i;

	/* nothing to do if the port is being torn down */
	if (ap->pflags & ATA_PFLAG_UNLOADING) {
		DPRINTK("ENTER/EXIT - unloading\n");
		return;
	}

	DPRINTK("ENTER\n");

	/* unplug detached devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned long flags;

		if (!(dev->flags & ATA_DFLAG_DETACHED))
			continue;

		/* clear DETACHED under the port lock before the
		 * (sleeping) removal */
		spin_lock_irqsave(ap->lock, flags);
		dev->flags &= ~ATA_DFLAG_DETACHED;
		spin_unlock_irqrestore(ap->lock, flags);

		ata_scsi_remove_dev(dev);
	}

	/* scan for new ones */
	ata_scsi_scan_host(ap);

	/* If we scanned while EH was in progress, scan would have
	 * failed silently.  Requeue if there are enabled but
	 * unattached devices.
	 */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_enabled(dev) && !dev->sdev) {
			/* retry in one second */
			queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ);
			break;
		}
	}

	DPRINTK("EXIT\n");
}
3094
/**
 *	ata_scsi_user_scan - indication for user-initiated bus scan
 *	@shost: SCSI host to scan
 *	@channel: Channel to scan
 *	@id: ID to scan
 *	@lun: LUN to scan
 *
 *	This function is called when user explicitly requests bus
 *	scan.  Set probe pending flag and invoke EH.  Only supported
 *	on ports with new-style EH; channel and LUN must be 0 or the
 *	wildcard.
 *
 *	LOCKING:
 *	SCSI layer (we don't care)
 *
 *	RETURNS:
 *	Zero on success, -EOPNOTSUPP or -EINVAL on failure.
 */
static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
			      unsigned int id, unsigned int lun)
{
	struct ata_port *ap = ata_shost_to_port(shost);
	unsigned long flags;
	int rc = 0;

	if (!ap->ops->error_handler)
		return -EOPNOTSUPP;

	if ((channel != SCAN_WILD_CARD && channel != 0) ||
	    (lun != SCAN_WILD_CARD && lun != 0))
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	if (id == SCAN_WILD_CARD) {
		/* probe every device slot on the port */
		ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
		ap->eh_info.action |= ATA_EH_SOFTRESET;
	} else {
		struct ata_device *dev = ata_find_dev(ap, id);

		if (dev) {
			ap->eh_info.probe_mask |= 1 << dev->devno;
			ap->eh_info.action |= ATA_EH_SOFTRESET;
			ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
		} else
			rc = -EINVAL;
	}

	/* kick EH to perform the actual probing */
	if (rc == 0)
		ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
3148
3149/**
3150 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
3151 * @data: Pointer to ATA port to perform scsi_rescan_device()
3152 *
3153 * After ATA pass thru (SAT) commands are executed successfully,
3154 * libata need to propagate the changes to SCSI layer. This
3155 * function must be executed from ata_aux_wq such that sdev
3156 * attach/detach don't race with rescan.
3157 *
3158 * LOCKING:
3159 * Kernel thread context (may sleep).
3160 */
3161void ata_scsi_dev_rescan(void *data)
3162{
3163 struct ata_port *ap = data;
3164 struct ata_device *dev;
3165 unsigned int i;
3166
3167 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3168 dev = &ap->device[i];
3169
3170 if (ata_dev_enabled(dev) && dev->sdev)
3171 scsi_rescan_device(&(dev->sdev->sdev_gendev));
3172 }
3173}
3174
/**
 *	ata_sas_port_alloc - Allocate port for a SAS attached SATA device
 *	@host_set: ATA host set the new port belongs to
 *	@port_info: Information from low-level host driver
 *	@host: SCSI host that the scsi device is attached to
 *
 *	Allocates and initializes an ata_port for a SAS/SATA bridge.
 *	The port shares @host's host_lock as its ap->lock.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	ata_port pointer on success / NULL on failure.
 */

struct ata_port *ata_sas_port_alloc(struct ata_host_set *host_set,
				    struct ata_port_info *port_info,
				    struct Scsi_Host *host)
{
	struct ata_port *ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	struct ata_probe_ent *ent;

	if (!ap)
		return NULL;

	ent = ata_probe_ent_alloc(host_set->dev, port_info);
	if (!ent) {
		kfree(ap);
		return NULL;
	}

	ata_port_init(ap, host_set, ent, 0);
	ap->lock = host->host_lock;
	/* the probe_ent is only needed during initialization */
	kfree(ent);
	return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3210
/**
 *	ata_sas_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates DMA pad.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Result of ata_pad_alloc(): zero on success, non-zero on error.
 */
int ata_sas_port_start(struct ata_port *ap)
{
	return ata_pad_alloc(ap, ap->dev);
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);
3228
/**
 *	ata_sas_port_stop - Undo ata_sas_port_start()
 *	@ap: Port to shut down
 *
 *	Frees the DMA pad.
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_sas_port_stop(struct ata_port *ap)
{
	ata_pad_free(ap, ap->dev);
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3246
3247/**
3248 * ata_sas_port_init - Initialize a SATA device
3249 * @ap: SATA port to initialize
3250 *
3251 * LOCKING:
3252 * PCI/etc. bus probe sem.
3253 *
3254 * RETURNS:
3255 * Zero on success, non-zero on error.
3256 */
3257
3258int ata_sas_port_init(struct ata_port *ap)
3259{
3260 int rc = ap->ops->port_start(ap);
3261
3262 if (!rc)
3263 rc = ata_bus_probe(ap);
3264
3265 return rc;
3266}
3267EXPORT_SYMBOL_GPL(ata_sas_port_init);
3268
/**
 *	ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
 *	@ap: SATA port to destroy
 *
 *	Stops the port via its port_stop() hook and frees the port
 *	structure itself.  @ap must not be used afterwards.
 */

void ata_sas_port_destroy(struct ata_port *ap)
{
	ap->ops->port_stop(ap);
	kfree(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
3281
/**
 *	ata_sas_slave_configure - Default slave_config routine for libata devices
 *	@sdev: SCSI device to configure
 *	@ap: ATA port to which SCSI device is attached
 *
 *	Applies the standard libata sdev and per-device configuration.
 *	Note this always configures against ap->device[0] — SAS-attached
 *	SATA ports carry a single device.
 *
 *	RETURNS:
 *	Zero.
 */

int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
	ata_scsi_sdev_config(sdev);
	ata_scsi_dev_config(sdev, ap->device);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
3298
3299/**
3300 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
3301 * @cmd: SCSI command to be sent
3302 * @done: Completion function, called when command is complete
3303 * @ap: ATA port to which the command is being sent
3304 *
3305 * RETURNS:
3306 * Zero.
3307 */
3308
3309int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
3310 struct ata_port *ap)
3311{
3312 ata_scsi_dump_cdb(ap, cmd);
3313
3314 if (likely(ata_scsi_dev_enabled(ap->device)))
3315 __ata_scsi_queuecmd(cmd, done, ap->device);
3316 else {
3317 cmd->result = (DID_BAD_TARGET << 16);
3318 done(cmd);
3319 }
3320 return 0;
3321}
3322EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
new file mode 100644
index 000000000000..d4a4f82360ec
--- /dev/null
+++ b/drivers/ata/libata.h
@@ -0,0 +1,122 @@
1/*
2 * libata.h - helper library for ATA
3 *
4 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 * Copyright 2003-2004 Jeff Garzik
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 */
27
28#ifndef __LIBATA_H__
29#define __LIBATA_H__
30
31#define DRV_NAME "libata"
32#define DRV_VERSION "2.00" /* must be exactly four chars */
33
/* Argument bundle handed to the SCSI-command simulator helpers
 * (ata_scsiop_*) via ata_scsi_rbuf_fill().
 */
struct ata_scsi_args {
	struct ata_device *dev;			/* target ATA device */
	u16 *id;				/* device ID words (presumably IDENTIFY data — verify) */
	struct scsi_cmnd *cmd;			/* SCSI command being emulated */
	void (*done)(struct scsi_cmnd *);	/* SCSI midlayer completion callback */
};
40
41/* libata-core.c */
42extern struct workqueue_struct *ata_aux_wq;
43extern int atapi_enabled;
44extern int atapi_dmadir;
45extern int libata_fua;
46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_dev_disable(struct ata_device *dev);
49extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen);
53extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
54extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
55 int post_reset, u16 *id);
56extern int ata_dev_configure(struct ata_device *dev, int print_info);
57extern int sata_down_spd_limit(struct ata_port *ap);
58extern int sata_set_spd_needed(struct ata_port *ap);
59extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
60extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
61extern void ata_qc_free(struct ata_queued_cmd *qc);
62extern void ata_qc_issue(struct ata_queued_cmd *qc);
63extern void __ata_qc_complete(struct ata_queued_cmd *qc);
64extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
65extern void ata_dev_select(struct ata_port *ap, unsigned int device,
66 unsigned int wait, unsigned int can_sleep);
67extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
68extern int ata_flush_cache(struct ata_device *dev);
69extern void ata_dev_init(struct ata_device *dev);
70extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
71extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
72extern void ata_port_init(struct ata_port *ap, struct ata_host_set *host_set,
73 const struct ata_probe_ent *ent, unsigned int port_no);
74extern struct ata_probe_ent *ata_probe_ent_alloc(struct device *dev,
75 const struct ata_port_info *port);
76
77
78/* libata-scsi.c */
79extern struct scsi_transport_template ata_scsi_transport_template;
80
81extern void ata_scsi_scan_host(struct ata_port *ap);
82extern int ata_scsi_offline_dev(struct ata_device *dev);
83extern void ata_scsi_hotplug(void *data);
84extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
85 unsigned int buflen);
86
87extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
88 unsigned int buflen);
89
90extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
91 unsigned int buflen);
92extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
93 unsigned int buflen);
94extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
95 unsigned int buflen);
96extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
97 unsigned int buflen);
98extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
99 unsigned int buflen);
100extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
101 unsigned int buflen);
102extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
103 unsigned int buflen);
104extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
105 void (*done)(struct scsi_cmnd *),
106 u8 asc, u8 ascq);
107extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
108 u8 sk, u8 asc, u8 ascq);
109extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
110 unsigned int (*actor) (struct ata_scsi_args *args,
111 u8 *rbuf, unsigned int buflen));
112extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
113extern void ata_scsi_dev_rescan(void *data);
114extern int ata_bus_probe(struct ata_port *ap);
115
116/* libata-eh.c */
117extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
118extern void ata_scsi_error(struct Scsi_Host *host);
119extern void ata_port_wait_eh(struct ata_port *ap);
120extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
121
122#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
new file mode 100644
index 000000000000..61d2aa697b4d
--- /dev/null
+++ b/drivers/ata/pdc_adma.c
@@ -0,0 +1,740 @@
1/*
2 * pdc_adma.c - Pacific Digital Corporation ADMA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Mark Lord
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 *
27 * Supports ATA disks in single-packet ADMA mode.
28 * Uses PIO for everything else.
29 *
30 * TODO: Use ADMA transfers for ATAPI devices, when possible.
31 * This requires careful attention to a number of quirks of the chip.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/device.h>
44#include <scsi/scsi_host.h>
45#include <asm/io.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "pdc_adma"
49#define DRV_VERSION "0.04"
50
51/* macro to calculate base address for ATA regs */
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
53
54/* macro to calculate base address for ADMA regs */
55#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
56
/* Hardware constants for the PDC ADMA controller: packet buffer sizing,
 * register offsets (relative to the bases computed by ADMA_REGS() /
 * ADMA_ATA_REGS()), and bit definitions for control/status/CPB/PRD fields.
 */
enum {
	ADMA_PORTS		= 2,
	ADMA_CPB_BYTES		= 40,
	ADMA_PRD_BYTES		= LIBATA_MAX_PRD * 16,
	ADMA_PKT_BYTES		= ADMA_CPB_BYTES + ADMA_PRD_BYTES,

	ADMA_DMA_BOUNDARY	= 0xffffffff,

	/* global register offsets */
	ADMA_MODE_LOCK		= 0x00c7,

	/* per-channel register offsets */
	ADMA_CONTROL		= 0x0000, /* ADMA control */
	ADMA_STATUS		= 0x0002, /* ADMA status */
	ADMA_CPB_COUNT		= 0x0004, /* CPB count */
	ADMA_CPB_CURRENT	= 0x000c, /* current CPB address */
	ADMA_CPB_NEXT		= 0x000c, /* next CPB address */
	ADMA_CPB_LOOKUP		= 0x0010, /* CPB lookup table */
	ADMA_FIFO_IN		= 0x0014, /* input FIFO threshold */
	ADMA_FIFO_OUT		= 0x0016, /* output FIFO threshold */

	/* ADMA_CONTROL register bits */
	aNIEN			= (1 << 8), /* irq mask: 1==masked */
	aGO			= (1 << 7), /* packet trigger ("Go!") */
	aRSTADM			= (1 << 5), /* ADMA logic reset */
	aPIOMD4			= 0x0003,   /* PIO mode 4 */

	/* ADMA_STATUS register bits */
	aPSD			= (1 << 6),
	aUIRQ			= (1 << 4),
	aPERR			= (1 << 0),

	/* CPB bits */
	cDONE			= (1 << 0),
	cVLD			= (1 << 0),
	cDAT			= (1 << 2),
	cIEN			= (1 << 3),

	/* PRD bits */
	pORD			= (1 << 4),
	pDIRO			= (1 << 5),
	pEND			= (1 << 7),

	/* ATA register flags */
	rIGN			= (1 << 5),
	rEND			= (1 << 7),

	/* ATA register addresses */
	ADMA_REGS_CONTROL	= 0x0e,
	ADMA_REGS_SECTOR_COUNT	= 0x12,
	ADMA_REGS_LBA_LOW	= 0x13,
	ADMA_REGS_LBA_MID	= 0x14,
	ADMA_REGS_LBA_HIGH	= 0x15,
	ADMA_REGS_DEVICE	= 0x16,
	ADMA_REGS_COMMAND	= 0x17,

	/* PCI device IDs */
	board_1841_idx		= 0, /* ADMA 2-port controller */
};
116
117typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
118
/* Driver-private per-port state, hung off ata_port->private_data. */
struct adma_port_priv {
	u8 *pkt;		/* CPU address of the CPB+PRD packet buffer */
	dma_addr_t pkt_dma;	/* bus address of the same buffer */
	adma_state_t state;	/* which completion path the port expects */
};
124
125static int adma_ata_init_one (struct pci_dev *pdev,
126 const struct pci_device_id *ent);
127static irqreturn_t adma_intr (int irq, void *dev_instance,
128 struct pt_regs *regs);
129static int adma_port_start(struct ata_port *ap);
130static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap);
138static void adma_irq_clear(struct ata_port *ap);
139static void adma_eng_timeout(struct ata_port *ap);
140
/* SCSI host template: generic libata entry points plus the ADMA
 * DMA-boundary and scatter/gather limits.
 */
static struct scsi_host_template adma_ata_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ENABLE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ADMA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
158
/* libata low-level operations: taskfile access is generic; queued-command
 * prep/issue, resets, interrupts and port lifetime are ADMA-specific.
 */
static const struct ata_port_operations adma_ata_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.check_atapi_dma	= adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.phy_reset		= adma_phy_reset,
	.qc_prep		= adma_qc_prep,
	.qc_issue		= adma_qc_issue,
	.eng_timeout		= adma_eng_timeout,
	.data_xfer		= ata_mmio_data_xfer,
	.irq_handler		= adma_intr,
	.irq_clear		= adma_irq_clear,
	.port_start		= adma_port_start,
	.port_stop		= adma_port_stop,
	.host_stop		= adma_host_stop,
	.bmdma_stop		= adma_bmdma_stop,
	.bmdma_status		= adma_bmdma_status,
};
180
/* Board description table, indexed by the driver_data field of the
 * PCI ID table entries.
 */
static struct ata_port_info adma_port_info[] = {
	/* board_1841_idx */
	{
		.sht		= &adma_ata_sht,
		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
				  ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
				  ATA_FLAG_PIO_POLLING,
		.pio_mask	= 0x10, /* pio4 */
		.udma_mask	= 0x1f, /* udma0-4 */
		.port_ops	= &adma_ata_ops,
	},
};
193
/* PCI devices handled by this driver. */
static const struct pci_device_id adma_ata_pci_tbl[] = {
	{ PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_1841_idx },

	{ }	/* terminate list */
};
200
/* PCI driver glue; removal is handled by the generic libata helper. */
static struct pci_driver adma_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= adma_ata_pci_tbl,
	.probe			= adma_ata_init_one,
	.remove			= ata_pci_remove_one,
};
207
/* ->check_atapi_dma hook: a non-zero return tells the libata core to
 * fall back to PIO for this ATAPI command.
 */
static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not yet supported */
}
212
/* ->bmdma_stop hook: this chip has no SFF BMDMA engine, so there is
 * nothing to stop.
 */
static void adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	/* nothing */
}
217
/* ->bmdma_status hook: no BMDMA status register exists; report 0. */
static u8 adma_bmdma_status(struct ata_port *ap)
{
	return 0;
}
222
/* ->irq_clear hook: no BMDMA-style interrupt latch to clear here;
 * interrupt conditions are handled in adma_intr() itself.
 */
static void adma_irq_clear(struct ata_port *ap)
{
	/* nothing */
}
227
/* Pulse the aRSTADM reset bit (with interrupts masked) on channel
 * @chan, then drop back to plain PIO-mode-4 operation.  The udelay()s
 * give the logic time to settle — TODO confirm the required delay
 * against the chip documentation.
 */
static void adma_reset_engine(void __iomem *chan)
{
	/* reset ADMA to idle state */
	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
	udelay(2);
	writew(aPIOMD4, chan + ADMA_CONTROL);
	udelay(2);
}
236
/* Quiesce the ATA side of port @ap, reset its ADMA engine, and
 * reprogram the FIFO thresholds and CPB pointer/count so the channel
 * is ready to accept a new packet.
 */
static void adma_reinit_engine(struct ata_port *ap)
{
	struct adma_port_priv *pp = ap->private_data;
	void __iomem *mmio_base = ap->host_set->mmio_base;
	void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);

	/* mask/clear ATA interrupts */
	writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
	ata_check_status(ap);	/* status read also clears pending INTRQ */

	/* reset the ADMA engine */
	adma_reset_engine(chan);

	/* set in-FIFO threshold to 0x100 */
	writew(0x100, chan + ADMA_FIFO_IN);

	/* set CPB pointer */
	writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);

	/* set out-FIFO threshold to 0x100 */
	writew(0x100, chan + ADMA_FIFO_OUT);

	/* set CPB count */
	writew(1, chan + ADMA_CPB_COUNT);

	/* read/discard ADMA status */
	readb(chan + ADMA_STATUS);
}
265
/* Switch the port's channel out of ADMA packet mode back to plain
 * register (taskfile/PIO) operation.
 */
static inline void adma_enter_reg_mode(struct ata_port *ap)
{
	void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);

	writew(aPIOMD4, chan + ADMA_CONTROL);
	readb(chan + ADMA_STATUS);	/* flush */
}
273
/* ->phy_reset hook: return the port to idle state, reinitialize the
 * ADMA engine, then run the standard libata probe + bus reset.
 */
static void adma_phy_reset(struct ata_port *ap)
{
	struct adma_port_priv *pp = ap->private_data;

	pp->state = adma_state_idle;
	adma_reinit_engine(ap);
	ata_port_probe(ap);
	ata_bus_reset(ap);
}
283
/* ->eng_timeout hook: reset the ADMA engine before handing the timed-out
 * command to the generic libata timeout handler.
 */
static void adma_eng_timeout(struct ata_port *ap)
{
	struct adma_port_priv *pp = ap->private_data;

	if (pp->state != adma_state_idle) /* healthy paranoia */
		pp->state = adma_state_mmio;	/* so completion goes via the mmio path */
	adma_reinit_engine(ap);
	ata_eng_timeout(ap);
}
293
/* Append PRD (physical region descriptor) entries for @qc's scatterlist
 * to the CPB packet built by adma_qc_prep(), chaining each entry to the
 * next and marking the final one with pEND.  Returns the total packet
 * length in bytes.
 *
 * NOTE(review): entries appear to be 16 bytes each (addr, len,
 * flags/mode/reserved, next-entry pointer) with lengths stored in
 * 8-byte units (sg_dma_len >> 3) — confirm against the ADMA datasheet.
 */
static int adma_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct adma_port_priv *pp = ap->private_data;
	u8 *buf = pp->pkt;
	int i = (2 + buf[3]) * 8;	/* PRD area starts after the CPB: (cLEN + 2) * 8 bytes in */
	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);

	ata_for_each_sg(sg, qc) {
		u32 addr;
		u32 len;

		addr = (u32)sg_dma_address(sg);
		*(__le32 *)(buf + i) = cpu_to_le32(addr);
		i += 4;

		len = sg_dma_len(sg) >> 3;
		*(__le32 *)(buf + i) = cpu_to_le32(len);
		i += 4;

		if (ata_sg_is_last(sg, qc))
			pFLAGS |= pEND;		/* flag the final descriptor */
		buf[i++] = pFLAGS;
		buf[i++] = qc->dev->dma_mode & 0xf;
		buf[i++] = 0;	/* pPKLW */
		buf[i++] = 0;	/* reserved */

		/* link to the next PRD entry, or 0 for the last one */
		*(__le32 *)(buf + i)
			= (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
		i += 4;

		VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
					(unsigned long)addr, len);
	}
	return i;
}
331
/* ->qc_prep hook: for ATA DMA commands, build the CPB (command packet
 * block) in the port's coherent packet buffer — response/control bytes,
 * the shadow-register program, and (via adma_fill_sg()) the PRD chain.
 * Any other protocol falls back to the generic ata_qc_prep().
 */
static void adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct adma_port_priv *pp = qc->ap->private_data;
	u8 *buf = pp->pkt;
	u32 pkt_dma = (u32)pp->pkt_dma;
	int i = 0;

	VPRINTK("ENTER\n");

	adma_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA) {
		ata_qc_prep(qc);
		return;
	}

	buf[i++] = 0;	/* Response flags */
	buf[i++] = 0;	/* reserved */
	buf[i++] = cVLD | cDAT | cIEN;
	i++;		/* cLEN, gets filled in below */

	*(__le32 *)(buf+i) = cpu_to_le32(pkt_dma);	/* cNCPB */
	i += 4;		/* cNCPB */
	i += 4;		/* cPRD, gets filled in below */

	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */

	/* ATA registers; must be a multiple of 4 */
	buf[i++] = qc->tf.device;
	buf[i++] = ADMA_REGS_DEVICE;
	if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
		/* LBA48: load the high-order register bytes first */
		buf[i++] = qc->tf.hob_nsect;
		buf[i++] = ADMA_REGS_SECTOR_COUNT;
		buf[i++] = qc->tf.hob_lbal;
		buf[i++] = ADMA_REGS_LBA_LOW;
		buf[i++] = qc->tf.hob_lbam;
		buf[i++] = ADMA_REGS_LBA_MID;
		buf[i++] = qc->tf.hob_lbah;
		buf[i++] = ADMA_REGS_LBA_HIGH;
	}
	buf[i++] = qc->tf.nsect;
	buf[i++] = ADMA_REGS_SECTOR_COUNT;
	buf[i++] = qc->tf.lbal;
	buf[i++] = ADMA_REGS_LBA_LOW;
	buf[i++] = qc->tf.lbam;
	buf[i++] = ADMA_REGS_LBA_MID;
	buf[i++] = qc->tf.lbah;
	buf[i++] = ADMA_REGS_LBA_HIGH;
	buf[i++] = 0;
	buf[i++] = ADMA_REGS_CONTROL;
	buf[i++] = rIGN;
	buf[i++] = 0;
	buf[i++] = qc->tf.command;
	buf[i++] = ADMA_REGS_COMMAND | rEND;	/* rEND terminates the register program */

	buf[3] = (i >> 3) - 2;				/* cLEN */
	*(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i);	/* cPRD */

	i = adma_fill_sg(qc);
	wmb();	/* flush PRDs and pkt to memory */
#if 0
	/* dump out CPB + PRDs for debug */
	{
		int j, len = 0;
		static char obuf[2048];
		for (j = 0; j < i; ++j) {
			len += sprintf(obuf+len, "%02x ", buf[j]);
			if ((j & 7) == 7) {
				printk("%s\n", obuf);
				len = 0;
			}
		}
		if (len)
			printk("%s\n", obuf);
	}
#endif
}
411
/* Trigger execution of the CPB previously programmed into the channel
 * (see adma_reinit_engine()/adma_qc_prep()) by setting the aGO bit.
 */
static inline void adma_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);

	VPRINTK("ENTER, ap %p\n", ap);

	/* fire up the ADMA engine */
	writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
}
422
423static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
424{
425 struct adma_port_priv *pp = qc->ap->private_data;
426
427 switch (qc->tf.protocol) {
428 case ATA_PROT_DMA:
429 pp->state = adma_state_pkt;
430 adma_packet_start(qc);
431 return 0;
432
433 case ATA_PROT_ATAPI_DMA:
434 BUG();
435 break;
436
437 default:
438 break;
439 }
440
441 pp->state = adma_state_mmio;
442 return ata_qc_issue_prot(qc);
443}
444
/* Service ADMA packet-mode completions on every port: a non-zero
 * ADMA_STATUS read means the channel raised an event.  Error status
 * bits, or a CPB whose response byte is not cDONE, are folded into
 * qc->err_mask.  Returns non-zero if any event was handled.
 */
static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
{
	unsigned int handled = 0, port_no;
	u8 __iomem *mmio_base = host_set->mmio_base;

	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
		struct ata_port *ap = host_set->ports[port_no];
		struct adma_port_priv *pp;
		struct ata_queued_cmd *qc;
		void __iomem *chan = ADMA_REGS(mmio_base, port_no);
		u8 status = readb(chan + ADMA_STATUS);

		if (status == 0)
			continue;	/* no event on this channel */
		handled = 1;
		adma_enter_reg_mode(ap);	/* back to register mode for completion */
		if (ap->flags & ATA_FLAG_DISABLED)
			continue;
		pp = ap->private_data;
		if (!pp || pp->state != adma_state_pkt)
			continue;	/* no packet command outstanding here */
		qc = ata_qc_from_tag(ap, ap->active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			if ((status & (aPERR | aPSD | aUIRQ)))
				qc->err_mask |= AC_ERR_OTHER;
			else if (pp->pkt[0] != cDONE)
				qc->err_mask |= AC_ERR_OTHER;

			ata_qc_complete(qc);
		}
	}
	return handled;
}
478
/* Service register-mode (taskfile/PIO) completions on every port by
 * reading the ATA status register.  Returns non-zero if any command
 * was completed.
 */
static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
		struct ata_port *ap;
		ap = host_set->ports[port_no];
		if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
			struct ata_queued_cmd *qc;
			struct adma_port_priv *pp = ap->private_data;
			if (!pp || pp->state != adma_state_mmio)
				continue;	/* no register-mode command outstanding */
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {

				/* check main status, clearing INTRQ */
				u8 status = ata_check_status(ap);
				if ((status & ATA_BUSY))
					continue;	/* device still busy; not done yet */
				DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
					ap->id, qc->tf.protocol, status);

				/* complete taskfile transaction */
				pp->state = adma_state_idle;
				qc->err_mask |= ac_err_mask(status);
				ata_qc_complete(qc);
				handled = 1;
			}
		}
	}
	return handled;
}
511
/* Shared interrupt handler: under the host_set lock, poll both the
 * ADMA packet engines and the register-mode ports for completions.
 */
static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	spin_lock(&host_set->lock);
	handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
	spin_unlock(&host_set->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
527
528static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
529{
530 port->cmd_addr =
531 port->data_addr = base + 0x000;
532 port->error_addr =
533 port->feature_addr = base + 0x004;
534 port->nsect_addr = base + 0x008;
535 port->lbal_addr = base + 0x00c;
536 port->lbam_addr = base + 0x010;
537 port->lbah_addr = base + 0x014;
538 port->device_addr = base + 0x018;
539 port->status_addr =
540 port->command_addr = base + 0x01c;
541 port->altstatus_addr =
542 port->ctl_addr = base + 0x038;
543}
544
545static int adma_port_start(struct ata_port *ap)
546{
547 struct device *dev = ap->host_set->dev;
548 struct adma_port_priv *pp;
549 int rc;
550
551 rc = ata_port_start(ap);
552 if (rc)
553 return rc;
554 adma_enter_reg_mode(ap);
555 rc = -ENOMEM;
556 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL);
557 if (!pp)
558 goto err_out;
559 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
560 GFP_KERNEL);
561 if (!pp->pkt)
562 goto err_out_kfree;
563 /* paranoia? */
564 if ((pp->pkt_dma & 7) != 0) {
565 printk("bad alignment for pp->pkt_dma: %08x\n",
566 (u32)pp->pkt_dma);
567 dma_free_coherent(dev, ADMA_PKT_BYTES,
568 pp->pkt, pp->pkt_dma);
569 goto err_out_kfree;
570 }
571 memset(pp->pkt, 0, ADMA_PKT_BYTES);
572 ap->private_data = pp;
573 adma_reinit_engine(ap);
574 return 0;
575
576err_out_kfree:
577 kfree(pp);
578err_out:
579 ata_port_stop(ap);
580 return rc;
581}
582
/* ->port_stop hook: quiesce the channel's ADMA engine, release the
 * packet buffer and private state, then run the generic port teardown.
 */
static void adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct adma_port_priv *pp = ap->private_data;

	adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
	if (pp != NULL) {
		ap->private_data = NULL;	/* detach before freeing */
		if (pp->pkt != NULL)
			dma_free_coherent(dev, ADMA_PKT_BYTES,
					  pp->pkt, pp->pkt_dma);
		kfree(pp);
	}
	ata_port_stop(ap);
}
598
/* ->host_stop hook: reset every channel's ADMA engine, then let the
 * generic PCI host teardown unmap the MMIO region.
 */
static void adma_host_stop(struct ata_host_set *host_set)
{
	unsigned int port_no;

	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
		adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));

	ata_pci_host_stop(host_set);
}
608
/* One-time controller initialization: lock in aGO operation and reset
 * every channel's ADMA logic.  @chip_id is currently unused.
 */
static void adma_host_init(unsigned int chip_id,
				struct ata_probe_ent *probe_ent)
{
	unsigned int port_no;
	void __iomem *mmio_base = probe_ent->mmio_base;

	/* enable/lock aGO operation */
	writeb(7, mmio_base + ADMA_MODE_LOCK);

	/* reset the ADMA logic */
	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
		adma_reset_engine(ADMA_REGS(mmio_base, port_no));
}
622
/* Constrain @pdev to 32-bit streaming and consistent DMA.
 * @mmio_base is currently unused.  Returns 0 or a negative errno.
 */
static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	int rc;

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			"32-bit DMA enable failed\n");
		return rc;
	}
	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			"32-bit consistent DMA enable failed\n");
		return rc;
	}
	return 0;
}
641
/* PCI ->probe hook: enable the device, map BAR 4 (the MMIO register
 * window), set DMA masks, build a probe entry from the board table,
 * initialize the controller, and register the ports with libata.
 * Returns 0 on success or a negative errno; cleanup is via the goto
 * ladder at the bottom.
 */
static int adma_ata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	void __iomem *mmio_base;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	int rc, port_no;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

	/* BAR 4 must be a memory-space resource */
	if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
		rc = -ENODEV;
		goto err_out_regions;
	}

	mmio_base = pci_iomap(pdev, 4, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	rc = adma_set_dma_masks(pdev, mmio_base);
	if (rc)
		goto err_out_iounmap;

	probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}

	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	/* copy the board description into the probe entry */
	probe_ent->sht		= adma_port_info[board_idx].sht;
	probe_ent->host_flags	= adma_port_info[board_idx].host_flags;
	probe_ent->pio_mask	= adma_port_info[board_idx].pio_mask;
	probe_ent->mwdma_mask	= adma_port_info[board_idx].mwdma_mask;
	probe_ent->udma_mask	= adma_port_info[board_idx].udma_mask;
	probe_ent->port_ops	= adma_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->mmio_base = mmio_base;
	probe_ent->n_ports = ADMA_PORTS;

	for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
		adma_ata_setup_port(&probe_ent->port[port_no],
			ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
	}

	pci_set_master(pdev);

	/* initialize adapter */
	adma_host_init(board_idx, probe_ent);

	rc = ata_device_add(probe_ent);
	kfree(probe_ent);	/* libata keeps its own copy of the data */
	if (rc != ADMA_PORTS)
		goto err_out_iounmap;
	return 0;

err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	pci_disable_device(pdev);
	return rc;
}
722
/* Module entry point: register the PCI driver. */
static int __init adma_ata_init(void)
{
	return pci_register_driver(&adma_ata_pci_driver);
}
727
/* Module exit point: unregister the PCI driver. */
static void __exit adma_ata_exit(void)
{
	pci_unregister_driver(&adma_ata_pci_driver);
}
732
733MODULE_AUTHOR("Mark Lord");
734MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
735MODULE_LICENSE("GPL");
736MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
737MODULE_VERSION(DRV_VERSION);
738
739module_init(adma_ata_init);
740module_exit(adma_ata_exit);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
new file mode 100644
index 000000000000..a2915a56accd
--- /dev/null
+++ b/drivers/ata/sata_mv.c
@@ -0,0 +1,2468 @@
1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/blkdev.h>
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/sched.h>
32#include <linux/dma-mapping.h>
33#include <linux/device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_cmnd.h>
36#include <linux/libata.h>
37#include <asm/io.h>
38
39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.7"
41
/*
 * Register map and constant definitions for the Marvell 50xx/60xx/70xx
 * SATA controllers.  All *_OFS values are byte offsets into the chip's
 * BAR0 MMIO window unless a comment says otherwise.
 *
 * NOTE(review): entries such as EPRD_FLAG_END_OF_TBL and
 * EDMA_ERR_TRANS_PROTO use (1 << 31), which overflows signed int --
 * accepted by GCC for enum constants, but worth confirming no
 * sign-extension surprises when these are or'ed into u32 registers.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary.  Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	/* One contiguous coherent chunk holds CRQB + CRPB + SG table;
	 * see mv_port_start() for the layout.
	 */
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* Command ReQuest Block layout bits (see mv_crqb_pack_cmd()) */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	/* Chip-wide interrupt cause/mask; one ERR/DONE bit pair per port */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	/* Error conditions considered unrecoverable by the driver */
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
245
/* Chip-generation predicates keyed off hp_flags.  Gen I == 50xx parts,
 * Gen II == 60xx parts (i.e. "not 50xx"), Gen IIE is flagged separately.
 */
#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv) IS_50XX(hpriv)
#define IS_GEN_II(hpriv) IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
251
/* Unsigned constants kept in a separate enum (values exceed what the
 * main enum's int-typed constants should carry).
 */
enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,	/* CRQB ring is 1KB aligned */

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,	/* CRPB ring is 256B aligned */
};
262
/* Board indices carried in mv_pci_tbl's driver_data.
 * NOTE(review): presumably also used to index mv_port_info[]; keep both
 * in the same order -- confirm against mv_init_one() (not in this chunk).
 */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
272
/* Command ReQuest Block: 32B
 * Hardware-defined layout: SG table address plus up to 11 packed
 * register writes (built by mv_crqb_pack_cmd()).
 */
struct mv_crqb {
	__le32			sg_addr;	/* ePRD table, low 32 bits */
	__le32			sg_addr_hi;	/* ePRD table, high 32 bits */
	__le16			ctrl_flags;	/* CRQB_FLAG_*/tag */
	__le16			ata_cmd[11];	/* packed taskfile words */
};
280
/* Gen-IIE variant of the command request block (used by mv_qc_prep_iie()) */
struct mv_crqb_iie {
	__le32			addr;		/* ePRD table, low 32 bits */
	__le32			addr_hi;	/* ePRD table, high 32 bits */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];	/* raw taskfile image */
};
288
/* Command ResPonse Block: 8B
 * Written by the EDMA engine on command completion; device status is
 * carried in 'flags' (see CRPB_FLAG_STATUS_SHIFT).
 */
struct mv_crpb {
	__le16			id;		/* completed command tag */
	__le16			flags;
	__le32			tmstmp;		/* completion timestamp */
};
295
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG
 * 16B hardware scatter/gather entry; filled by mv_fill_sg().
 */
struct mv_sg {
	__le32			addr;		/* buffer address, low 32 bits */
	__le32			flags_size;	/* length (<=64KB) | EPRD_FLAG_* */
	__le32			addr_hi;	/* buffer address, high 32 bits */
	__le32			reserved;
};
303
/* Per-port private data (ap->private_data).
 * crqb/crpb/sg_tbl carve up one coherent DMA chunk of
 * MV_PORT_PRIV_DMA_SZ bytes, allocated in mv_port_start(); crqb is the
 * base of that chunk.
 */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request ring (chunk base) */
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;		/* response ring */
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;	/* ePRD scatter/gather table */
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;	/* MV_PP_FLAG_* */
};
313
/* Per-port PHY signal parameters captured by the read_preamp hook */
struct mv_port_signal {
	u32			amps;	/* amplitude */
	u32			pre;	/* pre-emphasis */
};
318
struct mv_host_priv;

/* Chip-generation-specific low-level operations; see mv5xxx_ops /
 * mv6xxx_ops for the two implementations.
 */
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
331
/* Per-host private data (host_set->private_data); freed in mv_host_stop() */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* chip/errata flags */
	struct mv_port_signal	signal[8];	/* one per possible port */
	const struct mv_hw_ops	*ops;		/* generation-specific hooks */
};
337
/* Forward declarations for the libata hooks and the generation-specific
 * (mv5_*/mv6_*) hardware helpers wired into the tables below.
 */
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
378
/* SCSI host template shared by every chip variant in this driver. */
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	/* halved: mv_fill_sg() may split each element in two at a 64KB
	 * boundary, and the ePRD table holds only MV_MAX_SG_CT entries.
	 */
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
396
/* libata hooks for Gen-I (50xx) parts: per-HC SCR access via mv5_scr_* */
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
424
/* libata hooks for Gen-II (60xx) parts: SCRs live in the port register
 * block, hence mv_scr_* instead of mv5_scr_*.
 */
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
452
/* libata hooks for Gen-IIE (6042/7042) parts: same as mv6_ops except
 * for the IIE-specific qc_prep.
 * NOTE(review): unlike mv5_ops/mv6_ops there is no .data_xfer hook here;
 * confirm that PIO data transfer is handled elsewhere (or cannot occur)
 * for IIE chips -- it looks like an accidental omission.
 */
static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
479
/* Per-board capabilities.
 * NOTE(review): presumably indexed by enum chip_type via the
 * driver_data in mv_pci_tbl -- keep entry order in sync with that enum.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
533
/* PCI IDs handled by this driver; driver_data carries the chip_type
 * board index.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},

	/* Adaptec boards built around Marvell 60xx silicon */
	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
	{}			/* terminate list */
};
549
/* PCI driver glue; teardown goes through libata's generic remove hook. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
556
/* Gen-I (50xx) low-level hardware helpers */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
565
/* Gen-II/IIE (60xx/7042) low-level hardware helpers */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
574
/*
 * module options
 */
/* NOTE(review): no module_param() registration for 'msi' is visible in
 * this chunk -- presumably done elsewhere in the file; verify.
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
579
580
581/*
582 * Functions
583 */
584
/* MMIO write followed by a read-back of the same register, to flush the
 * write past any PCI posted-write buffering before the caller proceeds.
 * The write/read order here is load-bearing -- do not reorder.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
590
591static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
592{
593 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
594}
595
596static inline unsigned int mv_hc_from_port(unsigned int port)
597{
598 return port >> MV_PORT_HC_SHIFT;
599}
600
601static inline unsigned int mv_hardport_from_port(unsigned int port)
602{
603 return port & MV_PORT_MASK;
604}
605
606static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
607 unsigned int port)
608{
609 return mv_hc_base(base, mv_hc_from_port(port));
610}
611
612static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
613{
614 return mv_hc_base_from_port(base, port) +
615 MV_SATAHC_ARBTR_REG_SZ +
616 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
617}
618
619static inline void __iomem *mv_ap_base(struct ata_port *ap)
620{
621 return mv_port_base(ap->host_set->mmio_base, ap->port_no);
622}
623
624static inline int mv_get_hc_count(unsigned long host_flags)
625{
626 return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
627}
628
/* libata irq_clear hook: intentionally empty -- interrupt cause
 * registers are handled directly by mv_interrupt().
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
632
633/**
634 * mv_start_dma - Enable eDMA engine
635 * @base: port base address
636 * @pp: port private data
637 *
638 * Verify the local cache of the eDMA state is accurate with a
639 * WARN_ON.
640 *
641 * LOCKING:
642 * Inherited from caller.
643 */
644static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
645{
646 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
647 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
648 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
649 }
650 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
651}
652
653/**
654 * mv_stop_dma - Disable eDMA engine
655 * @ap: ATA channel to manipulate
656 *
657 * Verify the local cache of the eDMA state is accurate with a
658 * WARN_ON.
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663static void mv_stop_dma(struct ata_port *ap)
664{
665 void __iomem *port_mmio = mv_ap_base(ap);
666 struct mv_port_priv *pp = ap->private_data;
667 u32 reg;
668 int i;
669
670 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
671 /* Disable EDMA if active. The disable bit auto clears.
672 */
673 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
674 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
675 } else {
676 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
677 }
678
679 /* now properly wait for the eDMA to stop */
680 for (i = 1000; i > 0; i--) {
681 reg = readl(port_mmio + EDMA_CMD_OFS);
682 if (!(EDMA_EN & reg)) {
683 break;
684 }
685 udelay(100);
686 }
687
688 if (EDMA_EN & reg) {
689 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
690 /* FIXME: Consider doing a reset here to recover */
691 }
692}
693
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space starting at @start, four u32 words per
 * printed line.  Compiled only into debug builds.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		/* b advances inside the inner loop; w only bounds the row */
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ",readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
708
/* Hex-dump @bytes of @pdev's PCI config space, four dwords per line.
 * Body compiles to nothing unless ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int pos = 0;

	while (pos < bytes) {
		int col;

		DPRINTK("%02x: ", pos);
		for (col = 0; col < 4 && pos < bytes; col++) {
			u32 dw;

			/* best-effort dump; read errors are ignored */
			(void) pci_read_config_dword(pdev, pos, &dw);
			printk("%08x ", dw);
			pos += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Dump PCI config space, PCI-block registers, HC registers and per-port
 * EDMA/SATA registers for one port (@port >= 0) or for everything
 * (@port < 0).  Debug builds only.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	/* NOTE(review): this initializer is dead -- hc_base is
	 * unconditionally reassigned in the HC loop below.
	 */
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
769
770static unsigned int mv_scr_offset(unsigned int sc_reg_in)
771{
772 unsigned int ofs;
773
774 switch (sc_reg_in) {
775 case SCR_STATUS:
776 case SCR_CONTROL:
777 case SCR_ERROR:
778 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
779 break;
780 case SCR_ACTIVE:
781 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
782 break;
783 default:
784 ofs = 0xffffffffU;
785 break;
786 }
787 return ofs;
788}
789
790static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
791{
792 unsigned int ofs = mv_scr_offset(sc_reg_in);
793
794 if (0xffffffffU != ofs) {
795 return readl(mv_ap_base(ap) + ofs);
796 } else {
797 return (u32) ofs;
798 }
799}
800
801static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
802{
803 unsigned int ofs = mv_scr_offset(sc_reg_in);
804
805 if (0xffffffffU != ofs) {
806 writelfl(val, mv_ap_base(ap) + ofs);
807 }
808}
809
810/**
811 * mv_host_stop - Host specific cleanup/stop routine.
812 * @host_set: host data structure
813 *
814 * Disable ints, cleanup host memory, call general purpose
815 * host_stop.
816 *
817 * LOCKING:
818 * Inherited from caller.
819 */
820static void mv_host_stop(struct ata_host_set *host_set)
821{
822 struct mv_host_priv *hpriv = host_set->private_data;
823 struct pci_dev *pdev = to_pci_dev(host_set->dev);
824
825 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
826 pci_disable_msi(pdev);
827 } else {
828 pci_intx(pdev, 0);
829 }
830 kfree(hpriv);
831 ata_host_stop(host_set);
832}
833
834static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
835{
836 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
837}
838
839static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
840{
841 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
842
843 /* set up non-NCQ EDMA configuration */
844 cfg &= ~0x1f; /* clear queue depth */
845 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
846 cfg &= ~(1 << 9); /* disable equeue */
847
848 if (IS_GEN_I(hpriv))
849 cfg |= (1 << 8); /* enab config burst size mask */
850
851 else if (IS_GEN_II(hpriv))
852 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
853
854 else if (IS_GEN_IIE(hpriv)) {
855 cfg |= (1 << 23); /* dis RX PM port mask */
856 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
857 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
858 cfg |= (1 << 18); /* enab early completion */
859 cfg |= (1 << 17); /* enab host q cache */
860 cfg |= (1 << 22); /* enab cutthrough */
861 }
862
863 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
864}
865
866/**
867 * mv_port_start - Port specific init/start routine.
868 * @ap: ATA channel to manipulate
869 *
870 * Allocate and point to DMA memory, init port private memory,
871 * zero indices.
872 *
873 * LOCKING:
874 * Inherited from caller.
875 */
876static int mv_port_start(struct ata_port *ap)
877{
878 struct device *dev = ap->host_set->dev;
879 struct mv_host_priv *hpriv = ap->host_set->private_data;
880 struct mv_port_priv *pp;
881 void __iomem *port_mmio = mv_ap_base(ap);
882 void *mem;
883 dma_addr_t mem_dma;
884 int rc = -ENOMEM;
885
886 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
887 if (!pp)
888 goto err_out;
889 memset(pp, 0, sizeof(*pp));
890
891 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
892 GFP_KERNEL);
893 if (!mem)
894 goto err_out_pp;
895 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
896
897 rc = ata_pad_alloc(ap, dev);
898 if (rc)
899 goto err_out_priv;
900
901 /* First item in chunk of DMA memory:
902 * 32-slot command request table (CRQB), 32 bytes each in size
903 */
904 pp->crqb = mem;
905 pp->crqb_dma = mem_dma;
906 mem += MV_CRQB_Q_SZ;
907 mem_dma += MV_CRQB_Q_SZ;
908
909 /* Second item:
910 * 32-slot command response table (CRPB), 8 bytes each in size
911 */
912 pp->crpb = mem;
913 pp->crpb_dma = mem_dma;
914 mem += MV_CRPB_Q_SZ;
915 mem_dma += MV_CRPB_Q_SZ;
916
917 /* Third item:
918 * Table of scatter-gather descriptors (ePRD), 16 bytes each
919 */
920 pp->sg_tbl = mem;
921 pp->sg_tbl_dma = mem_dma;
922
923 mv_edma_cfg(hpriv, port_mmio);
924
925 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
926 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
927 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
928
929 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
930 writelfl(pp->crqb_dma & 0xffffffff,
931 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
932 else
933 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
934
935 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
936
937 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
938 writelfl(pp->crpb_dma & 0xffffffff,
939 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
940 else
941 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
942
943 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
944 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
945
946 /* Don't turn on EDMA here...do it before DMA commands only. Else
947 * we'll be unable to send non-data, PIO, etc due to restricted access
948 * to shadow regs.
949 */
950 ap->private_data = pp;
951 return 0;
952
953err_out_priv:
954 mv_priv_free(pp, dev);
955err_out_pp:
956 kfree(pp);
957err_out:
958 return rc;
959}
960
961/**
962 * mv_port_stop - Port specific cleanup/stop routine.
963 * @ap: ATA channel to manipulate
964 *
965 * Stop DMA, cleanup port memory.
966 *
967 * LOCKING:
968 * This routine uses the host_set lock to protect the DMA stop.
969 */
970static void mv_port_stop(struct ata_port *ap)
971{
972 struct device *dev = ap->host_set->dev;
973 struct mv_port_priv *pp = ap->private_data;
974 unsigned long flags;
975
976 spin_lock_irqsave(&ap->host_set->lock, flags);
977 mv_stop_dma(ap);
978 spin_unlock_irqrestore(&ap->host_set->lock, flags);
979
980 ap->private_data = NULL;
981 ata_pad_free(ap, dev);
982 mv_priv_free(pp, dev);
983 kfree(pp);
984}
985
986/**
987 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
988 * @qc: queued command whose SG list to source from
989 *
990 * Populate the SG list and mark the last entry.
991 *
992 * LOCKING:
993 * Inherited from caller.
994 */
995static void mv_fill_sg(struct ata_queued_cmd *qc)
996{
997 struct mv_port_priv *pp = qc->ap->private_data;
998 unsigned int i = 0;
999 struct scatterlist *sg;
1000
1001 ata_for_each_sg(sg, qc) {
1002 dma_addr_t addr;
1003 u32 sg_len, len, offset;
1004
1005 addr = sg_dma_address(sg);
1006 sg_len = sg_dma_len(sg);
1007
1008 while (sg_len) {
1009 offset = addr & MV_DMA_BOUNDARY;
1010 len = sg_len;
1011 if ((offset + sg_len) > 0x10000)
1012 len = 0x10000 - offset;
1013
1014 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
1015 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1016 pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
1017
1018 sg_len -= len;
1019 addr += len;
1020
1021 if (!sg_len && ata_sg_is_last(sg, qc))
1022 pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1023
1024 i++;
1025 }
1026 }
1027}
1028
1029static inline unsigned mv_inc_q_index(unsigned index)
1030{
1031 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1032}
1033
1034static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1035{
1036 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1037 (last ? CRQB_CMD_LAST : 0);
1038 *cmdw = cpu_to_le16(tmp);
1039}
1040
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued via the shadow registers, not EDMA */
	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* point this CRQB slot at the command's ePRD (SG) table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* the remaining register writes are common to all DMA commands;
	 * their order and the trailing 'last' flag are hardware-defined.
	 */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1132
/**
 * mv_qc_prep_iie - Host specific command preparation (Gen IIE chips).
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.  The Gen IIE CRQB carries the whole
 * taskfile packed into 32-bit words instead of per-register
 * command words.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Non-DMA protocols are issued through the generic taskfile path. */
	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* the IIE CRQB overlays the same queue slot as the Gen I CRQB */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	/* split ">> 16 >> 16" is safe even when dma_addr_t is 32 bits */
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1200
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * Returns 0 on success, or whatever ata_qc_issue_prot() returns
 * for the non-EDMA path.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life;
	 * writelfl flushes the posted write so the hardware sees it now
	 */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1247
/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We check indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * Returns the ATA status byte of the completed CRPB.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* ATA status lives in the upper byte of the CRPB flags word */
	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
		>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
1290
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger from reset here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		/* read-then-write-back clears the pending SError bits */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		/* hardware turned EDMA off; keep our cached flag in sync */
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	/* NOTE(review): passing KERN_ERR into DPRINTK looks odd -- the
	 * macro normally supplies its own prefix; confirm intent.
	 */
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
1330
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host_set: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	/* each HC serves MV_PORTS_PER_HC consecutive ports */
	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause) {
		/* write-one-to-clear semantics: ack what we just read */
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host_set->ports[port];
		/* NOTE(review): ap is dereferenced here yet the DISABLED
		 * check below guards with "ap &&", implying ap may be
		 * NULL -- if that can happen this oopses; confirm that
		 * ports[] is always fully populated for this chip.
		 */
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA:check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				/* reading the status register also acks
				 * the device interrupt
				 */
				ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		/* map port number to its error bit in the main IRQ reg */
		shift = port << 1;	/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
1434
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 * @regs: unused
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host_set lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host_set->mmio_base;
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
	spin_lock(&host_set->lock);

	/* dispatch each host controller with pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}

	hpriv = host_set->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}
1503
1504static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1505{
1506 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1507 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1508
1509 return hc_mmio + ofs;
1510}
1511
1512static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1513{
1514 unsigned int ofs;
1515
1516 switch (sc_reg_in) {
1517 case SCR_STATUS:
1518 case SCR_ERROR:
1519 case SCR_CONTROL:
1520 ofs = sc_reg_in * sizeof(u32);
1521 break;
1522 default:
1523 ofs = 0xffffffffU;
1524 break;
1525 }
1526 return ofs;
1527}
1528
1529static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1530{
1531 void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
1532 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1533
1534 if (ofs != 0xffffffffU)
1535 return readl(mmio + ofs);
1536 else
1537 return (u32) ofs;
1538}
1539
1540static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1541{
1542 void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
1543 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1544
1545 if (ofs != 0xffffffffU)
1546 writelfl(val, mmio + ofs);
1547}
1548
/* 50xx-specific PCI bus reset: enable the expansion-ROM BAR control
 * bit (except on early rev-0 5080 parts, which lack it) before doing
 * the common PCI bus reset.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
1566
/* Reset the flash controller on 50xx chips.  The value is a magic
 * constant -- presumably from vendor reference code; confirm against
 * the datasheet before changing.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1571
1572static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1573 void __iomem *mmio)
1574{
1575 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1576 u32 tmp;
1577
1578 tmp = readl(phy_mmio + MV5_PHY_MODE);
1579
1580 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1581 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1582}
1583
/* Configure GPIO/LED behavior on 50xx chips. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit except bit 0's
	 * complement mask -- if the intent was to clear bit 0 this
	 * should read "&= ~(1 << 0)"; confirm against the datasheet
	 * before changing (this form has shipped for a long time).
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1596
/* Apply 50xx PHY errata workarounds to @port, then restore the saved
 * pre-emphasis/amplitude values captured by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre-emphasis (bits 12:11) + amplitude (bits 7:5) field mask */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* B0 errata: adjust link-training mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* restore cached signal settings into the PHY mode register */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1622
1623
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Bring one 50xx port's EDMA block to a clean post-reset state:
 * disable EDMA, hard-reset the channel, then zero/initialize each
 * EDMA register.  The offsets below are relative to the port base.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before touching the rest of the block */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1650
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Clear one 50xx host controller's interrupt/config registers and
 * rewrite its 0x20 register with fixed field values (magic constants;
 * presumably from vendor reference code -- confirm against datasheet).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1669
1670static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1671 unsigned int n_hc)
1672{
1673 unsigned int hc, port;
1674
1675 for (hc = 0; hc < n_hc; hc++) {
1676 for (port = 0; port < MV_PORTS_PER_HC; port++)
1677 mv5_reset_hc_port(hpriv, mmio,
1678 (hc * MV_PORTS_PER_HC) + port);
1679
1680 mv5_reset_one_hc(hpriv, mmio, hc);
1681 }
1682
1683 return 0;
1684}
1685
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-side reset: clear mode bits 23:16, zero timers, masks,
 * pending IRQ/error state, and set the crossbar timeout.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;	/* clear mode bits 23:16 */
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1709
/* 6xxx flash reset: do the common 50xx flash reset, then additionally
 * program GPIO port control (keep bits 1:0, set bits 5 and 6).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1721
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * Returns 0 on success, 1 if any step of the reset sequence
 * failed to take effect within its polling window.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for outstanding PCI master transactions */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
1786
/* Cache port @idx's amplitude/pre-emphasis settings on 6xxx chips.
 * If MV_RESET_CFG bit 0 is clear the per-port PHY values are not
 * valid, so fall back to fixed defaults.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults used when the strap/config bit is not set */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
1806
/* Enable LED behavior on 6xxx chips via GPIO port control
 * (sets bits 5 and 6, matching mv6_reset_flash()).
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
1811
/* Apply 6xxx PHY errata workarounds to @port and restore the saved
 * amplitude/pre-emphasis values captured by mv6_read_preamp().
 * Several constants below are magic values -- presumably taken from
 * vendor reference code (mvSata); confirm against the datasheet
 * before changing.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bits 16/31 per errata, with settle delays */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* B2 parts: register 0x310 must be preserved across the
		 * PHY_MODE4 write (tmp still holds PHY_MODE3 otherwise)
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
1877
/* Hard-reset one SATA channel: assert ATA_RST via the EDMA command
 * register, apply chip-specific interface setup (60xx), then deassert
 * and run the per-generation PHY errata hook.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* 50xx parts need extra settle time after the reset */
	if (IS_50XX(hpriv))
		mdelay(1);
}
1904
1905static void mv_stop_and_reset(struct ata_port *ap)
1906{
1907 struct mv_host_priv *hpriv = ap->host_set->private_data;
1908 void __iomem *mmio = ap->host_set->mmio_base;
1909
1910 mv_stop_dma(ap);
1911
1912 mv_channel_reset(hpriv, mmio, ap->port_no);
1913
1914 __mv_phy_reset(ap, 0);
1915}
1916
/* Delay for @msec milliseconds; busy-wait when the caller cannot
 * sleep (e.g. interrupt context), otherwise yield via msleep().
 */
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (!can_sleep) {
		mdelay(msec);
		return;
	}

	msleep(msec);
}
1924
/**
 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @can_sleep: nonzero if the caller's context allows sleeping
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	/* DET=1 starts the COMRESET ... */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	/* ... DET=0 releases it */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	/* poll up to 200ms for DET to settle (0=no device, 3=phy up) */
	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		sstatus &= 0x3;
		if ((sstatus == 3) || (sstatus == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	/* read the signature taskfile to classify the attached device */
	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	/* clear any latched port error state from the reset itself */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
2024
/* Process-context wrapper around __mv_phy_reset(): sleeping allowed. */
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
2029
2030/**
2031 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2032 * @ap: ATA channel to manipulate
2033 *
2034 * Intent is to clear all pending error conditions, reset the
2035 * chip/bus, fail the command, and move on.
2036 *
2037 * LOCKING:
2038 * This routine holds the host_set lock while failing the command.
2039 */
2040static void mv_eng_timeout(struct ata_port *ap)
2041{
2042 struct ata_queued_cmd *qc;
2043 unsigned long flags;
2044
2045 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2046 DPRINTK("All regs @ start of eng_timeout\n");
2047 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
2048 to_pci_dev(ap->host_set->dev));
2049
2050 qc = ata_qc_from_tag(ap, ap->active_tag);
2051 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2052 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
2053 &qc->scsicmd->cmnd);
2054
2055 spin_lock_irqsave(&ap->host_set->lock, flags);
2056 mv_err_intr(ap, 0);
2057 mv_stop_and_reset(ap);
2058 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2059
2060 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2061 if (qc->flags & ATA_QCFLAG_ACTIVE) {
2062 qc->err_mask |= AC_ERR_TIMEOUT;
2063 ata_eh_qc_complete(qc);
2064 }
2065}
2066
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: shadow registers are u32-spaced within
	 * the shadow block, indexed by the standard ATA register number
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2115
/* Identify the chip generation from the board index and the PCI
 * revision id, select the matching ops vector, and record errata
 * flags in hpriv->hp_flags.
 *
 * Returns 0 on success, 1 on an unrecognized board index.
 */
static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
{
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch(board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			/* unknown revision: assume newest known errata */
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		/* Gen IIE parts share the 6xxx ops but use the IIE
		 * qc_prep path and fixed PHY values (see mv6_phy_errata)
		 */
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
2213
2214/**
2215 * mv_init_host - Perform some early initialization of the host.
2216 * @pdev: host PCI device
2217 * @probe_ent: early data struct representing the host
2218 *
2219 * If possible, do an early global reset of the host. Then do
2220 * our port init and clear/unmask all/relevant host interrupts.
2221 *
2222 * LOCKING:
2223 * Inherited from caller.
2224 */
2225static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2226 unsigned int board_idx)
2227{
2228 int rc = 0, n_hc, port, hc;
2229 void __iomem *mmio = probe_ent->mmio_base;
2230 struct mv_host_priv *hpriv = probe_ent->private_data;
2231
2232 /* global interrupt mask */
2233 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2234
2235 rc = mv_chip_id(pdev, hpriv, board_idx);
2236 if (rc)
2237 goto done;
2238
2239 n_hc = mv_get_hc_count(probe_ent->host_flags);
2240 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
2241
2242 for (port = 0; port < probe_ent->n_ports; port++)
2243 hpriv->ops->read_preamp(hpriv, port, mmio);
2244
2245 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2246 if (rc)
2247 goto done;
2248
2249 hpriv->ops->reset_flash(hpriv, mmio);
2250 hpriv->ops->reset_bus(pdev, mmio);
2251 hpriv->ops->enable_leds(hpriv, mmio);
2252
2253 for (port = 0; port < probe_ent->n_ports; port++) {
2254 if (IS_60XX(hpriv)) {
2255 void __iomem *port_mmio = mv_port_base(mmio, port);
2256
2257 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2258 ifctl |= (1 << 7); /* enable gen2i speed */
2259 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2260 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2261 }
2262
2263 hpriv->ops->phy_errata(hpriv, mmio, port);
2264 }
2265
2266 for (port = 0; port < probe_ent->n_ports; port++) {
2267 void __iomem *port_mmio = mv_port_base(mmio, port);
2268 mv_port_init(&probe_ent->port[port], port_mmio);
2269 }
2270
2271 for (hc = 0; hc < n_hc; hc++) {
2272 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2273
2274 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2275 "(before clear)=0x%08x\n", hc,
2276 readl(hc_mmio + HC_CFG_OFS),
2277 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2278
2279 /* Clear any currently outstanding hc interrupt conditions */
2280 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2281 }
2282
2283 /* Clear any currently outstanding host interrupt conditions */
2284 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2285
2286 /* and unmask interrupt generation for host regs */
2287 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2288 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2289
2290 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2291 "PCI int cause/mask=0x%08x/0x%08x\n",
2292 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2293 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2294 readl(mmio + PCI_IRQ_CAUSE_OFS),
2295 readl(mmio + PCI_IRQ_MASK_OFS));
2296
2297done:
2298 return rc;
2299}
2300
2301/**
2302 * mv_print_info - Dump key info to kernel log for perusal.
2303 * @probe_ent: early data struct representing the host
2304 *
2305 * FIXME: complete this.
2306 *
2307 * LOCKING:
2308 * Inherited from caller.
2309 */
2310static void mv_print_info(struct ata_probe_ent *probe_ent)
2311{
2312 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
2313 struct mv_host_priv *hpriv = probe_ent->private_data;
2314 u8 rev_id, scc;
2315 const char *scc_s;
2316
2317 /* Use this to determine the HW stepping of the chip so we know
2318 * what errata to workaround
2319 */
2320 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2321
2322 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2323 if (scc == 0)
2324 scc_s = "SCSI";
2325 else if (scc == 0x01)
2326 scc_s = "RAID";
2327 else
2328 scc_s = "unknown";
2329
2330 dev_printk(KERN_INFO, &pdev->dev,
2331 "%u slots %u ports %s mode IRQ via %s\n",
2332 (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
2333 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2334}
2335
2336/**
2337 * mv_init_one - handle a positive probe of a Marvell host
2338 * @pdev: PCI device found
2339 * @ent: PCI device ID entry for the matched host
2340 *
2341 * LOCKING:
2342 * Inherited from caller.
2343 */
2344static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2345{
2346 static int printed_version = 0;
2347 struct ata_probe_ent *probe_ent = NULL;
2348 struct mv_host_priv *hpriv;
2349 unsigned int board_idx = (unsigned int)ent->driver_data;
2350 void __iomem *mmio_base;
2351 int pci_dev_busy = 0, rc;
2352
2353 if (!printed_version++)
2354 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2355
2356 rc = pci_enable_device(pdev);
2357 if (rc) {
2358 return rc;
2359 }
2360 pci_set_master(pdev);
2361
2362 rc = pci_request_regions(pdev, DRV_NAME);
2363 if (rc) {
2364 pci_dev_busy = 1;
2365 goto err_out;
2366 }
2367
2368 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
2369 if (probe_ent == NULL) {
2370 rc = -ENOMEM;
2371 goto err_out_regions;
2372 }
2373
2374 memset(probe_ent, 0, sizeof(*probe_ent));
2375 probe_ent->dev = pci_dev_to_dev(pdev);
2376 INIT_LIST_HEAD(&probe_ent->node);
2377
2378 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
2379 if (mmio_base == NULL) {
2380 rc = -ENOMEM;
2381 goto err_out_free_ent;
2382 }
2383
2384 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
2385 if (!hpriv) {
2386 rc = -ENOMEM;
2387 goto err_out_iounmap;
2388 }
2389 memset(hpriv, 0, sizeof(*hpriv));
2390
2391 probe_ent->sht = mv_port_info[board_idx].sht;
2392 probe_ent->host_flags = mv_port_info[board_idx].host_flags;
2393 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
2394 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
2395 probe_ent->port_ops = mv_port_info[board_idx].port_ops;
2396
2397 probe_ent->irq = pdev->irq;
2398 probe_ent->irq_flags = IRQF_SHARED;
2399 probe_ent->mmio_base = mmio_base;
2400 probe_ent->private_data = hpriv;
2401
2402 /* initialize adapter */
2403 rc = mv_init_host(pdev, probe_ent, board_idx);
2404 if (rc) {
2405 goto err_out_hpriv;
2406 }
2407
2408 /* Enable interrupts */
2409 if (msi && pci_enable_msi(pdev) == 0) {
2410 hpriv->hp_flags |= MV_HP_FLAG_MSI;
2411 } else {
2412 pci_intx(pdev, 1);
2413 }
2414
2415 mv_dump_pci_cfg(pdev, 0x68);
2416 mv_print_info(probe_ent);
2417
2418 if (ata_device_add(probe_ent) == 0) {
2419 rc = -ENODEV; /* No devices discovered */
2420 goto err_out_dev_add;
2421 }
2422
2423 kfree(probe_ent);
2424 return 0;
2425
2426err_out_dev_add:
2427 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
2428 pci_disable_msi(pdev);
2429 } else {
2430 pci_intx(pdev, 0);
2431 }
2432err_out_hpriv:
2433 kfree(hpriv);
2434err_out_iounmap:
2435 pci_iounmap(pdev, mmio_base);
2436err_out_free_ent:
2437 kfree(probe_ent);
2438err_out_regions:
2439 pci_release_regions(pdev);
2440err_out:
2441 if (!pci_dev_busy) {
2442 pci_disable_device(pdev);
2443 }
2444
2445 return rc;
2446}
2447
/* Module entry point: register the PCI driver with the PCI core. */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2452
/* Module exit point: unregister the PCI driver. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2457
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* msi=1 requests PCI Message Signaled Interrupts; falls back to INTx
 * if MSI setup fails (see mv_init_one).  Read-only via sysfs (0444).
 */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
new file mode 100644
index 000000000000..be46df75ab5a
--- /dev/null
+++ b/drivers/ata/sata_nv.c
@@ -0,0 +1,595 @@
1/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/init.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/interrupt.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <linux/libata.h>
44
45#define DRV_NAME "sata_nv"
46#define DRV_VERSION "2.0"
47
/* Controller constants: port count, transfer-mode masks, and register
 * offsets.  nForce2/3 interrupt registers live in I/O space relative to
 * port 0's SCR block; CK804's live in the BAR5 MMIO space.
 */
enum {
	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	/* note: NV_INT_PM is deliberately left out of the unmask set */
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
};
82
83static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
84static void nv_ck804_host_stop(struct ata_host_set *host_set);
85static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
86 struct pt_regs *regs);
87static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
88 struct pt_regs *regs);
89static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
90 struct pt_regs *regs);
91static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
92static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
93
94static void nv_nf2_freeze(struct ata_port *ap);
95static void nv_nf2_thaw(struct ata_port *ap);
96static void nv_ck804_freeze(struct ata_port *ap);
97static void nv_ck804_thaw(struct ata_port *ap);
98static void nv_error_handler(struct ata_port *ap);
99
/* Index into nv_port_info[]; also stored in nv_pci_tbl driver_data. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804
};
107
/* PCI match table; driver_data selects the nv_host_type variant. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	/* bare device IDs with no symbolic name yet */
	{ PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
	/* catch-all: any NVIDIA IDE/RAID-class device, by PCI class code
	 * (nv_init_one() weeds out non-SATA devices by BAR count)
	 */
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
	{ 0, } /* terminate list */
};
149
/* PCI driver glue; removal is handled generically by libata. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.remove			= ata_pci_remove_one,
};
156
/* SCSI host template: all entries delegate to libata's generic
 * SCSI-simulation layer; nothing NV-specific here.
 */
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
174
/* Port ops for unrecognized/newer chips: stock BMDMA behavior with
 * per-port polling interrupt handler and generic freeze/thaw.
 */
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};
201
/* Port ops for nForce2/3: uses the chip's I/O-space interrupt status
 * register (nv_nf2_interrupt) and matching freeze/thaw.
 */
static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};
228
/* Port ops for CK804/MCP04: MMIO interrupt status registers and a
 * custom host_stop that disables the extended SATA register space.
 */
static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= nv_ck804_host_stop,
};
255
/* Per-variant port configuration, indexed by enum nv_host_type.
 * All variants share the same transfer-mode masks and differ only in
 * the ops vector.
 */
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
};
285
/* Module metadata; the device table enables autoloading via hotplug. */
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
291
292static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
293 struct pt_regs *regs)
294{
295 struct ata_host_set *host_set = dev_instance;
296 unsigned int i;
297 unsigned int handled = 0;
298 unsigned long flags;
299
300 spin_lock_irqsave(&host_set->lock, flags);
301
302 for (i = 0; i < host_set->n_ports; i++) {
303 struct ata_port *ap;
304
305 ap = host_set->ports[i];
306 if (ap &&
307 !(ap->flags & ATA_FLAG_DISABLED)) {
308 struct ata_queued_cmd *qc;
309
310 qc = ata_qc_from_tag(ap, ap->active_tag);
311 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
312 handled += ata_host_intr(ap, qc);
313 else
314 // No request pending? Clear interrupt status
315 // anyway, in case there's one pending.
316 ap->ops->check_status(ap);
317 }
318
319 }
320
321 spin_unlock_irqrestore(&host_set->lock, flags);
322
323 return IRQ_RETVAL(handled);
324}
325
326static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
327{
328 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
329 int handled;
330
331 /* freeze if hotplugged */
332 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
333 ata_port_freeze(ap);
334 return 1;
335 }
336
337 /* bail out if not our interrupt */
338 if (!(irq_stat & NV_INT_DEV))
339 return 0;
340
341 /* DEV interrupt w/ no active qc? */
342 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
343 ata_check_status(ap);
344 return 1;
345 }
346
347 /* handle interrupt */
348 handled = ata_host_intr(ap, qc);
349 if (unlikely(!handled)) {
350 /* spurious, clear it */
351 ata_check_status(ap);
352 }
353
354 return 1;
355}
356
357static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
358{
359 int i, handled = 0;
360
361 for (i = 0; i < host_set->n_ports; i++) {
362 struct ata_port *ap = host_set->ports[i];
363
364 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
365 handled += nv_host_intr(ap, irq_stat);
366
367 irq_stat >>= NV_INT_PORT_SHIFT;
368 }
369
370 return IRQ_RETVAL(handled);
371}
372
373static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
374 struct pt_regs *regs)
375{
376 struct ata_host_set *host_set = dev_instance;
377 u8 irq_stat;
378 irqreturn_t ret;
379
380 spin_lock(&host_set->lock);
381 irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
382 ret = nv_do_interrupt(host_set, irq_stat);
383 spin_unlock(&host_set->lock);
384
385 return ret;
386}
387
388static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
389 struct pt_regs *regs)
390{
391 struct ata_host_set *host_set = dev_instance;
392 u8 irq_stat;
393 irqreturn_t ret;
394
395 spin_lock(&host_set->lock);
396 irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
397 ret = nv_do_interrupt(host_set, irq_stat);
398 spin_unlock(&host_set->lock);
399
400 return ret;
401}
402
403static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
404{
405 if (sc_reg > SCR_CONTROL)
406 return 0xffffffffU;
407
408 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
409}
410
411static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
412{
413 if (sc_reg > SCR_CONTROL)
414 return;
415
416 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
417}
418
419static void nv_nf2_freeze(struct ata_port *ap)
420{
421 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
422 int shift = ap->port_no * NV_INT_PORT_SHIFT;
423 u8 mask;
424
425 mask = inb(scr_addr + NV_INT_ENABLE);
426 mask &= ~(NV_INT_ALL << shift);
427 outb(mask, scr_addr + NV_INT_ENABLE);
428}
429
430static void nv_nf2_thaw(struct ata_port *ap)
431{
432 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
433 int shift = ap->port_no * NV_INT_PORT_SHIFT;
434 u8 mask;
435
436 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
437
438 mask = inb(scr_addr + NV_INT_ENABLE);
439 mask |= (NV_INT_MASK << shift);
440 outb(mask, scr_addr + NV_INT_ENABLE);
441}
442
443static void nv_ck804_freeze(struct ata_port *ap)
444{
445 void __iomem *mmio_base = ap->host_set->mmio_base;
446 int shift = ap->port_no * NV_INT_PORT_SHIFT;
447 u8 mask;
448
449 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
450 mask &= ~(NV_INT_ALL << shift);
451 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
452}
453
454static void nv_ck804_thaw(struct ata_port *ap)
455{
456 void __iomem *mmio_base = ap->host_set->mmio_base;
457 int shift = ap->port_no * NV_INT_PORT_SHIFT;
458 u8 mask;
459
460 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
461
462 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
463 mask |= (NV_INT_MASK << shift);
464 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
465}
466
/* Hardreset wrapper that discards device classification.
 * Returns whatever sata_std_hardreset() returns.
 */
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}
477
/* Error handler: standard BMDMA EH with the non-classifying
 * nv_hardreset substituted for the stock hardreset.
 */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
483
484static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
485{
486 static int printed_version = 0;
487 struct ata_port_info *ppi;
488 struct ata_probe_ent *probe_ent;
489 int pci_dev_busy = 0;
490 int rc;
491 u32 bar;
492 unsigned long base;
493
494 // Make sure this is a SATA controller by counting the number of bars
495 // (NVIDIA SATA controllers will always have six bars). Otherwise,
496 // it's an IDE controller and we ignore it.
497 for (bar=0; bar<6; bar++)
498 if (pci_resource_start(pdev, bar) == 0)
499 return -ENODEV;
500
501 if (!printed_version++)
502 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
503
504 rc = pci_enable_device(pdev);
505 if (rc)
506 goto err_out;
507
508 rc = pci_request_regions(pdev, DRV_NAME);
509 if (rc) {
510 pci_dev_busy = 1;
511 goto err_out_disable;
512 }
513
514 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
515 if (rc)
516 goto err_out_regions;
517 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
518 if (rc)
519 goto err_out_regions;
520
521 rc = -ENOMEM;
522
523 ppi = &nv_port_info[ent->driver_data];
524 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
525 if (!probe_ent)
526 goto err_out_regions;
527
528 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
529 if (!probe_ent->mmio_base) {
530 rc = -EIO;
531 goto err_out_free_ent;
532 }
533
534 base = (unsigned long)probe_ent->mmio_base;
535
536 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
537 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
538
539 /* enable SATA space for CK804 */
540 if (ent->driver_data == CK804) {
541 u8 regval;
542
543 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
544 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
545 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
546 }
547
548 pci_set_master(pdev);
549
550 rc = ata_device_add(probe_ent);
551 if (rc != NV_PORTS)
552 goto err_out_iounmap;
553
554 kfree(probe_ent);
555
556 return 0;
557
558err_out_iounmap:
559 pci_iounmap(pdev, probe_ent->mmio_base);
560err_out_free_ent:
561 kfree(probe_ent);
562err_out_regions:
563 pci_release_regions(pdev);
564err_out_disable:
565 if (!pci_dev_busy)
566 pci_disable_device(pdev);
567err_out:
568 return rc;
569}
570
/* Host teardown for CK804: undo the SATA-space enable performed in
 * nv_init_one(), then run the generic PCI host stop.
 */
static void nv_ck804_host_stop(struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

	ata_pci_host_stop(host_set);
}
583
/* Module entry point: register the PCI driver with the PCI core. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
588
/* Module exit point: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
593
594module_init(nv_init);
595module_exit(nv_exit);
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
new file mode 100644
index 000000000000..a5b3a7db7a9f
--- /dev/null
+++ b/drivers/ata/sata_promise.c
@@ -0,0 +1,844 @@
1/*
2 * sata_promise.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware information only available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.04"
50
51
enum {
	/* host/port MMIO register offsets (note 0x40 and 0x48 are shared
	 * between a global and a per-port meaning) */
	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
	PDC_INT_SEQMASK		= 0x40, /* Mask of asserted SEQ INTs */
	PDC_TBG_MODE		= 0x41, /* TBG mode */
	PDC_FLASH_CTL		= 0x44, /* Flash control register */
	PDC_PCI_CTL		= 0x48, /* PCI control and status register */
	PDC_GLOBAL_CTL		= 0x48, /* Global control/status (per port) */
	PDC_CTLSTAT		= 0x60, /* IDE control and status (per port) */
	PDC_SATA_PLUG_CSR	= 0x6C, /* SATA Plug control/status reg */
	PDC2_SATA_PLUG_CSR	= 0x60, /* SATAII Plug control/status reg */
	PDC_SLEW_CTL		= 0x470, /* slew rate control reg */

	/* error bits checked in PDC_GLOBAL_CTL by the interrupt handler */
	PDC_ERR_MASK		= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
				  (1<<8) | (1<<9) | (1<<10),

	/* board indices into pdc_port_info[] */
	board_2037x		= 0,	/* FastTrak S150 TX2plus */
	board_20319		= 1,	/* FastTrak S150 TX4 */
	board_20619		= 2,	/* FastTrak TX4000 */
	board_20771		= 3,	/* FastTrak TX2300 */
	board_2057x		= 4,	/* SATAII150 Tx2plus */
	board_40518		= 5,	/* SATAII150 Tx4 */

	PDC_HAS_PATA		= (1 << 1), /* PDC20375/20575 has PATA */

	PDC_RESET		= (1 << 11), /* HDMA reset bit in PDC_CTLSTAT */

	/* flags shared by every entry in pdc_port_info[] */
	PDC_COMMON_FLAGS	= ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
};
82
83
/* Per-port private data: one DMA-coherent command packet buffer
 * (128 bytes, allocated in pdc_port_start()). */
struct pdc_port_priv {
	u8			*pkt;		/* CPU address of packet buffer */
	dma_addr_t		pkt_dma;	/* bus address of same buffer */
};

/* Per-host private data kept across the probe/teardown lifetime. */
struct pdc_host_priv {
	int			hotplug_offset;	/* chip-specific plug CSR offset */
};
92
93static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
94static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
95static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
96static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
97static void pdc_eng_timeout(struct ata_port *ap);
98static int pdc_port_start(struct ata_port *ap);
99static void pdc_port_stop(struct ata_port *ap);
100static void pdc_pata_phy_reset(struct ata_port *ap);
101static void pdc_sata_phy_reset(struct ata_port *ap);
102static void pdc_qc_prep(struct ata_queued_cmd *qc);
103static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
104static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
105static void pdc_irq_clear(struct ata_port *ap);
106static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
107static void pdc_host_stop(struct ata_host_set *host_set);
108
109
/* SCSI host template: standard libata glue, no driver-specific hooks. */
static struct scsi_host_template pdc_ata_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
127
/* Port operations for SATA boards: SCR access and SATA phy reset. */
static const struct ata_port_operations pdc_sata_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= pdc_tf_load_mmio,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= pdc_exec_command_mmio,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= pdc_sata_phy_reset,

	.qc_prep		= pdc_qc_prep,
	.qc_issue		= pdc_qc_issue_prot,
	.eng_timeout		= pdc_eng_timeout,
	.data_xfer		= ata_mmio_data_xfer,
	.irq_handler		= pdc_interrupt,
	.irq_clear		= pdc_irq_clear,

	.scr_read		= pdc_sata_scr_read,
	.scr_write		= pdc_sata_scr_write,
	.port_start		= pdc_port_start,
	.port_stop		= pdc_port_stop,
	.host_stop		= pdc_host_stop,
};
151
/* Port operations for PATA boards (TX4000): no SCR access, PATA phy
 * reset with cable detection. */
static const struct ata_port_operations pdc_pata_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= pdc_tf_load_mmio,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= pdc_exec_command_mmio,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= pdc_pata_phy_reset,

	.qc_prep		= pdc_qc_prep,
	.qc_issue		= pdc_qc_issue_prot,
	.data_xfer		= ata_mmio_data_xfer,
	.eng_timeout		= pdc_eng_timeout,
	.irq_handler		= pdc_interrupt,
	.irq_clear		= pdc_irq_clear,

	.port_start		= pdc_port_start,
	.port_stop		= pdc_port_stop,
	.host_stop		= pdc_host_stop,
};
173
/* Per-board capabilities; indexed by the board_* enum values that the
 * PCI table stores in driver_data. */
static const struct ata_port_info pdc_port_info[] = {
	/* board_2037x */
	{
		.sht		= &pdc_ata_sht,
		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
		.pio_mask	= 0x1f, /* pio0-4 */
		.mwdma_mask	= 0x07, /* mwdma0-2 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &pdc_sata_ops,
	},

	/* board_20319 */
	{
		.sht		= &pdc_ata_sht,
		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
		.pio_mask	= 0x1f, /* pio0-4 */
		.mwdma_mask	= 0x07, /* mwdma0-2 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &pdc_sata_ops,
	},

	/* board_20619 - the only PATA entry (uses pdc_pata_ops) */
	{
		.sht		= &pdc_ata_sht,
		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f, /* pio0-4 */
		.mwdma_mask	= 0x07, /* mwdma0-2 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &pdc_pata_ops,
	},

	/* board_20771 */
	{
		.sht		= &pdc_ata_sht,
		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
		.pio_mask	= 0x1f, /* pio0-4 */
		.mwdma_mask	= 0x07, /* mwdma0-2 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &pdc_sata_ops,
	},

	/* board_2057x */
	{
		.sht		= &pdc_ata_sht,
		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
		.pio_mask	= 0x1f, /* pio0-4 */
		.mwdma_mask	= 0x07, /* mwdma0-2 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &pdc_sata_ops,
	},

	/* board_40518 */
	{
		.sht		= &pdc_ata_sht,
		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
		.pio_mask	= 0x1f, /* pio0-4 */
		.mwdma_mask	= 0x07, /* mwdma0-2 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &pdc_sata_ops,
	},
};
235
/* PCI IDs; driver_data carries the board_* index into pdc_port_info[]. */
static const struct pci_device_id pdc_ata_pci_tbl[] = {
	{ PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2037x },
	{ PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2037x },
	{ PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2037x },
	{ PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2037x },
	{ PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2037x },
	{ PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2037x },
	{ PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2057x },
	{ PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2057x },
	{ PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2037x },

	{ PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_20319 },
	{ PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_20319 },
	{ PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_20319 },
	{ PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_20319 },
	{ PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_20319 },
	{ PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_40518 },

	{ PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_20619 },

/* TODO: remove all associated board_20771 code, as it completely
 * duplicates board_2037x code, unless reason for separation can be
 * divined.
 */
#if 0
	{ PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_20771 },
#endif

	{ }	/* terminate list */
};
283
284
/* PCI driver glue: probe builds the libata host, remove is generic. */
static struct pci_driver pdc_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_ata_pci_tbl,
	.probe			= pdc_ata_init_one,
	.remove			= ata_pci_remove_one,
};
291
292
293static int pdc_port_start(struct ata_port *ap)
294{
295 struct device *dev = ap->host_set->dev;
296 struct pdc_port_priv *pp;
297 int rc;
298
299 rc = ata_port_start(ap);
300 if (rc)
301 return rc;
302
303 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
304 if (!pp) {
305 rc = -ENOMEM;
306 goto err_out;
307 }
308
309 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
310 if (!pp->pkt) {
311 rc = -ENOMEM;
312 goto err_out_kfree;
313 }
314
315 ap->private_data = pp;
316
317 return 0;
318
319err_out_kfree:
320 kfree(pp);
321err_out:
322 ata_port_stop(ap);
323 return rc;
324}
325
326
327static void pdc_port_stop(struct ata_port *ap)
328{
329 struct device *dev = ap->host_set->dev;
330 struct pdc_port_priv *pp = ap->private_data;
331
332 ap->private_data = NULL;
333 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
334 kfree(pp);
335 ata_port_stop(ap);
336}
337
338
/* Host teardown: run the generic PCI host stop (which unmaps MMIO)
 * first, then free our private data allocated in pdc_ata_init_one(). */
static void pdc_host_stop(struct ata_host_set *host_set)
{
	struct pdc_host_priv *hp = host_set->private_data;

	ata_pci_host_stop(host_set);

	kfree(hp);
}
347
348
/* Pulse the HDMA reset bit in the port's CTLSTAT register.
 *
 * The loop repeatedly asserts PDC_RESET (up to 11 tries, 100us apart)
 * until a readback shows the bit latched, then the bit is cleared and
 * the write is flushed.  The exact sequence/timing is hardware-specific;
 * do not reorder.
 */
static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, mmio);
	}

	/* release reset; 'tmp' holds the last value read from CTLSTAT */
	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}
370
/* SATA phy reset: reset the port engine first, then run the generic
 * SATA phy reset/probe sequence. */
static void pdc_sata_phy_reset(struct ata_port *ap)
{
	pdc_reset_port(ap);
	sata_phy_reset(ap);
}
376
377static void pdc_pata_cbl_detect(struct ata_port *ap)
378{
379 u8 tmp;
380 void __iomem *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
381
382 tmp = readb(mmio);
383
384 if (tmp & 0x01) {
385 ap->cbl = ATA_CBL_PATA40;
386 ap->udma_mask &= ATA_UDMA_MASK_40C;
387 } else
388 ap->cbl = ATA_CBL_PATA80;
389}
390
/* PATA phy reset: sniff the cable type, reset the port engine, then
 * mark the port present and run a classic ATA bus reset. */
static void pdc_pata_phy_reset(struct ata_port *ap)
{
	pdc_pata_cbl_detect(ap);
	pdc_reset_port(ap);
	ata_port_probe(ap);
	ata_bus_reset(ap);
}
398
/* Read a SATA SCR register (SStatus/SError/SControl); registers beyond
 * SCR_CONTROL are not implemented and read back as all-ones. */
static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;
	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
405
406
/* Write a SATA SCR register; writes beyond SCR_CONTROL are silently
 * ignored (not implemented by the hardware). */
static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
			       u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;
	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
414
/* Build the Promise command packet for DMA and NODATA protocols.
 *
 * DMA commands first get a standard PRD table via ata_qc_prep(), then
 * deliberately fall through to share the packet-building path with
 * NODATA.  Other protocols (PIO) are issued via the normal taskfile
 * path and need no packet.
 */
static void pdc_qc_prep(struct ata_queued_cmd *qc)
{
	struct pdc_port_priv *pp = qc->ap->private_data;
	unsigned int i;

	VPRINTK("ENTER\n");

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		ata_qc_prep(qc);
		/* fall through */

	case ATA_PROT_NODATA:
		/* i tracks the next free byte offset in the packet */
		i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
				   qc->dev->devno, pp->pkt);

		if (qc->tf.flags & ATA_TFLAG_LBA48)
			i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
		else
			i = pdc_prep_lba28(&qc->tf, pp->pkt, i);

		pdc_pkt_footer(&qc->tf, pp->pkt, i);
		break;

	default:
		break;
	}
}
443
/* Engine timeout handler: classify the stuck command, record an error
 * mask from the device status, and complete the qc through the EH path.
 *
 * NOTE(review): qc is dereferenced without a NULL check — assumes the
 * timeout always fires with an active tag; confirm against libata EH.
 */
static void pdc_eng_timeout(struct ata_port *ap)
{
	struct ata_host_set *host_set = ap->host_set;
	u8 drv_stat;
	struct ata_queued_cmd *qc;
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(&host_set->lock, flags);

	qc = ata_qc_from_tag(ap, ap->active_tag);

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NODATA:
		ata_port_printk(ap, KERN_ERR, "command timeout\n");
		/* wait for BSY to clear, fold status into the error mask */
		drv_stat = ata_wait_idle(ap);
		qc->err_mask |= __ac_err_mask(drv_stat);
		break;

	default:
		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);

		ata_port_printk(ap, KERN_ERR,
				"unknown timeout, cmd 0x%x stat 0x%x\n",
				qc->tf.command, drv_stat);

		qc->err_mask |= ac_err_mask(drv_stat);
		break;
	}

	spin_unlock_irqrestore(&host_set->lock, flags);
	ata_eh_qc_complete(qc);
	DPRINTK("EXIT\n");
}
480
/* Per-port interrupt service: check the global control register for
 * error bits (resetting the port if any are set), then complete the
 * active DMA/NODATA command.  Returns 1 if the interrupt was handled.
 */
static inline unsigned int pdc_host_intr( struct ata_port *ap,
                                          struct ata_queued_cmd *qc)
{
	unsigned int handled = 0;
	u32 tmp;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;

	tmp = readl(mmio);
	if (tmp & PDC_ERR_MASK) {
		qc->err_mask |= AC_ERR_DEV;
		pdc_reset_port(ap);
	}

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NODATA:
		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
		ata_qc_complete(qc);
		handled = 1;
		break;

	default:
		/* not a protocol we drive via packets; count as spurious */
		ap->stats.idle_irq++;
		break;
	}

	return handled;
}
509
/* Acknowledge pending interrupts: reading the SEQ mask register clears
 * the asserted interrupt bits on this hardware. */
static void pdc_irq_clear(struct ata_port *ap)
{
	struct ata_host_set *host_set = ap->host_set;
	void __iomem *mmio = host_set->mmio_base;

	readl(mmio + PDC_INT_SEQMASK);
}
517
/* Shared interrupt handler for the whole host.
 *
 * Reads (and thereby clears) the SEQ interrupt mask, then dispatches
 * one bit per port: port i's SEQ id is i+1, so bit (i+1) of the mask
 * flags that port.  Holds the host_set lock across per-port servicing.
 */
static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	VPRINTK("ENTER\n");

	if (!host_set || !host_set->mmio_base) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	mmio_base = host_set->mmio_base;

	/* reading should also clear interrupts */
	mask = readl(mmio_base + PDC_INT_SEQMASK);

	/* all-ones means the device is gone (e.g. surprise removal) */
	if (mask == 0xffffffff) {
		VPRINTK("QUICK EXIT 2\n");
		return IRQ_NONE;
	}

	spin_lock(&host_set->lock);

	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask) {
		VPRINTK("QUICK EXIT 3\n");
		goto done_irq;
	}

	/* write back the bits we are about to service to ack them */
	writel(mask, mmio_base + PDC_INT_SEQMASK);

	for (i = 0; i < host_set->n_ports; i++) {
		VPRINTK("port %u\n", i);
		ap = host_set->ports[i];
		tmp = mask & (1 << (i + 1));	/* SEQ id is port_no + 1 */
		if (tmp && ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc_host_intr(ap, qc);
		}
	}

	VPRINTK("EXIT\n");

done_irq:
	spin_unlock(&host_set->lock);
	return IRQ_RETVAL(handled);
}
574
/* Kick the hardware packet engine for a prepared command.
 *
 * Arms the port's SEQ slot (seq = port_no + 1, matching the interrupt
 * dispatch), stamps the seq id into byte 2 of the packet, then writes
 * the packet's bus address to PDC_PKT_SUBMIT.  The readl() calls flush
 * posted MMIO writes; the wmb() orders the packet/PRD memory writes
 * before the doorbell.
 */
static inline void pdc_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	unsigned int port_no = ap->port_no;
	u8 seq = (u8) (port_no + 1);

	VPRINTK("ENTER, ap %p\n", ap);

	writel(0x00000001, ap->host_set->mmio_base + (seq * 4));
	readl(ap->host_set->mmio_base + (seq * 4));	/* flush */

	pp->pkt[2] = seq;
	wmb();			/* flush PRD, pkt writes */
	writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
	readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
}
592
593static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
594{
595 switch (qc->tf.protocol) {
596 case ATA_PROT_DMA:
597 case ATA_PROT_NODATA:
598 pdc_packet_start(qc);
599 return 0;
600
601 case ATA_PROT_ATAPI_DMA:
602 BUG();
603 break;
604
605 default:
606 break;
607 }
608
609 return ata_qc_issue_prot(qc);
610}
611
/* Taskfile load hook; DMA/NODATA must never reach here since they are
 * driven entirely by the packet engine (see pdc_qc_issue_prot). */
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON (tf->protocol == ATA_PROT_DMA ||
		 tf->protocol == ATA_PROT_NODATA);
	ata_tf_load(ap, tf);
}
618
619
/* Command-issue hook; same invariant as pdc_tf_load_mmio: packetized
 * protocols never take this path. */
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON (tf->protocol == ATA_PROT_DMA ||
		 tf->protocol == ATA_PROT_NODATA);
	ata_exec_command(ap, tf);
}
626
627
628static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
629{
630 port->cmd_addr = base;
631 port->data_addr = base;
632 port->feature_addr =
633 port->error_addr = base + 0x4;
634 port->nsect_addr = base + 0x8;
635 port->lbal_addr = base + 0xc;
636 port->lbam_addr = base + 0x10;
637 port->lbah_addr = base + 0x14;
638 port->device_addr = base + 0x18;
639 port->command_addr =
640 port->status_addr = base + 0x1c;
641 port->altstatus_addr =
642 port->ctl_addr = base + 0x38;
643}
644
645
/* One-time adapter initialization: FIFO/burst tuning, hotplug interrupt
 * masking, TBG clock and slew-rate setup.  'chip_id' is currently
 * unused; the hotplug CSR offset comes from host private data. */
static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
{
	void __iomem *mmio = pe->mmio_base;
	struct pdc_host_priv *hp = pe->private_data;
	int hotplug_offset = hp->hotplug_offset;
	u32 tmp;

	/*
	 * Except for the hotplug stuff, this is voodoo from the
	 * Promise driver.  Label this entire section
	 * "TODO: figure out why we do this"
	 */

	/* change FIFO_SHD to 8 dwords, enable BMR_BURST */
	tmp = readl(mmio + PDC_FLASH_CTL);
	tmp |= 0x12000;	/* bit 16 (fifo 8 dw) and 13 (bmr burst?) */
	writel(tmp, mmio + PDC_FLASH_CTL);

	/* clear plug/unplug flags for all ports (write-1-to-clear) */
	tmp = readl(mmio + hotplug_offset);
	writel(tmp | 0xff, mmio + hotplug_offset);

	/* mask plug/unplug ints */
	tmp = readl(mmio + hotplug_offset);
	writel(tmp | 0xff0000, mmio + hotplug_offset);

	/* reduce TBG clock to 133 Mhz. */
	tmp = readl(mmio + PDC_TBG_MODE);
	tmp &= ~0x30000; /* clear bit 17, 16*/
	tmp |= 0x10000;  /* set bit 17:16 = 0:1 */
	writel(tmp, mmio + PDC_TBG_MODE);

	readl(mmio + PDC_TBG_MODE);	/* flush */
	msleep(10);	/* let the clock change settle */

	/* adjust slew rate control register. */
	tmp = readl(mmio + PDC_SLEW_CTL);
	tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */
	tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */
	writel(tmp, mmio + PDC_SLEW_CTL);
}
687
688static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
689{
690 static int printed_version;
691 struct ata_probe_ent *probe_ent = NULL;
692 struct pdc_host_priv *hp;
693 unsigned long base;
694 void __iomem *mmio_base;
695 unsigned int board_idx = (unsigned int) ent->driver_data;
696 int pci_dev_busy = 0;
697 int rc;
698
699 if (!printed_version++)
700 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
701
702 rc = pci_enable_device(pdev);
703 if (rc)
704 return rc;
705
706 rc = pci_request_regions(pdev, DRV_NAME);
707 if (rc) {
708 pci_dev_busy = 1;
709 goto err_out;
710 }
711
712 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
713 if (rc)
714 goto err_out_regions;
715 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
716 if (rc)
717 goto err_out_regions;
718
719 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
720 if (probe_ent == NULL) {
721 rc = -ENOMEM;
722 goto err_out_regions;
723 }
724
725 probe_ent->dev = pci_dev_to_dev(pdev);
726 INIT_LIST_HEAD(&probe_ent->node);
727
728 mmio_base = pci_iomap(pdev, 3, 0);
729 if (mmio_base == NULL) {
730 rc = -ENOMEM;
731 goto err_out_free_ent;
732 }
733 base = (unsigned long) mmio_base;
734
735 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
736 if (hp == NULL) {
737 rc = -ENOMEM;
738 goto err_out_free_ent;
739 }
740
741 /* Set default hotplug offset */
742 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
743 probe_ent->private_data = hp;
744
745 probe_ent->sht = pdc_port_info[board_idx].sht;
746 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
747 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
748 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
749 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
750 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
751
752 probe_ent->irq = pdev->irq;
753 probe_ent->irq_flags = IRQF_SHARED;
754 probe_ent->mmio_base = mmio_base;
755
756 pdc_ata_setup_port(&probe_ent->port[0], base + 0x200);
757 pdc_ata_setup_port(&probe_ent->port[1], base + 0x280);
758
759 probe_ent->port[0].scr_addr = base + 0x400;
760 probe_ent->port[1].scr_addr = base + 0x500;
761
762 /* notice 4-port boards */
763 switch (board_idx) {
764 case board_40518:
765 /* Override hotplug offset for SATAII150 */
766 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
767 /* Fall through */
768 case board_20319:
769 probe_ent->n_ports = 4;
770
771 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
772 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
773
774 probe_ent->port[2].scr_addr = base + 0x600;
775 probe_ent->port[3].scr_addr = base + 0x700;
776 break;
777 case board_2057x:
778 /* Override hotplug offset for SATAII150 */
779 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
780 /* Fall through */
781 case board_2037x:
782 probe_ent->n_ports = 2;
783 break;
784 case board_20771:
785 probe_ent->n_ports = 2;
786 break;
787 case board_20619:
788 probe_ent->n_ports = 4;
789
790 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
791 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
792
793 probe_ent->port[2].scr_addr = base + 0x600;
794 probe_ent->port[3].scr_addr = base + 0x700;
795 break;
796 default:
797 BUG();
798 break;
799 }
800
801 pci_set_master(pdev);
802
803 /* initialize adapter */
804 pdc_host_init(board_idx, probe_ent);
805
806 /* FIXME: Need any other frees than hp? */
807 if (!ata_device_add(probe_ent))
808 kfree(hp);
809
810 kfree(probe_ent);
811
812 return 0;
813
814err_out_free_ent:
815 kfree(probe_ent);
816err_out_regions:
817 pci_release_regions(pdev);
818err_out:
819 if (!pci_dev_busy)
820 pci_disable_device(pdev);
821 return rc;
822}
823
824
/* Module entry point: register the Promise PCI driver with the core. */
static int __init pdc_ata_init(void)
{
	return pci_register_driver(&pdc_ata_pci_driver);
}


/* Module exit: unregister; ports are torn down via ->remove. */
static void __exit pdc_ata_exit(void)
{
	pci_unregister_driver(&pdc_ata_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(pdc_ata_init);
module_exit(pdc_ata_exit);
diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h
new file mode 100644
index 000000000000..6ee5e190262d
--- /dev/null
+++ b/drivers/ata/sata_promise.h
@@ -0,0 +1,157 @@
1/*
2 * sata_promise.h - Promise SATA common definitions and inline funcs
3 *
4 * Copyright 2003-2004 Red Hat, Inc.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 *
22 * libata documentation is available via 'make {ps|pdf}docs',
23 * as Documentation/DocBook/libata.*
24 *
25 */
26
27#ifndef __SATA_PROMISE_H__
28#define __SATA_PROMISE_H__
29
30#include <linux/ata.h>
31
/* Bit encodings for Promise command packet bytes.  Each packet entry is
 * a control byte (register selector in low bits, repeat count in bits
 * 7:5) followed by the value(s) to write. */
enum pdc_packet_bits {
	PDC_PKT_READ		= (1 << 2),
	PDC_PKT_NODATA		= (1 << 3),

	PDC_PKT_SIZEMASK	= (1 << 7) | (1 << 6) | (1 << 5),
	PDC_PKT_CLEAR_BSY	= (1 << 4),
	PDC_PKT_WAIT_DRDY	= (1 << 3) | (1 << 4),
	PDC_LAST_REG		= (1 << 3),	/* end-of-packet marker */

	PDC_REG_DEVCTL		= (1 << 3) | (1 << 2) | (1 << 1),
};
43
/* Build the fixed 16-byte packet header: control word, S/G table
 * address, next-packet link (unused), then device-select and device
 * control register writes.  Returns the offset of the next free byte.
 * Byte 2 (seq id) is filled in later by pdc_packet_start().
 *
 * NOTE(review): buf32[] stores little-endian values in a u32 array —
 * the exact byte layout is the hardware contract; do not reorder.
 */
static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
					  dma_addr_t sg_table,
					  unsigned int devno, u8 *buf)
{
	u8 dev_reg;
	u32 *buf32 = (u32 *) buf;

	/* set control bits (byte 0), zero delay seq id (byte 3),
	 * and seq id (byte 2)
	 */
	switch (tf->protocol) {
	case ATA_PROT_DMA:
		if (!(tf->flags & ATA_TFLAG_WRITE))
			buf32[0] = cpu_to_le32(PDC_PKT_READ);
		else
			buf32[0] = 0;
		break;

	case ATA_PROT_NODATA:
		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
		break;

	default:
		/* pdc_qc_prep() only calls us for DMA/NODATA */
		BUG();
		break;
	}

	buf32[1] = cpu_to_le32(sg_table);	/* S/G table addr */
	buf32[2] = 0;				/* no next-packet */

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[13] = dev_reg;

	/* device control register */
	buf[14] = (1 << 5) | PDC_REG_DEVCTL;
	buf[15] = tf->ctl;

	return 16;	/* offset of next byte */
}
89
90static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
91 unsigned int i)
92{
93 if (tf->flags & ATA_TFLAG_DEVICE) {
94 buf[i++] = (1 << 5) | ATA_REG_DEVICE;
95 buf[i++] = tf->device;
96 }
97
98 /* and finally the command itself; also includes end-of-pkt marker */
99 buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
100 buf[i++] = tf->command;
101
102 return i;
103}
104
105static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i)
106{
107 /* the "(1 << 5)" should be read "(count << 5)" */
108
109 /* ATA command block registers */
110 buf[i++] = (1 << 5) | ATA_REG_FEATURE;
111 buf[i++] = tf->feature;
112
113 buf[i++] = (1 << 5) | ATA_REG_NSECT;
114 buf[i++] = tf->nsect;
115
116 buf[i++] = (1 << 5) | ATA_REG_LBAL;
117 buf[i++] = tf->lbal;
118
119 buf[i++] = (1 << 5) | ATA_REG_LBAM;
120 buf[i++] = tf->lbam;
121
122 buf[i++] = (1 << 5) | ATA_REG_LBAH;
123 buf[i++] = tf->lbah;
124
125 return i;
126}
127
128static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i)
129{
130 /* the "(2 << 5)" should be read "(count << 5)" */
131
132 /* ATA command block registers */
133 buf[i++] = (2 << 5) | ATA_REG_FEATURE;
134 buf[i++] = tf->hob_feature;
135 buf[i++] = tf->feature;
136
137 buf[i++] = (2 << 5) | ATA_REG_NSECT;
138 buf[i++] = tf->hob_nsect;
139 buf[i++] = tf->nsect;
140
141 buf[i++] = (2 << 5) | ATA_REG_LBAL;
142 buf[i++] = tf->hob_lbal;
143 buf[i++] = tf->lbal;
144
145 buf[i++] = (2 << 5) | ATA_REG_LBAM;
146 buf[i++] = tf->hob_lbam;
147 buf[i++] = tf->lbam;
148
149 buf[i++] = (2 << 5) | ATA_REG_LBAH;
150 buf[i++] = tf->hob_lbah;
151 buf[i++] = tf->lbah;
152
153 return i;
154}
155
156
157#endif /* __SATA_PROMISE_H__ */
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
new file mode 100644
index 000000000000..71bd6712b377
--- /dev/null
+++ b/drivers/ata/sata_qstor.c
@@ -0,0 +1,730 @@
1/*
2 * sata_qstor.c - Pacific Digital Corporation QStor SATA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Pacific Digital Corporation.
7 * (OSL/GPL code release authorized by Jalil Fadavi).
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 *
25 * libata documentation is available via 'make {ps|pdf}docs',
26 * as Documentation/DocBook/libata.*
27 *
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
38#include <linux/device.h>
39#include <scsi/scsi_host.h>
40#include <asm/io.h>
41#include <linux/libata.h>
42
43#define DRV_NAME "sata_qstor"
44#define DRV_VERSION "0.06"
45
/* Hardware constants and register layout for the QStor controller. */
enum {
	QS_PORTS		= 4,	/* SATA channels per adapter */
	QS_MAX_PRD		= LIBATA_MAX_PRD,
	QS_CPB_ORDER		= 6,
	QS_CPB_BYTES		= (1 << QS_CPB_ORDER),
	QS_PRD_BYTES		= QS_MAX_PRD * 16,	/* 16 bytes per PRD entry */
	QS_PKT_BYTES		= QS_CPB_BYTES + QS_PRD_BYTES,

	/* global register offsets */
	QS_HCF_CNFG3		= 0x0003, /* host configuration offset */
	QS_HID_HPHY		= 0x0004, /* host physical interface info */
	QS_HCT_CTRL		= 0x00e4, /* global interrupt mask offset */
	QS_HST_SFF		= 0x0100, /* host status fifo offset */
	QS_HVS_SERD3		= 0x0393, /* PHY enable offset */

	/* global control bits */
	QS_HPHY_64BIT		= (1 << 1), /* 64-bit bus detected */
	QS_CNFG3_GSRST		= 0x01,     /* global chip reset */
	QS_SERD3_PHY_ENA	= 0xf0,     /* PHY detection ENAble*/

	/* per-channel register offsets (channels are 0x4000 apart) */
	QS_CCF_CPBA		= 0x0710, /* chan CPB base address */
	QS_CCF_CSEP		= 0x0718, /* chan CPB separation factor */
	QS_CFC_HUFT		= 0x0800, /* host upstream fifo threshold */
	QS_CFC_HDFT		= 0x0804, /* host downstream fifo threshold */
	QS_CFC_DUFT		= 0x0808, /* dev upstream fifo threshold */
	QS_CFC_DDFT		= 0x080c, /* dev downstream fifo threshold */
	QS_CCT_CTR0		= 0x0900, /* chan control-0 offset */
	QS_CCT_CTR1		= 0x0901, /* chan control-1 offset */
	QS_CCT_CFF		= 0x0a00, /* chan command fifo offset */

	/* channel control bits */
	QS_CTR0_REG		= (1 << 1), /* register mode (vs. pkt mode) */
	QS_CTR0_CLER		= (1 << 2), /* clear channel errors */
	QS_CTR1_RDEV		= (1 << 1), /* sata phy/comms reset */
	QS_CTR1_RCHN		= (1 << 4), /* reset channel logic */
	QS_CCF_RUN_PKT		= 0x107,    /* RUN a new dma PKT */

	/* pkt sub-field headers */
	QS_HCB_HDR		= 0x01, /* Host Control Block header */
	QS_DCB_HDR		= 0x02, /* Device Control Block header */

	/* pkt HCB flag bits */
	QS_HF_DIRO		= (1 << 0), /* data DIRection Out */
	QS_HF_DAT		= (1 << 3), /* DATa pkt */
	QS_HF_IEN		= (1 << 4), /* Interrupt ENable */
	QS_HF_VLD		= (1 << 5), /* VaLiD pkt */

	/* pkt DCB flag bits */
	QS_DF_PORD		= (1 << 2), /* Pio OR Dma */
	QS_DF_ELBA		= (1 << 3), /* Extended LBA (lba48) */

	/* PCI device IDs */
	board_2068_idx		= 0,	/* QStor 4-port SATA/RAID */
};

/* kept in its own enum: the value is unsigned-long-sized (~0UL) and
 * must not influence the int-sized constants above */
enum {
	QS_DMA_BOUNDARY = ~0UL
};

/* per-port command-issue mode: idle, packet (DMA) or plain MMIO taskfile */
typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;

/* driver-private per-port data, hung off ata_port->private_data */
struct qs_port_priv {
	u8			*pkt;		/* CPB + PRD packet buffer (CPU address) */
	dma_addr_t		pkt_dma;	/* bus address of @pkt */
	qs_state_t		state;		/* current command-issue mode */
};
113
114static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg);
115static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
116static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
117static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
118static int qs_port_start(struct ata_port *ap);
119static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap);
127static void qs_irq_clear(struct ata_port *ap);
128static void qs_eng_timeout(struct ata_port *ap);
129
/* SCSI host template: generic libata glue plus QStor's PRD limit and
 * unrestricted DMA boundary. */
static struct scsi_host_template qs_ata_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= QS_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	//FIXME .use_clustering		= ATA_SHT_USE_CLUSTERING,
	.use_clustering		= ENABLE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= QS_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
148
/* libata port operations: taskfile access is generic MMIO; command
 * preparation/issue and interrupt handling are QStor-specific; the
 * BMDMA hooks are stubs because DMA goes through the packet engine. */
static const struct ata_port_operations qs_ata_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.check_atapi_dma	= qs_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.phy_reset		= qs_phy_reset,
	.qc_prep		= qs_qc_prep,
	.qc_issue		= qs_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,
	.eng_timeout		= qs_eng_timeout,
	.irq_handler		= qs_intr,
	.irq_clear		= qs_irq_clear,
	.scr_read		= qs_scr_read,
	.scr_write		= qs_scr_write,
	.port_start		= qs_port_start,
	.port_stop		= qs_port_stop,
	.host_stop		= qs_host_stop,
	.bmdma_stop		= qs_bmdma_stop,
	.bmdma_status		= qs_bmdma_status,
};
172
/* per-board capability table, indexed by board_*_idx from the PCI table */
static const struct ata_port_info qs_port_info[] = {
	/* board_2068_idx */
	{
		.sht		= &qs_ata_sht,
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SATA_RESET |
				  //FIXME ATA_FLAG_SRST |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
		.pio_mask	= 0x10, /* pio4 */
		.udma_mask	= 0x7f, /* udma0-6 */
		.port_ops	= &qs_ata_ops,
	},
};

/* PCI IDs handled by this driver: only the PDC 0x2068 QStor */
static const struct pci_device_id qs_ata_pci_tbl[] = {
	{ PCI_VENDOR_ID_PDC, 0x2068, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_2068_idx },

	{ }	/* terminate list */
};

static struct pci_driver qs_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= qs_ata_pci_tbl,
	.probe			= qs_ata_init_one,
	.remove			= ata_pci_remove_one,
};
200
/* nonzero return tells libata to fall back to PIO for ATAPI commands */
static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not supported */
}

/* DMA runs through the QStor packet engine, so the generic BMDMA
 * stop/status hooks have nothing to do */
static void qs_bmdma_stop(struct ata_queued_cmd *qc)
{
	/* nothing */
}

static u8 qs_bmdma_status(struct ata_port *ap)
{
	return 0;
}

/* interrupts are acknowledged by draining the status FIFO in qs_intr_pkt() */
static void qs_irq_clear(struct ata_port *ap)
{
	/* nothing */
}
220
/* switch a channel from packet mode back to register (taskfile) mode */
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
	/* per-channel register blocks are 0x4000 bytes apart */
	u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);

	writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
	readb(chan + QS_CCT_CTR0);	/* flush posted MMIO write */
}
228
/* reset a channel's command logic, then drop it back into register mode */
static inline void qs_reset_channel_logic(struct ata_port *ap)
{
	u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);

	writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
	readb(chan + QS_CCT_CTR0);	/* flush posted MMIO write */
	qs_enter_reg_mode(ap);
}
237
/* libata phy_reset hook: clear software state, reset the channel's
 * command logic, then run the standard SATA phy reset */
static void qs_phy_reset(struct ata_port *ap)
{
	struct qs_port_priv *pp = ap->private_data;

	pp->state = qs_state_idle;
	qs_reset_channel_logic(ap);
	sata_phy_reset(ap);
}

/* command-timeout hook: force MMIO state so completion paths behave,
 * reset the channel, and let libata's generic timeout handling finish */
static void qs_eng_timeout(struct ata_port *ap)
{
	struct qs_port_priv *pp = ap->private_data;

	if (pp->state != qs_state_idle) /* healthy paranoia */
		pp->state = qs_state_mmio;
	qs_reset_channel_logic(ap);
	ata_eng_timeout(ap);
}
256
257static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
258{
259 if (sc_reg > SCR_CONTROL)
260 return ~0U;
261 return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
262}
263
264static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
265{
266 if (sc_reg > SCR_CONTROL)
267 return;
268 writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
269}
270
/*
 * qs_fill_sg - build the PRD table for a DMA command
 * @qc: queued command whose scatterlist is encoded
 *
 * The PRD area starts QS_CPB_BYTES into the port's packet buffer.
 * Each entry is 16 bytes (see QS_PRD_BYTES = QS_MAX_PRD * 16):
 * an 8-byte little-endian bus address, a 4-byte little-endian length,
 * and 4 bytes of padding -- hence the second "prd += sizeof(u64)"
 * after the 32-bit length store.
 *
 * Returns the number of entries written.
 */
static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct qs_port_priv *pp = ap->private_data;
	unsigned int nelem;
	u8 *prd = pp->pkt + QS_CPB_BYTES;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	nelem = 0;
	ata_for_each_sg(sg, qc) {
		u64 addr;
		u32 len;

		addr = sg_dma_address(sg);
		*(__le64 *)prd = cpu_to_le64(addr);
		prd += sizeof(u64);

		len = sg_dma_len(sg);
		*(__le32 *)prd = cpu_to_le32(len);
		prd += sizeof(u64);	/* 4-byte length + 4 bytes padding */

		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
					(unsigned long long)addr, len);
		nelem++;
	}

	return nelem;
}
302
/*
 * qs_qc_prep - libata qc_prep hook: build the QStor command packet
 * @qc: command to prepare
 *
 * Non-DMA protocols fall back to the generic ata_qc_prep().  For DMA,
 * the packet buffer is laid out as:
 *   byte  0      host control block header (QS_HCB_HDR)
 *   byte  1      HCB flags (direction, interrupt-enable, valid)
 *   bytes 4-7    total transfer length in bytes (LE32)
 *   bytes 8-11   PRD entry count (LE32)
 *   bytes 16-23  bus address of the PRD table (LE64)
 *   byte 24      device control block header (QS_DCB_HDR)
 *   byte 28      DCB flags (PIO-or-DMA, extended LBA)
 *   bytes 32+    the FIS built from the taskfile
 */
static void qs_qc_prep(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;
	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
	u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
	u64 addr;
	unsigned int nelem;

	VPRINTK("ENTER\n");

	qs_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA) {
		ata_qc_prep(qc);
		return;
	}

	nelem = qs_fill_sg(qc);

	if ((qc->tf.flags & ATA_TFLAG_WRITE))
		hflags |= QS_HF_DIRO;
	if ((qc->tf.flags & ATA_TFLAG_LBA48))
		dflags |= QS_DF_ELBA;

	/* host control block (HCB) */
	buf[ 0] = QS_HCB_HDR;
	buf[ 1] = hflags;
	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nsect * ATA_SECT_SIZE);
	*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
	addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;	/* PRDs follow the CPB */
	*(__le64 *)(&buf[16]) = cpu_to_le64(addr);

	/* device control block (DCB) */
	buf[24] = QS_DCB_HDR;
	buf[28] = dflags;

	/* frame information structure (FIS) */
	ata_tf_to_fis(&qc->tf, &buf[32], 0);
}
341
/* kick off a prepared DMA packet: clear channel errors, make sure the
 * packet and PRDs are visible to the device, then write the RUN command */
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);

	VPRINTK("ENTER, ap %p\n", ap);

	writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
	wmb();			/* flush PRDs and pkt to memory */
	writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
	readl(chan + QS_CCT_CFF);	/* flush */
}
354
355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{
357 struct qs_port_priv *pp = qc->ap->private_data;
358
359 switch (qc->tf.protocol) {
360 case ATA_PROT_DMA:
361
362 pp->state = qs_state_pkt;
363 qs_packet_start(qc);
364 return 0;
365
366 case ATA_PROT_ATAPI_DMA:
367 BUG();
368 break;
369
370 default:
371 break;
372 }
373
374 pp->state = qs_state_mmio;
375 return ata_qc_issue_prot(qc);
376}
377
/*
 * qs_intr_pkt - service packet-mode completions from the status FIFO
 * @host_set: adapter whose FIFO is drained
 *
 * Each FIFO slot is two 32-bit words.  Bit 30 of the second word marks
 * a valid entry; bit 31 marks the FIFO empty.  A valid entry carries
 * the channel number (bits 8-9), host status (low 6 bits) and device
 * status (bits 16-23 of the first word).  Host status 0 (success) or
 * 3 (device error) completes the active command; other values are
 * intermediate and ignored.  Loops until the FIFO reports empty.
 *
 * Returns nonzero if any valid entry was seen.
 */
static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
{
	unsigned int handled = 0;
	u8 sFFE;
	u8 __iomem *mmio_base = host_set->mmio_base;

	do {
		u32 sff0 = readl(mmio_base + QS_HST_SFF);
		u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
		u8 sEVLD = (sff1 >> 30) & 0x01;	/* valid flag */
		sFFE  = sff1 >> 31;		/* empty flag */

		if (sEVLD) {
			u8 sDST = sff0 >> 16;	/* dev status */
			u8 sHST = sff1 & 0x3f;	/* host status */
			unsigned int port_no = (sff1 >> 8) & 0x03;
			struct ata_port *ap = host_set->ports[port_no];

			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
					sff1, sff0, port_no, sHST, sDST);
			handled = 1;
			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
				struct ata_queued_cmd *qc;
				struct qs_port_priv *pp = ap->private_data;
				if (!pp || pp->state != qs_state_pkt)
					continue;	/* stale entry; keep draining */
				qc = ata_qc_from_tag(ap, ap->active_tag);
				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
					switch (sHST) {
					case 0: /* successful CPB */
					case 3: /* device error */
						pp->state = qs_state_idle;
						qs_enter_reg_mode(qc->ap);
						qc->err_mask |= ac_err_mask(sDST);
						ata_qc_complete(qc);
						break;
					default:
						break;
					}
				}
			}
		}
	} while (!sFFE);
	return handled;
}
423
/*
 * qs_intr_mmio - service register-mode (taskfile) completions
 * @host_set: adapter to scan
 *
 * Polls every enabled port that is in MMIO state; reading the status
 * register clears the device's INTRQ.  A command still BUSY is left
 * alone; otherwise the active command is completed with the device
 * status folded into its error mask.
 *
 * Returns nonzero if any command was completed.
 */
static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
		struct ata_port *ap;
		ap = host_set->ports[port_no];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;
			struct qs_port_priv *pp = ap->private_data;
			if (!pp || pp->state != qs_state_mmio)
				continue;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {

				/* check main status, clearing INTRQ */
				u8 status = ata_check_status(ap);
				if ((status & ATA_BUSY))
					continue;
				DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
					ap->id, qc->tf.protocol, status);

				/* complete taskfile transaction */
				pp->state = qs_state_idle;
				qc->err_mask |= ac_err_mask(status);
				ata_qc_complete(qc);
				handled = 1;
			}
		}
	}
	return handled;
}
457
458static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
459{
460 struct ata_host_set *host_set = dev_instance;
461 unsigned int handled = 0;
462
463 VPRINTK("ENTER\n");
464
465 spin_lock(&host_set->lock);
466 handled = qs_intr_pkt(host_set) | qs_intr_mmio(host_set);
467 spin_unlock(&host_set->lock);
468
469 VPRINTK("EXIT\n");
470
471 return IRQ_RETVAL(handled);
472}
473
/* fill in one channel's taskfile register addresses; the QStor spaces
 * shadow registers 8 bytes apart with the HOB byte at offset +1 */
static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
{
	port->cmd_addr		=
	port->data_addr		= base + 0x400;
	port->error_addr	=
	port->feature_addr	= base + 0x408; /* hob_feature = 0x409 */
	port->nsect_addr	= base + 0x410; /* hob_nsect = 0x411 */
	port->lbal_addr		= base + 0x418; /* hob_lbal = 0x419 */
	port->lbam_addr		= base + 0x420; /* hob_lbam = 0x421 */
	port->lbah_addr		= base + 0x428; /* hob_lbah = 0x429 */
	port->device_addr	= base + 0x430;
	port->status_addr	=
	port->command_addr	= base + 0x438;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x440;
	port->scr_addr		= base + 0xc00;
}
491
/*
 * qs_port_start - libata port_start hook
 * @ap: port being brought up
 *
 * Allocates the per-port private data and the DMA-coherent packet
 * buffer (CPB + PRD table), then programs the channel's CPB base
 * address registers (low then high 32 bits).  On any failure the
 * partially-acquired resources are released in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int qs_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct qs_port_priv *pp;
	void __iomem *mmio_base = ap->host_set->mmio_base;
	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
	u64 addr;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;
	qs_enter_reg_mode(ap);
	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}
	pp->pkt = dma_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
								GFP_KERNEL);
	if (!pp->pkt) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(pp->pkt, 0, QS_PKT_BYTES);	/* coherent alloc isn't zeroed */
	ap->private_data = pp;

	addr = (u64)pp->pkt_dma;
	writel((u32) addr,	  chan + QS_CCF_CPBA);
	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}
530
531static void qs_port_stop(struct ata_port *ap)
532{
533 struct device *dev = ap->host_set->dev;
534 struct qs_port_priv *pp = ap->private_data;
535
536 if (pp != NULL) {
537 ap->private_data = NULL;
538 if (pp->pkt != NULL)
539 dma_free_coherent(dev, QS_PKT_BYTES, pp->pkt,
540 pp->pkt_dma);
541 kfree(pp);
542 }
543 ata_port_stop(ap);
544}
545
/* final adapter teardown: mask all interrupts, reset the chip, and
 * release the MMIO mapping taken in qs_ata_init_one() */
static void qs_host_stop(struct ata_host_set *host_set)
{
	void __iomem *mmio_base = host_set->mmio_base;
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	pci_iounmap(pdev, mmio_base);
}
556
/*
 * qs_host_init - one-time adapter initialization at probe
 * @chip_id: board index (currently unused; only one board type)
 * @pe:      probe entity describing the adapter
 *
 * Quiesces the chip, resets every channel, enables the PHYs, programs
 * FIFO thresholds and the CPB size, then enables host interrupts.
 */
static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
{
	void __iomem *mmio_base = pe->mmio_base;
	unsigned int port_no;

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	/* reset each channel in turn */
	for (port_no = 0; port_no < pe->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
		readb(chan + QS_CCT_CTR0);	/* flush */
	}
	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */

	for (port_no = 0; port_no < pe->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		/* set FIFO depths to same settings as Windows driver */
		writew(32, chan + QS_CFC_HUFT);
		writew(32, chan + QS_CFC_HDFT);
		writew(10, chan + QS_CFC_DUFT);
		writew( 8, chan + QS_CFC_DDFT);
		/* set CPB size in bytes, as a power of two */
		writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
	}
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}
586
/*
 * The QStor understands 64-bit buses, and uses 64-bit fields
 * for DMA pointers regardless of bus width.  We just have to
 * make sure our DMA masks are set appropriately for whatever
 * bridge lies between us and the QStor, and then the DMA mapping
 * code will ensure we only ever "see" appropriate buffer addresses.
 * If we're 32-bit limited somewhere, then our 64-bit fields will
 * just end up with zeros in the upper 32-bits, without any special
 * logic required outside of this routine (below).
 *
 * Returns 0 on success or the negative errno from the failed
 * pci_set_*_dma_mask() call.
 */
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	/* the chip reports whether it sits on a 64-bit bus */
	u32 bus_info = readl(mmio_base + QS_HID_HPHY);
	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);

	if (have_64bit_bus &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			/* streaming mask stays 64-bit; only the coherent
			 * mask falls back to 32-bit */
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}
629
630static int qs_ata_init_one(struct pci_dev *pdev,
631 const struct pci_device_id *ent)
632{
633 static int printed_version;
634 struct ata_probe_ent *probe_ent = NULL;
635 void __iomem *mmio_base;
636 unsigned int board_idx = (unsigned int) ent->driver_data;
637 int rc, port_no;
638
639 if (!printed_version++)
640 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
641
642 rc = pci_enable_device(pdev);
643 if (rc)
644 return rc;
645
646 rc = pci_request_regions(pdev, DRV_NAME);
647 if (rc)
648 goto err_out;
649
650 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
651 rc = -ENODEV;
652 goto err_out_regions;
653 }
654
655 mmio_base = pci_iomap(pdev, 4, 0);
656 if (mmio_base == NULL) {
657 rc = -ENOMEM;
658 goto err_out_regions;
659 }
660
661 rc = qs_set_dma_masks(pdev, mmio_base);
662 if (rc)
663 goto err_out_iounmap;
664
665 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
666 if (probe_ent == NULL) {
667 rc = -ENOMEM;
668 goto err_out_iounmap;
669 }
670
671 memset(probe_ent, 0, sizeof(*probe_ent));
672 probe_ent->dev = pci_dev_to_dev(pdev);
673 INIT_LIST_HEAD(&probe_ent->node);
674
675 probe_ent->sht = qs_port_info[board_idx].sht;
676 probe_ent->host_flags = qs_port_info[board_idx].host_flags;
677 probe_ent->pio_mask = qs_port_info[board_idx].pio_mask;
678 probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask;
679 probe_ent->udma_mask = qs_port_info[board_idx].udma_mask;
680 probe_ent->port_ops = qs_port_info[board_idx].port_ops;
681
682 probe_ent->irq = pdev->irq;
683 probe_ent->irq_flags = IRQF_SHARED;
684 probe_ent->mmio_base = mmio_base;
685 probe_ent->n_ports = QS_PORTS;
686
687 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
688 unsigned long chan = (unsigned long)mmio_base +
689 (port_no * 0x4000);
690 qs_ata_setup_port(&probe_ent->port[port_no], chan);
691 }
692
693 pci_set_master(pdev);
694
695 /* initialize adapter */
696 qs_host_init(board_idx, probe_ent);
697
698 rc = ata_device_add(probe_ent);
699 kfree(probe_ent);
700 if (rc != QS_PORTS)
701 goto err_out_iounmap;
702 return 0;
703
704err_out_iounmap:
705 pci_iounmap(pdev, mmio_base);
706err_out_regions:
707 pci_release_regions(pdev);
708err_out:
709 pci_disable_device(pdev);
710 return rc;
711}
712
/* module entry: register the PCI driver; probing happens per-device */
static int __init qs_ata_init(void)
{
	return pci_register_driver(&qs_ata_pci_driver);
}

/* module exit: unregister; per-device teardown runs via .remove */
static void __exit qs_ata_exit(void)
{
	pci_unregister_driver(&qs_ata_pci_driver);
}
722
723MODULE_AUTHOR("Mark Lord");
724MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
725MODULE_LICENSE("GPL");
726MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
727MODULE_VERSION(DRV_VERSION);
728
729module_init(qs_ata_init);
730module_exit(qs_ata_exit);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
new file mode 100644
index 000000000000..907faa87239b
--- /dev/null
+++ b/drivers/ata/sata_sil.c
@@ -0,0 +1,723 @@
1/*
2 * sata_sil.c - Silicon Image SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2005 Red Hat, Inc.
9 * Copyright 2003 Benjamin Herrenschmidt
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Documentation for SiI 3112:
31 * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
32 *
33 * Other errata and documentation available under NDA.
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "2.0"
50
/* Flags, controller IDs, register offsets and bit definitions for the
 * Silicon Image 3112/3512/3114 family. */
enum {
	/*
	 * host flags (stored in the top bits of host_flags, above the
	 * generic ATA_FLAG_* values)
	 */
	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_HOST_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,

	/*
	 * Controller IDs (indices into sil_port_info[])
	 */
	sil_3112		= 0,
	sil_3112_no_sata_irq	= 1,
	sil_3512		= 2,
	sil_3114		= 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */

	/*
	 * Others (per-drive quirk flags used by sil_blacklist)
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),
	SIL_QUIRK_UDMA5MAX	= (1 << 1),
};
110
111static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
112static int sil_pci_device_resume(struct pci_dev *pdev);
113static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
114static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
115static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
116static void sil_post_set_mode (struct ata_port *ap);
117static irqreturn_t sil_interrupt(int irq, void *dev_instance,
118 struct pt_regs *regs);
119static void sil_freeze(struct ata_port *ap);
120static void sil_thaw(struct ata_port *ap);
121
122
/* PCI IDs: SiI parts plus ATI IXP rebrands of the 3112 core */
static const struct pci_device_id sil_pci_tbl[] = {
	{ 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
	{ 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
	{ 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
	{ 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
	{ 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
	{ 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
	{ 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
	{ }	/* terminate list */
};


/* TODO firmware versions should be added - eric */
/* drives with known interaction problems; matched by product string
 * and mapped to a SIL_QUIRK_* flag */
static const struct sil_drivelist {
	const char * product;
	unsigned int quirk;
} sil_blacklist [] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};

static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
};
163
/* SCSI host template: entirely generic libata glue for this driver */
static struct scsi_host_template sil_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};
183
/* port operations: standard BMDMA paths, with SiI-specific device
 * configuration, transfer-mode programming, SCR access, interrupt
 * handling, and freeze/thaw */
static const struct ata_port_operations sil_ops = {
	.port_disable		= ata_port_disable,
	.dev_config		= sil_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.post_set_mode		= sil_post_set_mode,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_mmio_data_xfer,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_handler		= sil_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};
212
/* per-controller capabilities, indexed by the sil_* controller IDs */
static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.sht		= &sil_sht,
		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.sht		= &sil_sht,
		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.sht		= &sil_sht,
		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.sht		= &sil_sht,
		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil_ops,
	},
};
252
/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
/* all offsets are relative to the adapter's MMIO base (BAR 5) */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	{ 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
	{ 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
273
274MODULE_AUTHOR("Jeff Garzik");
275MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
276MODULE_LICENSE("GPL");
277MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
278MODULE_VERSION(DRV_VERSION);
279
280static int slow_down = 0;
281module_param(slow_down, int, 0444);
282MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
283
284
285static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
286{
287 u8 cache_line = 0;
288 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
289 return cache_line;
290}
291
/*
 * sil_post_set_mode - program the SiI transfer-mode register
 * @ap: port whose two devices were just reconfigured
 *
 * Encodes each device's mode into a 2-bit field of the per-port
 * xfer_mode register: 0 = PIO0/1/2 (or device absent), 1 = PIO3/4,
 * 3 = UDMA (2 would select MDMA but is never used here).  Device 0
 * occupies bits 0-1, device 1 bits 4-5.
 */
static void sil_post_set_mode (struct ata_port *ap)
{
	struct ata_host_set *host_set = ap->host_set;
	struct ata_device *dev;
	void __iomem *addr =
		host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
	u32 tmp, dev_mode[2];
	unsigned int i;

	for (i = 0; i < 2; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			dev_mode[i] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[i] = 1;	/* PIO3/4 */
		else
			dev_mode[i] = 3;	/* UDMA */
		/* value 2 indicates MDMA */
	}

	tmp = readl(addr);
	/* clear both devices' mode fields before merging the new values */
	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
}
319
320static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
321{
322 unsigned long offset = ap->ioaddr.scr_addr;
323
324 switch (sc_reg) {
325 case SCR_STATUS:
326 return offset + 4;
327 case SCR_ERROR:
328 return offset + 8;
329 case SCR_CONTROL:
330 return offset;
331 default:
332 /* do nothing */
333 break;
334 }
335
336 return 0;
337}
338
339static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
340{
341 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
342 if (mmio)
343 return readl(mmio);
344 return 0xffffffffU;
345}
346
347static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
348{
349 void *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
350 if (mmio)
351 writel(val, mmio);
352}
353
/* Per-port interrupt service routine.  Handles SATA PHY events first,
 * then validates that an interrupt was expected in the current HSM
 * state before advancing the host state machine.  Any inconsistency
 * freezes the port so the error handler takes over.
 */
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror;

		/* SIEN doesn't mask SATA IRQs on some 3112s. Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending. Clear SError immediately.
		 */
		serror = sil_scr_read(ap, SCR_ERROR);
		sil_scr_write(ap, SCR_ERROR, serror);

		/* Trigger hotplug and accumulate SError only if the
		 * port isn't already frozen. Otherwise, PHY events
		 * during hardreset makes controllers with broken SIEN
		 * repeat probing needlessly.
		 */
		if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
			ata_ehi_hotplugged(&ap->eh_info);
			ap->eh_info.serror |= serror;
		}

		goto freeze;
	}

	/* spurious interrupt: no active command, or interrupts disabled */
	if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
		goto freeze;

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_hsm_move(ap, qc, status, 0);

	return;

 err_hsm:
	qc->err_mask |= AC_ERR_HSM;
 freeze:
	ata_port_freeze(ap);
}
435
436static irqreturn_t sil_interrupt(int irq, void *dev_instance,
437 struct pt_regs *regs)
438{
439 struct ata_host_set *host_set = dev_instance;
440 void __iomem *mmio_base = host_set->mmio_base;
441 int handled = 0;
442 int i;
443
444 spin_lock(&host_set->lock);
445
446 for (i = 0; i < host_set->n_ports; i++) {
447 struct ata_port *ap = host_set->ports[i];
448 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
449
450 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
451 continue;
452
453 /* turn off SATA_IRQ if not supported */
454 if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
455 bmdma2 &= ~SIL_DMA_SATA_IRQ;
456
457 if (bmdma2 == 0xffffffff ||
458 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
459 continue;
460
461 sil_host_intr(ap, bmdma2);
462 handled = 1;
463 }
464
465 spin_unlock(&host_set->lock);
466
467 return IRQ_RETVAL(handled);
468}
469
470static void sil_freeze(struct ata_port *ap)
471{
472 void __iomem *mmio_base = ap->host_set->mmio_base;
473 u32 tmp;
474
475 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
476 writel(0, mmio_base + sil_port[ap->port_no].sien);
477
478 /* plug IRQ */
479 tmp = readl(mmio_base + SIL_SYSCFG);
480 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
481 writel(tmp, mmio_base + SIL_SYSCFG);
482 readl(mmio_base + SIL_SYSCFG); /* flush */
483}
484
485static void sil_thaw(struct ata_port *ap)
486{
487 void __iomem *mmio_base = ap->host_set->mmio_base;
488 u32 tmp;
489
490 /* clear IRQ */
491 ata_chk_status(ap);
492 ata_bmdma_irq_clear(ap);
493
494 /* turn on SATA IRQ if supported */
495 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
496 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
497
498 /* turn on IRQ */
499 tmp = readl(mmio_base + SIL_SYSCFG);
500 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
501 writel(tmp, mmio_base + SIL_SYSCFG);
502}
503
504/**
505 * sil_dev_config - Apply device/host-specific errata fixups
506 * @ap: Port containing device to be examined
507 * @dev: Device to be examined
508 *
509 * After the IDENTIFY [PACKET] DEVICE step is complete, and a
510 * device is known to be present, this function is called.
511 * We apply two errata fixups which are specific to Silicon Image,
512 * a Seagate and a Maxtor fixup.
513 *
514 * For certain Seagate devices, we must limit the maximum sectors
515 * to under 8K.
516 *
517 * For certain Maxtor devices, we must not program the drive
518 * beyond udma5.
519 *
520 * Both fixups are unfairly pessimistic. As soon as I get more
521 * information on these errata, I will create a more exhaustive
522 * list, and apply the fixups to only the specific
523 * devices/hosts/firmwares that need it.
524 *
525 * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
526 * The Maxtor quirk is in the blacklist, but I'm keeping the original
527 * pessimistic fix for the following reasons...
528 * - There seems to be less info on it, only one device gleaned off the
529 * Windows driver, maybe only one is affected. More info would be greatly
530 * appreciated.
531 * - But then again UDMA5 is hardly anything to complain about
532 */
533static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
534{
535 unsigned int n, quirks = 0;
536 unsigned char model_num[41];
537
538 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
539
540 for (n = 0; sil_blacklist[n].product; n++)
541 if (!strcmp(sil_blacklist[n].product, model_num)) {
542 quirks = sil_blacklist[n].quirk;
543 break;
544 }
545
546 /* limit requests to 15 sectors */
547 if (slow_down ||
548 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
549 (quirks & SIL_QUIRK_MOD15WRITE))) {
550 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
551 "(mod15write workaround)\n");
552 dev->max_sectors = 15;
553 return;
554 }
555
556 /* limit to udma5 */
557 if (quirks & SIL_QUIRK_UDMA5MAX) {
558 ata_dev_printk(dev, KERN_INFO,
559 "applying Maxtor errata fix %s\n", model_num);
560 dev->udma_mask &= ATA_UDMA5;
561 return;
562 }
563}
564
/* One-time (and resume-time) controller setup: FIFO arbitration from
 * the PCI cache line size, the R_ERR-on-DMA-activate errata fixup, and
 * the interrupt-steering bit needed for 4-port operation.
 */
static void sil_init_controller(struct pci_dev *pdev,
				int n_ports, unsigned long host_flags,
				void __iomem *mmio_base)
{
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;  /* cls = (line_size/8)+1 */
		for (i = 0; i < n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_printk(KERN_WARNING, &pdev->dev,
			   "cache line size not set. Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < n_ports; i++) {
			/* only patch ports whose FIS config low bits read 01 */
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)	/* print the notice only once */
				dev_printk(KERN_INFO, &pdev->dev,
					   "Applying R_ERR on DMA activate "
					   "FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}
610
/* PCI probe: enable the device, set DMA masks, build the libata probe
 * entry from sil_port_info[], map BAR5 for MMIO, wire up per-port
 * addresses from sil_port[], run controller init and register with
 * libata.  Error paths unwind in reverse acquisition order.
 */
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	unsigned long base;
	void __iomem *mmio_base;
	int rc;
	unsigned int i;
	int pci_dev_busy = 0;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		/* regions busy: another driver owns them, so don't
		 * disable the device on the way out */
		pci_dev_busy = 1;
		goto err_out;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = pci_dev_to_dev(pdev);
	probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
	probe_ent->sht = sil_port_info[ent->driver_data].sht;
	/* only the 3114 variant has four ports */
	probe_ent->n_ports = (ent->driver_data == sil_3114) ? 4 : 2;
	probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
	probe_ent->mwdma_mask = sil_port_info[ent->driver_data].mwdma_mask;
	probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;

	/* all controller registers live behind BAR5 */
	mmio_base = pci_iomap(pdev, 5, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	probe_ent->mmio_base = mmio_base;

	base = (unsigned long) mmio_base;

	for (i = 0; i < probe_ent->n_ports; i++) {
		probe_ent->port[i].cmd_addr = base + sil_port[i].tf;
		probe_ent->port[i].altstatus_addr =
		probe_ent->port[i].ctl_addr = base + sil_port[i].ctl;
		probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma;
		probe_ent->port[i].scr_addr = base + sil_port[i].scr;
		ata_std_ports(&probe_ent->port[i]);
	}

	sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
			    mmio_base);

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}
698
/* PM resume: restore PCI state, re-run the controller register setup
 * that was lost across suspend, then resume the libata ports. */
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);
	sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags,
			    host_set->mmio_base);
	ata_host_set_resume(host_set);

	return 0;
}
710
/* Module entry point: register the PCI driver. */
static int __init sil_init(void)
{
	return pci_register_driver(&sil_pci_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit sil_exit(void)
{
	pci_unregister_driver(&sil_pci_driver);
}


module_init(sil_init);
module_exit(sil_exit);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
new file mode 100644
index 000000000000..3a0161ddc33f
--- /dev/null
+++ b/drivers/ata/sata_sil24.c
@@ -0,0 +1,1222 @@
1/*
2 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
3 *
4 * Copyright 2005 Tejun Heo
5 *
6 * Based on preview driver from Silicon Image.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h>
30#include <linux/libata.h>
31#include <asm/io.h>
32
33#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.3"
35
36/*
37 * Port request block (PRB) 32 bytes
38 */
39struct sil24_prb {
40 __le16 ctrl;
41 __le16 prot;
42 __le32 rx_cnt;
43 u8 fis[6 * 4];
44};
45
46/*
47 * Scatter gather entry (SGE) 16 bytes
48 */
49struct sil24_sge {
50 __le64 addr;
51 __le32 cnt;
52 __le32 flags;
53};
54
55/*
56 * Port multiplier
57 */
58struct sil24_port_multiplier {
59 __le32 diag;
60 __le32 sactive;
61};
62
/* Register offsets, bit definitions and driver constants for the
 * SiI3124/3132 family.  Global registers live behind BAR0, per-port
 * register blocks behind BAR2. */
enum {
	/*
	 * Global controller registers (128 bytes @ BAR0)
	 */
	/* 32 bit regs */
	HOST_SLOT_STAT		= 0x00, /* 32 bit slot stat * 4 */
	HOST_CTRL		= 0x40,
	HOST_IRQ_STAT		= 0x44,
	HOST_PHY_CFG		= 0x48,
	HOST_BIST_CTRL		= 0x50,
	HOST_BIST_PTRN		= 0x54,
	HOST_BIST_STAT		= 0x58,
	HOST_MEM_BIST_STAT	= 0x5c,
	HOST_FLASH_CMD		= 0x70,
	/* 8 bit regs */
	HOST_FLASH_DATA		= 0x74,
	HOST_TRANSITION_DETECT	= 0x75,
	HOST_GPIO_CTRL		= 0x76,
	HOST_I2C_ADDR		= 0x78, /* 32 bit */
	HOST_I2C_DATA		= 0x7c,
	HOST_I2C_XFER_CNT	= 0x7e,
	HOST_I2C_CTRL		= 0x7f,

	/* HOST_SLOT_STAT bits */
	HOST_SSTAT_ATTN		= (1 << 31),

	/* HOST_CTRL bits */
	HOST_CTRL_M66EN		= (1 << 16), /* M66EN PCI bus signal */
	HOST_CTRL_TRDY		= (1 << 17), /* latched PCI TRDY */
	HOST_CTRL_STOP		= (1 << 18), /* latched PCI STOP */
	HOST_CTRL_DEVSEL	= (1 << 19), /* latched PCI DEVSEL */
	HOST_CTRL_REQ64		= (1 << 20), /* latched PCI REQ64 */
	HOST_CTRL_GLOBAL_RST	= (1 << 31), /* global reset */

	/*
	 * Port registers
	 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
	 */
	PORT_REGS_SIZE		= 0x2000,

	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PM regs */
	PORT_LRAM_SLOT_SZ	= 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */

	PORT_PM			= 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
	/* 32 bit regs */
	PORT_CTRL_STAT		= 0x1000, /* write: ctrl-set, read: stat */
	PORT_CTRL_CLR		= 0x1004, /* write: ctrl-clear */
	PORT_IRQ_STAT		= 0x1008, /* high: status, low: interrupt */
	PORT_IRQ_ENABLE_SET	= 0x1010, /* write: enable-set */
	PORT_IRQ_ENABLE_CLR	= 0x1014, /* write: enable-clear */
	PORT_ACTIVATE_UPPER_ADDR= 0x101c,
	PORT_EXEC_FIFO		= 0x1020, /* command execution fifo */
	PORT_CMD_ERR		= 0x1024, /* command error number */
	PORT_FIS_CFG		= 0x1028,
	PORT_FIFO_THRES		= 0x102c,
	/* 16 bit regs */
	PORT_DECODE_ERR_CNT	= 0x1040,
	PORT_DECODE_ERR_THRESH	= 0x1042,
	PORT_CRC_ERR_CNT	= 0x1044,
	PORT_CRC_ERR_THRESH	= 0x1046,
	PORT_HSHK_ERR_CNT	= 0x1048,
	PORT_HSHK_ERR_THRESH	= 0x104a,
	/* 32 bit regs */
	PORT_PHY_CFG		= 0x1050,
	PORT_SLOT_STAT		= 0x1800,
	PORT_CMD_ACTIVATE	= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
	PORT_EXEC_DIAG		= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
	PORT_PSD_DIAG		= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
	PORT_SCONTROL		= 0x1f00,
	PORT_SSTATUS		= 0x1f04,
	PORT_SERROR		= 0x1f08,
	PORT_SACTIVE		= 0x1f0c,

	/* PORT_CTRL_STAT bits */
	PORT_CS_PORT_RST	= (1 << 0), /* port reset */
	PORT_CS_DEV_RST		= (1 << 1), /* device reset */
	PORT_CS_INIT		= (1 << 2), /* port initialize */
	PORT_CS_IRQ_WOC		= (1 << 3), /* interrupt write one to clear */
	PORT_CS_CDB16		= (1 << 5), /* 0=12b cdb, 1=16b cdb */
	PORT_CS_RESUME		= (1 << 6), /* port resume */
	PORT_CS_32BIT_ACTV	= (1 << 10), /* 32-bit activation */
	PORT_CS_PM_EN		= (1 << 13), /* port multiplier enable */
	PORT_CS_RDY		= (1 << 31), /* port ready to accept commands */

	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
	/* bits[11:0] are masked */
	PORT_IRQ_COMPLETE	= (1 << 0), /* command(s) completed */
	PORT_IRQ_ERROR		= (1 << 1), /* command execution error */
	PORT_IRQ_PORTRDY_CHG	= (1 << 2), /* port ready change */
	PORT_IRQ_PWR_CHG	= (1 << 3), /* power management change */
	PORT_IRQ_PHYRDY_CHG	= (1 << 4), /* PHY ready change */
	PORT_IRQ_COMWAKE	= (1 << 5), /* COMWAKE received */
	PORT_IRQ_UNK_FIS	= (1 << 6), /* unknown FIS received */
	PORT_IRQ_DEV_XCHG	= (1 << 7), /* device exchanged */
	PORT_IRQ_8B10B		= (1 << 8), /* 8b/10b decode error threshold */
	PORT_IRQ_CRC		= (1 << 9), /* CRC error threshold */
	PORT_IRQ_HANDSHAKE	= (1 << 10), /* handshake error threshold */
	PORT_IRQ_SDB_NOTIFY	= (1 << 11), /* SDB notify received */

	DEF_PORT_IRQ		= PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
				  PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
				  PORT_IRQ_UNK_FIS,

	/* bits[27:16] are unmasked (raw) */
	PORT_IRQ_RAW_SHIFT	= 16,
	PORT_IRQ_MASKED_MASK	= 0x7ff,
	PORT_IRQ_RAW_MASK	= (0x7ff << PORT_IRQ_RAW_SHIFT),

	/* ENABLE_SET/CLR specific, intr steering - 2 bit field */
	PORT_IRQ_STEER_SHIFT	= 30,
	PORT_IRQ_STEER_MASK	= (3 << PORT_IRQ_STEER_SHIFT),

	/* PORT_CMD_ERR constants */
	PORT_CERR_DEV		= 1, /* Error bit in D2H Register FIS */
	PORT_CERR_SDB		= 2, /* Error bit in SDB FIS */
	PORT_CERR_DATA		= 3, /* Error in data FIS not detected by dev */
	PORT_CERR_SEND		= 4, /* Initial cmd FIS transmission failure */
	PORT_CERR_INCONSISTENT	= 5, /* Protocol mismatch */
	PORT_CERR_DIRECTION	= 6, /* Data direction mismatch */
	PORT_CERR_UNDERRUN	= 7, /* Ran out of SGEs while writing */
	PORT_CERR_OVERRUN	= 8, /* Ran out of SGEs while reading */
	PORT_CERR_PKT_PROT	= 11, /* DIR invalid in 1st PIO setup of ATAPI */
	PORT_CERR_SGT_BOUNDARY	= 16, /* PLD ecode 00 - SGT not on qword boundary */
	PORT_CERR_SGT_TGTABRT	= 17, /* PLD ecode 01 - target abort */
	PORT_CERR_SGT_MSTABRT	= 18, /* PLD ecode 10 - master abort */
	PORT_CERR_SGT_PCIPERR	= 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
	PORT_CERR_CMD_BOUNDARY	= 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
	PORT_CERR_CMD_TGTABRT	= 25, /* ctrl[15:13] 010 - target abort */
	PORT_CERR_CMD_MSTABRT	= 26, /* ctrl[15:13] 100 - master abort */
	PORT_CERR_CMD_PCIPERR	= 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
	PORT_CERR_XFR_UNDEF	= 32, /* PSD ecode 00 - undefined */
	PORT_CERR_XFR_TGTABRT	= 33, /* PSD ecode 01 - target abort */
	PORT_CERR_XFR_MSTABRT	= 34, /* PSD ecode 10 - master abort */
	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
	PORT_CERR_SENDSERVICE	= 36, /* FIS received while sending service */

	/* bits of PRB control field */
	PRB_CTRL_PROTOCOL	= (1 << 0), /* override def. ATA protocol */
	PRB_CTRL_PACKET_READ	= (1 << 4), /* PACKET cmd read */
	PRB_CTRL_PACKET_WRITE	= (1 << 5), /* PACKET cmd write */
	PRB_CTRL_NIEN		= (1 << 6), /* Mask completion irq */
	PRB_CTRL_SRST		= (1 << 7), /* Soft reset request (ign BSY?) */

	/* PRB protocol field */
	PRB_PROT_PACKET		= (1 << 0),
	PRB_PROT_TCQ		= (1 << 1),
	PRB_PROT_NCQ		= (1 << 2),
	PRB_PROT_READ		= (1 << 3),
	PRB_PROT_WRITE		= (1 << 4),
	PRB_PROT_TRANSPARENT	= (1 << 5),

	/*
	 * Other constants
	 */
	SGE_TRM			= (1 << 31), /* Last SGE in chain */
	SGE_LNK			= (1 << 30), /* linked list
						Points to SGT, not SGE */
	SGE_DRD			= (1 << 29), /* discard data read (/dev/null)
						data address ignored */

	SIL24_MAX_CMDS		= 31,

	/* board id */
	BID_SIL3124		= 0,
	BID_SIL3132		= 1,
	BID_SIL3131		= 2,

	/* host flags */
	SIL24_COMMON_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY,
	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */

	IRQ_STAT_4PORTS		= 0xf,
};
238
/* DMA command block layouts: a PRB followed by its SGE list.  The
 * ATAPI variant carries the 16-byte CDB in the space of one SGE, hence
 * one fewer SGE entry. */
struct sil24_ata_block {
	struct sil24_prb prb;
	struct sil24_sge sge[LIBATA_MAX_PRD];
};

struct sil24_atapi_block {
	struct sil24_prb prb;
	u8 cdb[16];
	struct sil24_sge sge[LIBATA_MAX_PRD - 1];
};

/* One slot in the per-port cmd_block array can hold either layout. */
union sil24_cmd_block {
	struct sil24_ata_block ata;
	struct sil24_atapi_block atapi;
};
254
255static struct sil24_cerr_info {
256 unsigned int err_mask, action;
257 const char *desc;
258} sil24_cerr_db[] = {
259 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
260 "device error" },
261 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
262 "device error via D2H FIS" },
263 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
264 "device error via SDB FIS" },
265 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
266 "error in data FIS" },
267 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
268 "failed to transmit command FIS" },
269 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
270 "protocol mismatch" },
271 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
272 "data directon mismatch" },
273 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
274 "ran out of SGEs while writing" },
275 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
276 "ran out of SGEs while reading" },
277 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
278 "invalid data directon for ATAPI CDB" },
279 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
280 "SGT no on qword boundary" },
281 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
282 "PCI target abort while fetching SGT" },
283 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
284 "PCI master abort while fetching SGT" },
285 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
286 "PCI parity error while fetching SGT" },
287 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
288 "PRB not on qword boundary" },
289 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
290 "PCI target abort while fetching PRB" },
291 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
292 "PCI master abort while fetching PRB" },
293 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
294 "PCI parity error while fetching PRB" },
295 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
296 "undefined error while transferring data" },
297 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
298 "PCI target abort while transferring data" },
299 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
300 "PCI master abort while transferring data" },
301 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
302 "PCI parity error while transferring data" },
303 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
304 "FIS received while sending service FIS" },
305};
306
307/*
308 * ap->private_data
309 *
310 * The preview driver always returned 0 for status. We emulate it
311 * here from the previous interrupt.
312 */
313struct sil24_port_priv {
314 union sil24_cmd_block *cmd_block; /* 32 cmd blocks */
315 dma_addr_t cmd_block_dma; /* DMA base addr for them */
316 struct ata_taskfile tf; /* Cached taskfile registers */
317};
318
319/* ap->host_set->private_data */
320struct sil24_host_priv {
321 void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
322 void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
323};
324
/* Forward declarations for the ops tables and PCI driver below. */
static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev);
static u8 sil24_check_status(struct ata_port *ap);
static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static void sil24_irq_clear(struct ata_port *ap);
static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void sil24_freeze(struct ata_port *ap);
static void sil24_thaw(struct ata_port *ap);
static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static void sil24_port_stop(struct ata_port *ap);
static void sil24_host_stop(struct ata_host_set *host_set);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int sil24_pci_device_resume(struct pci_dev *pdev);
343
/* PCI IDs; driver_data selects the sil24_port_info[] entry (BID_*). */
static const struct pci_device_id sil24_pci_tbl[] = {
	{ 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
	{ 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
	{ 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
	{ 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
	{ 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
	{ } /* terminate list */
};

static struct pci_driver sil24_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil24_pci_tbl,
	.probe			= sil24_init_one,
	.remove			= ata_pci_remove_one, /* safe? */
	.suspend		= ata_pci_device_suspend,
	.resume			= sil24_pci_device_resume,
};
361
/* SCSI host template; mostly stock libata hooks, with the queue depth
 * sized to the controller's 31 command slots. */
static struct scsi_host_template sil24_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= SIL24_MAX_CMDS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};
382
/* libata port operations.  Note check_status/check_altstatus both
 * return the cached taskfile command byte — there is no classic
 * taskfile on this controller. */
static const struct ata_port_operations sil24_ops = {
	.port_disable		= ata_port_disable,

	.dev_config		= sil24_dev_config,

	.check_status		= sil24_check_status,
	.check_altstatus	= sil24_check_status,
	.dev_select		= ata_noop_dev_select,

	.tf_read		= sil24_tf_read,

	.qc_prep		= sil24_qc_prep,
	.qc_issue		= sil24_qc_issue,

	.irq_handler		= sil24_interrupt,
	.irq_clear		= sil24_irq_clear,

	.scr_read		= sil24_scr_read,
	.scr_write		= sil24_scr_write,

	.freeze			= sil24_freeze,
	.thaw			= sil24_thaw,
	.error_handler		= sil24_error_handler,
	.post_internal_cmd	= sil24_post_internal_cmd,

	.port_start		= sil24_port_start,
	.port_stop		= sil24_port_stop,
	.host_stop		= sil24_host_stop,
};
412
413/*
414 * Use bits 30-31 of host_flags to encode available port numbers.
415 * Current maxium is 4.
416 */
417#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
418#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
419
/* Per-board-id port info, indexed by BID_* from the PCI table. */
static struct ata_port_info sil24_port_info[] = {
	/* sil_3124 */
	{
		.sht		= &sil24_sht,
		.host_flags	= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
				  SIL24_FLAG_PCIX_IRQ_WOC,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil24_ops,
	},
	/* sil_3132 */
	{
		.sht		= &sil24_sht,
		.host_flags	= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil24_ops,
	},
	/* sil_3131/sil_3531 */
	{
		.sht		= &sil24_sht,
		.host_flags	= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= 0x3f,			/* udma0-5 */
		.port_ops	= &sil24_ops,
	},
};
450
/* Map a libata tag to a controller command slot; libata's internal
 * tag is folded onto slot 0. */
static int sil24_tag(int tag)
{
	return ata_tag_internal(tag) ? 0 : tag;
}
457
458static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
459{
460 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
461
462 if (dev->cdb_len == 16)
463 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
464 else
465 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
466}
467
468static inline void sil24_update_tf(struct ata_port *ap)
469{
470 struct sil24_port_priv *pp = ap->private_data;
471 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
472 struct sil24_prb __iomem *prb = port;
473 u8 fis[6 * 4];
474
475 memcpy_fromio(fis, prb->fis, 6 * 4);
476 ata_tf_from_fis(fis, &pp->tf);
477}
478
479static u8 sil24_check_status(struct ata_port *ap)
480{
481 struct sil24_port_priv *pp = ap->private_data;
482 return pp->tf.command;
483}
484
/* SCR index -> 32-bit word offset within the port's SCR block
 * (PORT_SCONTROL..PORT_SACTIVE are consecutive registers). */
static int sil24_scr_map[] = {
	[SCR_CONTROL]	= 0,
	[SCR_STATUS]	= 1,
	[SCR_ERROR]	= 2,
	[SCR_ACTIVE]	= 3,
};
491
492static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
493{
494 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
495 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
496 void __iomem *addr;
497 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
498 return readl(scr_addr + sil24_scr_map[sc_reg] * 4);
499 }
500 return 0xffffffffU;
501}
502
503static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
504{
505 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
506 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
507 void __iomem *addr;
508 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
509 writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
510 }
511}
512
513static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
514{
515 struct sil24_port_priv *pp = ap->private_data;
516 *tf = pp->tf;
517}
518
/* Put the port into a known state: request port-init via CTRL_STAT and
 * wait for the INIT handshake to finish and RDY to assert.  Returns 0
 * on success, -EIO if the port does not become ready. */
static int sil24_init_port(struct ata_port *ap)
{
	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
	u32 tmp;

	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
	ata_wait_register(port + PORT_CTRL_STAT,
			  PORT_CS_INIT, PORT_CS_INIT, 10, 100);
	tmp = ata_wait_register(port + PORT_CTRL_STAT,
				PORT_CS_RDY, 0, 10, 100);

	/* ready only when INIT has cleared and RDY is set */
	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
		return -EIO;
	return 0;
}
534
/* Softreset: issue an SRST request PRB through LRAM slot 0, wait for
 * completion/error, then classify the device from the returned FIS.
 * Returns 0 on success (including no device), -EIO on failure. */
static int sil24_softreset(struct ata_port *ap, unsigned int *class)
{
	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
	struct sil24_port_priv *pp = ap->private_data;
	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
	dma_addr_t paddr = pp->cmd_block_dma;
	u32 mask, irq_stat;
	const char *reason;

	DPRINTK("ENTER\n");

	if (ata_port_offline(ap)) {
		DPRINTK("PHY reports no device\n");
		*class = ATA_DEV_NONE;
		goto out;
	}

	/* put the port into known state */
	if (sil24_init_port(ap)) {
		reason ="port not ready";
		goto err;
	}

	/* do SRST */
	prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
	prb->fis[1] = 0; /* no PM yet */

	/* activate slot 0: low then high dword of the PRB's DMA address */
	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);

	/* wait on the raw (unmasked) copies of COMPLETE/ERROR */
	mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
	irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
				     100, ATA_TMOUT_BOOT / HZ * 1000);

	writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
	irq_stat >>= PORT_IRQ_RAW_SHIFT;

	if (!(irq_stat & PORT_IRQ_COMPLETE)) {
		if (irq_stat & PORT_IRQ_ERROR)
			reason = "SRST command error";
		else
			reason = "timeout";
		goto err;
	}

	/* classify from the signature FIS cached by sil24_update_tf */
	sil24_update_tf(ap);
	*class = ata_dev_classify(&pp->tf);

	if (*class == ATA_DEV_UNKNOWN)
		*class = ATA_DEV_NONE;

 out:
	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 err:
	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
	return -EIO;
}
594
/*
 * sil24_hardreset - reset the port/device via PORT_CS_DEV_RST
 * @ap: port to reset
 * @class: device class out parameter (unused here; follow-up
 *         softreset performs classification)
 *
 * Returns 0 when the link is confirmed offline, -EAGAIN to request a
 * follow-up softreset on success, -EIO on failure.
 */
static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
{
	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
	const char *reason;
	int tout_msec, rc;
	u32 tmp;

	/* sil24 does the right thing(tm) without any protection */
	sata_set_spd(ap);

	/* give an online device more time to come back */
	tout_msec = 100;
	if (ata_port_online(ap))
		tout_msec = 5000;

	/* issue device reset and wait for it to self-clear */
	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
	tmp = ata_wait_register(port + PORT_CTRL_STAT,
				PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);

	/* SStatus oscillates between zero and valid status after
	 * DEV_RST, debounce it.
	 */
	rc = sata_phy_debounce(ap, sata_deb_timing_long);
	if (rc) {
		reason = "PHY debouncing failed";
		goto err;
	}

	/* DEV_RST still set: only OK if nothing is attached */
	if (tmp & PORT_CS_DEV_RST) {
		if (ata_port_offline(ap))
			return 0;
		reason = "link not ready";
		goto err;
	}

	/* Sil24 doesn't store signature FIS after hardreset, so we
	 * can't wait for BSY to clear.  Some devices take a long time
	 * to get ready and those devices will choke if we don't wait
	 * for BSY clearance here.  Tell libata to perform follow-up
	 * softreset.
	 */
	return -EAGAIN;

 err:
	ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
	return -EIO;
}
641
642static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
643 struct sil24_sge *sge)
644{
645 struct scatterlist *sg;
646 unsigned int idx = 0;
647
648 ata_for_each_sg(sg, qc) {
649 sge->addr = cpu_to_le64(sg_dma_address(sg));
650 sge->cnt = cpu_to_le32(sg_dma_len(sg));
651 if (ata_sg_is_last(sg, qc))
652 sge->flags = cpu_to_le32(SGE_TRM);
653 else
654 sge->flags = 0;
655
656 sge++;
657 idx++;
658 }
659}
660
/*
 * sil24_qc_prep - fill the command block for a queued command
 * @qc: command to prepare
 *
 * Selects the ATA or ATAPI command-block layout based on protocol,
 * builds the PRB control word and FIS, and fills the SG table when
 * the command has a DMA mapping.
 */
static void sil24_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	union sil24_cmd_block *cb;
	struct sil24_prb *prb;
	struct sil24_sge *sge;
	u16 ctrl = 0;

	/* each tag owns one command block */
	cb = &pp->cmd_block[sil24_tag(qc->tag)];

	switch (qc->tf.protocol) {
	case ATA_PROT_PIO:
	case ATA_PROT_DMA:
	case ATA_PROT_NCQ:
	case ATA_PROT_NODATA:
		prb = &cb->ata.prb;
		sge = cb->ata.sge;
		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_ATAPI_NODATA:
		prb = &cb->atapi.prb;
		sge = cb->atapi.sge;
		/* zero-pad the CDB area, then copy the actual CDB */
		memset(cb->atapi.cdb, 0, 32);
		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);

		/* data-bearing ATAPI commands need a direction flag */
		if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				ctrl = PRB_CTRL_PACKET_WRITE;
			else
				ctrl = PRB_CTRL_PACKET_READ;
		}
		break;

	default:
		prb = NULL;	/* shut up, gcc */
		sge = NULL;
		BUG();
	}

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(&qc->tf, prb->fis, 0);

	if (qc->flags & ATA_QCFLAG_DMAMAP)
		sil24_fill_sg(qc, sge);
}
709
710static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
711{
712 struct ata_port *ap = qc->ap;
713 struct sil24_port_priv *pp = ap->private_data;
714 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
715 unsigned int tag = sil24_tag(qc->tag);
716 dma_addr_t paddr;
717 void __iomem *activate;
718
719 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
720 activate = port + PORT_CMD_ACTIVATE + tag * 8;
721
722 writel((u32)paddr, activate);
723 writel((u64)paddr >> 32, activate + 4);
724
725 return 0;
726}
727
/* No-op stub kept to satisfy the irq_clear port-ops hook; IRQ status
 * is cleared directly where it is handled.
 */
static void sil24_irq_clear(struct ata_port *ap)
{
	/* unused */
}
732
/* Freeze the port by masking all of its interrupt sources. */
static void sil24_freeze(struct ata_port *ap)
{
	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;

	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
	 * PORT_IRQ_ENABLE instead.
	 */
	writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
}
742
743static void sil24_thaw(struct ata_port *ap)
744{
745 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
746 u32 tmp;
747
748 /* clear IRQ */
749 tmp = readl(port + PORT_IRQ_STAT);
750 writel(tmp, port + PORT_IRQ_STAT);
751
752 /* turn IRQ back on */
753 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
754}
755
/*
 * sil24_error_intr - handle an error/attention interrupt on @ap
 *
 * Reads and clears PORT_IRQ_STAT, records hotplug / unknown-FIS /
 * command-error conditions into the EH info, then either freezes the
 * port (conditions needing reset) or aborts outstanding commands.
 */
static void sil24_error_intr(struct ata_port *ap)
{
	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
	struct ata_eh_info *ehi = &ap->eh_info;
	int freeze = 0;
	u32 irq_stat;

	/* on error, we need to clear IRQ explicitly */
	irq_stat = readl(port + PORT_IRQ_STAT);
	writel(irq_stat, port + PORT_IRQ_STAT);

	/* first, analyze and record host port events */
	ata_ehi_clear_desc(ehi);

	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	/* PHY ready change or device exchange - treat as hotplug */
	if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, ", %s",
				  irq_stat & PORT_IRQ_PHYRDY_CHG ?
				  "PHY RDY changed" : "device exchanged");
		freeze = 1;
	}

	/* unknown FIS - protocol violation, force a softreset */
	if (irq_stat & PORT_IRQ_UNK_FIS) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(ehi , ", unknown FIS");
		freeze = 1;
	}

	/* deal with command error */
	if (irq_stat & PORT_IRQ_ERROR) {
		struct sil24_cerr_info *ci = NULL;
		unsigned int err_mask = 0, action = 0;
		struct ata_queued_cmd *qc;
		u32 cerr;

		/* analyze CMD_ERR */
		cerr = readl(port + PORT_CMD_ERR);
		if (cerr < ARRAY_SIZE(sil24_cerr_db))
			ci = &sil24_cerr_db[cerr];

		if (ci && ci->desc) {
			err_mask |= ci->err_mask;
			action |= ci->action;
			ata_ehi_push_desc(ehi, ", %s", ci->desc);
		} else {
			/* code not in sil24_cerr_db - generic handling */
			err_mask |= AC_ERR_OTHER;
			action |= ATA_EH_SOFTRESET;
			ata_ehi_push_desc(ehi, ", unknown command error %d",
					  cerr);
		}

		/* record error info */
		qc = ata_qc_from_tag(ap, ap->active_tag);
		if (qc) {
			sil24_update_tf(ap);
			qc->err_mask |= err_mask;
		} else
			ehi->err_mask |= err_mask;

		ehi->action |= action;
	}

	/* freeze or abort */
	if (freeze)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
827
828static void sil24_finish_qc(struct ata_queued_cmd *qc)
829{
830 if (qc->flags & ATA_QCFLAG_RESULT_TF)
831 sil24_update_tf(qc->ap);
832}
833
/*
 * sil24_host_intr - per-port interrupt body
 *
 * Dispatches to error handling when the attention bit is set,
 * otherwise completes finished commands from the slot status.
 * Freezes the port if completion bookkeeping is inconsistent.
 */
static inline void sil24_host_intr(struct ata_port *ap)
{
	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
	u32 slot_stat, qc_active;
	int rc;

	slot_stat = readl(port + PORT_SLOT_STAT);

	/* attention bit - hand off to error handling */
	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
		sil24_error_intr(ap);
		return;
	}

	/* PCI-X completion-IRQ-loss errata workaround: ack explicitly */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);

	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
	rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
	if (rc > 0)
		return;
	if (rc < 0) {
		/* controller/driver tag state disagrees - reset via EH */
		struct ata_eh_info *ehi = &ap->eh_info;
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_SOFTRESET;
		ata_port_freeze(ap);
		return;
	}

	/* rc == 0 and no attention: nothing completed */
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
			"(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
			slot_stat, ap->active_tag, ap->sactive);
}
867
868static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
869{
870 struct ata_host_set *host_set = dev_instance;
871 struct sil24_host_priv *hpriv = host_set->private_data;
872 unsigned handled = 0;
873 u32 status;
874 int i;
875
876 status = readl(hpriv->host_base + HOST_IRQ_STAT);
877
878 if (status == 0xffffffff) {
879 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
880 "PCI fault or device removal?\n");
881 goto out;
882 }
883
884 if (!(status & IRQ_STAT_4PORTS))
885 goto out;
886
887 spin_lock(&host_set->lock);
888
889 for (i = 0; i < host_set->n_ports; i++)
890 if (status & (1 << i)) {
891 struct ata_port *ap = host_set->ports[i];
892 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
893 sil24_host_intr(host_set->ports[i]);
894 handled++;
895 } else
896 printk(KERN_ERR DRV_NAME
897 ": interrupt from disabled port %d\n", i);
898 }
899
900 spin_unlock(&host_set->lock);
901 out:
902 return IRQ_RETVAL(handled);
903}
904
905static void sil24_error_handler(struct ata_port *ap)
906{
907 struct ata_eh_context *ehc = &ap->eh_context;
908
909 if (sil24_init_port(ap)) {
910 ata_eh_freeze_port(ap);
911 ehc->i.action |= ATA_EH_HARDRESET;
912 }
913
914 /* perform recovery */
915 ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
916 ata_std_postreset);
917}
918
919static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
920{
921 struct ata_port *ap = qc->ap;
922
923 if (qc->flags & ATA_QCFLAG_FAILED)
924 qc->err_mask |= AC_ERR_OTHER;
925
926 /* make DMA engine forget about the failed command */
927 if (qc->err_mask)
928 sil24_init_port(ap);
929}
930
/* Free the DMA-coherent command-block array allocated in
 * sil24_port_start().
 */
static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
{
	const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;

	dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
}
937
938static int sil24_port_start(struct ata_port *ap)
939{
940 struct device *dev = ap->host_set->dev;
941 struct sil24_port_priv *pp;
942 union sil24_cmd_block *cb;
943 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
944 dma_addr_t cb_dma;
945 int rc = -ENOMEM;
946
947 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
948 if (!pp)
949 goto err_out;
950
951 pp->tf.command = ATA_DRDY;
952
953 cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
954 if (!cb)
955 goto err_out_pp;
956 memset(cb, 0, cb_size);
957
958 rc = ata_pad_alloc(ap, dev);
959 if (rc)
960 goto err_out_pad;
961
962 pp->cmd_block = cb;
963 pp->cmd_block_dma = cb_dma;
964
965 ap->private_data = pp;
966
967 return 0;
968
969err_out_pad:
970 sil24_cblk_free(pp, dev);
971err_out_pp:
972 kfree(pp);
973err_out:
974 return rc;
975}
976
977static void sil24_port_stop(struct ata_port *ap)
978{
979 struct device *dev = ap->host_set->dev;
980 struct sil24_port_priv *pp = ap->private_data;
981
982 sil24_cblk_free(pp, dev);
983 ata_pad_free(ap, dev);
984 kfree(pp);
985}
986
987static void sil24_host_stop(struct ata_host_set *host_set)
988{
989 struct sil24_host_priv *hpriv = host_set->private_data;
990 struct pci_dev *pdev = to_pci_dev(host_set->dev);
991
992 pci_iounmap(pdev, hpriv->host_base);
993 pci_iounmap(pdev, hpriv->port_base);
994 kfree(hpriv);
995}
996
/*
 * sil24_init_controller - bring the controller and all ports into a
 * sane state (shared between probe and resume)
 * @pdev: owning PCI device (for error reporting)
 * @n_ports: number of ports to initialize
 * @host_flags: host flags (checked for SIL24_FLAG_PCIX_IRQ_WOC)
 * @host_base: host-wide register base
 * @port_base: first port's register base
 */
static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
				  unsigned long host_flags,
				  void __iomem *host_base,
				  void __iomem *port_base)
{
	u32 tmp;
	int i;

	/* GPIO off */
	writel(0, host_base + HOST_FLASH_CMD);

	/* clear global reset & mask interrupts during initialization */
	writel(0, host_base + HOST_CTRL);

	/* init ports */
	for (i = 0; i < n_ports; i++) {
		void __iomem *port = port_base + i * PORT_REGS_SIZE;

		/* Initial PHY setting */
		writel(0x20c, port + PORT_PHY_CFG);

		/* Clear port RST */
		tmp = readl(port + PORT_CTRL_STAT);
		if (tmp & PORT_CS_PORT_RST) {
			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
			tmp = ata_wait_register(port + PORT_CTRL_STAT,
						PORT_CS_PORT_RST,
						PORT_CS_PORT_RST, 10, 100);
			/* non-fatal: report and carry on */
			if (tmp & PORT_CS_PORT_RST)
				dev_printk(KERN_ERR, &pdev->dev,
				           "failed to clear port RST\n");
		}

		/* Configure IRQ WoC */
		if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
		else
			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);

		/* Zero error counters. */
		writel(0x8000, port + PORT_DECODE_ERR_THRESH);
		writel(0x8000, port + PORT_CRC_ERR_THRESH);
		writel(0x8000, port + PORT_HSHK_ERR_THRESH);
		writel(0x0000, port + PORT_DECODE_ERR_CNT);
		writel(0x0000, port + PORT_CRC_ERR_CNT);
		writel(0x0000, port + PORT_HSHK_ERR_CNT);

		/* Always use 64bit activation */
		writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);

		/* Clear port multiplier enable and resume bits */
		writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
	}

	/* Turn on interrupts */
	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
}
1054
/*
 * sil24_init_one - PCI probe entry point
 * @pdev: device being probed
 * @ent: matching entry of sil24_pci_tbl (driver_data = board id)
 *
 * Maps BARs 0 (host regs) and 2 (port regs), sets up the probe
 * entry, configures DMA masks (64-bit preferred, 32-bit fallback),
 * applies the PCI-X completion-IRQ-loss errata check, initializes
 * the controller, and registers the ports with libata.
 */
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_id = (unsigned int)ent->driver_data;
	struct ata_port_info *pinfo = &sil24_port_info[board_id];
	struct ata_probe_ent *probe_ent = NULL;
	struct sil24_host_priv *hpriv = NULL;
	void __iomem *host_base = NULL;
	void __iomem *port_base = NULL;
	int i, rc;
	u32 tmp;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto out_disable;

	rc = -ENOMEM;
	/* map mmio registers */
	host_base = pci_iomap(pdev, 0, 0);
	if (!host_base)
		goto out_free;
	port_base = pci_iomap(pdev, 2, 0);
	if (!port_base)
		goto out_free;

	/* allocate & init probe_ent and hpriv */
	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		goto out_free;

	hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		goto out_free;

	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	/* copy board-specific parameters from the port info table */
	probe_ent->sht		= pinfo->sht;
	probe_ent->host_flags	= pinfo->host_flags;
	probe_ent->pio_mask	= pinfo->pio_mask;
	probe_ent->mwdma_mask	= pinfo->mwdma_mask;
	probe_ent->udma_mask	= pinfo->udma_mask;
	probe_ent->port_ops	= pinfo->port_ops;
	probe_ent->n_ports	= SIL24_FLAG2NPORTS(pinfo->host_flags);

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->private_data = hpriv;

	hpriv->host_base = host_base;
	hpriv->port_base = port_base;

	/*
	 * Configure the device
	 */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			/* 64-bit coherent failed; try 32-bit coherent */
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				goto out_free;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			goto out_free;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			goto out_free;
		}
	}

	/* Apply workaround for completion IRQ loss on PCI-X errata */
	if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
		tmp = readl(host_base + HOST_CTRL);
		if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
			dev_printk(KERN_INFO, &pdev->dev,
				   "Applying completion IRQ loss on PCI-X "
				   "errata fix\n");
		else
			probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
	}

	/* each port's registers live PORT_REGS_SIZE apart */
	for (i = 0; i < probe_ent->n_ports; i++) {
		unsigned long portu =
			(unsigned long)port_base + i * PORT_REGS_SIZE;

		probe_ent->port[i].cmd_addr = portu;
		probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;

		ata_std_ports(&probe_ent->port[i]);
	}

	sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
			      host_base, port_base);

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);

	kfree(probe_ent);
	return 0;

 out_free:
	if (host_base)
		pci_iounmap(pdev, host_base);
	if (port_base)
		pci_iounmap(pdev, port_base);
	kfree(probe_ent);
	kfree(hpriv);
	pci_release_regions(pdev);
 out_disable:
	pci_disable_device(pdev);
	return rc;
}
1186
/*
 * sil24_pci_device_resume - PM resume handler
 *
 * Restores PCI state, issues a global controller reset when coming
 * back from suspend, re-runs controller initialization, and resumes
 * the libata host set.  Always returns 0.
 */
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
	struct sil24_host_priv *hpriv = host_set->private_data;

	ata_pci_device_do_resume(pdev);

	/* coming out of suspend: the chip needs a global reset */
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
		writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);

	sil24_init_controller(pdev, host_set->n_ports,
			      host_set->ports[0]->flags,
			      hpriv->host_base, hpriv->port_base);

	ata_host_set_resume(host_set);

	return 0;
}
1205
/* Module init: register the PCI driver. */
static int __init sil24_init(void)
{
	return pci_register_driver(&sil24_pci_driver);
}
1210
/* Module exit: unregister the PCI driver. */
static void __exit sil24_exit(void)
{
	pci_unregister_driver(&sil24_pci_driver);
}
1215
/* module metadata and entry points */
MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);

module_init(sil24_init);
module_exit(sil24_exit);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
new file mode 100644
index 000000000000..ac24f66897f6
--- /dev/null
+++ b/drivers/ata/sata_sis.c
@@ -0,0 +1,347 @@
1/*
2 * sata_sis.c - Silicon Integrated Systems SATA
3 *
4 * Maintained by: Uwe Koziolek
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 Uwe Koziolek
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/device.h>
41#include <scsi/scsi_host.h>
42#include <linux/libata.h>
43
44#define DRV_NAME "sata_sis"
45#define DRV_VERSION "0.6"
46
/* board ids and SiS register-layout constants */
enum {
	sis_180 = 0,			/* board id shared by 180/181/182 */
	SIS_SCR_PCI_BAR = 5,		/* BAR holding IO-mapped SCRs */

	/* PCI configuration registers */
	SIS_GENCTL = 0x54, /* IDE General Control register */
	SIS_SCR_BASE = 0xc0, /* sata0 phy SCR registers */
	SIS180_SATA1_OFS = 0x10, /* offset from sata0->sata1 phy regs */
	SIS182_SATA1_OFS = 0x20, /* offset from sata0->sata1 phy regs */
	SIS_PMR = 0x90, /* port mapping register */
	SIS_PMR_COMBINED = 0x30,

	/* random bits */
	SIS_FLAG_CFGSCR = (1 << 30), /* host flag: SCRs via PCI cfg */

	GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */
};
64
65static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
66static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg);
67static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
68
/* supported devices: SiS 180, 181 and 182 (all map to board id sis_180) */
static const struct pci_device_id sis_pci_tbl[] = {
	{ PCI_VENDOR_ID_SI, 0x180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
	{ PCI_VENDOR_ID_SI, 0x181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
	{ PCI_VENDOR_ID_SI, 0x182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
	{ } /* terminate list */
};
75
76
/* PCI driver hooks; removal is handled by generic libata code */
static struct pci_driver sis_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sis_pci_tbl,
	.probe			= sis_init_one,
	.remove			= ata_pci_remove_one,
};
83
/* SCSI host template - all handlers are the generic libata ones */
static struct scsi_host_template sis_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= ATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
101
/* port operations: standard BMDMA handlers plus SiS-specific SCR access */
static const struct ata_port_operations sis_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup            = ata_bmdma_setup,
	.bmdma_start            = ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_pio_data_xfer,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= sis_scr_read,
	.scr_write		= sis_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_host_stop,
};
128
/* default port parameters; host_flags may gain SIS_FLAG_CFGSCR at probe */
static struct ata_port_info sis_port_info = {
	.sht		= &sis_sht,
	.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x1f,
	.mwdma_mask	= 0x7,
	.udma_mask	= 0x7f,
	.port_ops	= &sis_ops,
};
137
138
139MODULE_AUTHOR("Uwe Koziolek");
140MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller");
141MODULE_LICENSE("GPL");
142MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
143MODULE_VERSION(DRV_VERSION);
144
145static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg, int device)
146{
147 unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
148
149 if (port_no) {
150 if (device == 0x182)
151 addr += SIS182_SATA1_OFS;
152 else
153 addr += SIS180_SATA1_OFS;
154 }
155
156 return addr;
157}
158
159static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
160{
161 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
162 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
163 u32 val, val2 = 0;
164 u8 pmr;
165
166 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
167 return 0xffffffff;
168
169 pci_read_config_byte(pdev, SIS_PMR, &pmr);
170
171 pci_read_config_dword(pdev, cfg_addr, &val);
172
173 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
174 pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
175
176 return val|val2;
177}
178
179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
180{
181 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
182 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device);
183 u8 pmr;
184
185 if (scr == SCR_ERROR) /* doesn't exist in PCI cfg space */
186 return;
187
188 pci_read_config_byte(pdev, SIS_PMR, &pmr);
189
190 pci_write_config_dword(pdev, cfg_addr, val);
191
192 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
193 pci_write_config_dword(pdev, cfg_addr+0x10, val);
194}
195
196static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
197{
198 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
199 u32 val, val2 = 0;
200 u8 pmr;
201
202 if (sc_reg > SCR_CONTROL)
203 return 0xffffffffU;
204
205 if (ap->flags & SIS_FLAG_CFGSCR)
206 return sis_scr_cfg_read(ap, sc_reg);
207
208 pci_read_config_byte(pdev, SIS_PMR, &pmr);
209
210 val = inl(ap->ioaddr.scr_addr + (sc_reg * 4));
211
212 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
213 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
214
215 return val | val2;
216}
217
218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
219{
220 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
221 u8 pmr;
222
223 if (sc_reg > SCR_CONTROL)
224 return;
225
226 pci_read_config_byte(pdev, SIS_PMR, &pmr);
227
228 if (ap->flags & SIS_FLAG_CFGSCR)
229 sis_scr_cfg_write(ap, sc_reg, val);
230 else {
231 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
232 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
233 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
234 }
235}
236
/*
 * sis_init_one - PCI probe entry point
 * @pdev: device being probed
 * @ent: matching sis_pci_tbl entry
 *
 * Enables the device, decides whether SCRs are accessed through IO
 * space or PCI config space, detects SATA vs combined mode (which
 * determines the second port's SCR offset), and registers the ports
 * with libata.
 */
static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent = NULL;
	int rc;
	u32 genctl;
	struct ata_port_info *ppi;
	int pci_dev_busy = 0;
	u8 pmr;
	u8 port2_start;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	ppi = &sis_port_info;
	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	/* check and see if the SCRs are in IO space or PCI cfg space */
	pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
	if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
		probe_ent->host_flags |= SIS_FLAG_CFGSCR;

	/* if hardware thinks SCRs are in IO space, but there are
	 * no IO resources assigned, change to PCI cfg space.
	 */
	if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) &&
	    ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
	     (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
		genctl &= ~GENCTL_IOMAPPED_SCR;
		pci_write_config_dword(pdev, SIS_GENCTL, genctl);
		probe_ent->host_flags |= SIS_FLAG_CFGSCR;
	}

	/* mode detection decides the second port's SCR offset */
	pci_read_config_byte(pdev, SIS_PMR, &pmr);
	if (ent->device != 0x182) {
		if ((pmr & SIS_PMR_COMBINED) == 0) {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 180/181 chipset in SATA mode\n");
			port2_start = 64;
		}
		else {
			dev_printk(KERN_INFO, &pdev->dev,
				   "Detected SiS 180/181 chipset in combined mode\n");
			port2_start=0;
		}
	}
	else {
		dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n");
		port2_start = 0x20;
	}

	/* IO-mapped SCRs: point both ports at the SCR BAR */
	if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) {
		probe_ent->port[0].scr_addr =
			pci_resource_start(pdev, SIS_SCR_PCI_BAR);
		probe_ent->port[1].scr_addr =
			pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start;
	}

	pci_set_master(pdev);
	pci_intx(pdev, 1);

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_regions:
	pci_release_regions(pdev);

err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;

}
334
/* Module init: register the PCI driver. */
static int __init sis_init(void)
{
	return pci_register_driver(&sis_pci_driver);
}
339
/* Module exit: unregister the PCI driver. */
static void __exit sis_exit(void)
{
	pci_unregister_driver(&sis_pci_driver);
}
344
345module_init(sis_init);
346module_exit(sis_exit);
347
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
new file mode 100644
index 000000000000..baf259a966d0
--- /dev/null
+++ b/drivers/ata/sata_svw.c
@@ -0,0 +1,508 @@
1/*
2 * sata_svw.c - ServerWorks / Apple K2 SATA
3 *
4 * Maintained by: Benjamin Herrenschmidt <benh@kernel.crashing.org> and
5 * Jeff Garzik <jgarzik@pobox.com>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 *
11 * Bits from Jeff Garzik, Copyright RedHat, Inc.
12 *
13 * This driver probably works with non-Apple versions of the
14 * Broadcom chipset...
15 *
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; see the file COPYING. If not, write to
29 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
30 *
31 *
32 * libata documentation is available via 'make {ps|pdf}docs',
33 * as Documentation/DocBook/libata.*
34 *
35 * Hardware documentation available under NDA.
36 *
37 */
38
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
46#include <linux/device.h>
47#include <scsi/scsi_host.h>
48#include <linux/libata.h>
49
50#ifdef CONFIG_PPC_OF
51#include <asm/prom.h>
52#include <asm/pci-bridge.h>
53#endif /* CONFIG_PPC_OF */
54
55#define DRV_NAME "sata_svw"
56#define DRV_VERSION "2.0"
57
/* Per-port MMIO register layout of the K2/Frodo SATA controller.
 * All offsets are relative to a port's base; ports are spaced
 * K2_SATA_PORT_OFFSET apart within BAR 5.
 */
enum {
	/* Taskfile registers offsets */
	K2_SATA_TF_CMD_OFFSET	= 0x00,
	K2_SATA_TF_DATA_OFFSET	= 0x00,
	K2_SATA_TF_ERROR_OFFSET	= 0x04,
	K2_SATA_TF_NSECT_OFFSET	= 0x08,
	K2_SATA_TF_LBAL_OFFSET	= 0x0c,
	K2_SATA_TF_LBAM_OFFSET	= 0x10,
	K2_SATA_TF_LBAH_OFFSET	= 0x14,
	K2_SATA_TF_DEVICE_OFFSET	= 0x18,
	K2_SATA_TF_CMDSTAT_OFFSET	= 0x1c,
	K2_SATA_TF_CTL_OFFSET	= 0x20,

	/* DMA base */
	K2_SATA_DMA_CMD_OFFSET	= 0x30,

	/* SCRs base */
	K2_SATA_SCR_STATUS_OFFSET	= 0x40,
	K2_SATA_SCR_ERROR_OFFSET	= 0x44,
	K2_SATA_SCR_CONTROL_OFFSET	= 0x48,

	/* Others */
	K2_SATA_SICR1_OFFSET	= 0x80,	/* interface control 1 (magic Seagate bit lives here) */
	K2_SATA_SICR2_OFFSET	= 0x84,	/* interface control 2 */
	K2_SATA_SIM_OFFSET	= 0x88,	/* interrupt mask */

	/* Port stride */
	K2_SATA_PORT_OFFSET	= 0x100,
};
87
88static u8 k2_stat_check_status(struct ata_port *ap);
89
90
91static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
92{
93 if (sc_reg > SCR_CONTROL)
94 return 0xffffffffU;
95 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
96}
97
98
99static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
100 u32 val)
101{
102 if (sc_reg > SCR_CONTROL)
103 return;
104 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
105}
106
107
/* Load a taskfile into the K2's shadow registers.
 *
 * Unlike the generic helper, this controller wants 16-bit writes: the
 * low byte carries the current value and the high byte the hob (LBA48)
 * value, so an LBA48 taskfile is loaded with a single writew per
 * register pair.  Order matters: control first, then addresses, then
 * the device register, with idle waits around the control update.
 */
static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* update device control only when it changed, and let the
	 * device settle before touching the rest of the taskfile */
	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		/* 16-bit write: hob byte in bits 15:8, current byte in 7:0 */
		writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
		writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
		writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
		writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
		writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
	} else if (is_addr) {
		writew(tf->feature, ioaddr->feature_addr);
		writew(tf->nsect, ioaddr->nsect_addr);
		writew(tf->lbal, ioaddr->lbal_addr);
		writew(tf->lbam, ioaddr->lbam_addr);
		writew(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		writeb(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}
137
138
/* Read the taskfile back from the K2's shadow registers.
 *
 * Mirror image of k2_sata_tf_load: 16-bit reads return the current
 * byte in bits 7:0 and the hob (LBA48) byte in bits 15:8, so one readw
 * per register yields both halves.  The status register is read first
 * (via k2_stat_check_status) as that latches/clears device state.
 */
static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u16 nsect, lbal, lbam, lbah, feature;

	tf->command = k2_stat_check_status(ap);
	tf->device = readw(ioaddr->device_addr);
	feature = readw(ioaddr->error_addr);
	nsect = readw(ioaddr->nsect_addr);
	lbal = readw(ioaddr->lbal_addr);
	lbam = readw(ioaddr->lbam_addr);
	lbah = readw(ioaddr->lbah_addr);

	/* low bytes are the "current" taskfile values */
	tf->feature = feature;
	tf->nsect = nsect;
	tf->lbal = lbal;
	tf->lbam = lbam;
	tf->lbah = lbah;

	/* high bytes carry the hob values for LBA48 commands */
	if (tf->flags & ATA_TFLAG_LBA48) {
		tf->hob_feature = feature >> 8;
		tf->hob_nsect = nsect >> 8;
		tf->hob_lbal = lbal >> 8;
		tf->hob_lbam = lbam >> 8;
		tf->hob_lbah = lbah >> 8;
	}
}
166
167/**
168 * k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
169 * @qc: Info associated with this ATA transaction.
170 *
171 * LOCKING:
172 * spin_lock_irqsave(host_set lock)
173 */
174
175static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
176{
177 struct ata_port *ap = qc->ap;
178 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
179 u8 dmactl;
180 void *mmio = (void *) ap->ioaddr.bmdma_addr;
181 /* load PRD table addr. */
182 mb(); /* make sure PRD table writes are visible to controller */
183 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
184
185 /* specify data direction, triple-check start bit is clear */
186 dmactl = readb(mmio + ATA_DMA_CMD);
187 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
188 if (!rw)
189 dmactl |= ATA_DMA_WR;
190 writeb(dmactl, mmio + ATA_DMA_CMD);
191
192 /* issue r/w command if this is not a ATA DMA command*/
193 if (qc->tf.protocol != ATA_PROT_DMA)
194 ap->ops->exec_command(ap, &qc->tf);
195}
196
197/**
198 * k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
199 * @qc: Info associated with this ATA transaction.
200 *
201 * LOCKING:
202 * spin_lock_irqsave(host_set lock)
203 */
204
205static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
206{
207 struct ata_port *ap = qc->ap;
208 void *mmio = (void *) ap->ioaddr.bmdma_addr;
209 u8 dmactl;
210
211 /* start host DMA transaction */
212 dmactl = readb(mmio + ATA_DMA_CMD);
213 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
214 /* There is a race condition in certain SATA controllers that can
215 be seen when the r/w command is given to the controller before the
216 host DMA is started. On a Read command, the controller would initiate
217 the command to the drive even before it sees the DMA start. When there
218 are very fast drives connected to the controller, or when the data request
219 hits in the drive cache, there is the possibility that the drive returns a part
220 or all of the requested data to the controller before the DMA start is issued.
221 In this case, the controller would become confused as to what to do with the data.
222 In the worst case when all the data is returned back to the controller, the
223 controller could hang. In other cases it could return partial data returning
224 in data corruption. This problem has been seen in PPC systems and can also appear
225 on an system with very fast disks, where the SATA controller is sitting behind a
226 number of bridges, and hence there is significant latency between the r/w command
227 and the start command. */
228 /* issue r/w command if the access is to ATA*/
229 if (qc->tf.protocol == ATA_PROT_DMA)
230 ap->ops->exec_command(ap, &qc->tf);
231}
232
233
234static u8 k2_stat_check_status(struct ata_port *ap)
235{
236 return readl((void *) ap->ioaddr.status_addr);
237}
238
239#ifdef CONFIG_PPC_OF
240/*
241 * k2_sata_proc_info
242 * inout : decides on the direction of the dataflow and the meaning of the
243 * variables
244 * buffer: If inout==FALSE data is being written to it else read from it
245 * *start: If inout==FALSE start of the valid data in the buffer
246 * offset: If inout==FALSE offset from the beginning of the imaginary file
247 * from which we start writing into the buffer
248 * length: If inout==FALSE max number of bytes to be written into the buffer
249 * else number of bytes in the buffer
250 */
251static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
252 off_t offset, int count, int inout)
253{
254 struct ata_port *ap;
255 struct device_node *np;
256 int len, index;
257
258 /* Find the ata_port */
259 ap = ata_shost_to_port(shost);
260 if (ap == NULL)
261 return 0;
262
263 /* Find the OF node for the PCI device proper */
264 np = pci_device_to_OF_node(to_pci_dev(ap->host_set->dev));
265 if (np == NULL)
266 return 0;
267
268 /* Match it to a port node */
269 index = (ap == ap->host_set->ports[0]) ? 0 : 1;
270 for (np = np->child; np != NULL; np = np->sibling) {
271 u32 *reg = (u32 *)get_property(np, "reg", NULL);
272 if (!reg)
273 continue;
274 if (index == *reg)
275 break;
276 }
277 if (np == NULL)
278 return 0;
279
280 len = sprintf(page, "devspec: %s\n", np->full_name);
281
282 return len;
283}
284#endif /* CONFIG_PPC_OF */
285
286
/* SCSI host template: stock libata glue, plus the PPC-only proc_info
 * hook that reports the Open Firmware device path. */
static struct scsi_host_template k2_sata_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
#ifdef CONFIG_PPC_OF
	.proc_info		= k2_sata_proc_info,
#endif
	.bios_param		= ata_std_bios_param,
};
307
308
/* libata port operations: custom taskfile load/read and BMDMA
 * setup/start (16-bit shadow registers and the DMA-start errata);
 * everything else is the generic MMIO/BMDMA machinery. */
static const struct ata_port_operations k2_sata_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= k2_sata_tf_load,
	.tf_read		= k2_sata_tf_read,
	.check_status		= k2_stat_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= k2_bmdma_setup_mmio,
	.bmdma_start		= k2_bmdma_start_mmio,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_mmio_data_xfer,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= k2_sata_scr_read,
	.scr_write		= k2_sata_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};
335
/* Fill in one port's ioaddr table from its MMIO base using the
 * K2_SATA_* register offsets.  Data/cmd and error/feature (and
 * status/command, altstatus/ctl) share registers, as usual for ATA. */
static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
{
	port->cmd_addr		= base + K2_SATA_TF_CMD_OFFSET;
	port->data_addr		= base + K2_SATA_TF_DATA_OFFSET;
	port->feature_addr	=
	port->error_addr	= base + K2_SATA_TF_ERROR_OFFSET;
	port->nsect_addr	= base + K2_SATA_TF_NSECT_OFFSET;
	port->lbal_addr		= base + K2_SATA_TF_LBAL_OFFSET;
	port->lbam_addr		= base + K2_SATA_TF_LBAM_OFFSET;
	port->lbah_addr		= base + K2_SATA_TF_LBAH_OFFSET;
	port->device_addr	= base + K2_SATA_TF_DEVICE_OFFSET;
	port->command_addr	=
	port->status_addr	= base + K2_SATA_TF_CMDSTAT_OFFSET;
	port->altstatus_addr	=
	port->ctl_addr		= base + K2_SATA_TF_CTL_OFFSET;
	port->bmdma_addr	= base + K2_SATA_DMA_CMD_OFFSET;
	port->scr_addr		= base + K2_SATA_SCR_STATUS_OFFSET;
}
354
355
356static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
357{
358 static int printed_version;
359 struct ata_probe_ent *probe_ent = NULL;
360 unsigned long base;
361 void __iomem *mmio_base;
362 int pci_dev_busy = 0;
363 int rc;
364 int i;
365
366 if (!printed_version++)
367 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
368
369 /*
370 * If this driver happens to only be useful on Apple's K2, then
371 * we should check that here as it has a normal Serverworks ID
372 */
373 rc = pci_enable_device(pdev);
374 if (rc)
375 return rc;
376 /*
377 * Check if we have resources mapped at all (second function may
378 * have been disabled by firmware)
379 */
380 if (pci_resource_len(pdev, 5) == 0)
381 return -ENODEV;
382
383 /* Request PCI regions */
384 rc = pci_request_regions(pdev, DRV_NAME);
385 if (rc) {
386 pci_dev_busy = 1;
387 goto err_out;
388 }
389
390 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
391 if (rc)
392 goto err_out_regions;
393 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
394 if (rc)
395 goto err_out_regions;
396
397 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
398 if (probe_ent == NULL) {
399 rc = -ENOMEM;
400 goto err_out_regions;
401 }
402
403 memset(probe_ent, 0, sizeof(*probe_ent));
404 probe_ent->dev = pci_dev_to_dev(pdev);
405 INIT_LIST_HEAD(&probe_ent->node);
406
407 mmio_base = pci_iomap(pdev, 5, 0);
408 if (mmio_base == NULL) {
409 rc = -ENOMEM;
410 goto err_out_free_ent;
411 }
412 base = (unsigned long) mmio_base;
413
414 /* Clear a magic bit in SCR1 according to Darwin, those help
415 * some funky seagate drives (though so far, those were already
416 * set by the firmware on the machines I had access to)
417 */
418 writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
419 mmio_base + K2_SATA_SICR1_OFFSET);
420
421 /* Clear SATA error & interrupts we don't use */
422 writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
423 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
424
425 probe_ent->sht = &k2_sata_sht;
426 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
427 ATA_FLAG_MMIO;
428 probe_ent->port_ops = &k2_sata_ops;
429 probe_ent->n_ports = 4;
430 probe_ent->irq = pdev->irq;
431 probe_ent->irq_flags = IRQF_SHARED;
432 probe_ent->mmio_base = mmio_base;
433
434 /* We don't care much about the PIO/UDMA masks, but the core won't like us
435 * if we don't fill these
436 */
437 probe_ent->pio_mask = 0x1f;
438 probe_ent->mwdma_mask = 0x7;
439 probe_ent->udma_mask = 0x7f;
440
441 /* different controllers have different number of ports - currently 4 or 8 */
442 /* All ports are on the same function. Multi-function device is no
443 * longer available. This should not be seen in any system. */
444 for (i = 0; i < ent->driver_data; i++)
445 k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET);
446
447 pci_set_master(pdev);
448
449 /* FIXME: check ata_device_add return value */
450 ata_device_add(probe_ent);
451 kfree(probe_ent);
452
453 return 0;
454
455err_out_free_ent:
456 kfree(probe_ent);
457err_out_regions:
458 pci_release_regions(pdev);
459err_out:
460 if (!pci_dev_busy)
461 pci_disable_device(pdev);
462 return rc;
463}
464
/* 0x240 is device ID for Apple K2 device
 * 0x241 is device ID for Serverworks Frodo4
 * 0x242 is device ID for Serverworks Frodo8
 * 0x24a is device ID for BCM5785 (aka HT1000) HT southbridge integrated SATA
 * controller
 * 0x24b is a further BCM5785 (HT1000) variant
 * Last table field (driver_data) is the number of ports on the part.
 * */
static const struct pci_device_id k2_sata_pci_tbl[] = {
	{ 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ }
};
479
480
/* PCI driver glue; removal is handled by the generic libata helper. */
static struct pci_driver k2_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= k2_sata_pci_tbl,
	.probe			= k2_sata_init_one,
	.remove			= ata_pci_remove_one,
};
487
488
/* Module entry point: register the K2 SATA PCI driver. */
static int __init k2_sata_init(void)
{
	return pci_register_driver(&k2_sata_pci_driver);
}
493
494
/* Module exit point: unregister the driver. */
static void __exit k2_sata_exit(void)
{
	pci_unregister_driver(&k2_sata_pci_driver);
}
499
500
501MODULE_AUTHOR("Benjamin Herrenschmidt");
502MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
503MODULE_LICENSE("GPL");
504MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
505MODULE_VERSION(DRV_VERSION);
506
507module_init(k2_sata_init);
508module_exit(k2_sata_exit);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
new file mode 100644
index 000000000000..0da83cba5c12
--- /dev/null
+++ b/drivers/ata/sata_sx4.c
@@ -0,0 +1,1502 @@
1/*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_sx4"
49#define DRV_VERSION "0.9"
50
51
/* Register offsets, DIMM memory-map layout (chosen by this driver, not
 * by hardware), host-DMA queue sizing, and SPD/I2C constants for the
 * Promise 20621 (FastTrak S150 SX4). */
enum {
	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */

	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */

	PDC_20621_SEQCTL	= 0x400,
	PDC_20621_SEQMASK	= 0x480,
	PDC_20621_GENERAL_CTL	= 0x484,
	PDC_20621_PAGE_SIZE	= (32 * 1024),

	/* chosen, not constant, values; we design our own DIMM mem map */
	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
	PDC_20621_DIMM_BASE	= 0x00200000,
	PDC_20621_DIMM_DATA	= (64 * 1024),
	PDC_DIMM_DATA_STEP	= (256 * 1024),
	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
	PDC_DIMM_HOST_PRD	= (6 * 1024),
	/* per-port packet area: four 128-byte slots inside the window */
	PDC_DIMM_HOST_PKT	= (128 * 0),
	PDC_DIMM_HPKT_PRD	= (128 * 1),
	PDC_DIMM_ATA_PKT	= (128 * 2),
	PDC_DIMM_APKT_PRD	= (128 * 3),
	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
	PDC_PAGE_WINDOW		= 0x40,
	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,

	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */

	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
				  (1<<23),

	board_20621		= 0,	/* FastTrak S150 SX4 */

	PDC_RESET		= (1 << 11), /* HDMA reset */

	PDC_MAX_HDMA		= 32,	/* host-DMA queue depth (power of two) */
	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),

	/* DIMM SPD EEPROM addressing and field offsets (JEDEC SPD) */
	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
	PDC_MAX_DIMM_MODULE		= 0x02,
	PDC_I2C_CONTROL_OFFSET		= 0x48,
	PDC_I2C_ADDR_DATA_OFFSET	= 0x4C,
	PDC_DIMM0_CONTROL_OFFSET	= 0x80,
	PDC_DIMM1_CONTROL_OFFSET	= 0x84,
	PDC_SDRAM_CONTROL_OFFSET	= 0x88,
	PDC_I2C_WRITE			= 0x00000000,
	PDC_I2C_READ			= 0x00000040,
	PDC_I2C_START			= 0x00000080,
	PDC_I2C_MASK_INT		= 0x00000020,
	PDC_I2C_COMPLETE		= 0x00010000,
	PDC_I2C_NO_ACK			= 0x00100000,
	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
	PDC_DIMM_SPD_ROW_NUM		= 3,
	PDC_DIMM_SPD_COLUMN_NUM		= 4,
	PDC_DIMM_SPD_MODULE_ROW		= 5,
	PDC_DIMM_SPD_TYPE		= 11,
	PDC_DIMM_SPD_FRESH_RATE		= 12,
	PDC_DIMM_SPD_BANK_NUM		= 17,
	PDC_DIMM_SPD_CAS_LATENCY	= 18,
	PDC_DIMM_SPD_ATTRIBUTE		= 21,
	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
	PDC_CTL_STATUS			= 0x08,
	PDC_DIMM_WINDOW_CTLR		= 0x0C,
	PDC_TIME_CONTROL		= 0x3C,
	PDC_TIME_PERIOD			= 0x40,
	PDC_TIME_COUNTER		= 0x44,
	PDC_GENERAL_CTLR		= 0x484,
	PCI_PLL_INIT			= 0x8A531824,
	PCI_X_TCOUNT			= 0xEE1E5CFF
};
132
133
/* Per-port private data: staging buffer for the packets/S-G tables
 * copied into the DIMM window, plus a small DMA-coherent packet. */
struct pdc_port_priv {
	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
	u8			*pkt;		/* DMA-coherent packet buffer (128 bytes) */
	dma_addr_t		pkt_dma;	/* bus address of pkt */
};
139
140struct pdc_host_priv {
141 void __iomem *dimm_mmio;
142
143 unsigned int doing_hdma;
144 unsigned int hdma_prod;
145 unsigned int hdma_cons;
146 struct {
147 struct ata_queued_cmd *qc;
148 unsigned int seq;
149 unsigned long pkt_ofs;
150 } hdma[32];
151};
152
153
154static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
155static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
156static void pdc_eng_timeout(struct ata_port *ap);
157static void pdc_20621_phy_reset (struct ata_port *ap);
158static int pdc_port_start(struct ata_port *ap);
159static void pdc_port_stop(struct ata_port *ap);
160static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
161static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
162static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
163static void pdc20621_host_stop(struct ata_host_set *host_set);
164static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
165static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
166static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
167 u32 device, u32 subaddr, u32 *pdata);
168static int pdc20621_prog_dimm0(struct ata_probe_ent *pe);
169static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe);
170#ifdef ATA_VERBOSE_DEBUG
171static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
172 void *psource, u32 offset, u32 size);
173#endif
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap);
177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178
179
/* SCSI host template: entirely generic libata glue for the SX4. */
static struct scsi_host_template pdc_sata_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
197
/* Port operations: the 20621 runs its own packet engine out of DIMM
 * memory, so qc_prep/qc_issue, taskfile load, command execution,
 * interrupt handling and timeout recovery are all chip-specific. */
static const struct ata_port_operations pdc_20621_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= pdc_tf_load_mmio,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= pdc_exec_command_mmio,
	.dev_select		= ata_std_dev_select,
	.phy_reset		= pdc_20621_phy_reset,
	.qc_prep		= pdc20621_qc_prep,
	.qc_issue		= pdc20621_qc_issue_prot,
	.data_xfer		= ata_mmio_data_xfer,
	.eng_timeout		= pdc_eng_timeout,
	.irq_handler		= pdc20621_interrupt,
	.irq_clear		= pdc20621_irq_clear,
	.port_start		= pdc_port_start,
	.port_stop		= pdc_port_stop,
	.host_stop		= pdc20621_host_stop,
};
216
/* Per-board port configuration, indexed by board_* enum. */
static const struct ata_port_info pdc_port_info[] = {
	/* board_20621 */
	{
		.sht		= &pdc_sata_sht,
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
		.pio_mask	= 0x1f, /* pio0-4 */
		.mwdma_mask	= 0x07, /* mwdma0-2 */
		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
		.port_ops	= &pdc_20621_ops,
	},

};
231
/* PCI IDs; driver_data indexes pdc_port_info[]. */
static const struct pci_device_id pdc_sata_pci_tbl[] = {
	{ PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  board_20621 },
	{ }	/* terminate list */
};
237
238
/* PCI driver glue; removal handled by the generic libata helper. */
static struct pci_driver pdc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_sata_pci_tbl,
	.probe			= pdc_sata_init_one,
	.remove			= ata_pci_remove_one,
};
245
246
/* Host teardown: unmap the DIMM window, free host private data, then
 * unmap the main MMIO region (in reverse order of probe setup). */
static void pdc20621_host_stop(struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);
	struct pdc_host_priv *hpriv = host_set->private_data;
	void __iomem *dimm_mmio = hpriv->dimm_mmio;

	pci_iounmap(pdev, dimm_mmio);
	kfree(hpriv);

	pci_iounmap(pdev, host_set->mmio_base);
}
258
259static int pdc_port_start(struct ata_port *ap)
260{
261 struct device *dev = ap->host_set->dev;
262 struct pdc_port_priv *pp;
263 int rc;
264
265 rc = ata_port_start(ap);
266 if (rc)
267 return rc;
268
269 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
270 if (!pp) {
271 rc = -ENOMEM;
272 goto err_out;
273 }
274 memset(pp, 0, sizeof(*pp));
275
276 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
277 if (!pp->pkt) {
278 rc = -ENOMEM;
279 goto err_out_kfree;
280 }
281
282 ap->private_data = pp;
283
284 return 0;
285
286err_out_kfree:
287 kfree(pp);
288err_out:
289 ata_port_stop(ap);
290 return rc;
291}
292
293
/* Per-port teardown: release the resources acquired in pdc_port_start
 * in reverse order, then run the generic port stop. */
static void pdc_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct pdc_port_priv *pp = ap->private_data;

	/* detach private data before freeing it */
	ap->private_data = NULL;
	dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
	kfree(pp);
	ata_port_stop(ap);
}
304
305
/* PHY reset hook: cable type is always SATA on this chip; mark the
 * port live and run the classic bus reset. */
static void pdc_20621_phy_reset (struct ata_port *ap)
{
	VPRINTK("ENTER\n");
	ap->cbl = ATA_CBL_SATA;
	ata_port_probe(ap);
	ata_bus_reset(ap);
}
313
/* Write the single-entry ATA-packet S/G table into the staging buffer
 * at the PDC_DIMM_APKT_PRD slot: one descriptor pointing at the
 * port's data area in DIMM memory, with EOT set.  @tf is currently
 * unused; kept for symmetry with the other packet builders. */
static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
				   unsigned int portno,
				   unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
	u32 *buf32 = (u32 *) buf;

	/* output ATA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);
	VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		       (PDC_DIMM_WINDOW_STEP * portno) +
		       PDC_DIMM_APKT_PRD,
		buf32[dw], buf32[dw + 1]);
}
335
/* Write the single-entry host-DMA S/G table into the staging buffer
 * at the PDC_DIMM_HPKT_PRD slot: one descriptor pointing at the same
 * per-port DIMM data area, with EOT set.  @tf is currently unused. */
static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
				    unsigned int portno,
				    unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
	u32 *buf32 = (u32 *) buf;

	/* output Host DMA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);

	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		       (PDC_DIMM_WINDOW_STEP * portno) +
		       PDC_DIMM_HPKT_PRD,
		buf32[dw], buf32[dw + 1]);
}
357
/* Build the ATA command packet header in the staging buffer at the
 * PDC_DIMM_ATA_PKT slot: direction/flags byte, sequence ids, pointer
 * to the ATA S/G table in DIMM memory, device select and device
 * control register writes.  Returns the buffer offset just past what
 * was written, so the caller can append the taskfile registers. */
static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
					    unsigned int devno, u8 *buf,
					    unsigned int portno)
{
	unsigned int i, dw;
	u32 *buf32 = (u32 *) buf;
	u8 dev_reg;

	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_APKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);

	i = PDC_DIMM_ATA_PKT;

	/*
	 * Set up ATA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		buf[i++] = PDC_PKT_READ;
	else if (tf->protocol == ATA_PROT_NODATA)
		buf[i++] = PDC_PKT_NODATA;
	else
		buf[i++] = 0;
	buf[i++] = 0;			/* reserved */
	buf[i++] = portno + 1;		/* seq. id */
	buf[i++] = 0xff;		/* delay seq. id */

	/* dimm dma S/G, and next-pkt */
	dw = i >> 2;
	if (tf->protocol == ATA_PROT_NODATA)
		buf32[dw] = 0;	/* no data: no S/G table */
	else
		buf32[dw] = cpu_to_le32(dimm_sg);
	buf32[dw + 1] = 0;	/* next-pkt: none */
	i += 8;

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[i++] = dev_reg;

	/* device control register */
	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
	buf[i++] = tf->ctl;

	return i;
}
410
/* Build the host-DMA packet in the staging buffer at the
 * PDC_DIMM_HOST_PKT slot: direction/seq-id word followed by pointers
 * to the host S/G table and the DIMM S/G table; the trailing word is
 * the (unused) next-packet link.  Host-DMA sequence ids occupy 5..8,
 * offset by 4 from the per-port ATA ids. */
static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
				     unsigned int portno)
{
	unsigned int dw;
	u32 tmp, *buf32 = (u32 *) buf;

	unsigned int host_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HOST_PRD;
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HPKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);

	dw = PDC_DIMM_HOST_PKT >> 2;

	/*
	 * Set up Host DMA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		tmp = PDC_PKT_READ;
	else
		tmp = 0;
	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
	tmp |= (0xff << 24);			/* delay seq. id */
	buf32[dw + 0] = cpu_to_le32(tmp);
	buf32[dw + 1] = cpu_to_le32(host_sg);
	buf32[dw + 2] = cpu_to_le32(dimm_sg);
	buf32[dw + 3] = 0;			/* next-pkt: none */

	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
			PDC_DIMM_HOST_PKT,
		buf32[dw + 0],
		buf32[dw + 1],
		buf32[dw + 2],
		buf32[dw + 3]);
}
450
/* Prepare a DMA command: build the real host-memory S/G table from the
 * request's scatterlist, then the host/ATA packets and their DIMM-side
 * S/G tables in the staging buffer, and finally copy everything into
 * this port's DIMM MMIO window for the packet engine to execute. */
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host_set->mmio_base;
	struct pdc_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *dimm_mmio = hpriv->dimm_mmio;
	unsigned int portno = ap->port_no;
	unsigned int i, idx, total_len = 0, sgt_len;
	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

	VPRINTK("ata%u: ENTER\n", ap->id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Build S/G table: one (addr, len) pair per scatterlist entry,
	 * EOT OR-ed into the final length word.
	 */
	idx = 0;
	ata_for_each_sg(sg, qc) {
		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
	sgt_len = idx * 4;

	/*
	 * Build ATA, host DMA packets
	 */
	pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);

	pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
		    PDC_DIMM_HOST_PRD,
		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
}
512
/* Prepare a non-data command: only the ATA packet is needed (no S/G
 * tables, no host-DMA packet); build it and copy the header area into
 * the port's DIMM MMIO window. */
static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host_set->mmio_base;
	struct pdc_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *dimm_mmio = hpriv->dimm_mmio;
	unsigned int portno = ap->port_no;
	unsigned int i;

	VPRINTK("ata%u: ENTER\n", ap->id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}
548
549static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
550{
551 switch (qc->tf.protocol) {
552 case ATA_PROT_DMA:
553 pdc20621_dma_prep(qc);
554 break;
555 case ATA_PROT_NODATA:
556 pdc20621_nodata_prep(qc);
557 break;
558 default:
559 break;
560 }
561}
562
/*
 * __pdc20621_push_hdma - submit a host-DMA packet to the hardware.
 *
 * Arms the sequence-control slot for @seq, then writes the packet's
 * DIMM offset to the host-DMA submit register.  Each write is
 * followed by a read-back to flush PCI posting so the two writes
 * reach the chip in order.
 */
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
				 unsigned int seq,
				 u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct ata_host_set *host_set = ap->host_set;
	void __iomem *mmio = host_set->mmio_base;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}
580
581static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
582 unsigned int seq,
583 u32 pkt_ofs)
584{
585 struct ata_port *ap = qc->ap;
586 struct pdc_host_priv *pp = ap->host_set->private_data;
587 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
588
589 if (!pp->doing_hdma) {
590 __pdc20621_push_hdma(qc, seq, pkt_ofs);
591 pp->doing_hdma = 1;
592 return;
593 }
594
595 pp->hdma[idx].qc = qc;
596 pp->hdma[idx].seq = seq;
597 pp->hdma[idx].pkt_ofs = pkt_ofs;
598 pp->hdma_prod++;
599}
600
601static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
602{
603 struct ata_port *ap = qc->ap;
604 struct pdc_host_priv *pp = ap->host_set->private_data;
605 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
606
607 /* if nothing on queue, we're done */
608 if (pp->hdma_prod == pp->hdma_cons) {
609 pp->doing_hdma = 0;
610 return;
611 }
612
613 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
614 pp->hdma[idx].pkt_ofs);
615 pp->hdma_cons++;
616}
617
#ifdef ATA_VERBOSE_DEBUG
/*
 * pdc20621_dump_hdma - debug helper: dump the first four dwords of
 * this port's host-DMA packet from the DIMM MMIO window.
 * Compiled out (empty inline) unless ATA_VERBOSE_DEBUG is set.
 */
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int port_no = ap->port_no;
	struct pdc_host_priv *hpriv = ap->host_set->private_data;
	void *dimm_mmio = hpriv->dimm_mmio;

	/* locate this port's host packet inside its DIMM window */
	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
	dimm_mmio += PDC_DIMM_HOST_PKT;

	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
}
#else
static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
#endif /* ATA_VERBOSE_DEBUG */
637
/*
 * pdc20621_packet_start - start executing a prepared command.
 *
 * Sequence numbering: seq 1-4 are the per-port ATA engines
 * (port_no + 1); seq 5-8 are the matching host-DMA engines
 * (port_no + 1 + 4).  For a DMA write we must first host-DMA the
 * data into the DIMM, so the host-DMA packet is pushed; for
 * everything else the ATA packet is submitted directly.
 */
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host_set *host_set = ap->host_set;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host_set->mmio_base;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	VPRINTK("ata%u: ENTER\n", ap->id);

	wmb();			/* flush PRD, pkt writes */

	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;	/* select the host-DMA engine for this port */

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_HOST_PKT,
			port_ofs + PDC_DIMM_HOST_PKT,
			seq);
	} else {
		/* arm the sequence slot, then submit the ATA packet;
		 * read-backs flush PCI write posting */
		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

		writel(port_ofs + PDC_DIMM_ATA_PKT,
		       (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_ATA_PKT,
			port_ofs + PDC_DIMM_ATA_PKT,
			seq);
	}
}
680
681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{
683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA:
685 case ATA_PROT_NODATA:
686 pdc20621_packet_start(qc);
687 return 0;
688
689 case ATA_PROT_ATAPI_DMA:
690 BUG();
691 break;
692
693 default:
694 break;
695 }
696
697 return ata_qc_issue_prot(qc);
698}
699
/*
 * pdc20621_host_intr - per-port service for a completed sequence.
 *
 * Every DMA transfer on the 20621 is staged through the on-board
 * DIMM, so a command is a two-step sequence:
 *   read:  (1) ATA drive->DIMM, then (2) host DMA DIMM->host
 *   write: (1) host DMA host->DIMM, then (2) ATA DIMM->drive
 * @doing_hdma tells us which half just completed (true when the
 * interrupting sequence was a host-DMA engine, seq 5-8).
 *
 * Returns 1 if the interrupt was consumed, 0 otherwise.
 */
static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
                                          struct ata_queued_cmd *qc,
					  unsigned int doing_hdma,
					  void __iomem *mmio)
{
	unsigned int port_no = ap->port_no;
	unsigned int port_ofs =
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
	u8 status;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {

		/* step two - DMA from DIMM to host */
		if (doing_hdma) {
			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			/* chain the next queued host-DMA, if any */
			pdc20621_pop_hdma(qc);
		}

		/* step one - exec ATA command */
		else {
			/* drive data has landed in DIMM; now start the
			 * host-DMA engine (seq = port + 1 + 4) */
			u8 seq = (u8) (port_no + 1 + 4);
			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit hdma pkt */
			pdc20621_dump_hdma(qc);
			pdc20621_push_hdma(qc, seq,
					   port_ofs + PDC_DIMM_HOST_PKT);
		}
		handled = 1;

	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */

		/* step one - DMA from host to DIMM */
		if (doing_hdma) {
			/* host data is now in DIMM; start the ATA engine */
			u8 seq = (u8) (port_no + 1);
			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit ata pkt */
			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
			writel(port_ofs + PDC_DIMM_ATA_PKT,
			       (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
			readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		}

		/* step two - execute ATA command */
		else {
			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}
		handled = 1;

	/* command completion, but no data xfer */
	} else if (qc->tf.protocol == ATA_PROT_NODATA) {

		status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		handled = 1;

	} else {
		/* interrupt for a protocol we did not start - count it */
		ap->stats.idle_irq++;
	}

	return handled;
}
781
782static void pdc20621_irq_clear(struct ata_port *ap)
783{
784 struct ata_host_set *host_set = ap->host_set;
785 void __iomem *mmio = host_set->mmio_base;
786
787 mmio += PDC_CHIP0_OFS;
788
789 readl(mmio + PDC_20621_SEQMASK);
790}
791
/*
 * pdc20621_interrupt - top-level interrupt handler.
 *
 * Reads the sequence-interrupt mask (the read also clears the
 * pending interrupts), then walks sequence numbers 1..8: seq 1-4
 * map to the per-port ATA engines and seq 5-8 to the matching
 * host-DMA engines, hence the (i > 4) doing_hdma flag passed to
 * pdc20621_host_intr() and the port_no -= 4 fold below.
 */
static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp, port_no;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	VPRINTK("ENTER\n");

	if (!host_set || !host_set->mmio_base) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	mmio_base = host_set->mmio_base;

	/* reading should also clear interrupts */
	mmio_base += PDC_CHIP0_OFS;
	mask = readl(mmio_base + PDC_20621_SEQMASK);
	VPRINTK("mask == 0x%x\n", mask);

	/* all-ones means the device is gone or not responding */
	if (mask == 0xffffffff) {
		VPRINTK("QUICK EXIT 2\n");
		return IRQ_NONE;
	}
	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask) {
		VPRINTK("QUICK EXIT 3\n");
		return IRQ_NONE;
	}

	spin_lock(&host_set->lock);

	for (i = 1; i < 9; i++) {
		/* seq 1-4 -> ATA engines, seq 5-8 -> host-DMA engines;
		 * both map back to ports 0-3 */
		port_no = i - 1;
		if (port_no > 3)
			port_no -= 4;
		if (port_no >= host_set->n_ports)
			ap = NULL;
		else
			ap = host_set->ports[port_no];
		tmp = mask & (1 << i);
		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
		if (tmp && ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc20621_host_intr(ap, qc, (i > 4),
							      mmio_base);
		}
	}

	spin_unlock(&host_set->lock);

	VPRINTK("mask == 0x%x\n", mask);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
856
/*
 * pdc_eng_timeout - libata error-handler hook for a timed-out
 * command.  Records an error mask appropriate to the protocol,
 * then completes the qc through the EH path.
 *
 * NOTE(review): qc is dereferenced without a NULL check; this
 * presumably relies on eng_timeout only being invoked while a
 * command is active -- confirm against the libata EH contract.
 */
static void pdc_eng_timeout(struct ata_port *ap)
{
	u8 drv_stat;
	struct ata_host_set *host_set = ap->host_set;
	struct ata_queued_cmd *qc;
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(&host_set->lock, flags);

	qc = ata_qc_from_tag(ap, ap->active_tag);

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NODATA:
		ata_port_printk(ap, KERN_ERR, "command timeout\n");
		/* __ac_err_mask: always produce a non-zero error mask */
		qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
		break;

	default:
		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);

		ata_port_printk(ap, KERN_ERR,
				"unknown timeout, cmd 0x%x stat 0x%x\n",
				qc->tf.command, drv_stat);

		qc->err_mask |= ac_err_mask(drv_stat);
		break;
	}

	spin_unlock_irqrestore(&host_set->lock, flags);
	ata_eh_qc_complete(qc);
	DPRINTK("EXIT\n");
}
892
893static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
894{
895 WARN_ON (tf->protocol == ATA_PROT_DMA ||
896 tf->protocol == ATA_PROT_NODATA);
897 ata_tf_load(ap, tf);
898}
899
900
901static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
902{
903 WARN_ON (tf->protocol == ATA_PROT_DMA ||
904 tf->protocol == ATA_PROT_NODATA);
905 ata_exec_command(ap, tf);
906}
907
908
909static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
910{
911 port->cmd_addr = base;
912 port->data_addr = base;
913 port->feature_addr =
914 port->error_addr = base + 0x4;
915 port->nsect_addr = base + 0x8;
916 port->lbal_addr = base + 0xc;
917 port->lbam_addr = base + 0x10;
918 port->lbah_addr = base + 0x14;
919 port->device_addr = base + 0x18;
920 port->command_addr =
921 port->status_addr = base + 0x1c;
922 port->altstatus_addr =
923 port->ctl_addr = base + 0x38;
924}
925
926
#ifdef ATA_VERBOSE_DEBUG
/*
 * pdc20621_get_from_dimm - debug helper: copy @size bytes from DIMM
 * offset @offset into host memory at @psource, paging the chip's
 * 32K DIMM window as needed.
 *
 * NOTE(review): the window-relative offsets and the follow-up copy
 * lengths are divided by 4 while the first copy length (dist) is
 * not -- this looks inconsistent and presumably assumes some
 * dword-addressed access mode; verify against the 20621 DIMM window
 * documentation before relying on this (debug-only) path.
 */
static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
				   u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = pe->mmio_base;
	struct pdc_host_priv *hpriv = pe->private_data;
	void __iomem *dimm_mmio = hpriv->dimm_mmio;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4;	/* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	/* select the window page containing @offset */
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);

	/* first (possibly partial) chunk up to the window boundary */
	offset -= (idx * window_size);
	idx++;
	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
		      dist);

	psource += dist;
	size -= dist;
	/* full windows, one page at a time */
	for (; (long) size >= (long) window_size ;) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      window_size / 4);
		psource += window_size;
		size -= window_size;
		idx ++;
	}

	/* trailing partial window */
	if (size) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      size / 4);
	}
}
#endif
982
983
/*
 * pdc20621_put_to_dimm - copy @size bytes from host memory at
 * @psource into the on-board DIMM at @offset, paging the chip's 32K
 * DIMM window as needed.  After each copy, PDC_GENERAL_CTLR is
 * written to force the host FIFO dump.
 *
 * NOTE(review): as in pdc20621_get_from_dimm(), offsets and the
 * follow-up copy lengths are divided by 4 while the first chunk
 * length (dist) is not -- verify against the 20621 DIMM window
 * documentation.
 */
static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
				 u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = pe->mmio_base;
	struct pdc_host_priv *hpriv = pe->private_data;
	void __iomem *dimm_mmio = hpriv->dimm_mmio;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4;	/* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	/* select the window page containing @offset */
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);
	/* first (possibly partial) chunk up to the window boundary */
	offset -= (idx * window_size);
	idx++;
	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);

	psource += dist;
	size -= dist;
	/* full windows, one page at a time */
	for (; (long) size >= (long) window_size ;) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, window_size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		psource += window_size;
		size -= window_size;
		idx ++;
	}

	/* trailing partial window */
	if (size) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
	}
}
1033
1034
/*
 * pdc20621_i2c_read - read one byte from a device on the chip's I2C
 * bus (used for DIMM SPD EEPROM access).
 *
 * @device:  I2C device address
 * @subaddr: register/byte index within the device
 * @pdata:   receives the byte read
 *
 * Returns 1 on success with the byte stored in *pdata.  Returns 0
 * on timeout, in which case *pdata is left UNMODIFIED -- callers
 * must not rely on its contents after a failure.
 */
static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
				      u32 subaddr, u32 *pdata)
{
	void __iomem *mmio = pe->mmio_base;
	u32 i2creg = 0;
	u32 status;
	u32 count =0;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/* device address in bits 31:24, subaddress in 23:16 */
	i2creg |= device << 24;
	i2creg |= subaddr << 16;

	/* Set the device and subaddress */
	writel(i2creg, mmio + PDC_I2C_ADDR_DATA_OFFSET);
	readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);

	/* Write Control to perform read operation, mask int */
	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
	       mmio + PDC_I2C_CONTROL_OFFSET);

	/* busy-poll for completion, up to 1001 iterations */
	for (count = 0; count <= 1000; count ++) {
		status = readl(mmio + PDC_I2C_CONTROL_OFFSET);
		if (status & PDC_I2C_COMPLETE) {
			status = readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
			break;
		} else if (count == 1000)
			return 0;
	}

	/* received byte lives in bits 15:8 of the data register */
	*pdata = (status >> 8) & 0x000000ff;
	return 1;
}
1069
1070
1071static int pdc20621_detect_dimm(struct ata_probe_ent *pe)
1072{
1073 u32 data=0 ;
1074 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1075 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1076 if (data == 100)
1077 return 100;
1078 } else
1079 return 0;
1080
1081 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1082 if(data <= 0x75)
1083 return 133;
1084 } else
1085 return 0;
1086
1087 return 0;
1088}
1089
1090
1091static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1092{
1093 u32 spd0[50];
1094 u32 data = 0;
1095 int size, i;
1096 u8 bdimmsize;
1097 void __iomem *mmio = pe->mmio_base;
1098 static const struct {
1099 unsigned int reg;
1100 unsigned int ofs;
1101 } pdc_i2c_read_data [] = {
1102 { PDC_DIMM_SPD_TYPE, 11 },
1103 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1104 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1105 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1106 { PDC_DIMM_SPD_ROW_NUM, 3 },
1107 { PDC_DIMM_SPD_BANK_NUM, 17 },
1108 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1109 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1110 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1111 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1112 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1113 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1114 };
1115
1116 /* hard-code chip #0 */
1117 mmio += PDC_CHIP0_OFS;
1118
1119 for(i=0; i<ARRAY_SIZE(pdc_i2c_read_data); i++)
1120 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1121 pdc_i2c_read_data[i].reg,
1122 &spd0[pdc_i2c_read_data[i].ofs]);
1123
1124 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1125 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1126 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1127 data |= (((((spd0[29] > spd0[28])
1128 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1129 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1130
1131 if (spd0[18] & 0x08)
1132 data |= ((0x03) << 14);
1133 else if (spd0[18] & 0x04)
1134 data |= ((0x02) << 14);
1135 else if (spd0[18] & 0x01)
1136 data |= ((0x01) << 14);
1137 else
1138 data |= (0 << 14);
1139
1140 /*
1141 Calculate the size of bDIMMSize (power of 2) and
1142 merge the DIMM size by program start/end address.
1143 */
1144
1145 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1146 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1147 data |= (((size / 16) - 1) << 16);
1148 data |= (0 << 23);
1149 data |= 8;
1150 writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET);
1151 readl(mmio + PDC_DIMM0_CONTROL_OFFSET);
1152 return size;
1153}
1154
1155
1156static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1157{
1158 u32 data, spd0;
1159 int error, i;
1160 void __iomem *mmio = pe->mmio_base;
1161
1162 /* hard-code chip #0 */
1163 mmio += PDC_CHIP0_OFS;
1164
1165 /*
1166 Set To Default : DIMM Module Global Control Register (0x022259F1)
1167 DIMM Arbitration Disable (bit 20)
1168 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1169 Refresh Enable (bit 17)
1170 */
1171
1172 data = 0x022259F1;
1173 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1174 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1175
1176 /* Turn on for ECC */
1177 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1178 PDC_DIMM_SPD_TYPE, &spd0);
1179 if (spd0 == 0x02) {
1180 data |= (0x01 << 16);
1181 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1182 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1183 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1184 }
1185
1186 /* DIMM Initialization Select/Enable (bit 18/19) */
1187 data &= (~(1<<18));
1188 data |= (1<<19);
1189 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1190
1191 error = 1;
1192 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1193 data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1194 if (!(data & (1<<19))) {
1195 error = 0;
1196 break;
1197 }
1198 msleep(i*100);
1199 }
1200 return error;
1201}
1202
1203
1204static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1205{
1206 int speed, size, length;
1207 u32 addr,spd0,pci_status;
1208 u32 tmp=0;
1209 u32 time_period=0;
1210 u32 tcount=0;
1211 u32 ticks=0;
1212 u32 clock=0;
1213 u32 fparam=0;
1214 void __iomem *mmio = pe->mmio_base;
1215
1216 /* hard-code chip #0 */
1217 mmio += PDC_CHIP0_OFS;
1218
1219 /* Initialize PLL based upon PCI Bus Frequency */
1220
1221 /* Initialize Time Period Register */
1222 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1223 time_period = readl(mmio + PDC_TIME_PERIOD);
1224 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1225
1226 /* Enable timer */
1227 writel(0x00001a0, mmio + PDC_TIME_CONTROL);
1228 readl(mmio + PDC_TIME_CONTROL);
1229
1230 /* Wait 3 seconds */
1231 msleep(3000);
1232
1233 /*
1234 When timer is enabled, counter is decreased every internal
1235 clock cycle.
1236 */
1237
1238 tcount = readl(mmio + PDC_TIME_COUNTER);
1239 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1240
1241 /*
1242 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1243 register should be >= (0xffffffff - 3x10^8).
1244 */
1245 if(tcount >= PCI_X_TCOUNT) {
1246 ticks = (time_period - tcount);
1247 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1248
1249 clock = (ticks / 300000);
1250 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1251
1252 clock = (clock * 33);
1253 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1254
1255 /* PLL F Param (bit 22:16) */
1256 fparam = (1400000 / clock) - 2;
1257 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1258
1259 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1260 pci_status = (0x8a001824 | (fparam << 16));
1261 } else
1262 pci_status = PCI_PLL_INIT;
1263
1264 /* Initialize PLL. */
1265 VPRINTK("pci_status: 0x%x\n", pci_status);
1266 writel(pci_status, mmio + PDC_CTL_STATUS);
1267 readl(mmio + PDC_CTL_STATUS);
1268
1269 /*
1270 Read SPD of DIMM by I2C interface,
1271 and program the DIMM Module Controller.
1272 */
1273 if (!(speed = pdc20621_detect_dimm(pe))) {
1274 printk(KERN_ERR "Detect Local DIMM Fail\n");
1275 return 1; /* DIMM error */
1276 }
1277 VPRINTK("Local DIMM Speed = %d\n", speed);
1278
1279 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1280 size = pdc20621_prog_dimm0(pe);
1281 VPRINTK("Local DIMM Size = %dMB\n",size);
1282
1283 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1284 if (pdc20621_prog_dimm_global(pe)) {
1285 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1286 return 1;
1287 }
1288
1289#ifdef ATA_VERBOSE_DEBUG
1290 {
1291 u8 test_parttern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1292 'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ',
1293 '1','.','1','0',
1294 '9','8','0','3','1','6','1','2',0,0};
1295 u8 test_parttern2[40] = {0};
1296
1297 pdc20621_put_to_dimm(pe, (void *) test_parttern2, 0x10040, 40);
1298 pdc20621_put_to_dimm(pe, (void *) test_parttern2, 0x40, 40);
1299
1300 pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x10040, 40);
1301 pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40);
1302 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1303 test_parttern2[1], &(test_parttern2[2]));
1304 pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x10040,
1305 40);
1306 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1307 test_parttern2[1], &(test_parttern2[2]));
1308
1309 pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x40, 40);
1310 pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40);
1311 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1312 test_parttern2[1], &(test_parttern2[2]));
1313 }
1314#endif
1315
1316 /* ECC initiliazation. */
1317
1318 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1319 PDC_DIMM_SPD_TYPE, &spd0);
1320 if (spd0 == 0x02) {
1321 VPRINTK("Start ECC initialization\n");
1322 addr = 0;
1323 length = size * 1024 * 1024;
1324 while (addr < length) {
1325 pdc20621_put_to_dimm(pe, (void *) &tmp, addr,
1326 sizeof(u32));
1327 addr += sizeof(u32);
1328 }
1329 VPRINTK("Finish ECC initialization\n");
1330 }
1331 return 0;
1332}
1333
1334
/*
 * pdc_20621_init - one-time chip setup: select the DIMM page for
 * our 32K window and pulse the host-DMA reset bit.
 */
static void pdc_20621_init(struct ata_probe_ent *pe)
{
	u32 tmp;
	void __iomem *mmio = pe->mmio_base;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Select page 0x40 for our 32k DIMM window
	 */
	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);

	/*
	 * Reset Host DMA
	 */
	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp |= PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */

	/* hold reset asserted briefly, then release */
	udelay(10);

	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp &= ~PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
}
1365
1366static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1367{
1368 static int printed_version;
1369 struct ata_probe_ent *probe_ent = NULL;
1370 unsigned long base;
1371 void __iomem *mmio_base;
1372 void __iomem *dimm_mmio = NULL;
1373 struct pdc_host_priv *hpriv = NULL;
1374 unsigned int board_idx = (unsigned int) ent->driver_data;
1375 int pci_dev_busy = 0;
1376 int rc;
1377
1378 if (!printed_version++)
1379 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1380
1381 rc = pci_enable_device(pdev);
1382 if (rc)
1383 return rc;
1384
1385 rc = pci_request_regions(pdev, DRV_NAME);
1386 if (rc) {
1387 pci_dev_busy = 1;
1388 goto err_out;
1389 }
1390
1391 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1392 if (rc)
1393 goto err_out_regions;
1394 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1395 if (rc)
1396 goto err_out_regions;
1397
1398 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1399 if (probe_ent == NULL) {
1400 rc = -ENOMEM;
1401 goto err_out_regions;
1402 }
1403
1404 memset(probe_ent, 0, sizeof(*probe_ent));
1405 probe_ent->dev = pci_dev_to_dev(pdev);
1406 INIT_LIST_HEAD(&probe_ent->node);
1407
1408 mmio_base = pci_iomap(pdev, 3, 0);
1409 if (mmio_base == NULL) {
1410 rc = -ENOMEM;
1411 goto err_out_free_ent;
1412 }
1413 base = (unsigned long) mmio_base;
1414
1415 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1416 if (!hpriv) {
1417 rc = -ENOMEM;
1418 goto err_out_iounmap;
1419 }
1420 memset(hpriv, 0, sizeof(*hpriv));
1421
1422 dimm_mmio = pci_iomap(pdev, 4, 0);
1423 if (!dimm_mmio) {
1424 kfree(hpriv);
1425 rc = -ENOMEM;
1426 goto err_out_iounmap;
1427 }
1428
1429 hpriv->dimm_mmio = dimm_mmio;
1430
1431 probe_ent->sht = pdc_port_info[board_idx].sht;
1432 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
1433 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
1434 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
1435 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
1436 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
1437
1438 probe_ent->irq = pdev->irq;
1439 probe_ent->irq_flags = IRQF_SHARED;
1440 probe_ent->mmio_base = mmio_base;
1441
1442 probe_ent->private_data = hpriv;
1443 base += PDC_CHIP0_OFS;
1444
1445 probe_ent->n_ports = 4;
1446 pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);
1447 pdc_sata_setup_port(&probe_ent->port[1], base + 0x280);
1448 pdc_sata_setup_port(&probe_ent->port[2], base + 0x300);
1449 pdc_sata_setup_port(&probe_ent->port[3], base + 0x380);
1450
1451 pci_set_master(pdev);
1452
1453 /* initialize adapter */
1454 /* initialize local dimm */
1455 if (pdc20621_dimm_init(probe_ent)) {
1456 rc = -ENOMEM;
1457 goto err_out_iounmap_dimm;
1458 }
1459 pdc_20621_init(probe_ent);
1460
1461 /* FIXME: check ata_device_add return value */
1462 ata_device_add(probe_ent);
1463 kfree(probe_ent);
1464
1465 return 0;
1466
1467err_out_iounmap_dimm: /* only get to this label if 20621 */
1468 kfree(hpriv);
1469 pci_iounmap(pdev, dimm_mmio);
1470err_out_iounmap:
1471 pci_iounmap(pdev, mmio_base);
1472err_out_free_ent:
1473 kfree(probe_ent);
1474err_out_regions:
1475 pci_release_regions(pdev);
1476err_out:
1477 if (!pci_dev_busy)
1478 pci_disable_device(pdev);
1479 return rc;
1480}
1481
1482
/* module entry point: register the PCI driver */
static int __init pdc_sata_init(void)
{
	return pci_register_driver(&pdc_sata_pci_driver);
}
1487
1488
/* module exit point: unregister the PCI driver */
static void __exit pdc_sata_exit(void)
{
	pci_unregister_driver(&pdc_sata_pci_driver);
}
1493
1494
/* module metadata and init/exit hookup */
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(pdc_sata_init);
module_exit(pdc_sata_exit);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
new file mode 100644
index 000000000000..654aae2b25c5
--- /dev/null
+++ b/drivers/ata/sata_uli.c
@@ -0,0 +1,300 @@
1/*
2 * sata_uli.c - ULi Electronics SATA
3 *
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; see the file COPYING. If not, write to
17 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 *
20 * libata documentation is available via 'make {ps|pdf}docs',
21 * as Documentation/DocBook/libata.*
22 *
23 * Hardware documentation available under NDA.
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/device.h>
35#include <scsi/scsi_host.h>
36#include <linux/libata.h>
37
38#define DRV_NAME "sata_uli"
39#define DRV_VERSION "1.0"
40
41enum {
42 uli_5289 = 0,
43 uli_5287 = 1,
44 uli_5281 = 2,
45
46 uli_max_ports = 4,
47
48 /* PCI configuration registers */
49 ULI5287_BASE = 0x90, /* sata0 phy SCR registers */
50 ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */
51 ULI5281_BASE = 0x60, /* sata0 phy SCR registers */
52 ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */
53};
54
55struct uli_priv {
56 unsigned int scr_cfg_addr[uli_max_ports];
57};
58
59static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
60static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
61static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
62
63static const struct pci_device_id uli_pci_tbl[] = {
64 { PCI_VENDOR_ID_AL, 0x5289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5289 },
65 { PCI_VENDOR_ID_AL, 0x5287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5287 },
66 { PCI_VENDOR_ID_AL, 0x5281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5281 },
67 { } /* terminate list */
68};
69
70
71static struct pci_driver uli_pci_driver = {
72 .name = DRV_NAME,
73 .id_table = uli_pci_tbl,
74 .probe = uli_init_one,
75 .remove = ata_pci_remove_one,
76};
77
78static struct scsi_host_template uli_sht = {
79 .module = THIS_MODULE,
80 .name = DRV_NAME,
81 .ioctl = ata_scsi_ioctl,
82 .queuecommand = ata_scsi_queuecmd,
83 .can_queue = ATA_DEF_QUEUE,
84 .this_id = ATA_SHT_THIS_ID,
85 .sg_tablesize = LIBATA_MAX_PRD,
86 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
87 .emulated = ATA_SHT_EMULATED,
88 .use_clustering = ATA_SHT_USE_CLUSTERING,
89 .proc_name = DRV_NAME,
90 .dma_boundary = ATA_DMA_BOUNDARY,
91 .slave_configure = ata_scsi_slave_config,
92 .slave_destroy = ata_scsi_slave_destroy,
93 .bios_param = ata_std_bios_param,
94};
95
96static const struct ata_port_operations uli_ops = {
97 .port_disable = ata_port_disable,
98
99 .tf_load = ata_tf_load,
100 .tf_read = ata_tf_read,
101 .check_status = ata_check_status,
102 .exec_command = ata_exec_command,
103 .dev_select = ata_std_dev_select,
104
105 .bmdma_setup = ata_bmdma_setup,
106 .bmdma_start = ata_bmdma_start,
107 .bmdma_stop = ata_bmdma_stop,
108 .bmdma_status = ata_bmdma_status,
109 .qc_prep = ata_qc_prep,
110 .qc_issue = ata_qc_issue_prot,
111 .data_xfer = ata_pio_data_xfer,
112
113 .freeze = ata_bmdma_freeze,
114 .thaw = ata_bmdma_thaw,
115 .error_handler = ata_bmdma_error_handler,
116 .post_internal_cmd = ata_bmdma_post_internal_cmd,
117
118 .irq_handler = ata_interrupt,
119 .irq_clear = ata_bmdma_irq_clear,
120
121 .scr_read = uli_scr_read,
122 .scr_write = uli_scr_write,
123
124 .port_start = ata_port_start,
125 .port_stop = ata_port_stop,
126 .host_stop = ata_host_stop,
127};
128
/* Port template handed to ata_pci_init_native_mode() for every board type */
static struct ata_port_info uli_port_info = {
	.sht            = &uli_sht,
	.host_flags     = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask       = 0x1f,		/* pio0-4 */
	.udma_mask      = 0x7f,		/* udma0-6 */
	.port_ops       = &uli_ops,
};
136
137
138MODULE_AUTHOR("Peer Chen");
139MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
140MODULE_LICENSE("GPL");
141MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
142MODULE_VERSION(DRV_VERSION);
143
144static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
145{
146 struct uli_priv *hpriv = ap->host_set->private_data;
147 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
148}
149
150static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
151{
152 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
153 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
154 u32 val;
155
156 pci_read_config_dword(pdev, cfg_addr, &val);
157 return val;
158}
159
160static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
161{
162 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
163 unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
164
165 pci_write_config_dword(pdev, cfg_addr, val);
166}
167
168static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg)
169{
170 if (sc_reg > SCR_CONTROL)
171 return 0xffffffffU;
172
173 return uli_scr_cfg_read(ap, sc_reg);
174}
175
176static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
177{
178 if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
179 return;
180
181 uli_scr_cfg_write(ap, sc_reg, val);
182}
183
/*
 * uli_init_one - PCI probe callback for ULi SATA controllers
 *
 * Enables the device, claims its BARs, applies 32-bit DMA masks, builds
 * a probe_ent through the libata native-mode helper and records the
 * per-port PCI-config SCR bank offsets in the host-private data before
 * registering with libata.  Returns 0 on success or a negative errno.
 */
static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_probe_ent *probe_ent;
	struct ata_port_info *ppi;
	int rc;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	int pci_dev_busy = 0;	/* regions owned elsewhere: skip disable on exit */
	struct uli_priv *hpriv;

	/* print the driver banner only for the first probed device */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	/* hardware does 32-bit DMA only */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	/* ports 0/1 follow the standard BAR0-4 native-mode layout */
	ppi = &uli_port_info;
	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_probe_ent;
	}

	/* holds the SCR config-space bank offsets consumed by uli_scr_*() */
	probe_ent->private_data = hpriv;

	switch (board_idx) {
	case uli_5287:
		/*
		 * The 5287 is a four-port part; ports 2/3 are not covered
		 * by the native-mode helper, so their taskfile and bmdma
		 * addresses are wired up from the BARs by hand below.
		 */
		hpriv->scr_cfg_addr[0] = ULI5287_BASE;
		hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
		probe_ent->n_ports = 4;

		probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
		probe_ent->port[2].altstatus_addr =
		probe_ent->port[2].ctl_addr =
			(pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
		probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
		hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;

		probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
		probe_ent->port[3].altstatus_addr =
		probe_ent->port[3].ctl_addr =
			(pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
		probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
		hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;

		ata_std_ports(&probe_ent->port[2]);
		ata_std_ports(&probe_ent->port[3]);
		break;

	case uli_5289:
		hpriv->scr_cfg_addr[0] = ULI5287_BASE;
		hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
		break;

	case uli_5281:
		hpriv->scr_cfg_addr[0] = ULI5281_BASE;
		hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
		break;

	default:
		/* driver_data comes from uli_pci_tbl; anything else is a bug */
		BUG();
		break;
	}

	pci_set_master(pdev);
	pci_intx(pdev, 1);	/* make sure legacy INTx delivery is enabled */

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);	/* libata copied everything it needs */

	return 0;

err_out_probe_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;

}
287
288static int __init uli_init(void)
289{
290 return pci_register_driver(&uli_pci_driver);
291}
292
293static void __exit uli_exit(void)
294{
295 pci_unregister_driver(&uli_pci_driver);
296}
297
298
299module_init(uli_init);
300module_exit(uli_exit);
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
new file mode 100644
index 000000000000..0bf1dbea6406
--- /dev/null
+++ b/drivers/ata/sata_via.c
@@ -0,0 +1,394 @@
1/*
2 * sata_via.c - VIA Serial ATA controllers
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available under NDA.
31 *
32 *
33 * To-do list:
34 * - VT6421 PATA support
35 *
36 */
37
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/pci.h>
41#include <linux/init.h>
42#include <linux/blkdev.h>
43#include <linux/delay.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47#include <asm/io.h>
48
49#define DRV_NAME "sata_via"
50#define DRV_VERSION "2.0"
51
52enum board_ids_enum {
53 vt6420,
54 vt6421,
55};
56
57enum {
58 SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
59 SATA_INT_GATE = 0x41, /* SATA interrupt gating */
60 SATA_NATIVE_MODE = 0x42, /* Native mode enable */
61 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */
62
63 PORT0 = (1 << 1),
64 PORT1 = (1 << 0),
65 ALL_PORTS = PORT0 | PORT1,
66 N_PORTS = 2,
67
68 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
69
70 SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
71 SATA_2DEV = (1 << 5), /* SATA is master/slave */
72};
73
74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
75static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
77
78static const struct pci_device_id svia_pci_tbl[] = {
79 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
80 { 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 },
81
82 { } /* terminate list */
83};
84
85static struct pci_driver svia_pci_driver = {
86 .name = DRV_NAME,
87 .id_table = svia_pci_tbl,
88 .probe = svia_init_one,
89 .remove = ata_pci_remove_one,
90};
91
92static struct scsi_host_template svia_sht = {
93 .module = THIS_MODULE,
94 .name = DRV_NAME,
95 .ioctl = ata_scsi_ioctl,
96 .queuecommand = ata_scsi_queuecmd,
97 .can_queue = ATA_DEF_QUEUE,
98 .this_id = ATA_SHT_THIS_ID,
99 .sg_tablesize = LIBATA_MAX_PRD,
100 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
101 .emulated = ATA_SHT_EMULATED,
102 .use_clustering = ATA_SHT_USE_CLUSTERING,
103 .proc_name = DRV_NAME,
104 .dma_boundary = ATA_DMA_BOUNDARY,
105 .slave_configure = ata_scsi_slave_config,
106 .slave_destroy = ata_scsi_slave_destroy,
107 .bios_param = ata_std_bios_param,
108};
109
110static const struct ata_port_operations svia_sata_ops = {
111 .port_disable = ata_port_disable,
112
113 .tf_load = ata_tf_load,
114 .tf_read = ata_tf_read,
115 .check_status = ata_check_status,
116 .exec_command = ata_exec_command,
117 .dev_select = ata_std_dev_select,
118
119 .bmdma_setup = ata_bmdma_setup,
120 .bmdma_start = ata_bmdma_start,
121 .bmdma_stop = ata_bmdma_stop,
122 .bmdma_status = ata_bmdma_status,
123
124 .qc_prep = ata_qc_prep,
125 .qc_issue = ata_qc_issue_prot,
126 .data_xfer = ata_pio_data_xfer,
127
128 .freeze = ata_bmdma_freeze,
129 .thaw = ata_bmdma_thaw,
130 .error_handler = ata_bmdma_error_handler,
131 .post_internal_cmd = ata_bmdma_post_internal_cmd,
132
133 .irq_handler = ata_interrupt,
134 .irq_clear = ata_bmdma_irq_clear,
135
136 .scr_read = svia_scr_read,
137 .scr_write = svia_scr_write,
138
139 .port_start = ata_port_start,
140 .port_stop = ata_port_stop,
141 .host_stop = ata_host_stop,
142};
143
/* Port template used for vt6420 (native-mode helper) and vt6421 (by hand) */
static struct ata_port_info svia_port_info = {
	.sht		= &svia_sht,
	.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x1f,		/* pio0-4 */
	.mwdma_mask	= 0x07,		/* mwdma0-2 */
	.udma_mask	= 0x7f,		/* udma0-6 */
	.port_ops	= &svia_sata_ops,
};
152
153MODULE_AUTHOR("Jeff Garzik");
154MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
155MODULE_LICENSE("GPL");
156MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
157MODULE_VERSION(DRV_VERSION);
158
159static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg)
160{
161 if (sc_reg > SCR_CONTROL)
162 return 0xffffffffU;
163 return inl(ap->ioaddr.scr_addr + (4 * sc_reg));
164}
165
166static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
167{
168 if (sc_reg > SCR_CONTROL)
169 return;
170 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
171}
172
173static const unsigned int svia_bar_sizes[] = {
174 8, 4, 8, 4, 16, 256
175};
176
177static const unsigned int vt6421_bar_sizes[] = {
178 16, 16, 16, 16, 32, 128
179};
180
/* vt6420: SCR banks in BAR5 are spaced 128 bytes apart per port. */
static unsigned long svia_scr_addr(unsigned long addr, unsigned int port)
{
	return addr + 128UL * port;
}
185
/* vt6421: SCR banks in BAR5 are spaced 64 bytes apart per port. */
static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port)
{
	return addr + 64UL * port;
}
190
/*
 * vt6421_init_addrs - wire up one port's taskfile/bmdma/SCR addresses
 *
 * On the vt6421 each port has its own BAR (index == port) for taskfile
 * registers, an 8-byte slice of BAR4 for bus-master DMA, and a 64-byte
 * bank of BAR5 for the SATA SCR registers.
 */
static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
			      struct pci_dev *pdev,
			      unsigned int port)
{
	unsigned long reg_addr = pci_resource_start(pdev, port);
	unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8);
	unsigned long scr_addr;

	probe_ent->port[port].cmd_addr = reg_addr;
	probe_ent->port[port].altstatus_addr =
	probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS;
	probe_ent->port[port].bmdma_addr = bmdma_addr;

	scr_addr = vt6421_scr_addr(pci_resource_start(pdev, 5), port);
	probe_ent->port[port].scr_addr = scr_addr;

	/* derive the remaining taskfile register addresses from cmd_addr */
	ata_std_ports(&probe_ent->port[port]);
}
209
/*
 * vt6420_init_probe_ent - build a probe_ent for the two-port vt6420
 *
 * Taskfile/bmdma addresses follow the standard PCI native-mode layout
 * (BARs 0-4), so only the SCR banks in BAR5 (128 bytes per port) need
 * wiring up by hand.  Returns NULL on allocation failure; the caller
 * owns (and must kfree) the returned probe_ent.
 */
static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
{
	struct ata_probe_ent *probe_ent;
	struct ata_port_info *ppi = &svia_port_info;

	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return NULL;

	probe_ent->port[0].scr_addr =
		svia_scr_addr(pci_resource_start(pdev, 5), 0);
	probe_ent->port[1].scr_addr =
		svia_scr_addr(pci_resource_start(pdev, 5), 1);

	return probe_ent;
}
226
227static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
228{
229 struct ata_probe_ent *probe_ent;
230 unsigned int i;
231
232 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
233 if (!probe_ent)
234 return NULL;
235
236 memset(probe_ent, 0, sizeof(*probe_ent));
237 probe_ent->dev = pci_dev_to_dev(pdev);
238 INIT_LIST_HEAD(&probe_ent->node);
239
240 probe_ent->sht = &svia_sht;
241 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
242 probe_ent->port_ops = &svia_sata_ops;
243 probe_ent->n_ports = N_PORTS;
244 probe_ent->irq = pdev->irq;
245 probe_ent->irq_flags = IRQF_SHARED;
246 probe_ent->pio_mask = 0x1f;
247 probe_ent->mwdma_mask = 0x07;
248 probe_ent->udma_mask = 0x7f;
249
250 for (i = 0; i < N_PORTS; i++)
251 vt6421_init_addrs(probe_ent, pdev, i);
252
253 return probe_ent;
254}
255
/*
 * svia_configure - bring the controller into a usable state
 *
 * Reports the routed IRQ line, then enables via PCI config space: both
 * SATA channels, per-channel interrupt delivery to the host, and native
 * (non-compatibility) mode.  Each write is skipped when the bits are
 * already set, so the function is idempotent.
 */
static void svia_configure(struct pci_dev *pdev)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	/* 0xf0 in the low nibble's complement range means "not routed" */
	dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
		   (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channels (0x%x)\n",
		           (int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel interrupts (0x%x)\n",
		           (int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel native mode (0x%x)\n",
		           (int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}
}
294
295static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
296{
297 static int printed_version;
298 unsigned int i;
299 int rc;
300 struct ata_probe_ent *probe_ent;
301 int board_id = (int) ent->driver_data;
302 const int *bar_sizes;
303 int pci_dev_busy = 0;
304 u8 tmp8;
305
306 if (!printed_version++)
307 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
308
309 rc = pci_enable_device(pdev);
310 if (rc)
311 return rc;
312
313 rc = pci_request_regions(pdev, DRV_NAME);
314 if (rc) {
315 pci_dev_busy = 1;
316 goto err_out;
317 }
318
319 if (board_id == vt6420) {
320 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
321 if (tmp8 & SATA_2DEV) {
322 dev_printk(KERN_ERR, &pdev->dev,
323 "SATA master/slave not supported (0x%x)\n",
324 (int) tmp8);
325 rc = -EIO;
326 goto err_out_regions;
327 }
328
329 bar_sizes = &svia_bar_sizes[0];
330 } else {
331 bar_sizes = &vt6421_bar_sizes[0];
332 }
333
334 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
335 if ((pci_resource_start(pdev, i) == 0) ||
336 (pci_resource_len(pdev, i) < bar_sizes[i])) {
337 dev_printk(KERN_ERR, &pdev->dev,
338 "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
339 i,
340 (unsigned long long)pci_resource_start(pdev, i),
341 (unsigned long long)pci_resource_len(pdev, i));
342 rc = -ENODEV;
343 goto err_out_regions;
344 }
345
346 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
347 if (rc)
348 goto err_out_regions;
349 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
350 if (rc)
351 goto err_out_regions;
352
353 if (board_id == vt6420)
354 probe_ent = vt6420_init_probe_ent(pdev);
355 else
356 probe_ent = vt6421_init_probe_ent(pdev);
357
358 if (!probe_ent) {
359 dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
360 rc = -ENOMEM;
361 goto err_out_regions;
362 }
363
364 svia_configure(pdev);
365
366 pci_set_master(pdev);
367
368 /* FIXME: check ata_device_add return value */
369 ata_device_add(probe_ent);
370 kfree(probe_ent);
371
372 return 0;
373
374err_out_regions:
375 pci_release_regions(pdev);
376err_out:
377 if (!pci_dev_busy)
378 pci_disable_device(pdev);
379 return rc;
380}
381
382static int __init svia_init(void)
383{
384 return pci_register_driver(&svia_pci_driver);
385}
386
387static void __exit svia_exit(void)
388{
389 pci_unregister_driver(&svia_pci_driver);
390}
391
392module_init(svia_init);
393module_exit(svia_exit);
394
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
new file mode 100644
index 000000000000..4c69a705a483
--- /dev/null
+++ b/drivers/ata/sata_vsc.c
@@ -0,0 +1,482 @@
1/*
2 * sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
3 *
4 * Maintained by: Jeremy Higdon @ SGI
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 SGI
9 *
10 * Bits from Jeff Garzik, Copyright RedHat, Inc.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; see the file COPYING. If not, write to
25 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 *
28 * libata documentation is available via 'make {ps|pdf}docs',
29 * as Documentation/DocBook/libata.*
30 *
31 * Vitesse hardware documentation presumably available under NDA.
32 * Intel 31244 (same hardware interface) documentation presumably
33 * available from http://developer.intel.com/
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/dma-mapping.h>
45#include <linux/device.h>
46#include <scsi/scsi_host.h>
47#include <linux/libata.h>
48
49#define DRV_NAME "sata_vsc"
50#define DRV_VERSION "2.0"
51
52enum {
53 /* Interrupt register offsets (from chip base address) */
54 VSC_SATA_INT_STAT_OFFSET = 0x00,
55 VSC_SATA_INT_MASK_OFFSET = 0x04,
56
57 /* Taskfile registers offsets */
58 VSC_SATA_TF_CMD_OFFSET = 0x00,
59 VSC_SATA_TF_DATA_OFFSET = 0x00,
60 VSC_SATA_TF_ERROR_OFFSET = 0x04,
61 VSC_SATA_TF_FEATURE_OFFSET = 0x06,
62 VSC_SATA_TF_NSECT_OFFSET = 0x08,
63 VSC_SATA_TF_LBAL_OFFSET = 0x0c,
64 VSC_SATA_TF_LBAM_OFFSET = 0x10,
65 VSC_SATA_TF_LBAH_OFFSET = 0x14,
66 VSC_SATA_TF_DEVICE_OFFSET = 0x18,
67 VSC_SATA_TF_STATUS_OFFSET = 0x1c,
68 VSC_SATA_TF_COMMAND_OFFSET = 0x1d,
69 VSC_SATA_TF_ALTSTATUS_OFFSET = 0x28,
70 VSC_SATA_TF_CTL_OFFSET = 0x29,
71
72 /* DMA base */
73 VSC_SATA_UP_DESCRIPTOR_OFFSET = 0x64,
74 VSC_SATA_UP_DATA_BUFFER_OFFSET = 0x6C,
75 VSC_SATA_DMA_CMD_OFFSET = 0x70,
76
77 /* SCRs base */
78 VSC_SATA_SCR_STATUS_OFFSET = 0x100,
79 VSC_SATA_SCR_ERROR_OFFSET = 0x104,
80 VSC_SATA_SCR_CONTROL_OFFSET = 0x108,
81
82 /* Port stride */
83 VSC_SATA_PORT_OFFSET = 0x200,
84
85 /* Error interrupt status bit offsets */
86 VSC_SATA_INT_ERROR_CRC = 0x40,
87 VSC_SATA_INT_ERROR_T = 0x20,
88 VSC_SATA_INT_ERROR_P = 0x10,
89 VSC_SATA_INT_ERROR_R = 0x8,
90 VSC_SATA_INT_ERROR_E = 0x4,
91 VSC_SATA_INT_ERROR_M = 0x2,
92 VSC_SATA_INT_PHY_CHANGE = 0x1,
93 VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC | VSC_SATA_INT_ERROR_T | \
94 VSC_SATA_INT_ERROR_P | VSC_SATA_INT_ERROR_R | \
95 VSC_SATA_INT_ERROR_E | VSC_SATA_INT_ERROR_M | \
96 VSC_SATA_INT_PHY_CHANGE),
97};
98
99
/*
 * True when the interrupt status word carries any error/phy-change bit
 * for the given port (one status byte per port).  Arguments are
 * parenthesized so callers may pass arbitrary expressions without
 * operator-precedence surprises.
 */
#define is_vsc_sata_int_err(port_idx, int_status) \
	((int_status) & (VSC_SATA_INT_ERROR << (8 * (port_idx))))
102
103
104static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
105{
106 if (sc_reg > SCR_CONTROL)
107 return 0xffffffffU;
108 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
109}
110
111
112static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
113 u32 val)
114{
115 if (sc_reg > SCR_CONTROL)
116 return;
117 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
118}
119
120
121static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
122{
123 void __iomem *mask_addr;
124 u8 mask;
125
126 mask_addr = ap->host_set->mmio_base +
127 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
128 mask = readb(mask_addr);
129 if (ctl & ATA_NIEN)
130 mask |= 0x80;
131 else
132 mask &= 0x7F;
133 writeb(mask, mask_addr);
134}
135
136
/*
 * vsc_sata_tf_load - write a taskfile to the port's registers
 *
 * This chip exposes 16-bit-wide taskfile registers, so each LBA48
 * register pair (current + hob) is written as a single 16-bit store
 * instead of libata's usual two 8-bit writes.  Waits for the port to
 * go idle before returning.
 */
static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/*
	 * The only thing the ctl register is used for is SRST.
	 * That is not enabled or disabled via tf_load.
	 * However, if ATA_NIEN is changed, then we need to change the interrupt register.
	 */
	if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
		ap->last_ctl = tf->ctl;
		vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
	}
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		/* low byte = current value, high byte = hob (previous) value */
		writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
		writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
		writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
		writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
		writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
	} else if (is_addr) {
		writew(tf->feature, ioaddr->feature_addr);
		writew(tf->nsect, ioaddr->nsect_addr);
		writew(tf->lbal, ioaddr->lbal_addr);
		writew(tf->lbam, ioaddr->lbam_addr);
		writew(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		writeb(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}
170
171
172static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
173{
174 struct ata_ioports *ioaddr = &ap->ioaddr;
175 u16 nsect, lbal, lbam, lbah, feature;
176
177 tf->command = ata_check_status(ap);
178 tf->device = readw(ioaddr->device_addr);
179 feature = readw(ioaddr->error_addr);
180 nsect = readw(ioaddr->nsect_addr);
181 lbal = readw(ioaddr->lbal_addr);
182 lbam = readw(ioaddr->lbam_addr);
183 lbah = readw(ioaddr->lbah_addr);
184
185 tf->feature = feature;
186 tf->nsect = nsect;
187 tf->lbal = lbal;
188 tf->lbam = lbam;
189 tf->lbah = lbah;
190
191 if (tf->flags & ATA_TFLAG_LBA48) {
192 tf->hob_feature = feature >> 8;
193 tf->hob_nsect = nsect >> 8;
194 tf->hob_lbal = lbal >> 8;
195 tf->hob_lbam = lbam >> 8;
196 tf->hob_lbah = lbah >> 8;
197 }
198}
199
200
201/*
202 * vsc_sata_interrupt
203 *
204 * Read the interrupt register and process for the devices that have them pending.
205 */
206static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
207 struct pt_regs *regs)
208{
209 struct ata_host_set *host_set = dev_instance;
210 unsigned int i;
211 unsigned int handled = 0;
212 u32 int_status;
213
214 spin_lock(&host_set->lock);
215
216 int_status = readl(host_set->mmio_base + VSC_SATA_INT_STAT_OFFSET);
217
218 for (i = 0; i < host_set->n_ports; i++) {
219 if (int_status & ((u32) 0xFF << (8 * i))) {
220 struct ata_port *ap;
221
222 ap = host_set->ports[i];
223
224 if (is_vsc_sata_int_err(i, int_status)) {
225 u32 err_status;
226 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
227 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
228 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
229 handled++;
230 }
231
232 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
233 struct ata_queued_cmd *qc;
234
235 qc = ata_qc_from_tag(ap, ap->active_tag);
236 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
237 handled += ata_host_intr(ap, qc);
238 else if (is_vsc_sata_int_err(i, int_status)) {
239 /*
240 * On some chips (i.e. Intel 31244), an error
241 * interrupt will sneak in at initialization
242 * time (phy state changes). Clearing the SCR
243 * error register is not required, but it prevents
244 * the phy state change interrupts from recurring
245 * later.
246 */
247 u32 err_status;
248 err_status = vsc_sata_scr_read(ap, SCR_ERROR);
249 printk(KERN_DEBUG "%s: clearing interrupt, "
250 "status %x; sata err status %x\n",
251 __FUNCTION__,
252 int_status, err_status);
253 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
254 /* Clear interrupt status */
255 ata_chk_status(ap);
256 handled++;
257 }
258 }
259 }
260 }
261
262 spin_unlock(&host_set->lock);
263
264 return IRQ_RETVAL(handled);
265}
266
267
268static struct scsi_host_template vsc_sata_sht = {
269 .module = THIS_MODULE,
270 .name = DRV_NAME,
271 .ioctl = ata_scsi_ioctl,
272 .queuecommand = ata_scsi_queuecmd,
273 .can_queue = ATA_DEF_QUEUE,
274 .this_id = ATA_SHT_THIS_ID,
275 .sg_tablesize = LIBATA_MAX_PRD,
276 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
277 .emulated = ATA_SHT_EMULATED,
278 .use_clustering = ATA_SHT_USE_CLUSTERING,
279 .proc_name = DRV_NAME,
280 .dma_boundary = ATA_DMA_BOUNDARY,
281 .slave_configure = ata_scsi_slave_config,
282 .slave_destroy = ata_scsi_slave_destroy,
283 .bios_param = ata_std_bios_param,
284};
285
286
287static const struct ata_port_operations vsc_sata_ops = {
288 .port_disable = ata_port_disable,
289 .tf_load = vsc_sata_tf_load,
290 .tf_read = vsc_sata_tf_read,
291 .exec_command = ata_exec_command,
292 .check_status = ata_check_status,
293 .dev_select = ata_std_dev_select,
294 .bmdma_setup = ata_bmdma_setup,
295 .bmdma_start = ata_bmdma_start,
296 .bmdma_stop = ata_bmdma_stop,
297 .bmdma_status = ata_bmdma_status,
298 .qc_prep = ata_qc_prep,
299 .qc_issue = ata_qc_issue_prot,
300 .data_xfer = ata_mmio_data_xfer,
301 .freeze = ata_bmdma_freeze,
302 .thaw = ata_bmdma_thaw,
303 .error_handler = ata_bmdma_error_handler,
304 .post_internal_cmd = ata_bmdma_post_internal_cmd,
305 .irq_handler = vsc_sata_interrupt,
306 .irq_clear = ata_bmdma_irq_clear,
307 .scr_read = vsc_sata_scr_read,
308 .scr_write = vsc_sata_scr_write,
309 .port_start = ata_port_start,
310 .port_stop = ata_port_stop,
311 .host_stop = ata_pci_host_stop,
312};
313
/*
 * vsc_sata_setup_port - fill in one port's register addresses
 *
 * The chip exposes a 0x200-byte register window per port; taskfile
 * registers are 16 bits wide and spaced 4 bytes apart, hence the
 * non-standard offsets.  Also zeroes the port's DMA descriptor and
 * data-buffer pointers so the DMA engine starts quiescent.
 */
static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
{
	port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
	port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
	port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET;
	port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET;
	port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET;
	port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET;
	port->lbam_addr = base + VSC_SATA_TF_LBAM_OFFSET;
	port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET;
	port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET;
	port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET;
	port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET;
	port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET;
	port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
	port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
	port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
	writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
	writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
}
334
335
336static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
337{
338 static int printed_version;
339 struct ata_probe_ent *probe_ent = NULL;
340 unsigned long base;
341 int pci_dev_busy = 0;
342 void __iomem *mmio_base;
343 int rc;
344
345 if (!printed_version++)
346 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
347
348 rc = pci_enable_device(pdev);
349 if (rc)
350 return rc;
351
352 /*
353 * Check if we have needed resource mapped.
354 */
355 if (pci_resource_len(pdev, 0) == 0) {
356 rc = -ENODEV;
357 goto err_out;
358 }
359
360 rc = pci_request_regions(pdev, DRV_NAME);
361 if (rc) {
362 pci_dev_busy = 1;
363 goto err_out;
364 }
365
366 /*
367 * Use 32 bit DMA mask, because 64 bit address support is poor.
368 */
369 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
370 if (rc)
371 goto err_out_regions;
372 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
373 if (rc)
374 goto err_out_regions;
375
376 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
377 if (probe_ent == NULL) {
378 rc = -ENOMEM;
379 goto err_out_regions;
380 }
381 memset(probe_ent, 0, sizeof(*probe_ent));
382 probe_ent->dev = pci_dev_to_dev(pdev);
383 INIT_LIST_HEAD(&probe_ent->node);
384
385 mmio_base = pci_iomap(pdev, 0, 0);
386 if (mmio_base == NULL) {
387 rc = -ENOMEM;
388 goto err_out_free_ent;
389 }
390 base = (unsigned long) mmio_base;
391
392 /*
393 * Due to a bug in the chip, the default cache line size can't be used
394 */
395 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
396
397 probe_ent->sht = &vsc_sata_sht;
398 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
399 ATA_FLAG_MMIO;
400 probe_ent->port_ops = &vsc_sata_ops;
401 probe_ent->n_ports = 4;
402 probe_ent->irq = pdev->irq;
403 probe_ent->irq_flags = IRQF_SHARED;
404 probe_ent->mmio_base = mmio_base;
405
406 /* We don't care much about the PIO/UDMA masks, but the core won't like us
407 * if we don't fill these
408 */
409 probe_ent->pio_mask = 0x1f;
410 probe_ent->mwdma_mask = 0x07;
411 probe_ent->udma_mask = 0x7f;
412
413 /* We have 4 ports per PCI function */
414 vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET);
415 vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET);
416 vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET);
417 vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET);
418
419 pci_set_master(pdev);
420
421 /*
422 * Config offset 0x98 is "Extended Control and Status Register 0"
423 * Default value is (1 << 28). All bits except bit 28 are reserved in
424 * DPA mode. If bit 28 is set, LED 0 reflects all ports' activity.
425 * If bit 28 is clear, each port has its own LED.
426 */
427 pci_write_config_dword(pdev, 0x98, 0);
428
429 /* FIXME: check ata_device_add return value */
430 ata_device_add(probe_ent);
431 kfree(probe_ent);
432
433 return 0;
434
435err_out_free_ent:
436 kfree(probe_ent);
437err_out_regions:
438 pci_release_regions(pdev);
439err_out:
440 if (!pci_dev_busy)
441 pci_disable_device(pdev);
442 return rc;
443}
444
445
/*
 * PCI IDs handled by this driver. Both entries match on vendor/device plus
 * class code 0x010600 (base class 0x01 = mass storage, subclass 0x06 = SATA,
 * prog-if 0x00) with a full 24-bit class mask.
 */
static const struct pci_device_id vsc_sata_pci_tbl[] = {
	{ PCI_VENDOR_ID_VITESSE, 0x7174,
	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
	/* NOTE(review): Intel 0x3200 is presumably a rebadged/compatible part
	 * of the same controller family — confirm against hardware docs. */
	{ PCI_VENDOR_ID_INTEL, 0x3200,
	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
	{ } /* terminate list */
};
453
454
/* PCI driver descriptor: probe via vsc_sata_init_one(), teardown via the
 * generic libata PCI remove helper. */
static struct pci_driver vsc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= vsc_sata_pci_tbl,
	.probe			= vsc_sata_init_one,
	.remove			= ata_pci_remove_one,
};
461
462
463static int __init vsc_sata_init(void)
464{
465 return pci_register_driver(&vsc_sata_pci_driver);
466}
467
468
469static void __exit vsc_sata_exit(void)
470{
471 pci_unregister_driver(&vsc_sata_pci_driver);
472}
473
474
475MODULE_AUTHOR("Jeremy Higdon");
476MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
477MODULE_LICENSE("GPL");
478MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
479MODULE_VERSION(DRV_VERSION);
480
481module_init(vsc_sata_init);
482module_exit(vsc_sata_exit);