Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig           150
-rw-r--r--  drivers/ata/Makefile           21
-rw-r--r--  drivers/ata/ahci.c           1684
-rw-r--r--  drivers/ata/ata_piix.c       1010
-rw-r--r--  drivers/ata/libata-core.c    6143
-rw-r--r--  drivers/ata/libata-eh.c      2246
-rw-r--r--  drivers/ata/libata-scsi.c    3322
-rw-r--r--  drivers/ata/libata-sff.c     1109
-rw-r--r--  drivers/ata/libata.h          122
-rw-r--r--  drivers/ata/pdc_adma.c        740
-rw-r--r--  drivers/ata/sata_mv.c        2466
-rw-r--r--  drivers/ata/sata_nv.c         595
-rw-r--r--  drivers/ata/sata_promise.c    844
-rw-r--r--  drivers/ata/sata_promise.h    157
-rw-r--r--  drivers/ata/sata_qstor.c      730
-rw-r--r--  drivers/ata/sata_sil.c        728
-rw-r--r--  drivers/ata/sata_sil24.c     1227
-rw-r--r--  drivers/ata/sata_sis.c        347
-rw-r--r--  drivers/ata/sata_svw.c        508
-rw-r--r--  drivers/ata/sata_sx4.c       1502
-rw-r--r--  drivers/ata/sata_uli.c        300
-rw-r--r--  drivers/ata/sata_via.c        502
-rw-r--r--  drivers/ata/sata_vsc.c        482
23 files changed, 26935 insertions, 0 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
new file mode 100644
index 000000000000..13027d56b7f6
--- /dev/null
+++ b/drivers/ata/Kconfig
@@ -0,0 +1,150 @@
1#
2# SATA/PATA driver configuration
3#
4
5menu "Serial ATA (prod) and Parallel ATA (experimental) drivers"
6
7config ATA
8 tristate "ATA device support"
9 select SCSI
10 ---help---
11 If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
12 any other ATA device under Linux, say Y and make sure that you know
13 the name of your ATA host adapter (the card inside your computer
14 that "speaks" the ATA protocol, also called ATA controller),
15 because you will be asked for it.
16
17if ATA
18
19config SATA_AHCI
20 tristate "AHCI SATA support"
21 depends on PCI
22 help
23 This option enables support for AHCI Serial ATA.
24
25 If unsure, say N.
26
27config SATA_SVW
28 tristate "ServerWorks Frodo / Apple K2 SATA support"
29 depends on PCI
30 help
31 This option enables support for Broadcom/ServerWorks/Apple K2
32 SATA controllers.
33
34 If unsure, say N.
35
36config ATA_PIIX
37 tristate "Intel PIIX/ICH SATA support"
38 depends on PCI
39 help
40 This option enables support for ICH5/6/7/8 Serial ATA.
41 If PATA support was enabled previously, this enables
42 support for select Intel PIIX/ICH PATA host controllers.
43
44 If unsure, say N.
45
46config SATA_MV
47 tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
48 depends on PCI && EXPERIMENTAL
49 help
50 This option enables support for the Marvell Serial ATA family.
51 Currently supports 88SX[56]0[48][01] chips.
52
53 If unsure, say N.
54
55config SATA_NV
56 tristate "NVIDIA SATA support"
57 depends on PCI
58 help
59 This option enables support for NVIDIA Serial ATA.
60
61 If unsure, say N.
62
63config PDC_ADMA
64 tristate "Pacific Digital ADMA support"
65 depends on PCI
66 help
67 This option enables support for Pacific Digital ADMA controllers.
68
69 If unsure, say N.
70
71config SATA_QSTOR
72 tristate "Pacific Digital SATA QStor support"
73 depends on PCI
74 help
75 This option enables support for Pacific Digital Serial ATA QStor.
76
77 If unsure, say N.
78
79config SATA_PROMISE
80 tristate "Promise SATA TX2/TX4 support"
81 depends on PCI
82 help
83 This option enables support for Promise Serial ATA TX2/TX4.
84
85 If unsure, say N.
86
87config SATA_SX4
88 tristate "Promise SATA SX4 support"
89 depends on PCI && EXPERIMENTAL
90 help
91 This option enables support for Promise Serial ATA SX4.
92
93 If unsure, say N.
94
95config SATA_SIL
96 tristate "Silicon Image SATA support"
97 depends on PCI
98 help
99 This option enables support for Silicon Image Serial ATA.
100
101 If unsure, say N.
102
103config SATA_SIL24
104 tristate "Silicon Image 3124/3132 SATA support"
105 depends on PCI
106 help
107 This option enables support for Silicon Image 3124/3132 Serial ATA.
108
109 If unsure, say N.
110
111config SATA_SIS
112 tristate "SiS 964/180 SATA support"
113 depends on PCI
114 help
115 This option enables support for SiS Serial ATA 964/180.
116
117 If unsure, say N.
118
119config SATA_ULI
120 tristate "ULi Electronics SATA support"
121 depends on PCI
122 help
123 This option enables support for ULi Electronics SATA.
124
125 If unsure, say N.
126
127config SATA_VIA
128 tristate "VIA SATA support"
129 depends on PCI
130 help
131 This option enables support for VIA Serial ATA.
132
133 If unsure, say N.
134
135config SATA_VITESSE
136 tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
137 depends on PCI
138 help
139 This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
140
141 If unsure, say N.
142
143config SATA_INTEL_COMBINED
144 bool
145 depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX)
146 default y
147
148endif
149endmenu
150
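As a usage sketch, a minimal .config fragment enabling the common Intel
AHCI + PIIX case with the options defined above might look like this
(pick the entries that match your controller; any of them may also be =m):

    CONFIG_ATA=y
    CONFIG_SATA_AHCI=y
    CONFIG_ATA_PIIX=y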
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
new file mode 100644
index 000000000000..e260e3fe65c8
--- /dev/null
+++ b/drivers/ata/Makefile
@@ -0,0 +1,21 @@
1
2obj-$(CONFIG_ATA) += libata.o
3
4obj-$(CONFIG_SATA_AHCI) += ahci.o
5obj-$(CONFIG_SATA_SVW) += sata_svw.o
6obj-$(CONFIG_ATA_PIIX) += ata_piix.o
7obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
8obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
9obj-$(CONFIG_SATA_SIL) += sata_sil.o
10obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
11obj-$(CONFIG_SATA_VIA) += sata_via.o
12obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
13obj-$(CONFIG_SATA_SIS) += sata_sis.o
14obj-$(CONFIG_SATA_SX4) += sata_sx4.o
15obj-$(CONFIG_SATA_NV) += sata_nv.o
16obj-$(CONFIG_SATA_ULI) += sata_uli.o
17obj-$(CONFIG_SATA_MV) += sata_mv.o
18obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
19
20libata-objs := libata-core.o libata-scsi.o libata-sff.o libata-eh.o
21
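# A quick sketch of the kbuild idiom used above (CONFIG_FOO and foo.o are
# placeholder names):
#
#   obj-$(CONFIG_FOO) += foo.o   # built in for =y, built as foo.ko for =m
#   foo-objs := a.o b.o          # foo.o is linked from the listed parts
#
# which is exactly how libata.o is assembled here from libata-core.o,
# libata-scsi.o, libata-sff.o and libata-eh.o.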
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
new file mode 100644
index 000000000000..3f1106fdaed1
--- /dev/null
+++ b/drivers/ata/ahci.c
@@ -0,0 +1,1684 @@
1/*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/dma-mapping.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_cmnd.h>
47#include <linux/libata.h>
48#include <asm/io.h>
49
50#define DRV_NAME "ahci"
51#define DRV_VERSION "2.0"
52
53
54enum {
55 AHCI_PCI_BAR = 5,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
59 AHCI_MAX_CMDS = 32,
60 AHCI_CMD_SZ = 32,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
62 AHCI_RX_FIS_SZ = 256,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
68 AHCI_RX_FIS_SZ,
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
75
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78
79 board_ahci = 0,
80 board_ahci_vt8251 = 1,
81
82 /* global controller registers */
83 HOST_CAP = 0x00, /* host capabilities */
84 HOST_CTL = 0x04, /* global host control */
85 HOST_IRQ_STAT = 0x08, /* interrupt status */
86 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
87 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
88
89 /* HOST_CTL bits */
90 HOST_RESET = (1 << 0), /* reset controller; self-clear */
91 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
92 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
93
94 /* HOST_CAP bits */
95 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
96 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
97 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
98 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
99 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
100
101 /* registers for each SATA port */
102 PORT_LST_ADDR = 0x00, /* command list DMA addr */
103 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
104 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
105 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
106 PORT_IRQ_STAT = 0x10, /* interrupt status */
107 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
108 PORT_CMD = 0x18, /* port command */
109 PORT_TFDATA = 0x20, /* taskfile data */
110 PORT_SIG = 0x24, /* device TF signature */
111 PORT_CMD_ISSUE = 0x38, /* command issue */
112 PORT_SCR = 0x28, /* SATA phy register block */
113 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
114 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
115 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
116 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
117
118 /* PORT_IRQ_{STAT,MASK} bits */
119 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
120 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
121 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
122 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
123 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
124 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
125 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
126 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
127
128 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
129 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
130 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
131 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
132 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
133 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
134 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
135 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
136 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
137
138 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
139 PORT_IRQ_IF_ERR |
140 PORT_IRQ_CONNECT |
141 PORT_IRQ_PHYRDY |
142 PORT_IRQ_UNK_FIS,
143 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
144 PORT_IRQ_TF_ERR |
145 PORT_IRQ_HBUS_DATA_ERR,
146 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
147 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
148 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
149
150 /* PORT_CMD bits */
151 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
152 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
153 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
154 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
155 PORT_CMD_CLO = (1 << 3), /* Command list override */
156 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
157 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
158 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
159
160 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
161 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
162 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
163 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
164
165 /* hpriv->flags bits */
166 AHCI_FLAG_MSI = (1 << 0),
167
168 /* ap->flags bits */
169 AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
170 AHCI_FLAG_NO_NCQ = (1 << 25),
171};
172
173struct ahci_cmd_hdr {
174 u32 opts;
175 u32 status;
176 u32 tbl_addr;
177 u32 tbl_addr_hi;
178 u32 reserved[4];
179};
180
181struct ahci_sg {
182 u32 addr;
183 u32 addr_hi;
184 u32 reserved;
185 u32 flags_size;
186};
187
188struct ahci_host_priv {
189 unsigned long flags;
190 u32 cap; /* cache of HOST_CAP register */
191 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
192};
193
194struct ahci_port_priv {
195 struct ahci_cmd_hdr *cmd_slot;
196 dma_addr_t cmd_slot_dma;
197 void *cmd_tbl;
198 dma_addr_t cmd_tbl_dma;
199 void *rx_fis;
200 dma_addr_t rx_fis_dma;
201};
202
203static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
204static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
205static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
206static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
207static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
208static void ahci_irq_clear(struct ata_port *ap);
209static int ahci_port_start(struct ata_port *ap);
210static void ahci_port_stop(struct ata_port *ap);
211static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
212static void ahci_qc_prep(struct ata_queued_cmd *qc);
213static u8 ahci_check_status(struct ata_port *ap);
214static void ahci_freeze(struct ata_port *ap);
215static void ahci_thaw(struct ata_port *ap);
216static void ahci_error_handler(struct ata_port *ap);
217static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
218static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
219static int ahci_port_resume(struct ata_port *ap);
220static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
221static int ahci_pci_device_resume(struct pci_dev *pdev);
222static void ahci_remove_one (struct pci_dev *pdev);
223
224static struct scsi_host_template ahci_sht = {
225 .module = THIS_MODULE,
226 .name = DRV_NAME,
227 .ioctl = ata_scsi_ioctl,
228 .queuecommand = ata_scsi_queuecmd,
229 .change_queue_depth = ata_scsi_change_queue_depth,
230 .can_queue = AHCI_MAX_CMDS - 1,
231 .this_id = ATA_SHT_THIS_ID,
232 .sg_tablesize = AHCI_MAX_SG,
233 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
234 .emulated = ATA_SHT_EMULATED,
235 .use_clustering = AHCI_USE_CLUSTERING,
236 .proc_name = DRV_NAME,
237 .dma_boundary = AHCI_DMA_BOUNDARY,
238 .slave_configure = ata_scsi_slave_config,
239 .slave_destroy = ata_scsi_slave_destroy,
240 .bios_param = ata_std_bios_param,
241 .suspend = ata_scsi_device_suspend,
242 .resume = ata_scsi_device_resume,
243};
244
245static const struct ata_port_operations ahci_ops = {
246 .port_disable = ata_port_disable,
247
248 .check_status = ahci_check_status,
249 .check_altstatus = ahci_check_status,
250 .dev_select = ata_noop_dev_select,
251
252 .tf_read = ahci_tf_read,
253
254 .qc_prep = ahci_qc_prep,
255 .qc_issue = ahci_qc_issue,
256
257 .irq_handler = ahci_interrupt,
258 .irq_clear = ahci_irq_clear,
259
260 .scr_read = ahci_scr_read,
261 .scr_write = ahci_scr_write,
262
263 .freeze = ahci_freeze,
264 .thaw = ahci_thaw,
265
266 .error_handler = ahci_error_handler,
267 .post_internal_cmd = ahci_post_internal_cmd,
268
269 .port_suspend = ahci_port_suspend,
270 .port_resume = ahci_port_resume,
271
272 .port_start = ahci_port_start,
273 .port_stop = ahci_port_stop,
274};
275
276static const struct ata_port_info ahci_port_info[] = {
277 /* board_ahci */
278 {
279 .sht = &ahci_sht,
280 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
281 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
282 ATA_FLAG_SKIP_D2H_BSY,
283 .pio_mask = 0x1f, /* pio0-4 */
284 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
285 .port_ops = &ahci_ops,
286 },
287 /* board_ahci_vt8251 */
288 {
289 .sht = &ahci_sht,
290 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
291 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
292 ATA_FLAG_SKIP_D2H_BSY |
293 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
294 .pio_mask = 0x1f, /* pio0-4 */
295 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
296 .port_ops = &ahci_ops,
297 },
298};
299
300static const struct pci_device_id ahci_pci_tbl[] = {
301 /* Intel */
302 { PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
303 board_ahci }, /* ICH6 */
304 { PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
305 board_ahci }, /* ICH6M */
306 { PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
307 board_ahci }, /* ICH7 */
308 { PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
309 board_ahci }, /* ICH7M */
310 { PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
311 board_ahci }, /* ICH7R */
312 { PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
313 board_ahci }, /* ULi M5288 */
314 { PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
315 board_ahci }, /* ESB2 */
316 { PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
317 board_ahci }, /* ESB2 */
318 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
319 board_ahci }, /* ESB2 */
320 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
321 board_ahci }, /* ICH7-M DH */
322 { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
323 board_ahci }, /* ICH8 */
324 { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
325 board_ahci }, /* ICH8 */
326 { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
327 board_ahci }, /* ICH8 */
328 { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
329 board_ahci }, /* ICH8M */
330 { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
331 board_ahci }, /* ICH8M */
332
333 /* JMicron */
334 { 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
335 board_ahci }, /* JMicron JMB360 */
336 { 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
337 board_ahci }, /* JMicron JMB361 */
338 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
339 board_ahci }, /* JMicron JMB363 */
340 { 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
341 board_ahci }, /* JMicron JMB365 */
342 { 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
343 board_ahci }, /* JMicron JMB366 */
344
345 /* ATI */
346 { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
347 board_ahci }, /* ATI SB600 non-raid */
348 { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
349 board_ahci }, /* ATI SB600 raid */
350
351 /* VIA */
352 { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
353 board_ahci_vt8251 }, /* VIA VT8251 */
354
355 /* NVIDIA */
356 { PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
357 board_ahci }, /* MCP65 */
358 { PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
359 board_ahci }, /* MCP65 */
360 { PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
361 board_ahci }, /* MCP65 */
362 { PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
363 board_ahci }, /* MCP65 */
364
365 /* SiS */
366 { PCI_VENDOR_ID_SI, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
367 board_ahci }, /* SiS 966 */
368 { PCI_VENDOR_ID_SI, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
369 board_ahci }, /* SiS 966 */
370 { PCI_VENDOR_ID_SI, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
371 board_ahci }, /* SiS 968 */
372
373 { } /* terminate list */
374};
375
376
377static struct pci_driver ahci_pci_driver = {
378 .name = DRV_NAME,
379 .id_table = ahci_pci_tbl,
380 .probe = ahci_init_one,
381 .suspend = ahci_pci_device_suspend,
382 .resume = ahci_pci_device_resume,
383 .remove = ahci_remove_one,
384};
385
386
387static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
388{
389 return base + 0x100 + (port * 0x80);
390}
391
392static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
393{
394 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
395}
396
397static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
398{
399 unsigned int sc_reg;
400
401 switch (sc_reg_in) {
402 case SCR_STATUS: sc_reg = 0; break;
403 case SCR_CONTROL: sc_reg = 1; break;
404 case SCR_ERROR: sc_reg = 2; break;
405 case SCR_ACTIVE: sc_reg = 3; break;
406 default:
407 return 0xffffffffU;
408 }
409
410 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
411}
412
413
414static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
415 u32 val)
416{
417 unsigned int sc_reg;
418
419 switch (sc_reg_in) {
420 case SCR_STATUS: sc_reg = 0; break;
421 case SCR_CONTROL: sc_reg = 1; break;
422 case SCR_ERROR: sc_reg = 2; break;
423 case SCR_ACTIVE: sc_reg = 3; break;
424 default:
425 return;
426 }
427
428 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
429}
430
431static void ahci_start_engine(void __iomem *port_mmio)
432{
433 u32 tmp;
434
435 /* start DMA */
436 tmp = readl(port_mmio + PORT_CMD);
437 tmp |= PORT_CMD_START;
438 writel(tmp, port_mmio + PORT_CMD);
439 readl(port_mmio + PORT_CMD); /* flush */
440}
441
442static int ahci_stop_engine(void __iomem *port_mmio)
443{
444 u32 tmp;
445
446 tmp = readl(port_mmio + PORT_CMD);
447
448 /* check if the HBA is idle */
449 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
450 return 0;
451
452 /* setting HBA to idle */
453 tmp &= ~PORT_CMD_START;
454 writel(tmp, port_mmio + PORT_CMD);
455
456 /* wait for engine to stop. This could be as long as 500 msec */
457 tmp = ata_wait_register(port_mmio + PORT_CMD,
458 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
459 if (tmp & PORT_CMD_LIST_ON)
460 return -EIO;
461
462 return 0;
463}
464
465static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
466 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
467{
468 u32 tmp;
469
470 /* set FIS registers */
471 if (cap & HOST_CAP_64)
472 writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
473 writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
474
475 if (cap & HOST_CAP_64)
476 writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
477 writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
478
479 /* enable FIS reception */
480 tmp = readl(port_mmio + PORT_CMD);
481 tmp |= PORT_CMD_FIS_RX;
482 writel(tmp, port_mmio + PORT_CMD);
483
484 /* flush */
485 readl(port_mmio + PORT_CMD);
486}
487
488static int ahci_stop_fis_rx(void __iomem *port_mmio)
489{
490 u32 tmp;
491
492 /* disable FIS reception */
493 tmp = readl(port_mmio + PORT_CMD);
494 tmp &= ~PORT_CMD_FIS_RX;
495 writel(tmp, port_mmio + PORT_CMD);
496
497 /* wait for completion, spec says 500ms, give it 1000 */
498 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
499 PORT_CMD_FIS_ON, 10, 1000);
500 if (tmp & PORT_CMD_FIS_ON)
501 return -EBUSY;
502
503 return 0;
504}
505
506static void ahci_power_up(void __iomem *port_mmio, u32 cap)
507{
508 u32 cmd;
509
510 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
511
512 /* spin up device */
513 if (cap & HOST_CAP_SSS) {
514 cmd |= PORT_CMD_SPIN_UP;
515 writel(cmd, port_mmio + PORT_CMD);
516 }
517
518 /* wake up link */
519 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
520}
521
522static void ahci_power_down(void __iomem *port_mmio, u32 cap)
523{
524 u32 cmd, scontrol;
525
526 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
527
528 if (cap & HOST_CAP_SSC) {
529 /* enable transitions to slumber mode */
530 scontrol = readl(port_mmio + PORT_SCR_CTL);
531 if ((scontrol & 0x0f00) > 0x100) {
532 scontrol &= ~0xf00;
533 writel(scontrol, port_mmio + PORT_SCR_CTL);
534 }
535
536 /* put device into slumber mode */
537 writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
538
539 /* wait for the transition to complete */
540 ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
541 PORT_CMD_ICC_SLUMBER, 1, 50);
542 }
543
544 /* put device into listen mode */
545 if (cap & HOST_CAP_SSS) {
546 /* first set PxSCTL.DET to 0 */
547 scontrol = readl(port_mmio + PORT_SCR_CTL);
548 scontrol &= ~0xf;
549 writel(scontrol, port_mmio + PORT_SCR_CTL);
550
551 /* then set PxCMD.SUD to 0 */
552 cmd &= ~PORT_CMD_SPIN_UP;
553 writel(cmd, port_mmio + PORT_CMD);
554 }
555}
556
557static void ahci_init_port(void __iomem *port_mmio, u32 cap,
558 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
559{
560 /* power up */
561 ahci_power_up(port_mmio, cap);
562
563 /* enable FIS reception */
564 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
565
566 /* enable DMA */
567 ahci_start_engine(port_mmio);
568}
569
570static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
571{
572 int rc;
573
574 /* disable DMA */
575 rc = ahci_stop_engine(port_mmio);
576 if (rc) {
577 *emsg = "failed to stop engine";
578 return rc;
579 }
580
581 /* disable FIS reception */
582 rc = ahci_stop_fis_rx(port_mmio);
583 if (rc) {
584 *emsg = "failed to stop FIS RX";
585 return rc;
586 }
587
588 /* put device into slumber mode */
589 ahci_power_down(port_mmio, cap);
590
591 return 0;
592}
593
594static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
595{
596 u32 cap_save, tmp;
597
598 cap_save = readl(mmio + HOST_CAP);
599 cap_save &= ( (1<<28) | (1<<17) );
600 cap_save |= (1 << 27);
601
602 /* global controller reset */
603 tmp = readl(mmio + HOST_CTL);
604 if ((tmp & HOST_RESET) == 0) {
605 writel(tmp | HOST_RESET, mmio + HOST_CTL);
606 readl(mmio + HOST_CTL); /* flush */
607 }
608
609 /* reset must complete within 1 second, or
610 * the hardware should be considered fried.
611 */
612 ssleep(1);
613
614 tmp = readl(mmio + HOST_CTL);
615 if (tmp & HOST_RESET) {
616 dev_printk(KERN_ERR, &pdev->dev,
617 "controller reset failed (0x%x)\n", tmp);
618 return -EIO;
619 }
620
621 writel(HOST_AHCI_EN, mmio + HOST_CTL);
622 (void) readl(mmio + HOST_CTL); /* flush */
623 writel(cap_save, mmio + HOST_CAP);
624 writel(0xf, mmio + HOST_PORTS_IMPL);
625 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
626
627 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
628 u16 tmp16;
629
630 /* configure PCS */
631 pci_read_config_word(pdev, 0x92, &tmp16);
632 tmp16 |= 0xf;
633 pci_write_config_word(pdev, 0x92, tmp16);
634 }
635
636 return 0;
637}
638
639static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
640 int n_ports, u32 cap)
641{
642 int i, rc;
643 u32 tmp;
644
645 for (i = 0; i < n_ports; i++) {
646 void __iomem *port_mmio = ahci_port_base(mmio, i);
647 const char *emsg = NULL;
648
649#if 0 /* BIOSen initialize this incorrectly */
650 if (!(hpriv->port_map & (1 << i)))
651 continue;
652#endif
653
654 /* make sure port is not active */
655 rc = ahci_deinit_port(port_mmio, cap, &emsg);
656 if (rc)
657 dev_printk(KERN_WARNING, &pdev->dev,
658 "%s (%d)\n", emsg, rc);
659
660 /* clear SError */
661 tmp = readl(port_mmio + PORT_SCR_ERR);
662 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
663 writel(tmp, port_mmio + PORT_SCR_ERR);
664
665 /* clear port IRQ */
666 tmp = readl(port_mmio + PORT_IRQ_STAT);
667 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
668 if (tmp)
669 writel(tmp, port_mmio + PORT_IRQ_STAT);
670
671 writel(1 << i, mmio + HOST_IRQ_STAT);
672 }
673
674 tmp = readl(mmio + HOST_CTL);
675 VPRINTK("HOST_CTL 0x%x\n", tmp);
676 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
677 tmp = readl(mmio + HOST_CTL);
678 VPRINTK("HOST_CTL 0x%x\n", tmp);
679}
680
681static unsigned int ahci_dev_classify(struct ata_port *ap)
682{
683 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
684 struct ata_taskfile tf;
685 u32 tmp;
686
687 tmp = readl(port_mmio + PORT_SIG);
688 tf.lbah = (tmp >> 24) & 0xff;
689 tf.lbam = (tmp >> 16) & 0xff;
690 tf.lbal = (tmp >> 8) & 0xff;
691 tf.nsect = (tmp) & 0xff;
692
693 return ata_dev_classify(&tf);
694}
695
696static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
697 u32 opts)
698{
699 dma_addr_t cmd_tbl_dma;
700
701 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
702
703 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
704 pp->cmd_slot[tag].status = 0;
705 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
706 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
707}
708
709static int ahci_clo(struct ata_port *ap)
710{
711 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
712 struct ahci_host_priv *hpriv = ap->host->private_data;
713 u32 tmp;
714
715 if (!(hpriv->cap & HOST_CAP_CLO))
716 return -EOPNOTSUPP;
717
718 tmp = readl(port_mmio + PORT_CMD);
719 tmp |= PORT_CMD_CLO;
720 writel(tmp, port_mmio + PORT_CMD);
721
722 tmp = ata_wait_register(port_mmio + PORT_CMD,
723 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
724 if (tmp & PORT_CMD_CLO)
725 return -EIO;
726
727 return 0;
728}
729
730static int ahci_prereset(struct ata_port *ap)
731{
732 if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
733 (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
734 /* ATA_BUSY hasn't cleared, so send a CLO */
735 ahci_clo(ap);
736 }
737
738 return ata_std_prereset(ap);
739}
740
741static int ahci_softreset(struct ata_port *ap, unsigned int *class)
742{
743 struct ahci_port_priv *pp = ap->private_data;
744 void __iomem *mmio = ap->host->mmio_base;
745 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
746 const u32 cmd_fis_len = 5; /* five dwords */
747 const char *reason = NULL;
748 struct ata_taskfile tf;
749 u32 tmp;
750 u8 *fis;
751 int rc;
752
753 DPRINTK("ENTER\n");
754
755 if (ata_port_offline(ap)) {
756 DPRINTK("PHY reports no device\n");
757 *class = ATA_DEV_NONE;
758 return 0;
759 }
760
761 /* prepare for SRST (AHCI-1.1 10.4.1) */
762 rc = ahci_stop_engine(port_mmio);
763 if (rc) {
764 reason = "failed to stop engine";
765 goto fail_restart;
766 }
767
768 /* check BUSY/DRQ, perform Command List Override if necessary */
769 ahci_tf_read(ap, &tf);
770 if (tf.command & (ATA_BUSY | ATA_DRQ)) {
771 rc = ahci_clo(ap);
772
773 if (rc == -EOPNOTSUPP) {
774 reason = "port busy but CLO unavailable";
775 goto fail_restart;
776 } else if (rc) {
777 reason = "port busy but CLO failed";
778 goto fail_restart;
779 }
780 }
781
782 /* restart engine */
783 ahci_start_engine(port_mmio);
784
785 ata_tf_init(ap->device, &tf);
786 fis = pp->cmd_tbl;
787
788 /* issue the first D2H Register FIS */
789 ahci_fill_cmd_slot(pp, 0,
790 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
791
792 tf.ctl |= ATA_SRST;
793 ata_tf_to_fis(&tf, fis, 0);
794 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
795
796 writel(1, port_mmio + PORT_CMD_ISSUE);
797
798 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
799 if (tmp & 0x1) {
800 rc = -EIO;
801 reason = "1st FIS failed";
802 goto fail;
803 }
804
805 /* spec says at least 5us, but be generous and sleep for 1ms */
806 msleep(1);
807
808 /* issue the second D2H Register FIS */
809 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
810
811 tf.ctl &= ~ATA_SRST;
812 ata_tf_to_fis(&tf, fis, 0);
813 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
814
815 writel(1, port_mmio + PORT_CMD_ISSUE);
816 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
817
818 /* spec mandates ">= 2ms" before checking status.
819 * We wait 150ms, because that was the magic delay used for
820 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
821 * between when the ATA command register is written, and then
822 * status is checked. Because waiting for "a while" before
823 * checking status is fine, post SRST, we perform this magic
824 * delay here as well.
825 */
826 msleep(150);
827
828 *class = ATA_DEV_NONE;
829 if (ata_port_online(ap)) {
830 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
831 rc = -EIO;
832 reason = "device not ready";
833 goto fail;
834 }
835 *class = ahci_dev_classify(ap);
836 }
837
838 DPRINTK("EXIT, class=%u\n", *class);
839 return 0;
840
841 fail_restart:
842 ahci_start_engine(port_mmio);
843 fail:
844 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
845 return rc;
846}
847
848static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
849{
850 struct ahci_port_priv *pp = ap->private_data;
851 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
852 struct ata_taskfile tf;
853 void __iomem *mmio = ap->host->mmio_base;
854 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
855 int rc;
856
857 DPRINTK("ENTER\n");
858
859 ahci_stop_engine(port_mmio);
860
861 /* clear D2H reception area to properly wait for D2H FIS */
862 ata_tf_init(ap->device, &tf);
863 tf.command = 0xff;
864 ata_tf_to_fis(&tf, d2h_fis, 0);
865
866 rc = sata_std_hardreset(ap, class);
867
868 ahci_start_engine(port_mmio);
869
870 if (rc == 0 && ata_port_online(ap))
871 *class = ahci_dev_classify(ap);
872 if (*class == ATA_DEV_UNKNOWN)
873 *class = ATA_DEV_NONE;
874
875 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
876 return rc;
877}
878
879static void ahci_postreset(struct ata_port *ap, unsigned int *class)
880{
881 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
882 u32 new_tmp, tmp;
883
884 ata_std_postreset(ap, class);
885
886 /* Make sure port's ATAPI bit is set appropriately */
887 new_tmp = tmp = readl(port_mmio + PORT_CMD);
888 if (*class == ATA_DEV_ATAPI)
889 new_tmp |= PORT_CMD_ATAPI;
890 else
891 new_tmp &= ~PORT_CMD_ATAPI;
892 if (new_tmp != tmp) {
893 writel(new_tmp, port_mmio + PORT_CMD);
894 readl(port_mmio + PORT_CMD); /* flush */
895 }
896}
897
898static u8 ahci_check_status(struct ata_port *ap)
899{
900 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
901
902 return readl(mmio + PORT_TFDATA) & 0xFF;
903}
904
905static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
906{
907 struct ahci_port_priv *pp = ap->private_data;
908 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
909
910 ata_tf_from_fis(d2h_fis, tf);
911}
912
913static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
914{
915 struct scatterlist *sg;
916 struct ahci_sg *ahci_sg;
917 unsigned int n_sg = 0;
918
919 VPRINTK("ENTER\n");
920
921 /*
922 * Next, the S/G list.
923 */
924 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
925 ata_for_each_sg(sg, qc) {
926 dma_addr_t addr = sg_dma_address(sg);
927 u32 sg_len = sg_dma_len(sg);
928
929 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
930 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
931 ahci_sg->flags_size = cpu_to_le32(sg_len - 1); /* DBC is zero-based */
932
933 ahci_sg++;
934 n_sg++;
935 }
936
937 return n_sg;
938}
939
940static void ahci_qc_prep(struct ata_queued_cmd *qc)
941{
942 struct ata_port *ap = qc->ap;
943 struct ahci_port_priv *pp = ap->private_data;
944 int is_atapi = is_atapi_taskfile(&qc->tf);
945 void *cmd_tbl;
946 u32 opts;
947 const u32 cmd_fis_len = 5; /* five dwords */
948 unsigned int n_elem;
949
950 /*
951 * Fill in command table information. First, the header,
952 * a SATA Register - Host to Device command FIS.
953 */
954 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
955
956 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
957 if (is_atapi) {
958 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
959 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
960 }
961
962 n_elem = 0;
963 if (qc->flags & ATA_QCFLAG_DMAMAP)
964 n_elem = ahci_fill_sg(qc, cmd_tbl);
965
966 /*
967 * Fill in command slot information.
968 */
969 opts = cmd_fis_len | n_elem << 16;
970 if (qc->tf.flags & ATA_TFLAG_WRITE)
971 opts |= AHCI_CMD_WRITE;
972 if (is_atapi)
973 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
974
975 ahci_fill_cmd_slot(pp, qc->tag, opts);
976}
977
978static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
979{
980 struct ahci_port_priv *pp = ap->private_data;
981 struct ata_eh_info *ehi = &ap->eh_info;
982 unsigned int err_mask = 0, action = 0;
983 struct ata_queued_cmd *qc;
984 u32 serror;
985
986 ata_ehi_clear_desc(ehi);
987
988 /* AHCI needs SError cleared; otherwise, it might lock up */
989 serror = ahci_scr_read(ap, SCR_ERROR);
990 ahci_scr_write(ap, SCR_ERROR, serror);
991
992 /* analyze @irq_stat */
993 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
994
995 if (irq_stat & PORT_IRQ_TF_ERR)
996 err_mask |= AC_ERR_DEV;
997
998 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
999 err_mask |= AC_ERR_HOST_BUS;
1000 action |= ATA_EH_SOFTRESET;
1001 }
1002
1003 if (irq_stat & PORT_IRQ_IF_ERR) {
1004 err_mask |= AC_ERR_ATA_BUS;
1005 action |= ATA_EH_SOFTRESET;
1006 ata_ehi_push_desc(ehi, ", interface fatal error");
1007 }
1008
1009 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1010 ata_ehi_hotplugged(ehi);
1011 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
1012 "connection status changed" : "PHY RDY changed");
1013 }
1014
1015 if (irq_stat & PORT_IRQ_UNK_FIS) {
1016 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1017
1018 err_mask |= AC_ERR_HSM;
1019 action |= ATA_EH_SOFTRESET;
1020 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
1021 unk[0], unk[1], unk[2], unk[3]);
1022 }
1023
1024 /* okay, let's hand over to EH */
1025 ehi->serror |= serror;
1026 ehi->action |= action;
1027
1028 qc = ata_qc_from_tag(ap, ap->active_tag);
1029 if (qc)
1030 qc->err_mask |= err_mask;
1031 else
1032 ehi->err_mask |= err_mask;
1033
1034 if (irq_stat & PORT_IRQ_FREEZE)
1035 ata_port_freeze(ap);
1036 else
1037 ata_port_abort(ap);
1038}
1039
1040static void ahci_host_intr(struct ata_port *ap)
1041{
1042 void __iomem *mmio = ap->host->mmio_base;
1043 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1044 struct ata_eh_info *ehi = &ap->eh_info;
1045 u32 status, qc_active;
1046 int rc;
1047
1048 status = readl(port_mmio + PORT_IRQ_STAT);
1049 writel(status, port_mmio + PORT_IRQ_STAT);
1050
1051 if (unlikely(status & PORT_IRQ_ERROR)) {
1052 ahci_error_intr(ap, status);
1053 return;
1054 }
1055
1056 if (ap->sactive)
1057 qc_active = readl(port_mmio + PORT_SCR_ACT);
1058 else
1059 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1060
1061 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
1062 if (rc > 0)
1063 return;
1064 if (rc < 0) {
1065 ehi->err_mask |= AC_ERR_HSM;
1066 ehi->action |= ATA_EH_SOFTRESET;
1067 ata_port_freeze(ap);
1068 return;
1069 }
1070
1071 /* hmmm... a spurious interrupt */
1072
1073 /* some devices send D2H reg with I bit set during NCQ command phase */
1074 if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
1075 return;
1076
1077 /* ignore interim PIO Setup FIS interrupts */
1078 if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
1079 return;
1080
1081 if (ata_ratelimit())
1082 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
1083 "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
1084 status, ap->active_tag, ap->sactive);
1085}
1086
1087static void ahci_irq_clear(struct ata_port *ap)
1088{
1089 /* TODO */
1090}
1091
1092static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1093{
1094 struct ata_host *host = dev_instance;
1095 struct ahci_host_priv *hpriv;
1096 unsigned int i, handled = 0;
1097 void __iomem *mmio;
1098 u32 irq_stat, irq_ack = 0;
1099
1100 VPRINTK("ENTER\n");
1101
1102 hpriv = host->private_data;
1103 mmio = host->mmio_base;
1104
1105 /* sigh. 0xffffffff is a valid return from h/w */
1106 irq_stat = readl(mmio + HOST_IRQ_STAT);
1107 irq_stat &= hpriv->port_map;
1108 if (!irq_stat)
1109 return IRQ_NONE;
1110
1111 spin_lock(&host->lock);
1112
1113 for (i = 0; i < host->n_ports; i++) {
1114 struct ata_port *ap;
1115
1116 if (!(irq_stat & (1 << i)))
1117 continue;
1118
1119 ap = host->ports[i];
1120 if (ap) {
1121 ahci_host_intr(ap);
1122 VPRINTK("port %u\n", i);
1123 } else {
1124 VPRINTK("port %u (no irq)\n", i);
1125 if (ata_ratelimit())
1126 dev_printk(KERN_WARNING, host->dev,
1127 "interrupt on disabled port %u\n", i);
1128 }
1129
1130 irq_ack |= (1 << i);
1131 }
1132
1133 if (irq_ack) {
1134 writel(irq_ack, mmio + HOST_IRQ_STAT);
1135 handled = 1;
1136 }
1137
1138 spin_unlock(&host->lock);
1139
1140 VPRINTK("EXIT\n");
1141
1142 return IRQ_RETVAL(handled);
1143}
1144
1145static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1146{
1147 struct ata_port *ap = qc->ap;
1148 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1149
1150 if (qc->tf.protocol == ATA_PROT_NCQ)
1151 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1152 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1153 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1154
1155 return 0;
1156}
1157
1158static void ahci_freeze(struct ata_port *ap)
1159{
1160 void __iomem *mmio = ap->host->mmio_base;
1161 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1162
1163 /* turn IRQ off */
1164 writel(0, port_mmio + PORT_IRQ_MASK);
1165}
1166
1167static void ahci_thaw(struct ata_port *ap)
1168{
1169 void __iomem *mmio = ap->host->mmio_base;
1170 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1171 u32 tmp;
1172
1173 /* clear IRQ */
1174 tmp = readl(port_mmio + PORT_IRQ_STAT);
1175 writel(tmp, port_mmio + PORT_IRQ_STAT);
1176 writel(1 << ap->id, mmio + HOST_IRQ_STAT);
1177
1178 /* turn IRQ back on */
1179 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1180}
1181
1182static void ahci_error_handler(struct ata_port *ap)
1183{
1184 void __iomem *mmio = ap->host->mmio_base;
1185 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1186
1187 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1188 /* restart engine */
1189 ahci_stop_engine(port_mmio);
1190 ahci_start_engine(port_mmio);
1191 }
1192
1193 /* perform recovery */
1194 ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
1195 ahci_postreset);
1196}
1197
1198static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1199{
1200 struct ata_port *ap = qc->ap;
1201 void __iomem *mmio = ap->host->mmio_base;
1202 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1203
1204 if (qc->flags & ATA_QCFLAG_FAILED)
1205 qc->err_mask |= AC_ERR_OTHER;
1206
1207 if (qc->err_mask) {
1208 /* make DMA engine forget about the failed command */
1209 ahci_stop_engine(port_mmio);
1210 ahci_start_engine(port_mmio);
1211 }
1212}
1213
1214static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1215{
1216 struct ahci_host_priv *hpriv = ap->host->private_data;
1217 struct ahci_port_priv *pp = ap->private_data;
1218 void __iomem *mmio = ap->host->mmio_base;
1219 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1220 const char *emsg = NULL;
1221 int rc;
1222
1223 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1224 if (rc) {
1225 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
1226 ahci_init_port(port_mmio, hpriv->cap,
1227 pp->cmd_slot_dma, pp->rx_fis_dma);
1228 }
1229
1230 return rc;
1231}
1232
1233static int ahci_port_resume(struct ata_port *ap)
1234{
1235 struct ahci_port_priv *pp = ap->private_data;
1236 struct ahci_host_priv *hpriv = ap->host->private_data;
1237 void __iomem *mmio = ap->host->mmio_base;
1238 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1239
1240 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1241
1242 return 0;
1243}
1244
1245static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1246{
1247 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1248 void __iomem *mmio = host->mmio_base;
1249 u32 ctl;
1250
1251 if (mesg.event == PM_EVENT_SUSPEND) {
1252 /* AHCI spec rev1.1 section 8.3.3:
1253 * Software must disable interrupts prior to requesting a
1254 * transition of the HBA to D3 state.
1255 */
1256 ctl = readl(mmio + HOST_CTL);
1257 ctl &= ~HOST_IRQ_EN;
1258 writel(ctl, mmio + HOST_CTL);
1259 readl(mmio + HOST_CTL); /* flush */
1260 }
1261
1262 return ata_pci_device_suspend(pdev, mesg);
1263}
1264
1265static int ahci_pci_device_resume(struct pci_dev *pdev)
1266{
1267 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1268 struct ahci_host_priv *hpriv = host->private_data;
1269 void __iomem *mmio = host->mmio_base;
1270 int rc;
1271
1272 ata_pci_device_do_resume(pdev);
1273
1274 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1275 rc = ahci_reset_controller(mmio, pdev);
1276 if (rc)
1277 return rc;
1278
1279 ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
1280 }
1281
1282 ata_host_resume(host);
1283
1284 return 0;
1285}
1286
1287static int ahci_port_start(struct ata_port *ap)
1288{
1289 struct device *dev = ap->host->dev;
1290 struct ahci_host_priv *hpriv = ap->host->private_data;
1291 struct ahci_port_priv *pp;
1292 void __iomem *mmio = ap->host->mmio_base;
1293 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1294 void *mem;
1295 dma_addr_t mem_dma;
1296 int rc;
1297
1298 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
1299 if (!pp)
1300 return -ENOMEM;
1301 memset(pp, 0, sizeof(*pp));
1302
1303 rc = ata_pad_alloc(ap, dev);
1304 if (rc) {
1305 kfree(pp);
1306 return rc;
1307 }
1308
1309 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
1310 if (!mem) {
1311 ata_pad_free(ap, dev);
1312 kfree(pp);
1313 return -ENOMEM;
1314 }
1315 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
1316
1317 /*
1318 * First item in chunk of DMA memory: 32-slot command table,
1319 * 32 bytes each in size
1320 */
1321 pp->cmd_slot = mem;
1322 pp->cmd_slot_dma = mem_dma;
1323
1324 mem += AHCI_CMD_SLOT_SZ;
1325 mem_dma += AHCI_CMD_SLOT_SZ;
1326
1327 /*
1328 * Second item: Received-FIS area
1329 */
1330 pp->rx_fis = mem;
1331 pp->rx_fis_dma = mem_dma;
1332
1333 mem += AHCI_RX_FIS_SZ;
1334 mem_dma += AHCI_RX_FIS_SZ;
1335
1336 /*
1337 * Third item: data area for storing a single command
1338 * and its scatter-gather table
1339 */
1340 pp->cmd_tbl = mem;
1341 pp->cmd_tbl_dma = mem_dma;
1342
1343 ap->private_data = pp;
1344
1345 /* initialize port */
1346 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1347
1348 return 0;
1349}
1350
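/* A size sketch for the carve-up above, using the constants from the enum
 * at the top of this file:
 *   command slots:  AHCI_CMD_SLOT_SZ   = 32 * 32              =  1024 bytes
 *   rx FIS area:    AHCI_RX_FIS_SZ                            =   256 bytes
 *   command tables: AHCI_CMD_TBL_AR_SZ = 32 * (0x80 + 168*16) = 90112 bytes
 * so AHCI_PORT_PRIV_DMA_SZ works out to 91392 bytes per port, allocated as
 * a single coherent chunk and carved up in the order shown above.
 */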
1351static void ahci_port_stop(struct ata_port *ap)
1352{
1353 struct device *dev = ap->host->dev;
1354 struct ahci_host_priv *hpriv = ap->host->private_data;
1355 struct ahci_port_priv *pp = ap->private_data;
1356 void __iomem *mmio = ap->host->mmio_base;
1357 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1358 const char *emsg = NULL;
1359 int rc;
1360
1361 /* de-initialize port */
1362 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1363 if (rc)
1364 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1365
1366 ap->private_data = NULL;
1367 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
1368 pp->cmd_slot, pp->cmd_slot_dma);
1369 ata_pad_free(ap, dev);
1370 kfree(pp);
1371}
1372
1373static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1374 unsigned int port_idx)
1375{
1376 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1377 base = ahci_port_base_ul(base, port_idx);
1378 VPRINTK("base now==0x%lx\n", base);
1379
1380 port->cmd_addr = base;
1381 port->scr_addr = base + PORT_SCR;
1382
1383 VPRINTK("EXIT\n");
1384}
1385
1386static int ahci_host_init(struct ata_probe_ent *probe_ent)
1387{
1388 struct ahci_host_priv *hpriv = probe_ent->private_data;
1389 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1390 void __iomem *mmio = probe_ent->mmio_base;
1391 unsigned int i, using_dac;
1392 int rc;
1393
1394 rc = ahci_reset_controller(mmio, pdev);
1395 if (rc)
1396 return rc;
1397
1398 hpriv->cap = readl(mmio + HOST_CAP);
1399 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1400 probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
1401
1402 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1403 hpriv->cap, hpriv->port_map, probe_ent->n_ports);
1404
1405 using_dac = hpriv->cap & HOST_CAP_64;
1406 if (using_dac &&
1407 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1408 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1409 if (rc) {
1410 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1411 if (rc) {
1412 dev_printk(KERN_ERR, &pdev->dev,
1413 "64-bit DMA enable failed\n");
1414 return rc;
1415 }
1416 }
1417 } else {
1418 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1419 if (rc) {
1420 dev_printk(KERN_ERR, &pdev->dev,
1421 "32-bit DMA enable failed\n");
1422 return rc;
1423 }
1424 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1425 if (rc) {
1426 dev_printk(KERN_ERR, &pdev->dev,
1427 "32-bit consistent DMA enable failed\n");
1428 return rc;
1429 }
1430 }
1431
1432 for (i = 0; i < probe_ent->n_ports; i++)
1433 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
1434
1435 ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
1436
1437 pci_set_master(pdev);
1438
1439 return 0;
1440}
1441
1442static void ahci_print_info(struct ata_probe_ent *probe_ent)
1443{
1444 struct ahci_host_priv *hpriv = probe_ent->private_data;
1445 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1446 void __iomem *mmio = probe_ent->mmio_base;
1447 u32 vers, cap, impl, speed;
1448 const char *speed_s;
1449 u16 cc;
1450 const char *scc_s;
1451
1452 vers = readl(mmio + HOST_VERSION);
1453 cap = hpriv->cap;
1454 impl = hpriv->port_map;
1455
1456 speed = (cap >> 20) & 0xf;
1457 if (speed == 1)
1458 speed_s = "1.5";
1459 else if (speed == 2)
1460 speed_s = "3";
1461 else
1462 speed_s = "?";
1463
1464 pci_read_config_word(pdev, 0x0a, &cc);
1465 if (cc == 0x0101)
1466 scc_s = "IDE";
1467 else if (cc == 0x0106)
1468 scc_s = "SATA";
1469 else if (cc == 0x0104)
1470 scc_s = "RAID";
1471 else
1472 scc_s = "unknown";
1473
1474 dev_printk(KERN_INFO, &pdev->dev,
1475 "AHCI %02x%02x.%02x%02x "
1476 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1477 ,
1478
1479 (vers >> 24) & 0xff,
1480 (vers >> 16) & 0xff,
1481 (vers >> 8) & 0xff,
1482 vers & 0xff,
1483
1484 ((cap >> 8) & 0x1f) + 1,
1485 (cap & 0x1f) + 1,
1486 speed_s,
1487 impl,
1488 scc_s);
1489
1490 dev_printk(KERN_INFO, &pdev->dev,
1491 "flags: "
1492 "%s%s%s%s%s%s"
1493 "%s%s%s%s%s%s%s\n"
1494 ,
1495
1496 cap & (1 << 31) ? "64bit " : "",
1497 cap & (1 << 30) ? "ncq " : "",
1498 cap & (1 << 28) ? "ilck " : "",
1499 cap & (1 << 27) ? "stag " : "",
1500 cap & (1 << 26) ? "pm " : "",
1501 cap & (1 << 25) ? "led " : "",
1502
1503 cap & (1 << 24) ? "clo " : "",
1504 cap & (1 << 19) ? "nz " : "",
1505 cap & (1 << 18) ? "only " : "",
1506 cap & (1 << 17) ? "pmp " : "",
1507 cap & (1 << 15) ? "pio " : "",
1508 cap & (1 << 14) ? "slum " : "",
1509 cap & (1 << 13) ? "part " : ""
1510 );
1511}
1512
1513static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1514{
1515 static int printed_version;
1516 struct ata_probe_ent *probe_ent = NULL;
1517 struct ahci_host_priv *hpriv;
1518 unsigned long base;
1519 void __iomem *mmio_base;
1520 unsigned int board_idx = (unsigned int) ent->driver_data;
1521 int have_msi, pci_dev_busy = 0;
1522 int rc;
1523
1524 VPRINTK("ENTER\n");
1525
1526 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
1527
1528 if (!printed_version++)
1529 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1530
1531 /* JMicron-specific fixup: make sure we're in AHCI mode */
1532 /* This is protected from races with ata_jmicron by the pci probe
1533 locking */
1534 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
1535 /* AHCI enable, AHCI on function 0 */
1536 pci_write_config_byte(pdev, 0x41, 0xa1);
1537 /* Function 1 is the PATA controller */
1538 if (PCI_FUNC(pdev->devfn))
1539 return -ENODEV;
1540 }
1541
1542 rc = pci_enable_device(pdev);
1543 if (rc)
1544 return rc;
1545
1546 rc = pci_request_regions(pdev, DRV_NAME);
1547 if (rc) {
1548 pci_dev_busy = 1;
1549 goto err_out;
1550 }
1551
1552 if (pci_enable_msi(pdev) == 0)
1553 have_msi = 1;
1554 else {
1555 pci_intx(pdev, 1);
1556 have_msi = 0;
1557 }
1558
1559 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1560 if (probe_ent == NULL) {
1561 rc = -ENOMEM;
1562 goto err_out_msi;
1563 }
1564
1565 memset(probe_ent, 0, sizeof(*probe_ent));
1566 probe_ent->dev = pci_dev_to_dev(pdev);
1567 INIT_LIST_HEAD(&probe_ent->node);
1568
1569 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1570 if (mmio_base == NULL) {
1571 rc = -ENOMEM;
1572 goto err_out_free_ent;
1573 }
1574 base = (unsigned long) mmio_base;
1575
1576 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1577 if (!hpriv) {
1578 rc = -ENOMEM;
1579 goto err_out_iounmap;
1580 }
1581 memset(hpriv, 0, sizeof(*hpriv));
1582
1583 probe_ent->sht = ahci_port_info[board_idx].sht;
1584 probe_ent->port_flags = ahci_port_info[board_idx].flags;
1585 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1586 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1587 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
1588
1589 probe_ent->irq = pdev->irq;
1590 probe_ent->irq_flags = IRQF_SHARED;
1591 probe_ent->mmio_base = mmio_base;
1592 probe_ent->private_data = hpriv;
1593
1594 if (have_msi)
1595 hpriv->flags |= AHCI_FLAG_MSI;
1596
1597 /* initialize adapter */
1598 rc = ahci_host_init(probe_ent);
1599 if (rc)
1600 goto err_out_hpriv;
1601
1602 if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
1603 (hpriv->cap & HOST_CAP_NCQ))
1604 probe_ent->port_flags |= ATA_FLAG_NCQ;
1605
1606 ahci_print_info(probe_ent);
1607
1608 /* FIXME: check ata_device_add return value */
1609 ata_device_add(probe_ent);
1610 kfree(probe_ent);
1611
1612 return 0;
1613
1614err_out_hpriv:
1615 kfree(hpriv);
1616err_out_iounmap:
1617 pci_iounmap(pdev, mmio_base);
1618err_out_free_ent:
1619 kfree(probe_ent);
1620err_out_msi:
1621 if (have_msi)
1622 pci_disable_msi(pdev);
1623 else
1624 pci_intx(pdev, 0);
1625 pci_release_regions(pdev);
1626err_out:
1627 if (!pci_dev_busy)
1628 pci_disable_device(pdev);
1629 return rc;
1630}
1631
1632static void ahci_remove_one (struct pci_dev *pdev)
1633{
1634 struct device *dev = pci_dev_to_dev(pdev);
1635 struct ata_host *host = dev_get_drvdata(dev);
1636 struct ahci_host_priv *hpriv = host->private_data;
1637 unsigned int i;
1638 int have_msi;
1639
1640 for (i = 0; i < host->n_ports; i++)
1641 ata_port_detach(host->ports[i]);
1642
1643 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1644 free_irq(host->irq, host);
1645
1646 for (i = 0; i < host->n_ports; i++) {
1647 struct ata_port *ap = host->ports[i];
1648
1649 ata_scsi_release(ap->scsi_host);
1650 scsi_host_put(ap->scsi_host);
1651 }
1652
1653 kfree(hpriv);
1654 pci_iounmap(pdev, host->mmio_base);
1655 kfree(host);
1656
1657 if (have_msi)
1658 pci_disable_msi(pdev);
1659 else
1660 pci_intx(pdev, 0);
1661 pci_release_regions(pdev);
1662 pci_disable_device(pdev);
1663 dev_set_drvdata(dev, NULL);
1664}
1665
1666static int __init ahci_init(void)
1667{
1668 return pci_register_driver(&ahci_pci_driver);
1669}
1670
1671static void __exit ahci_exit(void)
1672{
1673 pci_unregister_driver(&ahci_pci_driver);
1674}
1675
1676
1677MODULE_AUTHOR("Jeff Garzik");
1678MODULE_DESCRIPTION("AHCI SATA low-level driver");
1679MODULE_LICENSE("GPL");
1680MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1681MODULE_VERSION(DRV_VERSION);
1682
1683module_init(ahci_init);
1684module_exit(ahci_exit);
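For illustration, two low-level idioms from ahci.c above can be distilled
into a standalone sketch: the fixed per-port MMIO stride behind
ahci_port_base(), and the two-step split of a DMA address for the
*_ADDR / *_ADDR_HI register pairs. A minimal user-space rendition follows
(the demo_* names are hypothetical; only the AHCI 1.x register layout is
assumed):

#include <stdint.h>
#include <stdio.h>

/* AHCI 1.x: port register blocks start 0x100 into ABAR, 0x80 apart */
static uintptr_t demo_port_base(uintptr_t abar, unsigned int port)
{
	return abar + 0x100 + (port * 0x80);
}

/* Split a DMA address into lo/hi words. Shifting by 16 twice, as the
 * driver does, stays well-defined even when dma_addr_t is only 32 bits
 * wide, where a single ">> 32" would be undefined behaviour. */
static void demo_split_dma(uint64_t dma, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(dma & 0xffffffff);
	*hi = (uint32_t)((dma >> 16) >> 16);
}

int main(void)
{
	uint32_t lo, hi;

	demo_split_dma(0x123456789abcdef0ULL, &lo, &hi);
	printf("port 3 regs at %#lx, hi=%#x lo=%#x\n",
	       (unsigned long)demo_port_base(0xf0000000UL, 3),
	       (unsigned int)hi, (unsigned int)lo);
	return 0;
}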
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
new file mode 100644
index 000000000000..22b2dba90b9a
--- /dev/null
+++ b/drivers/ata/ata_piix.c
@@ -0,0 +1,1010 @@
1/*
2 * ata_piix.c - Intel PATA/SATA controllers
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 *
9 * Copyright 2003-2005 Red Hat Inc
10 * Copyright 2003-2005 Jeff Garzik
11 *
12 *
13 * Copyright header from piix.c:
14 *
15 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
16 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
17 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
18 *
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2, or (at your option)
23 * any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; see the file COPYING. If not, write to
32 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
33 *
34 *
35 * libata documentation is available via 'make {ps|pdf}docs',
36 * as Documentation/DocBook/libata.*
37 *
38 * Hardware documentation available at http://developer.intel.com/
39 *
40 * Documentation
41 * Publicly available from the Intel web site. Errata documentation
42 * is also publicly available. As an aid to anyone hacking on this
43 * driver, the list of relevant errata, going back to PIIX4, is
44 * below. Older device documentation is now a bit tricky to find.
45 *
46 * The chipsets all follow very much the same design. The original Triton
47 * series chipsets do _not_ support independent device timings, but this
48 * is fixed in Triton II. With the odd mobile exception the chips then
49 * change little except in gaining more modes until SATA arrives. This
50 * driver supports only the chips with independant timing (that is those
51 * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
52 * for the early chip drivers.
53 *
54 * Errata of note:
55 *
56 * Unfixable
57 * PIIX4 errata #9 - Only on ultra obscure hw
58 * ICH3 errata #13 - Not observed to affect real hw
59 * by Intel
60 *
61 * Things we must deal with
62 * PIIX4 errata #10 - BM IDE hang with non UDMA
63 * (must stop/start dma to recover)
64 * 440MX errata #15 - As PIIX4 errata #10
65 * PIIX4 errata #15 - Must not read control registers
66 * during a PIO transfer
67 * 440MX errata #13 - As PIIX4 errata #15
68 * ICH2 errata #21 - DMA mode 0 doesn't work right
69 * ICH0/1 errata #55 - As ICH2 errata #21
70 * ICH2 spec c #9 - Extra operations needed to handle
71 * drive hotswap [NOT YET SUPPORTED]
72 * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
73 * and must be dword aligned
74 * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
75 *
76 * Should have been BIOS fixed:
77 * 450NX: errata #19 - DMA hangs on old 450NX
78 * 450NX: errata #20 - DMA hangs on old 450NX
79 * 450NX: errata #25 - Corruption with DMA on old 450NX
80 * ICH3 errata #15 - IDE deadlock under high load
81 * (BIOS must set dev 31 fn 0 bit 23)
82 * ICH3 errata #18 - Don't use native mode
83 */
84
85#include <linux/kernel.h>
86#include <linux/module.h>
87#include <linux/pci.h>
88#include <linux/init.h>
89#include <linux/blkdev.h>
90#include <linux/delay.h>
91#include <linux/device.h>
92#include <scsi/scsi_host.h>
93#include <linux/libata.h>
94
95#define DRV_NAME "ata_piix"
96#define DRV_VERSION "2.00"
97
98enum {
99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
100 ICH5_PMR = 0x90, /* port mapping register */
101 ICH5_PCS = 0x92, /* port control and status */
102 PIIX_SCC = 0x0A, /* sub-class code register */
103
104 PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */
105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */
106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
108
109 /* combined mode. if set, PATA is channel 0.
110 * if clear, PATA is channel 1.
111 */
112 PIIX_PORT_ENABLED = (1 << 0),
113 PIIX_PORT_PRESENT = (1 << 4),
114
115 PIIX_80C_PRI = (1 << 5) | (1 << 4),
116 PIIX_80C_SEC = (1 << 7) | (1 << 6),
117
118 /* controller IDs */
119 piix4_pata = 0,
120 ich5_pata = 1,
121 ich5_sata = 2,
122 esb_sata = 3,
123 ich6_sata = 4,
124 ich6_sata_ahci = 5,
125 ich6m_sata_ahci = 6,
126 ich8_sata_ahci = 7,
127
128 /* constants for mapping table */
129 P0 = 0, /* port 0 */
130 P1 = 1, /* port 1 */
131 P2 = 2, /* port 2 */
132 P3 = 3, /* port 3 */
133 IDE = -1, /* IDE */
134 NA = -2, /* not available */
135 RV = -3, /* reserved */
136
137 PIIX_AHCI_DEVICE = 6,
138};
139
140struct piix_map_db {
141 const u32 mask;
142 const u16 port_enable;
143 const int present_shift;
144 const int map[][4];
145};
146
147struct piix_host_priv {
148 const int *map;
149 const struct piix_map_db *map_db;
150};
151
152static int piix_init_one (struct pci_dev *pdev,
153 const struct pci_device_id *ent);
154static void piix_host_stop(struct ata_host *host);
155static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
156static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
157static void piix_pata_error_handler(struct ata_port *ap);
158static void piix_sata_error_handler(struct ata_port *ap);
159
160static unsigned int in_module_init = 1;
161
162static const struct pci_device_id piix_pci_tbl[] = {
163#ifdef ATA_ENABLE_PATA
164 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
165 { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
166 { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
167 { 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
168#endif
169
170 /* NOTE: The following PCI ids must be kept in sync with the
171 * list in drivers/pci/quirks.c.
172 */
173
174 /* 82801EB (ICH5) */
175 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
176 /* 82801EB (ICH5) */
177 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
178 /* 6300ESB (ICH5 variant with broken PCS present bits) */
179 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
180 /* 6300ESB pretending to be RAID */
181 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
182 /* 82801FB/FW (ICH6/ICH6W) */
183 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
184 /* 82801FR/FRW (ICH6R/ICH6RW) */
185 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
186 /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
187 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
188 /* 82801GB/GR/GH (ICH7, identical to ICH6) */
189 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
190 /* 82801GBM/GHM (ICH7M, identical to ICH6M) */
191 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
192 /* Enterprise Southbridge 2 (where's the datasheet?) */
193 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
194 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
195 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
196 /* SATA Controller 2 IDE (ICH8, ditto) */
197 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
198 /* Mobile SATA Controller IDE (ICH8M, ditto) */
199 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
200
201 { } /* terminate list */
202};
203
204static struct pci_driver piix_pci_driver = {
205 .name = DRV_NAME,
206 .id_table = piix_pci_tbl,
207 .probe = piix_init_one,
208 .remove = ata_pci_remove_one,
209 .suspend = ata_pci_device_suspend,
210 .resume = ata_pci_device_resume,
211};
212
213static struct scsi_host_template piix_sht = {
214 .module = THIS_MODULE,
215 .name = DRV_NAME,
216 .ioctl = ata_scsi_ioctl,
217 .queuecommand = ata_scsi_queuecmd,
218 .can_queue = ATA_DEF_QUEUE,
219 .this_id = ATA_SHT_THIS_ID,
220 .sg_tablesize = LIBATA_MAX_PRD,
221 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
222 .emulated = ATA_SHT_EMULATED,
223 .use_clustering = ATA_SHT_USE_CLUSTERING,
224 .proc_name = DRV_NAME,
225 .dma_boundary = ATA_DMA_BOUNDARY,
226 .slave_configure = ata_scsi_slave_config,
227 .slave_destroy = ata_scsi_slave_destroy,
228 .bios_param = ata_std_bios_param,
229 .resume = ata_scsi_device_resume,
230 .suspend = ata_scsi_device_suspend,
231};
232
233static const struct ata_port_operations piix_pata_ops = {
234 .port_disable = ata_port_disable,
235 .set_piomode = piix_set_piomode,
236 .set_dmamode = piix_set_dmamode,
237 .mode_filter = ata_pci_default_filter,
238
239 .tf_load = ata_tf_load,
240 .tf_read = ata_tf_read,
241 .check_status = ata_check_status,
242 .exec_command = ata_exec_command,
243 .dev_select = ata_std_dev_select,
244
245 .bmdma_setup = ata_bmdma_setup,
246 .bmdma_start = ata_bmdma_start,
247 .bmdma_stop = ata_bmdma_stop,
248 .bmdma_status = ata_bmdma_status,
249 .qc_prep = ata_qc_prep,
250 .qc_issue = ata_qc_issue_prot,
251 .data_xfer = ata_pio_data_xfer,
252
253 .freeze = ata_bmdma_freeze,
254 .thaw = ata_bmdma_thaw,
255 .error_handler = piix_pata_error_handler,
256 .post_internal_cmd = ata_bmdma_post_internal_cmd,
257
258 .irq_handler = ata_interrupt,
259 .irq_clear = ata_bmdma_irq_clear,
260
261 .port_start = ata_port_start,
262 .port_stop = ata_port_stop,
263 .host_stop = piix_host_stop,
264};
265
266static const struct ata_port_operations piix_sata_ops = {
267 .port_disable = ata_port_disable,
268
269 .tf_load = ata_tf_load,
270 .tf_read = ata_tf_read,
271 .check_status = ata_check_status,
272 .exec_command = ata_exec_command,
273 .dev_select = ata_std_dev_select,
274
275 .bmdma_setup = ata_bmdma_setup,
276 .bmdma_start = ata_bmdma_start,
277 .bmdma_stop = ata_bmdma_stop,
278 .bmdma_status = ata_bmdma_status,
279 .qc_prep = ata_qc_prep,
280 .qc_issue = ata_qc_issue_prot,
281 .data_xfer = ata_pio_data_xfer,
282
283 .freeze = ata_bmdma_freeze,
284 .thaw = ata_bmdma_thaw,
285 .error_handler = piix_sata_error_handler,
286 .post_internal_cmd = ata_bmdma_post_internal_cmd,
287
288 .irq_handler = ata_interrupt,
289 .irq_clear = ata_bmdma_irq_clear,
290
291 .port_start = ata_port_start,
292 .port_stop = ata_port_stop,
293 .host_stop = piix_host_stop,
294};
295
296static const struct piix_map_db ich5_map_db = {
297 .mask = 0x7,
298 .port_enable = 0x3,
299 .present_shift = 4,
300 .map = {
301 /* PM PS SM SS MAP */
302 { P0, NA, P1, NA }, /* 000b */
303 { P1, NA, P0, NA }, /* 001b */
304 { RV, RV, RV, RV },
305 { RV, RV, RV, RV },
306 { P0, P1, IDE, IDE }, /* 100b */
307 { P1, P0, IDE, IDE }, /* 101b */
308 { IDE, IDE, P0, P1 }, /* 110b */
309 { IDE, IDE, P1, P0 }, /* 111b */
310 },
311};
312
313static const struct piix_map_db ich6_map_db = {
314 .mask = 0x3,
315 .port_enable = 0xf,
316 .present_shift = 4,
317 .map = {
318 /* PM PS SM SS MAP */
319 { P0, P2, P1, P3 }, /* 00b */
320 { IDE, IDE, P1, P3 }, /* 01b */
321 { P0, P2, IDE, IDE }, /* 10b */
322 { RV, RV, RV, RV },
323 },
324};
325
326static const struct piix_map_db ich6m_map_db = {
327 .mask = 0x3,
328 .port_enable = 0x5,
329 .present_shift = 4,
330 .map = {
331 /* PM PS SM SS MAP */
332 { P0, P2, RV, RV }, /* 00b */
333 { RV, RV, RV, RV },
334 { P0, P2, IDE, IDE }, /* 10b */
335 { RV, RV, RV, RV },
336 },
337};
338
339static const struct piix_map_db ich8_map_db = {
340 .mask = 0x3,
341 .port_enable = 0x3,
342 .present_shift = 8,
343 .map = {
344 /* PM PS SM SS MAP */
345 { P0, NA, P1, NA }, /* 00b (hardwired) */
346 { RV, RV, RV, RV },
347 { RV, RV, RV, RV }, /* 10b (never) */
348 { RV, RV, RV, RV },
349 },
350};
351
352static const struct piix_map_db *piix_map_db_table[] = {
353 [ich5_sata] = &ich5_map_db,
354 [esb_sata] = &ich5_map_db,
355 [ich6_sata] = &ich6_map_db,
356 [ich6_sata_ahci] = &ich6_map_db,
357 [ich6m_sata_ahci] = &ich6m_map_db,
358 [ich8_sata_ahci] = &ich8_map_db,
359};
360
361static struct ata_port_info piix_port_info[] = {
362 /* piix4_pata */
363 {
364 .sht = &piix_sht,
365 .flags = ATA_FLAG_SLAVE_POSS,
366 .pio_mask = 0x1f, /* pio0-4 */
367#if 0
368 .mwdma_mask = 0x06, /* mwdma1-2 */
369#else
370 .mwdma_mask = 0x00, /* mwdma broken */
371#endif
372 .udma_mask = ATA_UDMA_MASK_40C,
373 .port_ops = &piix_pata_ops,
374 },
375
376 /* ich5_pata */
377 {
378 .sht = &piix_sht,
379 .flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
380 .pio_mask = 0x1f, /* pio0-4 */
381#if 0
382 .mwdma_mask = 0x06, /* mwdma1-2 */
383#else
384 .mwdma_mask = 0x00, /* mwdma broken */
385#endif
386 .udma_mask = 0x3f, /* udma0-5 */
387 .port_ops = &piix_pata_ops,
388 },
389
390 /* ich5_sata */
391 {
392 .sht = &piix_sht,
393 .flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
394 PIIX_FLAG_IGNORE_PCS,
395 .pio_mask = 0x1f, /* pio0-4 */
396 .mwdma_mask = 0x07, /* mwdma0-2 */
397 .udma_mask = 0x7f, /* udma0-6 */
398 .port_ops = &piix_sata_ops,
399 },
400
401 /* i6300esb_sata */
402 {
403 .sht = &piix_sht,
404 .flags = ATA_FLAG_SATA |
405 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
406 .pio_mask = 0x1f, /* pio0-4 */
407 .mwdma_mask = 0x07, /* mwdma0-2 */
408 .udma_mask = 0x7f, /* udma0-6 */
409 .port_ops = &piix_sata_ops,
410 },
411
412 /* ich6_sata */
413 {
414 .sht = &piix_sht,
415 .flags = ATA_FLAG_SATA |
416 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
417 .pio_mask = 0x1f, /* pio0-4 */
418 .mwdma_mask = 0x07, /* mwdma0-2 */
419 .udma_mask = 0x7f, /* udma0-6 */
420 .port_ops = &piix_sata_ops,
421 },
422
423 /* ich6_sata_ahci */
424 {
425 .sht = &piix_sht,
426 .flags = ATA_FLAG_SATA |
427 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
428 PIIX_FLAG_AHCI,
429 .pio_mask = 0x1f, /* pio0-4 */
430 .mwdma_mask = 0x07, /* mwdma0-2 */
431 .udma_mask = 0x7f, /* udma0-6 */
432 .port_ops = &piix_sata_ops,
433 },
434
435 /* ich6m_sata_ahci */
436 {
437 .sht = &piix_sht,
438 .flags = ATA_FLAG_SATA |
439 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
440 PIIX_FLAG_AHCI,
441 .pio_mask = 0x1f, /* pio0-4 */
442 .mwdma_mask = 0x07, /* mwdma0-2 */
443 .udma_mask = 0x7f, /* udma0-6 */
444 .port_ops = &piix_sata_ops,
445 },
446
447 /* ich8_sata_ahci */
448 {
449 .sht = &piix_sht,
450 .flags = ATA_FLAG_SATA |
451 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
452 PIIX_FLAG_AHCI,
453 .pio_mask = 0x1f, /* pio0-4 */
454 .mwdma_mask = 0x07, /* mwdma0-2 */
455 .udma_mask = 0x7f, /* udma0-6 */
456 .port_ops = &piix_sata_ops,
457 },
458};
459
460static struct pci_bits piix_enable_bits[] = {
461 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
462 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
463};
464
465MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
466MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
467MODULE_LICENSE("GPL");
468MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
469MODULE_VERSION(DRV_VERSION);
470
471static int force_pcs = 0;
472module_param(force_pcs, int, 0444);
473MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
474 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
475
476/**
477 * piix_pata_cbl_detect - Probe host controller cable detect info
478 * @ap: Port for which cable detect info is desired
479 *
480 * Read 80c cable indicator from ATA PCI device's PCI config
481 * register. This register is normally set by firmware (BIOS).
482 *
483 * LOCKING:
484 * None (inherited from caller).
485 */
486static void piix_pata_cbl_detect(struct ata_port *ap)
487{
488 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
489 u8 tmp, mask;
490
491 /* no 80c support in host controller? */
492 if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
493 goto cbl40;
494
495 /* check BIOS cable detect results */
496 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
497 pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
498 if ((tmp & mask) == 0)
499 goto cbl40;
500
501 ap->cbl = ATA_CBL_PATA80;
502 return;
503
504cbl40:
505 ap->cbl = ATA_CBL_PATA40;
506 ap->udma_mask &= ATA_UDMA_MASK_40C;
507}
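/*
 * Illustrative example, not part of the original source: for the
 * primary channel, an 80-conductor cable is reported via IOCFG bits
 * 4-5 (PIIX_80C_PRI == 0x30). A hypothetical BIOS-set reading of
 *
 *	tmp == 0x30
 *
 * passes the mask test above and leaves modes beyond UDMA/33 enabled.
 */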
508
509/**
510 * piix_pata_prereset - prereset for PATA host controller
511 * @ap: Target port
512 *
513 * Prereset including cable detection.
514 *
515 * LOCKING:
516 * None (inherited from caller).
517 */
518static int piix_pata_prereset(struct ata_port *ap)
519{
520 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
521
522 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) {
523 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
524 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
525 return 0;
526 }
527
528 piix_pata_cbl_detect(ap);
529
530 return ata_std_prereset(ap);
531}
532
533static void piix_pata_error_handler(struct ata_port *ap)
534{
535 ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL,
536 ata_std_postreset);
537}
538
539/**
540 * piix_sata_present_mask - determine present mask for SATA host controller
541 * @ap: Target port
542 *
543 * Reads SATA PCI device's PCI config register Port Configuration
544 * and Status (PCS) to determine port and device availability.
545 *
546 * LOCKING:
547 * None (inherited from caller).
548 *
549 * RETURNS:
550 * determined present_mask
551 */
552static unsigned int piix_sata_present_mask(struct ata_port *ap)
553{
554 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
555 struct piix_host_priv *hpriv = ap->host->private_data;
556 const unsigned int *map = hpriv->map;
557 int base = 2 * ap->port_no;
558 unsigned int present_mask = 0;
559 int port, i;
560 u16 pcs;
561
562 pci_read_config_word(pdev, ICH5_PCS, &pcs);
563 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
564
565 for (i = 0; i < 2; i++) {
566 port = map[base + i];
567 if (port < 0)
568 continue;
569 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
570 (pcs & 1 << (hpriv->map_db->present_shift + port)))
571 present_mask |= 1 << i;
572 }
573
574 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
575 ap->id, pcs, present_mask);
576
577 return present_mask;
578}
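/*
 * Illustrative example, not part of the original source: with the
 * ICH5 map { P0, NA, P1, NA } and present_shift == 4, probing port 0
 * checks only PCS bit 4 (SATA port 0 present). For a hypothetical
 * reading of
 *
 *	pcs == 0x0013		// ports 0/1 enabled, port 0 present
 *
 * the function returns present_mask == 0x1.
 */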
579
580/**
581 * piix_sata_softreset - reset SATA host port via ATA SRST
582 * @ap: port to reset
583 * @classes: resulting classes of attached devices
584 *
585 * Reset SATA host port via ATA SRST. On controllers with
586 * reliable PCS present bits, the bits are used to determine
587 * device presence.
588 *
589 * LOCKING:
590 * Kernel thread context (may sleep)
591 *
592 * RETURNS:
593 * 0 on success, -errno otherwise.
594 */
595static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
596{
597 unsigned int present_mask;
598 int i, rc;
599
600 present_mask = piix_sata_present_mask(ap);
601
602 rc = ata_std_softreset(ap, classes);
603 if (rc)
604 return rc;
605
606 for (i = 0; i < ATA_MAX_DEVICES; i++) {
607 if (!(present_mask & (1 << i)))
608 classes[i] = ATA_DEV_NONE;
609 }
610
611 return 0;
612}
613
614static void piix_sata_error_handler(struct ata_port *ap)
615{
616 ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
617 ata_std_postreset);
618}
619
620/**
621 * piix_set_piomode - Initialize host controller PATA PIO timings
622 * @ap: Port whose timings we are configuring
623 * @adev: Device whose PIO timings we are configuring
624 *
625 * Set PIO mode for device, in host controller PCI config space.
626 *
627 * LOCKING:
628 * None (inherited from caller).
629 */
630
631static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
632{
633 unsigned int pio = adev->pio_mode - XFER_PIO_0;
634 struct pci_dev *dev = to_pci_dev(ap->host->dev);
635 unsigned int is_slave = (adev->devno != 0);
636 unsigned int master_port= ap->port_no ? 0x42 : 0x40;
637 unsigned int slave_port = 0x44;
638 u16 master_data;
639 u8 slave_data;
640
641 static const /* ISP RTC */
642 u8 timings[][2] = { { 0, 0 },
643 { 0, 0 },
644 { 1, 0 },
645 { 2, 1 },
646 { 2, 3 }, };
647
648 pci_read_config_word(dev, master_port, &master_data);
649 if (is_slave) {
650 master_data |= 0x4000;
651 /* enable PPE, IE and TIME */
652 master_data |= 0x0070;
653 pci_read_config_byte(dev, slave_port, &slave_data);
654 slave_data &= (ap->port_no ? 0x0f : 0xf0);
655 slave_data |=
656 (timings[pio][0] << 2) |
657 (timings[pio][1] << (ap->port_no ? 4 : 0));
658 } else {
659 master_data &= 0xccf8;
660 /* enable PPE, IE and TIME */
661 master_data |= 0x0007;
662 master_data |=
663 (timings[pio][0] << 12) |
664 (timings[pio][1] << 8);
665 }
666 pci_write_config_word(dev, master_port, master_data);
667 if (is_slave)
668 pci_write_config_byte(dev, slave_port, slave_data);
669}
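/*
 * Worked example, not part of the original source: for a master
 * device in PIO4, timings[4] == { 2, 3 } (ISP/RTC), so the code above
 * clears the timing field with the 0xccf8 mask and merges in
 *
 *	(2 << 12) | (3 << 8) | 0x0007 == 0x2307
 *
 * before writing the 0x40 (or 0x42) timing register.
 */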
670
671/**
672 * piix_set_dmamode - Initialize host controller PATA DMA timings
673 * @ap: Port whose timings we are configuring
674 * @adev: Device whose DMA mode we are configuring
675 *
676 * Set MWDMA or UDMA mode for device (taken from @adev->dma_mode),
677 * in host controller PCI config space.
678 *
679 * LOCKING:
680 * None (inherited from caller).
681 */
682
683static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
684{
685 unsigned int udma = adev->dma_mode; /* FIXME: MWDMA too */
686 struct pci_dev *dev = to_pci_dev(ap->host->dev);
687 u8 maslave = ap->port_no ? 0x42 : 0x40;
688 u8 speed = udma;
689 unsigned int drive_dn = (ap->port_no ? 2 : 0) + adev->devno;
690 int a_speed = 3 << (drive_dn * 4);
691 int u_flag = 1 << drive_dn;
692 int v_flag = 0x01 << drive_dn;
693 int w_flag = 0x10 << drive_dn;
694 int u_speed = 0;
695 int sitre;
696 u16 reg4042, reg4a;
697 u8 reg48, reg54, reg55;
698
699 pci_read_config_word(dev, maslave, &reg4042);
700 DPRINTK("reg4042 = 0x%04x\n", reg4042);
701 sitre = (reg4042 & 0x4000) ? 1 : 0;
702 pci_read_config_byte(dev, 0x48, &reg48);
703 pci_read_config_word(dev, 0x4a, &reg4a);
704 pci_read_config_byte(dev, 0x54, &reg54);
705 pci_read_config_byte(dev, 0x55, &reg55);
706
707 switch(speed) {
708 case XFER_UDMA_4:
709 case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break;
710 case XFER_UDMA_6:
711 case XFER_UDMA_5:
712 case XFER_UDMA_3:
713 case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break;
714 case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break;
715 case XFER_MW_DMA_2:
716 case XFER_MW_DMA_1: break;
717 default:
718 BUG();
719 return;
720 }
721
722 if (speed >= XFER_UDMA_0) {
723 if (!(reg48 & u_flag))
724 pci_write_config_byte(dev, 0x48, reg48 | u_flag);
725 if (speed == XFER_UDMA_5) {
726 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
727 } else {
728 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
729 }
730 if ((reg4a & a_speed) != u_speed)
731 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
732 if (speed > XFER_UDMA_2) {
733 if (!(reg54 & v_flag))
734 pci_write_config_byte(dev, 0x54, reg54 | v_flag);
735 } else
736 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
737 } else {
738 if (reg48 & u_flag)
739 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
740 if (reg4a & a_speed)
741 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
742 if (reg54 & v_flag)
743 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
744 if (reg55 & w_flag)
745 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
746 }
747}
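/*
 * Illustrative example, not part of the original source: the per-drive
 * fields above scale with drive_dn. For the secondary slave
 * (drive_dn == 3):
 *
 *	u_flag  == 0x08		// UDMA enable bit in reg 0x48
 *	a_speed == 3 << 12	// cycle-time field in reg 0x4a
 *	v_flag  == 0x08		// reg 0x54 bit, set above UDMA2
 *	w_flag  == 0x80		// reg 0x55 bit, set for UDMA5
 *
 * and XFER_UDMA_5 yields u_speed == 1 << 12.
 */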
748
749#define AHCI_PCI_BAR 5
750#define AHCI_GLOBAL_CTL 0x04
751#define AHCI_ENABLE (1 << 31)
752static int piix_disable_ahci(struct pci_dev *pdev)
753{
754 void __iomem *mmio;
755 u32 tmp;
756 int rc = 0;
757
758 /* BUG: pci_enable_device has not yet been called. This
759 * works because this device is usually set up by BIOS.
760 */
761
762 if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
763 !pci_resource_len(pdev, AHCI_PCI_BAR))
764 return 0;
765
766 mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
767 if (!mmio)
768 return -ENOMEM;
769
770 tmp = readl(mmio + AHCI_GLOBAL_CTL);
771 if (tmp & AHCI_ENABLE) {
772 tmp &= ~AHCI_ENABLE;
773 writel(tmp, mmio + AHCI_GLOBAL_CTL);
774
775 tmp = readl(mmio + AHCI_GLOBAL_CTL);
776 if (tmp & AHCI_ENABLE)
777 rc = -EIO;
778 }
779
780 pci_iounmap(pdev, mmio);
781 return rc;
782}
783
784/**
785 * piix_check_450nx_errata - Check for problem 450NX setup
786 * @ata_dev: the PCI device to check
787 *
788 * Check for the presence of 450NX errata #19 and errata #25. If
789 * they are found, return an error code so we can turn off DMA.
790 */
791
792static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
793{
794 struct pci_dev *pdev = NULL;
795 u16 cfg;
796 u8 rev;
797 int no_piix_dma = 0;
798
799 while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL)
800 {
801 /* Look for 450NX PXB. Check for problem configurations.
802 A PCI quirk checks bit 6 already */
803 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
804 pci_read_config_word(pdev, 0x41, &cfg);
805 /* Only on the original revision: IDE DMA can hang */
806 if (rev == 0x00)
807 no_piix_dma = 1;
808 /* On all revisions below 5, PXB bus lock must be disabled for IDE */
809 else if (cfg & (1<<14) && rev < 5)
810 no_piix_dma = 2;
811 }
812 if (no_piix_dma)
813 dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
814 if (no_piix_dma == 2)
815 dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
816 return no_piix_dma;
817}
818
819static void __devinit piix_init_pcs(struct pci_dev *pdev,
820 struct ata_port_info *pinfo,
821 const struct piix_map_db *map_db)
822{
823 u16 pcs, new_pcs;
824
825 pci_read_config_word(pdev, ICH5_PCS, &pcs);
826
827 new_pcs = pcs | map_db->port_enable;
828
829 if (new_pcs != pcs) {
830 DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
831 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
832 msleep(150);
833 }
834
835 if (force_pcs == 1) {
836 dev_printk(KERN_INFO, &pdev->dev,
837 "force ignoring PCS (0x%x)\n", new_pcs);
838 pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS;
839 pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS;
840 } else if (force_pcs == 2) {
841 dev_printk(KERN_INFO, &pdev->dev,
842 "force honoring PCS (0x%x)\n", new_pcs);
843 pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS;
844 pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS;
845 }
846}
847
848static void __devinit piix_init_sata_map(struct pci_dev *pdev,
849 struct ata_port_info *pinfo,
850 const struct piix_map_db *map_db)
851{
852 struct piix_host_priv *hpriv = pinfo[0].private_data;
853 const unsigned int *map;
854 int i, invalid_map = 0;
855 u8 map_value;
856
857 pci_read_config_byte(pdev, ICH5_PMR, &map_value);
858
859 map = map_db->map[map_value & map_db->mask];
860
861 dev_printk(KERN_INFO, &pdev->dev, "MAP [");
862 for (i = 0; i < 4; i++) {
863 switch (map[i]) {
864 case RV:
865 invalid_map = 1;
866 printk(" XX");
867 break;
868
869 case NA:
870 printk(" --");
871 break;
872
873 case IDE:
874 WARN_ON((i & 1) || map[i + 1] != IDE);
875 pinfo[i / 2] = piix_port_info[ich5_pata];
876 pinfo[i / 2].private_data = hpriv;
877 i++;
878 printk(" IDE IDE");
879 break;
880
881 default:
882 printk(" P%d", map[i]);
883 if (i & 1)
884 pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
885 break;
886 }
887 }
888 printk(" ]\n");
889
890 if (invalid_map)
891 dev_printk(KERN_ERR, &pdev->dev,
892 "invalid MAP value %u\n", map_value);
893
894 hpriv->map = map;
895 hpriv->map_db = map_db;
896}
897
898/**
899 * piix_init_one - Register PIIX ATA PCI device with kernel services
900 * @pdev: PCI device to register
901 * @ent: Entry in piix_pci_tbl matching with @pdev
902 *
903 * Called from kernel PCI layer. We probe for combined mode (sigh),
904 * and then hand over control to libata, for it to do the rest.
905 *
906 * LOCKING:
907 * Inherited from PCI layer (may sleep).
908 *
909 * RETURNS:
910 * Zero on success, or -ERRNO value.
911 */
912
913static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
914{
915 static int printed_version;
916 struct ata_port_info port_info[2];
917 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
918 struct piix_host_priv *hpriv;
919 unsigned long port_flags;
920
921 if (!printed_version++)
922 dev_printk(KERN_DEBUG, &pdev->dev,
923 "version " DRV_VERSION "\n");
924
925 /* no hotplugging support (FIXME) */
926 if (!in_module_init)
927 return -ENODEV;
928
929 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
930 if (!hpriv)
931 return -ENOMEM;
932
933 port_info[0] = piix_port_info[ent->driver_data];
934 port_info[1] = piix_port_info[ent->driver_data];
935 port_info[0].private_data = hpriv;
936 port_info[1].private_data = hpriv;
937
938 port_flags = port_info[0].flags;
939
940 if (port_flags & PIIX_FLAG_AHCI) {
941 u8 tmp;
942 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
943 if (tmp == PIIX_AHCI_DEVICE) {
944 int rc = piix_disable_ahci(pdev);
945 if (rc)
946 return rc;
947 }
948 }
949
950 /* Initialize SATA map */
951 if (port_flags & ATA_FLAG_SATA) {
952 piix_init_sata_map(pdev, port_info,
953 piix_map_db_table[ent->driver_data]);
954 piix_init_pcs(pdev, port_info,
955 piix_map_db_table[ent->driver_data]);
956 }
957
958 /* On ICH5, some BIOSen disable the interrupt using the
959 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
960 * On ICH6, this bit has the same effect, but only when
961 * MSI is disabled (and it is disabled, as we don't use
962 * message-signalled interrupts currently).
963 */
964 if (port_flags & PIIX_FLAG_CHECKINTR)
965 pci_intx(pdev, 1);
966
967 if (piix_check_450nx_errata(pdev)) {
968 /* This writes into the master table but it does not
969 really matter for this errata as we will apply it to
970 all the PIIX devices on the board */
971 port_info[0].mwdma_mask = 0;
972 port_info[0].udma_mask = 0;
973 port_info[1].mwdma_mask = 0;
974 port_info[1].udma_mask = 0;
975 }
976 return ata_pci_init_one(pdev, ppinfo, 2);
977}
978
979static void piix_host_stop(struct ata_host *host)
980{
981 struct piix_host_priv *hpriv = host->private_data;
982
983 ata_host_stop(host);
984
985 kfree(hpriv);
986}
987
988static int __init piix_init(void)
989{
990 int rc;
991
992 DPRINTK("pci_register_driver\n");
993 rc = pci_register_driver(&piix_pci_driver);
994 if (rc)
995 return rc;
996
997 in_module_init = 0;
998
999 DPRINTK("done\n");
1000 return 0;
1001}
1002
1003static void __exit piix_exit(void)
1004{
1005 pci_unregister_driver(&piix_pci_driver);
1006}
1007
1008module_init(piix_init);
1009module_exit(piix_exit);
1010
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
new file mode 100644
index 000000000000..1c9315401f7a
--- /dev/null
+++ b/drivers/ata/libata-core.c
@@ -0,0 +1,6143 @@
1/*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
50#include <linux/jiffies.h>
51#include <linux/scatterlist.h>
52#include <scsi/scsi.h>
53#include <scsi/scsi_cmnd.h>
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
62/* debounce timing parameters in msecs { interval, duration, timeout } */
63const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
64const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
65const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
66
67static unsigned int ata_dev_init_params(struct ata_device *dev,
68 u16 heads, u16 sectors);
69static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
70static void ata_dev_xfermask(struct ata_device *dev);
71
72static unsigned int ata_unique_id = 1;
73static struct workqueue_struct *ata_wq;
74
75struct workqueue_struct *ata_aux_wq;
76
77int atapi_enabled = 1;
78module_param(atapi_enabled, int, 0444);
79MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
80
81int atapi_dmadir = 0;
82module_param(atapi_dmadir, int, 0444);
83MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
84
85int libata_fua = 0;
86module_param_named(fua, libata_fua, int, 0444);
87MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
88
89static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
90module_param(ata_probe_timeout, int, 0444);
91MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
92
93MODULE_AUTHOR("Jeff Garzik");
94MODULE_DESCRIPTION("Library module for ATA devices");
95MODULE_LICENSE("GPL");
96MODULE_VERSION(DRV_VERSION);
97
98
99/**
100 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
101 * @tf: Taskfile to convert
102 * @fis: Buffer into which data will be output
103 * @pmp: Port multiplier port
104 *
105 * Converts a standard ATA taskfile to a Serial ATA
106 * FIS structure (Register - Host to Device).
107 *
108 * LOCKING:
109 * Inherited from caller.
110 */
111
112void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
113{
114 fis[0] = 0x27; /* Register - Host to Device FIS */
115 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
116 bit 7 indicates Command FIS */
117 fis[2] = tf->command;
118 fis[3] = tf->feature;
119
120 fis[4] = tf->lbal;
121 fis[5] = tf->lbam;
122 fis[6] = tf->lbah;
123 fis[7] = tf->device;
124
125 fis[8] = tf->hob_lbal;
126 fis[9] = tf->hob_lbam;
127 fis[10] = tf->hob_lbah;
128 fis[11] = tf->hob_feature;
129
130 fis[12] = tf->nsect;
131 fis[13] = tf->hob_nsect;
132 fis[14] = 0;
133 fis[15] = tf->ctl;
134
135 fis[16] = 0;
136 fis[17] = 0;
137 fis[18] = 0;
138 fis[19] = 0;
139}
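/*
 * Illustrative usage, not part of the original source (qc is a
 * hypothetical queued command): a SATA LLDD typically points @fis at
 * its controller's 20-byte command FIS area, e.g.
 *
 *	u8 fis[20];
 *	ata_tf_to_fis(&qc->tf, fis, 0);	// pmp 0; fis[0] becomes 0x27
 */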
140
141/**
142 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
143 * @fis: Buffer from which data will be input
144 * @tf: Taskfile to output
145 *
146 * Converts a serial ATA FIS structure to a standard ATA taskfile.
147 *
148 * LOCKING:
149 * Inherited from caller.
150 */
151
152void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
153{
154 tf->command = fis[2]; /* status */
155 tf->feature = fis[3]; /* error */
156
157 tf->lbal = fis[4];
158 tf->lbam = fis[5];
159 tf->lbah = fis[6];
160 tf->device = fis[7];
161
162 tf->hob_lbal = fis[8];
163 tf->hob_lbam = fis[9];
164 tf->hob_lbah = fis[10];
165
166 tf->nsect = fis[12];
167 tf->hob_nsect = fis[13];
168}
169
170static const u8 ata_rw_cmds[] = {
171 /* pio multi */
172 ATA_CMD_READ_MULTI,
173 ATA_CMD_WRITE_MULTI,
174 ATA_CMD_READ_MULTI_EXT,
175 ATA_CMD_WRITE_MULTI_EXT,
176 0,
177 0,
178 0,
179 ATA_CMD_WRITE_MULTI_FUA_EXT,
180 /* pio */
181 ATA_CMD_PIO_READ,
182 ATA_CMD_PIO_WRITE,
183 ATA_CMD_PIO_READ_EXT,
184 ATA_CMD_PIO_WRITE_EXT,
185 0,
186 0,
187 0,
188 0,
189 /* dma */
190 ATA_CMD_READ,
191 ATA_CMD_WRITE,
192 ATA_CMD_READ_EXT,
193 ATA_CMD_WRITE_EXT,
194 0,
195 0,
196 0,
197 ATA_CMD_WRITE_FUA_EXT
198};
199
200/**
201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
202 * @qc: command to examine and configure
203 *
204 * Examine the device configuration and tf->flags to calculate
205 * the proper read/write commands and protocol to use.
206 *
207 * LOCKING:
208 * caller.
209 */
210int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
211{
212 struct ata_taskfile *tf = &qc->tf;
213 struct ata_device *dev = qc->dev;
214 u8 cmd;
215
216 int index, fua, lba48, write;
217
218 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
219 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
220 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
221
222 if (dev->flags & ATA_DFLAG_PIO) {
223 tf->protocol = ATA_PROT_PIO;
224 index = dev->multi_count ? 0 : 8;
225 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
226 /* Unable to use DMA due to host limitation */
227 tf->protocol = ATA_PROT_PIO;
228 index = dev->multi_count ? 0 : 8;
229 } else {
230 tf->protocol = ATA_PROT_DMA;
231 index = 16;
232 }
233
234 cmd = ata_rw_cmds[index + fua + lba48 + write];
235 if (cmd) {
236 tf->command = cmd;
237 return 0;
238 }
239 return -1;
240}
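/*
 * Illustrative example, not part of the original source: ata_rw_cmds[]
 * is indexed as index + fua + lba48 + write, so a DMA LBA48 write
 * without FUA computes 16 + 0 + 2 + 1 == 19, selecting
 * ATA_CMD_WRITE_EXT.
 */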
241
242/**
243 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
244 * @pio_mask: pio_mask
245 * @mwdma_mask: mwdma_mask
246 * @udma_mask: udma_mask
247 *
248 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
249 * unsigned int xfer_mask.
250 *
251 * LOCKING:
252 * None.
253 *
254 * RETURNS:
255 * Packed xfer_mask.
256 */
257static unsigned int ata_pack_xfermask(unsigned int pio_mask,
258 unsigned int mwdma_mask,
259 unsigned int udma_mask)
260{
261 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
262 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
263 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
264}
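/*
 * Illustrative example, not part of the original source, assuming the
 * usual <linux/ata.h> layout (PIO at bit 0, MWDMA at bit 8, UDMA at
 * bit 16):
 *
 *	ata_pack_xfermask(0x1f, 0x07, 0x3f) == 0x003f071f
 *
 * i.e. PIO0-4, MWDMA0-2 and UDMA0-5 folded into one word.
 */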
265
266/**
267 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
268 * @xfer_mask: xfer_mask to unpack
269 * @pio_mask: resulting pio_mask
270 * @mwdma_mask: resulting mwdma_mask
271 * @udma_mask: resulting udma_mask
272 *
273 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
274 * Any NULL destination masks will be ignored.
275 */
276static void ata_unpack_xfermask(unsigned int xfer_mask,
277 unsigned int *pio_mask,
278 unsigned int *mwdma_mask,
279 unsigned int *udma_mask)
280{
281 if (pio_mask)
282 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
283 if (mwdma_mask)
284 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
285 if (udma_mask)
286 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
287}
288
289static const struct ata_xfer_ent {
290 int shift, bits;
291 u8 base;
292} ata_xfer_tbl[] = {
293 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
294 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
295 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
296 { -1, },
297};
298
299/**
300 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
301 * @xfer_mask: xfer_mask of interest
302 *
303 * Return matching XFER_* value for @xfer_mask. Only the highest
304 * bit of @xfer_mask is considered.
305 *
306 * LOCKING:
307 * None.
308 *
309 * RETURNS:
310 * Matching XFER_* value, 0 if no match found.
311 */
312static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
313{
314 int highbit = fls(xfer_mask) - 1;
315 const struct ata_xfer_ent *ent;
316
317 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
318 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
319 return ent->base + highbit - ent->shift;
320 return 0;
321}
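/*
 * Illustrative example, not part of the original source: only the
 * highest set bit counts, so a mask covering UDMA0-5 maps to the
 * single mode value
 *
 *	ata_xfer_mask2mode(ata_pack_xfermask(0, 0, 0x3f)) == XFER_UDMA_5
 */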
322
323/**
324 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
325 * @xfer_mode: XFER_* of interest
326 *
327 * Return matching xfer_mask for @xfer_mode.
328 *
329 * LOCKING:
330 * None.
331 *
332 * RETURNS:
333 * Matching xfer_mask, 0 if no match found.
334 */
335static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
336{
337 const struct ata_xfer_ent *ent;
338
339 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
340 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
341 return 1 << (ent->shift + xfer_mode - ent->base);
342 return 0;
343}
344
345/**
346 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
347 * @xfer_mode: XFER_* of interest
348 *
349 * Return matching xfer_shift for @xfer_mode.
350 *
351 * LOCKING:
352 * None.
353 *
354 * RETURNS:
355 * Matching xfer_shift, -1 if no match found.
356 */
357static int ata_xfer_mode2shift(unsigned int xfer_mode)
358{
359 const struct ata_xfer_ent *ent;
360
361 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
362 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
363 return ent->shift;
364 return -1;
365}
366
367/**
368 * ata_mode_string - convert xfer_mask to string
369 * @xfer_mask: mask of bits supported; only highest bit counts.
370 *
371 * Determine string which represents the highest speed
372 * (highest bit in @xfer_mask).
373 *
374 * LOCKING:
375 * None.
376 *
377 * RETURNS:
378 * Constant C string representing highest speed listed in
379 * @xfer_mask, or the constant C string "<n/a>".
380 */
381static const char *ata_mode_string(unsigned int xfer_mask)
382{
383 static const char * const xfer_mode_str[] = {
384 "PIO0",
385 "PIO1",
386 "PIO2",
387 "PIO3",
388 "PIO4",
389 "PIO5",
390 "PIO6",
391 "MWDMA0",
392 "MWDMA1",
393 "MWDMA2",
394 "MWDMA3",
395 "MWDMA4",
396 "UDMA/16",
397 "UDMA/25",
398 "UDMA/33",
399 "UDMA/44",
400 "UDMA/66",
401 "UDMA/100",
402 "UDMA/133",
403 "UDMA7",
404 };
405 int highbit;
406
407 highbit = fls(xfer_mask) - 1;
408 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
409 return xfer_mode_str[highbit];
410 return "<n/a>";
411}
412
413static const char *sata_spd_string(unsigned int spd)
414{
415 static const char * const spd_str[] = {
416 "1.5 Gbps",
417 "3.0 Gbps",
418 };
419
420 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
421 return "<unknown>";
422 return spd_str[spd - 1];
423}
424
425void ata_dev_disable(struct ata_device *dev)
426{
427 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
428 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
429 dev->class++;
430 }
431}
432
433/**
434 * ata_pio_devchk - PATA device presence detection
435 * @ap: ATA channel to examine
436 * @device: Device to examine (starting at zero)
437 *
438 * This technique was originally described in
439 * Hale Landis's ATADRVR (www.ata-atapi.com), and
440 * later found its way into the ATA/ATAPI spec.
441 *
442 * Write a pattern to the ATA shadow registers,
443 * and if a device is present, it will respond by
444 * correctly storing and echoing back the
445 * ATA shadow register contents.
446 *
447 * LOCKING:
448 * caller.
449 */
450
451static unsigned int ata_pio_devchk(struct ata_port *ap,
452 unsigned int device)
453{
454 struct ata_ioports *ioaddr = &ap->ioaddr;
455 u8 nsect, lbal;
456
457 ap->ops->dev_select(ap, device);
458
459 outb(0x55, ioaddr->nsect_addr);
460 outb(0xaa, ioaddr->lbal_addr);
461
462 outb(0xaa, ioaddr->nsect_addr);
463 outb(0x55, ioaddr->lbal_addr);
464
465 outb(0x55, ioaddr->nsect_addr);
466 outb(0xaa, ioaddr->lbal_addr);
467
468 nsect = inb(ioaddr->nsect_addr);
469 lbal = inb(ioaddr->lbal_addr);
470
471 if ((nsect == 0x55) && (lbal == 0xaa))
472 return 1; /* we found a device */
473
474 return 0; /* nothing found */
475}
476
477/**
478 * ata_mmio_devchk - PATA device presence detection
479 * @ap: ATA channel to examine
480 * @device: Device to examine (starting at zero)
481 *
482 * This technique was originally described in
483 * Hale Landis's ATADRVR (www.ata-atapi.com), and
484 * later found its way into the ATA/ATAPI spec.
485 *
486 * Write a pattern to the ATA shadow registers,
487 * and if a device is present, it will respond by
488 * correctly storing and echoing back the
489 * ATA shadow register contents.
490 *
491 * LOCKING:
492 * caller.
493 */
494
495static unsigned int ata_mmio_devchk(struct ata_port *ap,
496 unsigned int device)
497{
498 struct ata_ioports *ioaddr = &ap->ioaddr;
499 u8 nsect, lbal;
500
501 ap->ops->dev_select(ap, device);
502
503 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
505
506 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
507 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
508
509 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
510 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
511
512 nsect = readb((void __iomem *) ioaddr->nsect_addr);
513 lbal = readb((void __iomem *) ioaddr->lbal_addr);
514
515 if ((nsect == 0x55) && (lbal == 0xaa))
516 return 1; /* we found a device */
517
518 return 0; /* nothing found */
519}
520
521/**
522 * ata_devchk - PATA device presence detection
523 * @ap: ATA channel to examine
524 * @device: Device to examine (starting at zero)
525 *
526 * Dispatch ATA device presence detection, depending
527 * on whether we are using PIO or MMIO to talk to the
528 * ATA shadow registers.
529 *
530 * LOCKING:
531 * caller.
532 */
533
534static unsigned int ata_devchk(struct ata_port *ap,
535 unsigned int device)
536{
537 if (ap->flags & ATA_FLAG_MMIO)
538 return ata_mmio_devchk(ap, device);
539 return ata_pio_devchk(ap, device);
540}
541
542/**
543 * ata_dev_classify - determine device type based on ATA-spec signature
544 * @tf: ATA taskfile register set for device to be identified
545 *
546 * Determine from taskfile register contents whether a device is
547 * ATA or ATAPI, as per "Signature and persistence" section
548 * of ATA/PI spec (volume 1, sect 5.14).
549 *
550 * LOCKING:
551 * None.
552 *
553 * RETURNS:
554 * Device type: %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
555 * in the event of failure.
556 */
557
558unsigned int ata_dev_classify(const struct ata_taskfile *tf)
559{
560 /* Apple's open source Darwin code hints that some devices only
561 * put a proper signature into the LBA mid/high registers,
562 * so we check only those; it's sufficient for uniqueness.
563 */
564
565 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
566 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
567 DPRINTK("found ATA device by sig\n");
568 return ATA_DEV_ATA;
569 }
570
571 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
572 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
573 DPRINTK("found ATAPI device by sig\n");
574 return ATA_DEV_ATAPI;
575 }
576
577 DPRINTK("unknown device\n");
578 return ATA_DEV_UNKNOWN;
579}
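/*
 * Illustrative example, not part of the original source: after reset
 * an ATAPI device leaves the 0x14/0xeb signature in the LBA mid/high
 * shadow registers, so
 *
 *	tf.lbam == 0x14 && tf.lbah == 0xeb
 *
 * makes ata_dev_classify(&tf) return ATA_DEV_ATAPI.
 */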
580
581/**
582 * ata_dev_try_classify - Parse returned ATA device signature
583 * @ap: ATA channel to examine
584 * @device: Device to examine (starting at zero)
585 * @r_err: Value of error register on completion
586 *
587 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
588 * an ATA/ATAPI-defined set of values is placed in the ATA
589 * shadow registers, indicating the results of device detection
590 * and diagnostics.
591 *
592 * Select the ATA device, and read the values from the ATA shadow
593 * registers. Then parse according to the Error register value,
594 * and the spec-defined values examined by ata_dev_classify().
595 *
596 * LOCKING:
597 * caller.
598 *
599 * RETURNS:
600 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
601 */
602
603static unsigned int
604ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
605{
606 struct ata_taskfile tf;
607 unsigned int class;
608 u8 err;
609
610 ap->ops->dev_select(ap, device);
611
612 memset(&tf, 0, sizeof(tf));
613
614 ap->ops->tf_read(ap, &tf);
615 err = tf.feature;
616 if (r_err)
617 *r_err = err;
618
619 /* see if device passed diags */
620 if (err == 1)
621 /* do nothing */ ;
622 else if ((device == 0) && (err == 0x81))
623 /* do nothing */ ;
624 else
625 return ATA_DEV_NONE;
626
627 /* determine if device is ATA or ATAPI */
628 class = ata_dev_classify(&tf);
629
630 if (class == ATA_DEV_UNKNOWN)
631 return ATA_DEV_NONE;
632 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
633 return ATA_DEV_NONE;
634 return class;
635}
636
637/**
638 * ata_id_string - Convert IDENTIFY DEVICE page into string
639 * @id: IDENTIFY DEVICE results we will examine
640 * @s: string into which data is output
641 * @ofs: offset into identify device page
642 * @len: length of string to return. must be an even number.
643 *
644 * The strings in the IDENTIFY DEVICE page are broken up into
645 * 16-bit chunks. Run through the string, and output each
646 * 8-bit chunk linearly, regardless of platform.
647 *
648 * LOCKING:
649 * caller.
650 */
651
652void ata_id_string(const u16 *id, unsigned char *s,
653 unsigned int ofs, unsigned int len)
654{
655 unsigned int c;
656
657 while (len > 0) {
658 c = id[ofs] >> 8;
659 *s = c;
660 s++;
661
662 c = id[ofs] & 0xff;
663 *s = c;
664 s++;
665
666 ofs++;
667 len -= 2;
668 }
669}
670
671/**
672 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
673 * @id: IDENTIFY DEVICE results we will examine
674 * @s: string into which data is output
675 * @ofs: offset into identify device page
676 * @len: length of string to return. must be an odd number.
677 *
678 * This function is identical to ata_id_string except that it
679 * trims trailing spaces and terminates the resulting string with
680 * null. @len must be actual maximum length (even number) + 1.
681 *
682 * LOCKING:
683 * caller.
684 */
685void ata_id_c_string(const u16 *id, unsigned char *s,
686 unsigned int ofs, unsigned int len)
687{
688 unsigned char *p;
689
690 WARN_ON(!(len & 1));
691
692 ata_id_string(id, s, ofs, len - 1);
693
694 p = s + strnlen(s, len - 1);
695 while (p > s && p[-1] == ' ')
696 p--;
697 *p = '\0';
698}
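/*
 * Illustrative usage, not part of the original source: the IDENTIFY
 * model string occupies words 27-46 (40 characters), so a caller
 * extracts it with an odd-sized buffer, as the WARN_ON above demands:
 *
 *	unsigned char model[41];
 *	ata_id_c_string(id, model, 27, sizeof(model));
 */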
699
700static u64 ata_id_n_sectors(const u16 *id)
701{
702 if (ata_id_has_lba(id)) {
703 if (ata_id_has_lba48(id))
704 return ata_id_u64(id, 100);
705 else
706 return ata_id_u32(id, 60);
707 } else {
708 if (ata_id_current_chs_valid(id))
709 return ata_id_u32(id, 57);
710 else
711 return id[1] * id[3] * id[6];
712 }
713}
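/*
 * Illustrative example, not part of the original source: for an LBA48
 * drive the capacity comes from the 64-bit count in words 100-103, so
 * a nominal 250 GB disk reports
 *
 *	ata_id_u64(id, 100) == 488397168	// 512-byte sectors
 */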
714
715/**
716 * ata_noop_dev_select - Select device 0/1 on ATA bus
717 * @ap: ATA channel to manipulate
718 * @device: ATA device (numbered from zero) to select
719 *
720 * This function intentionally performs no action (no-op).
721 *
722 * May be used as the dev_select() entry in ata_port_operations.
723 *
724 * LOCKING:
725 * caller.
726 */
727void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
728{
729}
730
731
732/**
733 * ata_std_dev_select - Select device 0/1 on ATA bus
734 * @ap: ATA channel to manipulate
735 * @device: ATA device (numbered from zero) to select
736 *
737 * Use the method defined in the ATA specification to
738 * make either device 0, or device 1, active on the
739 * ATA channel. Works with both PIO and MMIO.
740 *
741 * May be used as the dev_select() entry in ata_port_operations.
742 *
743 * LOCKING:
744 * caller.
745 */
746
747void ata_std_dev_select (struct ata_port *ap, unsigned int device)
748{
749 u8 tmp;
750
751 if (device == 0)
752 tmp = ATA_DEVICE_OBS;
753 else
754 tmp = ATA_DEVICE_OBS | ATA_DEV1;
755
756 if (ap->flags & ATA_FLAG_MMIO) {
757 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
758 } else {
759 outb(tmp, ap->ioaddr.device_addr);
760 }
761 ata_pause(ap); /* needed; also flushes, for mmio */
762}
763
764/**
765 * ata_dev_select - Select device 0/1 on ATA bus
766 * @ap: ATA channel to manipulate
767 * @device: ATA device (numbered from zero) to select
768 * @wait: non-zero to wait for Status register BSY bit to clear
769 * @can_sleep: non-zero if context allows sleeping
770 *
771 * Use the method defined in the ATA specification to
772 * make either device 0, or device 1, active on the
773 * ATA channel.
774 *
775 * This is a high-level version of ata_std_dev_select(),
776 * which additionally provides the services of inserting
777 * the proper pauses and status polling, where needed.
778 *
779 * LOCKING:
780 * caller.
781 */
782
783void ata_dev_select(struct ata_port *ap, unsigned int device,
784 unsigned int wait, unsigned int can_sleep)
785{
786 if (ata_msg_probe(ap))
787 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
788 "device %u, wait %u\n", ap->id, device, wait);
789
790 if (wait)
791 ata_wait_idle(ap);
792
793 ap->ops->dev_select(ap, device);
794
795 if (wait) {
796 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
797 msleep(150);
798 ata_wait_idle(ap);
799 }
800}
801
802/**
803 * ata_dump_id - IDENTIFY DEVICE info debugging output
804 * @id: IDENTIFY DEVICE page to dump
805 *
806 * Dump selected 16-bit words from the given IDENTIFY DEVICE
807 * page.
808 *
809 * LOCKING:
810 * caller.
811 */
812
813static inline void ata_dump_id(const u16 *id)
814{
815 DPRINTK("49==0x%04x "
816 "53==0x%04x "
817 "63==0x%04x "
818 "64==0x%04x "
819 "75==0x%04x \n",
820 id[49],
821 id[53],
822 id[63],
823 id[64],
824 id[75]);
825 DPRINTK("80==0x%04x "
826 "81==0x%04x "
827 "82==0x%04x "
828 "83==0x%04x "
829 "84==0x%04x \n",
830 id[80],
831 id[81],
832 id[82],
833 id[83],
834 id[84]);
835 DPRINTK("88==0x%04x "
836 "93==0x%04x\n",
837 id[88],
838 id[93]);
839}
840
841/**
842 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
843 * @id: IDENTIFY data to compute xfer mask from
844 *
845 * Compute the xfermask for this device. This is not as trivial
846 * as it seems if we must consider early devices correctly.
847 *
848 * FIXME: pre IDE drive timing (do we care ?).
849 *
850 * LOCKING:
851 * None.
852 *
853 * RETURNS:
854 * Computed xfermask
855 */
856static unsigned int ata_id_xfermask(const u16 *id)
857{
858 unsigned int pio_mask, mwdma_mask, udma_mask;
859
860 /* Usual case. Word 53 indicates word 64 is valid */
861 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
862 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
863 pio_mask <<= 3;
864 pio_mask |= 0x7;
865 } else {
866 /* If word 64 isn't valid then Word 51 high byte holds
867 * the PIO timing number for the maximum. Turn it into
868 * a mask.
869 */
870 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
871
872 /* But wait.. there's more. Design your standards by
873 * committee and you too can get a free iordy field to
874 * process. However it's the speeds, not the modes, that
875 * are supported... Note drivers using the timing API
876 * will get this right anyway
877 */
878 }
879
880 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
881
882 if (ata_id_is_cfa(id)) {
883 /*
884 * Process compact flash extended modes
885 */
886 int pio = id[163] & 0x7;
887 int dma = (id[163] >> 3) & 7;
888
889 if (pio)
890 pio_mask |= (1 << 5);
891 if (pio > 1)
892 pio_mask |= (1 << 6);
893 if (dma)
894 mwdma_mask |= (1 << 3);
895 if (dma > 1)
896 mwdma_mask |= (1 << 4);
897 }
898
899 udma_mask = 0;
900 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
901 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
902
903 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
904}
905
906/**
907 * ata_port_queue_task - Queue port_task
908 * @ap: The ata_port to queue port_task for
909 * @fn: workqueue function to be scheduled
910 * @data: data value to pass to workqueue function
911 * @delay: delay time for workqueue function
912 *
913 * Schedule @fn(@data) for execution after @delay jiffies using
914 * port_task. There is one port_task per port and it's the
915 * user's (i.e. low level driver's) responsibility to make sure that only
916 * one task is active at any given time.
917 *
918 * libata core layer takes care of synchronization between
919 * port_task and EH. ata_port_queue_task() may be ignored for EH
920 * synchronization.
921 *
922 * LOCKING:
923 * Inherited from caller.
924 */
925void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
926 unsigned long delay)
927{
928 int rc;
929
930 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
931 return;
932
933 PREPARE_WORK(&ap->port_task, fn, data);
934
935 if (!delay)
936 rc = queue_work(ata_wq, &ap->port_task);
937 else
938 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
939
940 /* rc == 0 means that another user is using port task */
941 WARN_ON(rc == 0);
942}
943
944/**
945 * ata_port_flush_task - Flush port_task
946 * @ap: The ata_port to flush port_task for
947 *
948 * After this function completes, port_task is guaranteed not to
949 * be running or scheduled.
950 *
951 * LOCKING:
952 * Kernel thread context (may sleep)
953 */
954void ata_port_flush_task(struct ata_port *ap)
955{
956 unsigned long flags;
957
958 DPRINTK("ENTER\n");
959
960 spin_lock_irqsave(ap->lock, flags);
961 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
962 spin_unlock_irqrestore(ap->lock, flags);
963
964 DPRINTK("flush #1\n");
965 flush_workqueue(ata_wq);
966
967 /*
968 * At this point, if a task is running, it's guaranteed to see
969 * the FLUSH flag; thus, it will never queue pio tasks again.
970 * Cancel and flush.
971 */
972 if (!cancel_delayed_work(&ap->port_task)) {
973 if (ata_msg_ctl(ap))
974 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
975 __FUNCTION__);
976 flush_workqueue(ata_wq);
977 }
978
979 spin_lock_irqsave(ap->lock, flags);
980 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
981 spin_unlock_irqrestore(ap->lock, flags);
982
983 if (ata_msg_ctl(ap))
984 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
985}
986
987void ata_qc_complete_internal(struct ata_queued_cmd *qc)
988{
989 struct completion *waiting = qc->private_data;
990
991 complete(waiting);
992}
993
994/**
995 * ata_exec_internal - execute libata internal command
996 * @dev: Device to which the command is sent
997 * @tf: Taskfile registers for the command and the result
998 * @cdb: CDB for packet command
999 * @dma_dir: Data transfer direction of the command
1000 * @buf: Data buffer of the command
1001 * @buflen: Length of data buffer
1002 *
1003 * Executes libata internal command with timeout. @tf contains
1004 * command on entry and result on return. Timeout and error
1005 * conditions are reported via return value. No recovery action
1006 * is taken after a command times out. It is the caller's duty to
1007 * clean up after timeout.
1008 *
1009 * LOCKING:
1010 * None. Should be called with kernel context, might sleep.
1011 *
1012 * RETURNS:
1013 * Zero on success, AC_ERR_* mask on failure
1014 */
1015unsigned ata_exec_internal(struct ata_device *dev,
1016 struct ata_taskfile *tf, const u8 *cdb,
1017 int dma_dir, void *buf, unsigned int buflen)
1018{
1019 struct ata_port *ap = dev->ap;
1020 u8 command = tf->command;
1021 struct ata_queued_cmd *qc;
1022 unsigned int tag, preempted_tag;
1023 u32 preempted_sactive, preempted_qc_active;
1024 DECLARE_COMPLETION_ONSTACK(wait);
1025 unsigned long flags;
1026 unsigned int err_mask;
1027 int rc;
1028
1029 spin_lock_irqsave(ap->lock, flags);
1030
1031 /* no internal command while frozen */
1032 if (ap->pflags & ATA_PFLAG_FROZEN) {
1033 spin_unlock_irqrestore(ap->lock, flags);
1034 return AC_ERR_SYSTEM;
1035 }
1036
1037 /* initialize internal qc */
1038
1039 /* XXX: Tag 0 is used for drivers with legacy EH as some
1040 * drivers choke if any other tag is given. This breaks
1041 * ata_tag_internal() test for those drivers. Don't use new
1042 * EH stuff without converting to it.
1043 */
1044 if (ap->ops->error_handler)
1045 tag = ATA_TAG_INTERNAL;
1046 else
1047 tag = 0;
1048
1049 if (test_and_set_bit(tag, &ap->qc_allocated))
1050 BUG();
1051 qc = __ata_qc_from_tag(ap, tag);
1052
1053 qc->tag = tag;
1054 qc->scsicmd = NULL;
1055 qc->ap = ap;
1056 qc->dev = dev;
1057 ata_qc_reinit(qc);
1058
1059 preempted_tag = ap->active_tag;
1060 preempted_sactive = ap->sactive;
1061 preempted_qc_active = ap->qc_active;
1062 ap->active_tag = ATA_TAG_POISON;
1063 ap->sactive = 0;
1064 ap->qc_active = 0;
1065
1066 /* prepare & issue qc */
1067 qc->tf = *tf;
1068 if (cdb)
1069 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1070 qc->flags |= ATA_QCFLAG_RESULT_TF;
1071 qc->dma_dir = dma_dir;
1072 if (dma_dir != DMA_NONE) {
1073 ata_sg_init_one(qc, buf, buflen);
1074 qc->nsect = buflen / ATA_SECT_SIZE;
1075 }
1076
1077 qc->private_data = &wait;
1078 qc->complete_fn = ata_qc_complete_internal;
1079
1080 ata_qc_issue(qc);
1081
1082 spin_unlock_irqrestore(ap->lock, flags);
1083
1084 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1085
1086 ata_port_flush_task(ap);
1087
1088 if (!rc) {
1089 spin_lock_irqsave(ap->lock, flags);
1090
1091 /* We're racing with irq here. If we lose, the
1092 * following test prevents us from completing the qc
1093 * twice. If we win, the port is frozen and will be
1094 * cleaned up by ->post_internal_cmd().
1095 */
1096 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1097 qc->err_mask |= AC_ERR_TIMEOUT;
1098
1099 if (ap->ops->error_handler)
1100 ata_port_freeze(ap);
1101 else
1102 ata_qc_complete(qc);
1103
1104 if (ata_msg_warn(ap))
1105 ata_dev_printk(dev, KERN_WARNING,
1106 "qc timeout (cmd 0x%x)\n", command);
1107 }
1108
1109 spin_unlock_irqrestore(ap->lock, flags);
1110 }
1111
1112 /* do post_internal_cmd */
1113 if (ap->ops->post_internal_cmd)
1114 ap->ops->post_internal_cmd(qc);
1115
1116 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1117 if (ata_msg_warn(ap))
1118 ata_dev_printk(dev, KERN_WARNING,
1119 "zero err_mask for failed "
1120 "internal command, assuming AC_ERR_OTHER\n");
1121 qc->err_mask |= AC_ERR_OTHER;
1122 }
1123
1124 /* finish up */
1125 spin_lock_irqsave(ap->lock, flags);
1126
1127 *tf = qc->result_tf;
1128 err_mask = qc->err_mask;
1129
1130 ata_qc_free(qc);
1131 ap->active_tag = preempted_tag;
1132 ap->sactive = preempted_sactive;
1133 ap->qc_active = preempted_qc_active;
1134
1135 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1136 * Until those drivers are fixed, we detect the condition
1137 * here, fail the command with AC_ERR_SYSTEM and reenable the
1138 * port.
1139 *
1140 * Note that this doesn't change any behavior as internal
1141 * command failure results in disabling the device in the
1142 * higher layer for LLDDs without new reset/EH callbacks.
1143 *
1144 * Kill the following code as soon as those drivers are fixed.
1145 */
1146 if (ap->flags & ATA_FLAG_DISABLED) {
1147 err_mask |= AC_ERR_SYSTEM;
1148 ata_port_probe(ap);
1149 }
1150
1151 spin_unlock_irqrestore(ap->lock, flags);
1152
1153 return err_mask;
1154}
1155
1156/**
1157 * ata_do_simple_cmd - execute simple internal command
1158 * @dev: Device to which the command is sent
1159 * @cmd: Opcode to execute
1160 *
1161 * Execute a 'simple' command that consists only of the opcode
1162 * 'cmd' itself, without filling any other taskfile registers.
1163 *
1164 * LOCKING:
1165 * Kernel thread context (may sleep).
1166 *
1167 * RETURNS:
1168 * Zero on success, AC_ERR_* mask on failure
1169 */
1170unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1171{
1172 struct ata_taskfile tf;
1173
1174 ata_tf_init(dev, &tf);
1175
1176 tf.command = cmd;
1177 tf.flags |= ATA_TFLAG_DEVICE;
1178 tf.protocol = ATA_PROT_NODATA;
1179
1180 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1181}
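
/*
 * Illustrative sketch (not part of libata): a caller could flush the
 * device's write cache through ata_do_simple_cmd().  The helper name
 * below is hypothetical; the return value is the usual AC_ERR_* mask.
 */
static unsigned int example_flush_cache(struct ata_device *dev)
{
 /* ATA_CMD_FLUSH takes no parameters beyond the opcode itself */
 return ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
}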
1182
1183/**
1184 * ata_pio_need_iordy - check if iordy needed
1185 * @adev: ATA device
1186 *
1187 * Check if the current speed of the device requires IORDY. Used
1188 * by various controllers for chip configuration.
1189 */
1190
1191unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1192{
1193 int pio;
1194 int speed = adev->pio_mode - XFER_PIO_0;
1195
1196 if (speed < 2)
1197 return 0;
1198 if (speed > 2)
1199 return 1;
1200
1201 /* If we have no drive-specific rule, then PIO 2 is non-IORDY */
1202
1203 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1204 pio = adev->id[ATA_ID_EIDE_PIO];
1205 /* Is the speed faster than the drive allows non-IORDY? */
1206 if (pio) {
1207 /* These are cycle times, not frequencies - watch the logic! */
1208 if (pio > 240) /* PIO2 is 240 ns per cycle */
1209 return 1;
1210 return 0;
1211 }
1212 }
1213 return 0;
1214}
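
/*
 * Worked example: a drive whose ATA_ID_EIDE_PIO word reports a 383 ns
 * minimum non-IORDY cycle cannot meet the 240 ns PIO2 cycle without
 * flow control, so the function returns 1; a drive reporting 240 ns
 * or less gets 0.
 */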
1215
1216/**
1217 * ata_dev_read_id - Read ID data from the specified device
1218 * @dev: target device
1219 * @p_class: pointer to class of the target device (may be changed)
1220 * @post_reset: is this read ID post-reset?
1221 * @id: buffer to read IDENTIFY data into
1222 *
1223 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1224 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1225 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1226 * for pre-ATA4 drives.
1227 *
1228 * LOCKING:
1229 * Kernel thread context (may sleep)
1230 *
1231 * RETURNS:
1232 * 0 on success, -errno otherwise.
1233 */
1234int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1235 int post_reset, u16 *id)
1236{
1237 struct ata_port *ap = dev->ap;
1238 unsigned int class = *p_class;
1239 struct ata_taskfile tf;
1240 unsigned int err_mask = 0;
1241 const char *reason;
1242 int rc;
1243
1244 if (ata_msg_ctl(ap))
1245 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1246 __FUNCTION__, ap->id, dev->devno);
1247
1248 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1249
1250 retry:
1251 ata_tf_init(dev, &tf);
1252
1253 switch (class) {
1254 case ATA_DEV_ATA:
1255 tf.command = ATA_CMD_ID_ATA;
1256 break;
1257 case ATA_DEV_ATAPI:
1258 tf.command = ATA_CMD_ID_ATAPI;
1259 break;
1260 default:
1261 rc = -ENODEV;
1262 reason = "unsupported class";
1263 goto err_out;
1264 }
1265
1266 tf.protocol = ATA_PROT_PIO;
1267
1268 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1269 id, sizeof(id[0]) * ATA_ID_WORDS);
1270 if (err_mask) {
1271 rc = -EIO;
1272 reason = "I/O error";
1273 goto err_out;
1274 }
1275
1276 swap_buf_le16(id, ATA_ID_WORDS);
1277
1278 /* sanity check */
1279 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1280 rc = -EINVAL;
1281 reason = "device reports illegal type";
1282 goto err_out;
1283 }
1284
1285 if (post_reset && class == ATA_DEV_ATA) {
1286 /*
1287 * The exact sequence expected by certain pre-ATA4 drives is:
1288 * SRST RESET
1289 * IDENTIFY
1290 * INITIALIZE DEVICE PARAMETERS
1291 * anything else..
1292 * Some drives were very specific about that exact sequence.
1293 */
1294 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1295 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1296 if (err_mask) {
1297 rc = -EIO;
1298 reason = "INIT_DEV_PARAMS failed";
1299 goto err_out;
1300 }
1301
1302 /* current CHS translation info (id[53-58]) might be
1303 * changed. reread the identify device info.
1304 */
1305 post_reset = 0;
1306 goto retry;
1307 }
1308 }
1309
1310 *p_class = class;
1311
1312 return 0;
1313
1314 err_out:
1315 if (ata_msg_warn(ap))
1316 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1317 "(%s, err_mask=0x%x)\n", reason, err_mask);
1318 return rc;
1319}
1320
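/* A "knobbled" device sits behind a SATA-PATA bridge: the cable is
 * SATA but the IDENTIFY data does not claim SATA, so conservative
 * bridge limits are applied in ata_dev_configure() below.
 */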
1321static inline u8 ata_dev_knobble(struct ata_device *dev)
1322{
1323 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1324}
1325
1326static void ata_dev_config_ncq(struct ata_device *dev,
1327 char *desc, size_t desc_sz)
1328{
1329 struct ata_port *ap = dev->ap;
1330 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1331
1332 if (!ata_id_has_ncq(dev->id)) {
1333 desc[0] = '\0';
1334 return;
1335 }
1336
1337 if (ap->flags & ATA_FLAG_NCQ) {
1338 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1339 dev->flags |= ATA_DFLAG_NCQ;
1340 }
1341
1342 if (hdepth >= ddepth)
1343 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1344 else
1345 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1346}
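
/*
 * Worked example: a drive advertising queue depth 32 on a host whose
 * SCSI layer allows 31 outstanding commands (ATA_MAX_QUEUE - 1) is
 * described as "NCQ (depth 31/32)"; when the host is not the
 * limiting factor, only the device depth is printed.
 */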
1347
1348static void ata_set_port_max_cmd_len(struct ata_port *ap)
1349{
1350 int i;
1351
1352 if (ap->scsi_host) {
1353 unsigned int len = 0;
1354
1355 for (i = 0; i < ATA_MAX_DEVICES; i++)
1356 len = max(len, ap->device[i].cdb_len);
1357
1358 ap->scsi_host->max_cmd_len = len;
1359 }
1360}
1361
1362/**
1363 * ata_dev_configure - Configure the specified ATA/ATAPI device
1364 * @dev: Target device to configure
1365 * @print_info: Enable device info printout
1366 *
1367 * Configure @dev according to @dev->id. Generic and low-level
1368 * driver specific fixups are also applied.
1369 *
1370 * LOCKING:
1371 * Kernel thread context (may sleep)
1372 *
1373 * RETURNS:
1374 * 0 on success, -errno otherwise
1375 */
1376int ata_dev_configure(struct ata_device *dev, int print_info)
1377{
1378 struct ata_port *ap = dev->ap;
1379 const u16 *id = dev->id;
1380 unsigned int xfer_mask;
1381 char revbuf[7]; /* XYZ-99\0 */
1382 int rc;
1383
1384 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1385 ata_dev_printk(dev, KERN_INFO,
1386 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1387 __FUNCTION__, ap->id, dev->devno);
1388 return 0;
1389 }
1390
1391 if (ata_msg_probe(ap))
1392 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1393 __FUNCTION__, ap->id, dev->devno);
1394
1395 /* print device capabilities */
1396 if (ata_msg_probe(ap))
1397 ata_dev_printk(dev, KERN_DEBUG,
1398 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1399 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1400 __FUNCTION__,
1401 id[49], id[82], id[83], id[84],
1402 id[85], id[86], id[87], id[88]);
1403
1404 /* initialize to-be-configured parameters */
1405 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1406 dev->max_sectors = 0;
1407 dev->cdb_len = 0;
1408 dev->n_sectors = 0;
1409 dev->cylinders = 0;
1410 dev->heads = 0;
1411 dev->sectors = 0;
1412
1413 /*
1414 * common ATA, ATAPI feature tests
1415 */
1416
1417 /* find max transfer mode; for printk only */
1418 xfer_mask = ata_id_xfermask(id);
1419
1420 if (ata_msg_probe(ap))
1421 ata_dump_id(id);
1422
1423 /* ATA-specific feature tests */
1424 if (dev->class == ATA_DEV_ATA) {
1425 if (ata_id_is_cfa(id)) {
1426 if (id[162] & 1) /* CPRM may make this media unusable */
1427 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
1428 ap->id, dev->devno);
1429 snprintf(revbuf, 7, "CFA");
1430 }
1431 else
1432 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1433
1434 dev->n_sectors = ata_id_n_sectors(id);
1435
1436 if (ata_id_has_lba(id)) {
1437 const char *lba_desc;
1438 char ncq_desc[20];
1439
1440 lba_desc = "LBA";
1441 dev->flags |= ATA_DFLAG_LBA;
1442 if (ata_id_has_lba48(id)) {
1443 dev->flags |= ATA_DFLAG_LBA48;
1444 lba_desc = "LBA48";
1445 }
1446
1447 /* config NCQ */
1448 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1449
1450 /* print device info to dmesg */
1451 if (ata_msg_drv(ap) && print_info)
1452 ata_dev_printk(dev, KERN_INFO, "%s, "
1453 "max %s, %Lu sectors: %s %s\n",
1454 revbuf,
1455 ata_mode_string(xfer_mask),
1456 (unsigned long long)dev->n_sectors,
1457 lba_desc, ncq_desc);
1458 } else {
1459 /* CHS */
1460
1461 /* Default translation */
1462 dev->cylinders = id[1];
1463 dev->heads = id[3];
1464 dev->sectors = id[6];
1465
1466 if (ata_id_current_chs_valid(id)) {
1467 /* Current CHS translation is valid. */
1468 dev->cylinders = id[54];
1469 dev->heads = id[55];
1470 dev->sectors = id[56];
1471 }
1472
1473 /* print device info to dmesg */
1474 if (ata_msg_drv(ap) && print_info)
1475 ata_dev_printk(dev, KERN_INFO, "%s, "
1476 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1477 revbuf,
1478 ata_mode_string(xfer_mask),
1479 (unsigned long long)dev->n_sectors,
1480 dev->cylinders, dev->heads,
1481 dev->sectors);
1482 }
1483
1484 if (dev->id[59] & 0x100) {
1485 dev->multi_count = dev->id[59] & 0xff;
1486 if (ata_msg_drv(ap) && print_info)
1487 ata_dev_printk(dev, KERN_INFO,
1488 "ata%u: dev %u multi count %u\n",
1489 ap->id, dev->devno, dev->multi_count);
1490 }
1491
1492 dev->cdb_len = 16;
1493 }
1494
1495 /* ATAPI-specific feature tests */
1496 else if (dev->class == ATA_DEV_ATAPI) {
1497 char *cdb_intr_string = "";
1498
1499 rc = atapi_cdb_len(id);
1500 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1501 if (ata_msg_warn(ap))
1502 ata_dev_printk(dev, KERN_WARNING,
1503 "unsupported CDB len\n");
1504 rc = -EINVAL;
1505 goto err_out_nosup;
1506 }
1507 dev->cdb_len = (unsigned int) rc;
1508
1509 if (ata_id_cdb_intr(dev->id)) {
1510 dev->flags |= ATA_DFLAG_CDB_INTR;
1511 cdb_intr_string = ", CDB intr";
1512 }
1513
1514 /* print device info to dmesg */
1515 if (ata_msg_drv(ap) && print_info)
1516 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1517 ata_mode_string(xfer_mask),
1518 cdb_intr_string);
1519 }
1520
1521 ata_set_port_max_cmd_len(ap);
1522
1523 /* limit bridge transfers to udma5, 200 sectors */
1524 if (ata_dev_knobble(dev)) {
1525 if (ata_msg_drv(ap) && print_info)
1526 ata_dev_printk(dev, KERN_INFO,
1527 "applying bridge limits\n");
1528 dev->udma_mask &= ATA_UDMA5;
1529 dev->max_sectors = ATA_MAX_SECTORS;
1530 }
1531
1532 if (ap->ops->dev_config)
1533 ap->ops->dev_config(ap, dev);
1534
1535 if (ata_msg_probe(ap))
1536 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1537 __FUNCTION__, ata_chk_status(ap));
1538 return 0;
1539
1540err_out_nosup:
1541 if (ata_msg_probe(ap))
1542 ata_dev_printk(dev, KERN_DEBUG,
1543 "%s: EXIT, err\n", __FUNCTION__);
1544 return rc;
1545}
1546
1547/**
1548 * ata_bus_probe - Reset and probe ATA bus
1549 * @ap: Bus to probe
1550 *
1551 * Master ATA bus probing function. Initiates a hardware-dependent
1552 * bus reset, then attempts to identify any devices found on
1553 * the bus.
1554 *
1555 * LOCKING:
1556 * PCI/etc. bus probe sem.
1557 *
1558 * RETURNS:
1559 * Zero on success, negative errno otherwise.
1560 */
1561
1562int ata_bus_probe(struct ata_port *ap)
1563{
1564 unsigned int classes[ATA_MAX_DEVICES];
1565 int tries[ATA_MAX_DEVICES];
1566 int i, rc, down_xfermask;
1567 struct ata_device *dev;
1568
1569 ata_port_probe(ap);
1570
1571 for (i = 0; i < ATA_MAX_DEVICES; i++)
1572 tries[i] = ATA_PROBE_MAX_TRIES;
1573
1574 retry:
1575 down_xfermask = 0;
1576
1577 /* reset and determine device classes */
1578 ap->ops->phy_reset(ap);
1579
1580 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1581 dev = &ap->device[i];
1582
1583 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1584 dev->class != ATA_DEV_UNKNOWN)
1585 classes[dev->devno] = dev->class;
1586 else
1587 classes[dev->devno] = ATA_DEV_NONE;
1588
1589 dev->class = ATA_DEV_UNKNOWN;
1590 }
1591
1592 ata_port_probe(ap);
1593
1594 /* After the reset, the device is in PIO 0 and the controller
1595 state is undefined. Record the mode. */
1596
1597 for (i = 0; i < ATA_MAX_DEVICES; i++)
1598 ap->device[i].pio_mode = XFER_PIO_0;
1599
1600 /* read IDENTIFY page and configure devices */
1601 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1602 dev = &ap->device[i];
1603
1604 if (tries[i])
1605 dev->class = classes[i];
1606
1607 if (!ata_dev_enabled(dev))
1608 continue;
1609
1610 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1611 if (rc)
1612 goto fail;
1613
1614 rc = ata_dev_configure(dev, 1);
1615 if (rc)
1616 goto fail;
1617 }
1618
1619 /* configure transfer mode */
1620 rc = ata_set_mode(ap, &dev);
1621 if (rc) {
1622 down_xfermask = 1;
1623 goto fail;
1624 }
1625
1626 for (i = 0; i < ATA_MAX_DEVICES; i++)
1627 if (ata_dev_enabled(&ap->device[i]))
1628 return 0;
1629
1630 /* no device present, disable port */
1631 ata_port_disable(ap);
1632 ap->ops->port_disable(ap);
1633 return -ENODEV;
1634
1635 fail:
1636 switch (rc) {
1637 case -EINVAL:
1638 case -ENODEV:
1639 tries[dev->devno] = 0;
1640 break;
1641 case -EIO:
1642 sata_down_spd_limit(ap);
1643 /* fall through */
1644 default:
1645 tries[dev->devno]--;
1646 if (down_xfermask &&
1647 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1648 tries[dev->devno] = 0;
1649 }
1650
1651 if (!tries[dev->devno]) {
1652 ata_down_xfermask_limit(dev, 1);
1653 ata_dev_disable(dev);
1654 }
1655
1656 goto retry;
1657}
1658
1659/**
1660 * ata_port_probe - Mark port as enabled
1661 * @ap: Port for which we indicate enablement
1662 *
1663 * Modify @ap data structure such that the system
1664 * thinks that the entire port is enabled.
1665 *
1666 * LOCKING: host lock, or some other form of
1667 * serialization.
1668 */
1669
1670void ata_port_probe(struct ata_port *ap)
1671{
1672 ap->flags &= ~ATA_FLAG_DISABLED;
1673}
1674
1675/**
1676 * sata_print_link_status - Print SATA link status
1677 * @ap: SATA port whose link status is to be printed
1678 *
1679 * This function prints link speed and status of a SATA link.
1680 *
1681 * LOCKING:
1682 * None.
1683 */
1684static void sata_print_link_status(struct ata_port *ap)
1685{
1686 u32 sstatus, scontrol, tmp;
1687
1688 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1689 return;
1690 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1691
1692 if (ata_port_online(ap)) {
1693 tmp = (sstatus >> 4) & 0xf;
1694 ata_port_printk(ap, KERN_INFO,
1695 "SATA link up %s (SStatus %X SControl %X)\n",
1696 sata_spd_string(tmp), sstatus, scontrol);
1697 } else {
1698 ata_port_printk(ap, KERN_INFO,
1699 "SATA link down (SStatus %X SControl %X)\n",
1700 sstatus, scontrol);
1701 }
1702}
1703
1704/**
1705 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1706 * @ap: SATA port associated with target SATA PHY.
1707 *
1708 * This function issues commands to standard SATA Sxxx
1709 * PHY registers, to wake up the phy (and device), and
1710 * clear any reset condition.
1711 *
1712 * LOCKING:
1713 * PCI/etc. bus probe sem.
1714 *
1715 */
1716void __sata_phy_reset(struct ata_port *ap)
1717{
1718 u32 sstatus;
1719 unsigned long timeout = jiffies + (HZ * 5);
1720
1721 if (ap->flags & ATA_FLAG_SATA_RESET) {
1722 /* issue phy wake/reset */
1723 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1724 /* Couldn't find anything in SATA I/II specs, but
1725 * AHCI-1.1 10.4.2 says at least 1 ms. */
1726 mdelay(1);
1727 }
1728 /* phy wake/clear reset */
1729 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1730
1731 /* wait for phy to become ready, if necessary */
1732 do {
1733 msleep(200);
1734 sata_scr_read(ap, SCR_STATUS, &sstatus);
1735 if ((sstatus & 0xf) != 1)
1736 break;
1737 } while (time_before(jiffies, timeout));
1738
1739 /* print link status */
1740 sata_print_link_status(ap);
1741
1742 /* TODO: phy layer with polling, timeouts, etc. */
1743 if (!ata_port_offline(ap))
1744 ata_port_probe(ap);
1745 else
1746 ata_port_disable(ap);
1747
1748 if (ap->flags & ATA_FLAG_DISABLED)
1749 return;
1750
1751 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1752 ata_port_disable(ap);
1753 return;
1754 }
1755
1756 ap->cbl = ATA_CBL_SATA;
1757}
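
/*
 * For reference, the SControl values written above decode as follows:
 * bits 3:0 (DET) request PHY action (0x1 = issue COMRESET, 0x0 = no
 * action), bits 7:4 (SPD) limit link speed, and bits 11:8 (IPM)
 * restrict power states (0x3 = disallow Partial and Slumber).  So
 * 0x301 asserts COMRESET with power management disabled, and 0x300
 * releases the reset.
 */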
1758
1759/**
1760 * sata_phy_reset - Reset SATA bus.
1761 * @ap: SATA port associated with target SATA PHY.
1762 *
1763 * This function resets the SATA bus, and then probes
1764 * the bus for devices.
1765 *
1766 * LOCKING:
1767 * PCI/etc. bus probe sem.
1768 *
1769 */
1770void sata_phy_reset(struct ata_port *ap)
1771{
1772 __sata_phy_reset(ap);
1773 if (ap->flags & ATA_FLAG_DISABLED)
1774 return;
1775 ata_bus_reset(ap);
1776}
1777
1778/**
1779 * ata_dev_pair - return other device on cable
1780 * @adev: device
1781 *
1782 * Obtain the other device on the same cable; returns NULL if
1783 * none is present.
1784 */
1785
1786struct ata_device *ata_dev_pair(struct ata_device *adev)
1787{
1788 struct ata_port *ap = adev->ap;
1789 struct ata_device *pair = &ap->device[1 - adev->devno];
1790 if (!ata_dev_enabled(pair))
1791 return NULL;
1792 return pair;
1793}
1794
1795/**
1796 * ata_port_disable - Disable port.
1797 * @ap: Port to be disabled.
1798 *
1799 * Modify @ap data structure such that the system
1800 * thinks that the entire port is disabled, and should
1801 * never attempt to probe or communicate with devices
1802 * on this port.
1803 *
1804 * LOCKING: host lock, or some other form of
1805 * serialization.
1806 */
1807
1808void ata_port_disable(struct ata_port *ap)
1809{
1810 ap->device[0].class = ATA_DEV_NONE;
1811 ap->device[1].class = ATA_DEV_NONE;
1812 ap->flags |= ATA_FLAG_DISABLED;
1813}
1814
1815/**
1816 * sata_down_spd_limit - adjust SATA spd limit downward
1817 * @ap: Port to adjust SATA spd limit for
1818 *
1819 * Adjust SATA spd limit of @ap downward. Note that this
1820 * function only adjusts the limit. The change must be applied
1821 * using sata_set_spd().
1822 *
1823 * LOCKING:
1824 * Inherited from caller.
1825 *
1826 * RETURNS:
1827 * 0 on success, negative errno on failure
1828 */
1829int sata_down_spd_limit(struct ata_port *ap)
1830{
1831 u32 sstatus, spd, mask;
1832 int rc, highbit;
1833
1834 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1835 if (rc)
1836 return rc;
1837
1838 mask = ap->sata_spd_limit;
1839 if (mask <= 1)
1840 return -EINVAL;
1841 highbit = fls(mask) - 1;
1842 mask &= ~(1 << highbit);
1843
1844 spd = (sstatus >> 4) & 0xf;
1845 if (spd <= 1)
1846 return -EINVAL;
1847 spd--;
1848 mask &= (1 << spd) - 1;
1849 if (!mask)
1850 return -EINVAL;
1851
1852 ap->sata_spd_limit = mask;
1853
1854 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1855 sata_spd_string(fls(mask)));
1856
1857 return 0;
1858}
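
/*
 * Worked example: with sata_spd_limit == 0x3 (Gen1 and Gen2 allowed)
 * and the link running at 3.0 Gbps (SStatus SPD field == 2), the
 * highest bit is dropped and the mask is clamped below the current
 * speed, leaving 0x1: the next hardreset will limit the link to
 * 1.5 Gbps.
 */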
1859
1860static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1861{
1862 u32 spd, limit;
1863
1864 if (ap->sata_spd_limit == UINT_MAX)
1865 limit = 0;
1866 else
1867 limit = fls(ap->sata_spd_limit);
1868
1869 spd = (*scontrol >> 4) & 0xf;
1870 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1871
1872 return spd != limit;
1873}
1874
1875/**
1876 * sata_set_spd_needed - is SATA spd configuration needed
1877 * @ap: Port in question
1878 *
1879 * Test whether the spd limit in SControl matches
1880 * @ap->sata_spd_limit. This function is used to determine
1881 * whether hardreset is necessary to apply SATA spd
1882 * configuration.
1883 *
1884 * LOCKING:
1885 * Inherited from caller.
1886 *
1887 * RETURNS:
1888 * 1 if SATA spd configuration is needed, 0 otherwise.
1889 */
1890int sata_set_spd_needed(struct ata_port *ap)
1891{
1892 u32 scontrol;
1893
1894 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1895 return 0;
1896
1897 return __sata_set_spd_needed(ap, &scontrol);
1898}
1899
1900/**
1901 * sata_set_spd - set SATA spd according to spd limit
1902 * @ap: Port to set SATA spd for
1903 *
1904 * Set SATA spd of @ap according to sata_spd_limit.
1905 *
1906 * LOCKING:
1907 * Inherited from caller.
1908 *
1909 * RETURNS:
1910 * 0 if spd doesn't need to be changed, 1 if spd has been
1911 * changed. Negative errno if SCR registers are inaccessible.
1912 */
1913int sata_set_spd(struct ata_port *ap)
1914{
1915 u32 scontrol;
1916 int rc;
1917
1918 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1919 return rc;
1920
1921 if (!__sata_set_spd_needed(ap, &scontrol))
1922 return 0;
1923
1924 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1925 return rc;
1926
1927 return 1;
1928}
1929
1930/*
1931 * This mode timing computation functionality is ported over from
1932 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1933 */
1934/*
1935 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1936 * These were taken from the ATA/ATAPI-6 standard, rev 0a, except
1937 * for UDMA6, which is currently supported only by Maxtor drives.
1938 *
1939 * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
1940 */
1941
1942static const struct ata_timing ata_timing[] = {
1943
1944 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1945 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1946 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1947 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1948
1949 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
1950 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
1951 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1952 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1953 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1954
1955/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1956
1957 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1958 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1959 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1960
1961 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1962 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1963 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1964
1965 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
1966 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
1967 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1968 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1969
1970 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1971 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1972 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1973
1974/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1975
1976 { 0xFF }
1977};
1978
1979#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1980#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1981
1982static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1983{
1984 q->setup = EZ(t->setup * 1000, T);
1985 q->act8b = EZ(t->act8b * 1000, T);
1986 q->rec8b = EZ(t->rec8b * 1000, T);
1987 q->cyc8b = EZ(t->cyc8b * 1000, T);
1988 q->active = EZ(t->active * 1000, T);
1989 q->recover = EZ(t->recover * 1000, T);
1990 q->cycle = EZ(t->cycle * 1000, T);
1991 q->udma = EZ(t->udma * 1000, UT);
1992}
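
/*
 * Worked example: the table entries are nanoseconds and T/UT are
 * effectively clock periods in picoseconds, since each value is
 * multiplied by 1000 before dividing.  A 240 ns PIO2 cycle on a
 * 33 MHz bus clock (T = 30000) quantizes to
 * ENOUGH(240000, 30000) = (240000 - 1)/30000 + 1 = 8 clock periods;
 * rounding is always upward, so the quantized timing is never
 * shorter than requested.
 */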
1993
1994void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1995 struct ata_timing *m, unsigned int what)
1996{
1997 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1998 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1999 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2000 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2001 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2002 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2003 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2004 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2005}
2006
2007static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2008{
2009 const struct ata_timing *t;
2010
2011 for (t = ata_timing; t->mode != speed; t++)
2012 if (t->mode == 0xFF)
2013 return NULL;
2014 return t;
2015}
2016
2017int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2018 struct ata_timing *t, int T, int UT)
2019{
2020 const struct ata_timing *s;
2021 struct ata_timing p;
2022
2023 /*
2024 * Find the mode.
2025 */
2026
2027 if (!(s = ata_timing_find_mode(speed)))
2028 return -EINVAL;
2029
2030 memcpy(t, s, sizeof(*s));
2031
2032 /*
2033 * If the drive is an EIDE drive, it can tell us it needs extended
2034 * PIO/MW_DMA cycle timing.
2035 */
2036
2037 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2038 memset(&p, 0, sizeof(p));
2039 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2040 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2041 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2042 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2043 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2044 }
2045 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2046 }
2047
2048 /*
2049 * Convert the timing to bus clock counts.
2050 */
2051
2052 ata_timing_quantize(t, t, T, UT);
2053
2054 /*
2055 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2056 * S.M.A.R.T. and some other commands. We have to ensure that the
2057 * DMA cycle timing is no faster than the fastest PIO timing.
2058 */
2059
2060 if (speed > XFER_PIO_4) {
2061 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2062 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2063 }
2064
2065 /*
2066 * Lengthen active & recovery time so that cycle time is correct.
2067 */
2068
2069 if (t->act8b + t->rec8b < t->cyc8b) {
2070 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2071 t->rec8b = t->cyc8b - t->act8b;
2072 }
2073
2074 if (t->active + t->recover < t->cycle) {
2075 t->active += (t->cycle - (t->active + t->recover)) / 2;
2076 t->recover = t->cycle - t->active;
2077 }
2078
2079 return 0;
2080}
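
/*
 * Illustrative sketch (hypothetical helper, not part of libata): a
 * PATA LLDD on a 33 MHz (30 ns) bus clock might derive clock counts
 * for PIO4 like this, passing both periods in the picosecond units
 * that ata_timing_quantize() effectively expects.
 */
static int example_compute_pio4(struct ata_device *adev,
 struct ata_timing *t)
{
 return ata_timing_compute(adev, XFER_PIO_4, t, 30000, 30000);
}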
2081
2082/**
2083 * ata_down_xfermask_limit - adjust dev xfer masks downward
2084 * @dev: Device to adjust xfer masks
2085 * @force_pio0: Force PIO0
2086 *
2087 * Adjust xfer masks of @dev downward. Note that this function
2088 * does not apply the change. Invoking ata_set_mode() afterwards
2089 * will apply the limit.
2090 *
2091 * LOCKING:
2092 * Inherited from caller.
2093 *
2094 * RETURNS:
2095 * 0 on success, negative errno on failure
2096 */
2097int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2098{
2099 unsigned long xfer_mask;
2100 int highbit;
2101
2102 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2103 dev->udma_mask);
2104
2105 if (!xfer_mask)
2106 goto fail;
2107 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2108 if (xfer_mask & ATA_MASK_UDMA)
2109 xfer_mask &= ~ATA_MASK_MWDMA;
2110
2111 highbit = fls(xfer_mask) - 1;
2112 xfer_mask &= ~(1 << highbit);
2113 if (force_pio0)
2114 xfer_mask &= 1 << ATA_SHIFT_PIO;
2115 if (!xfer_mask)
2116 goto fail;
2117
2118 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2119 &dev->udma_mask);
2120
2121 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2122 ata_mode_string(xfer_mask));
2123
2124 return 0;
2125
2126 fail:
2127 return -EINVAL;
2128}
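
/*
 * Worked example: a device currently allowing UDMA0-5 first has its
 * MWDMA modes masked off (no gearing down from UDMA to MWDMA), then
 * the highest remaining bit cleared, leaving the PIO modes plus
 * UDMA0-4; with @force_pio0 set, everything but PIO0 is dropped
 * instead.
 */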
2129
2130static int ata_dev_set_mode(struct ata_device *dev)
2131{
2132 unsigned int err_mask;
2133 int rc;
2134
2135 dev->flags &= ~ATA_DFLAG_PIO;
2136 if (dev->xfer_shift == ATA_SHIFT_PIO)
2137 dev->flags |= ATA_DFLAG_PIO;
2138
2139 err_mask = ata_dev_set_xfermode(dev);
2140 if (err_mask) {
2141 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2142 "(err_mask=0x%x)\n", err_mask);
2143 return -EIO;
2144 }
2145
2146 rc = ata_dev_revalidate(dev, 0);
2147 if (rc)
2148 return rc;
2149
2150 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2151 dev->xfer_shift, (int)dev->xfer_mode);
2152
2153 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2154 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2155 return 0;
2156}
2157
2158/**
2159 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2160 * @ap: port on which timings will be programmed
2161 * @r_failed_dev: out parameter for the failed device
2162 *
2163 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2164 * ata_set_mode() fails, pointer to the failing device is
2165 * returned in @r_failed_dev.
2166 *
2167 * LOCKING:
2168 * PCI/etc. bus probe sem.
2169 *
2170 * RETURNS:
2171 * 0 on success, negative errno otherwise
2172 */
2173int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2174{
2175 struct ata_device *dev;
2176 int i, rc = 0, used_dma = 0, found = 0;
2177
2178 /* has private set_mode? */
2179 if (ap->ops->set_mode) {
2180 /* FIXME: make ->set_mode handle no device case and
2181 * return error code and failing device on failure.
2182 */
2183 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2184 if (ata_dev_ready(&ap->device[i])) {
2185 ap->ops->set_mode(ap);
2186 break;
2187 }
2188 }
2189 return 0;
2190 }
2191
2192 /* step 1: calculate xfer_mask */
2193 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2194 unsigned int pio_mask, dma_mask;
2195
2196 dev = &ap->device[i];
2197
2198 if (!ata_dev_enabled(dev))
2199 continue;
2200
2201 ata_dev_xfermask(dev);
2202
2203 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2204 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2205 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2206 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2207
2208 found = 1;
2209 if (dev->dma_mode)
2210 used_dma = 1;
2211 }
2212 if (!found)
2213 goto out;
2214
2215 /* step 2: always set host PIO timings */
2216 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2217 dev = &ap->device[i];
2218 if (!ata_dev_enabled(dev))
2219 continue;
2220
2221 if (!dev->pio_mode) {
2222 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2223 rc = -EINVAL;
2224 goto out;
2225 }
2226
2227 dev->xfer_mode = dev->pio_mode;
2228 dev->xfer_shift = ATA_SHIFT_PIO;
2229 if (ap->ops->set_piomode)
2230 ap->ops->set_piomode(ap, dev);
2231 }
2232
2233 /* step 3: set host DMA timings */
2234 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2235 dev = &ap->device[i];
2236
2237 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2238 continue;
2239
2240 dev->xfer_mode = dev->dma_mode;
2241 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2242 if (ap->ops->set_dmamode)
2243 ap->ops->set_dmamode(ap, dev);
2244 }
2245
2246 /* step 4: update devices' xfer mode */
2247 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2248 dev = &ap->device[i];
2249
2250 /* don't update suspended devices' xfer mode */
2251 if (!ata_dev_ready(dev))
2252 continue;
2253
2254 rc = ata_dev_set_mode(dev);
2255 if (rc)
2256 goto out;
2257 }
2258
2259 /* Record simplex status. If we selected DMA then the other
2260 * host channels are not permitted to do so.
2261 */
2262 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2263 ap->host->simplex_claimed = 1;
2264
2265 /* step 5: chip-specific finalisation */
2266 if (ap->ops->post_set_mode)
2267 ap->ops->post_set_mode(ap);
2268
2269 out:
2270 if (rc)
2271 *r_failed_dev = dev;
2272 return rc;
2273}
2274
2275/**
2276 * ata_tf_to_host - issue ATA taskfile to host controller
2277 * @ap: port to which command is being issued
2278 * @tf: ATA taskfile register set
2279 *
2280 * Issues ATA taskfile register set to ATA host controller,
2281 * with proper synchronization with interrupt handler and
2282 * other threads.
2283 *
2284 * LOCKING:
2285 * spin_lock_irqsave(host lock)
2286 */
2287
2288static inline void ata_tf_to_host(struct ata_port *ap,
2289 const struct ata_taskfile *tf)
2290{
2291 ap->ops->tf_load(ap, tf);
2292 ap->ops->exec_command(ap, tf);
2293}
2294
2295/**
2296 * ata_busy_sleep - sleep until BSY clears, or timeout
2297 * @ap: port containing status register to be polled
2298 * @tmout_pat: impatience timeout
2299 * @tmout: overall timeout
2300 *
2301 * Sleep until ATA Status register bit BSY clears,
2302 * or a timeout occurs.
2303 *
2304 * LOCKING: None.
2305 */
2306
2307unsigned int ata_busy_sleep(struct ata_port *ap,
2308 unsigned long tmout_pat, unsigned long tmout)
2309{
2310 unsigned long timer_start, timeout;
2311 u8 status;
2312
2313 status = ata_busy_wait(ap, ATA_BUSY, 300);
2314 timer_start = jiffies;
2315 timeout = timer_start + tmout_pat;
2316 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2317 msleep(50);
2318 status = ata_busy_wait(ap, ATA_BUSY, 3);
2319 }
2320
2321 if (status & ATA_BUSY)
2322 ata_port_printk(ap, KERN_WARNING,
2323 "port is slow to respond, please be patient\n");
2324
2325 timeout = timer_start + tmout;
2326 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2327 msleep(50);
2328 status = ata_chk_status(ap);
2329 }
2330
2331 if (status & ATA_BUSY) {
2332 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2333 "(%lu secs)\n", tmout / HZ);
2334 return 1;
2335 }
2336
2337 return 0;
2338}
2339
2340static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2341{
2342 struct ata_ioports *ioaddr = &ap->ioaddr;
2343 unsigned int dev0 = devmask & (1 << 0);
2344 unsigned int dev1 = devmask & (1 << 1);
2345 unsigned long timeout;
2346
2347 /* if device 0 was found in ata_devchk, wait for its
2348 * BSY bit to clear
2349 */
2350 if (dev0)
2351 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2352
2353 /* if device 1 was found in ata_devchk, wait for
2354 * register access, then wait for BSY to clear
2355 */
2356 timeout = jiffies + ATA_TMOUT_BOOT;
2357 while (dev1) {
2358 u8 nsect, lbal;
2359
2360 ap->ops->dev_select(ap, 1);
2361 if (ap->flags & ATA_FLAG_MMIO) {
2362 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2363 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2364 } else {
2365 nsect = inb(ioaddr->nsect_addr);
2366 lbal = inb(ioaddr->lbal_addr);
2367 }
2368 if ((nsect == 1) && (lbal == 1))
2369 break;
2370 if (time_after(jiffies, timeout)) {
2371 dev1 = 0;
2372 break;
2373 }
2374 msleep(50); /* give drive a breather */
2375 }
2376 if (dev1)
2377 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2378
2379 /* is all this really necessary? */
2380 ap->ops->dev_select(ap, 0);
2381 if (dev1)
2382 ap->ops->dev_select(ap, 1);
2383 if (dev0)
2384 ap->ops->dev_select(ap, 0);
2385}
2386
2387static unsigned int ata_bus_softreset(struct ata_port *ap,
2388 unsigned int devmask)
2389{
2390 struct ata_ioports *ioaddr = &ap->ioaddr;
2391
2392 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2393
2394 /* software reset. causes dev0 to be selected */
2395 if (ap->flags & ATA_FLAG_MMIO) {
2396 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2397 udelay(20); /* FIXME: flush */
2398 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2399 udelay(20); /* FIXME: flush */
2400 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2401 } else {
2402 outb(ap->ctl, ioaddr->ctl_addr);
2403 udelay(10);
2404 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2405 udelay(10);
2406 outb(ap->ctl, ioaddr->ctl_addr);
2407 }
2408
2409 /* The spec mandates ">= 2 ms" before checking status.
2410 * We wait 150 ms, because that was the magic delay used for
2411 * ATAPI devices in Hale Landis's ATADRVR, for the period
2412 * between writing the ATA command register and checking
2413 * status. Since waiting for "a while" before checking status
2414 * is fine post-SRST, we perform this magic delay here as well.
2415 *
2416 * The old drivers/ide code uses the 2 ms rule and then
2417 * waits for ready.
2418 */
2419 msleep(150);
2420
2421 /* Before we perform post-reset processing we want to see if
2422 * the bus shows 0xFF, because the odd clown forgets the D7
2423 * pulldown resistor.
2424 */
2425 if (ata_check_status(ap) == 0xFF) {
2426 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2427 return AC_ERR_OTHER;
2428 }
2429
2430 ata_bus_post_reset(ap, devmask);
2431
2432 return 0;
2433}
2434
2435/**
2436 * ata_bus_reset - reset host port and associated ATA channel
2437 * @ap: port to reset
2438 *
2439 * This is typically the first time we actually start issuing
2440 * commands to the ATA channel. We wait for BSY to clear, then
2441 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2442 * result. Determine what devices, if any, are on the channel
2443 * by looking at the device 0/1 error register. Look at the signature
2444 * stored in each device's taskfile registers, to determine if
2445 * the device is ATA or ATAPI.
2446 *
2447 * LOCKING:
2448 * PCI/etc. bus probe sem.
2449 * Obtains host lock.
2450 *
2451 * SIDE EFFECTS:
2452 * Sets ATA_FLAG_DISABLED if bus reset fails.
2453 */
2454
2455void ata_bus_reset(struct ata_port *ap)
2456{
2457 struct ata_ioports *ioaddr = &ap->ioaddr;
2458 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2459 u8 err;
2460 unsigned int dev0, dev1 = 0, devmask = 0;
2461
2462 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2463
2464 /* determine if device 0/1 are present */
2465 if (ap->flags & ATA_FLAG_SATA_RESET)
2466 dev0 = 1;
2467 else {
2468 dev0 = ata_devchk(ap, 0);
2469 if (slave_possible)
2470 dev1 = ata_devchk(ap, 1);
2471 }
2472
2473 if (dev0)
2474 devmask |= (1 << 0);
2475 if (dev1)
2476 devmask |= (1 << 1);
2477
2478 /* select device 0 again */
2479 ap->ops->dev_select(ap, 0);
2480
2481 /* issue bus reset */
2482 if (ap->flags & ATA_FLAG_SRST)
2483 if (ata_bus_softreset(ap, devmask))
2484 goto err_out;
2485
2486 /*
2487 * determine by signature whether we have ATA or ATAPI devices
2488 */
2489 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2490 if ((slave_possible) && (err != 0x81))
2491 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2492
2493 /* re-enable interrupts */
2494 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2495 ata_irq_on(ap);
2496
2497 /* is double-select really necessary? */
2498 if (ap->device[1].class != ATA_DEV_NONE)
2499 ap->ops->dev_select(ap, 1);
2500 if (ap->device[0].class != ATA_DEV_NONE)
2501 ap->ops->dev_select(ap, 0);
2502
2503 /* if no devices were detected, disable this port */
2504 if ((ap->device[0].class == ATA_DEV_NONE) &&
2505 (ap->device[1].class == ATA_DEV_NONE))
2506 goto err_out;
2507
2508 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2509 /* set up device control for ATA_FLAG_SATA_RESET */
2510 if (ap->flags & ATA_FLAG_MMIO)
2511 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2512 else
2513 outb(ap->ctl, ioaddr->ctl_addr);
2514 }
2515
2516 DPRINTK("EXIT\n");
2517 return;
2518
2519err_out:
2520 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2521 ap->ops->port_disable(ap);
2522
2523 DPRINTK("EXIT\n");
2524}
2525
2526/**
2527 * sata_phy_debounce - debounce SATA phy status
2528 * @ap: ATA port to debounce SATA phy status for
2529 * @params: timing parameters { interval, duration, timeout } in msec
2530 *
2531 * Make sure SStatus of @ap reaches a stable state, determined by
2532 * holding the same value where DET is not 1 for @duration, polled
2533 * every @interval, before @timeout. The timeout constrains when
2534 * the stable state may begin. Because DET gets stuck at 1 on some
2535 * controllers after hot unplugging, this function waits until the
2536 * timeout expires and then returns 0 if DET is stable at 1.
2537 *
2538 * LOCKING:
2539 * Kernel thread context (may sleep)
2540 *
2541 * RETURNS:
2542 * 0 on success, -errno on failure.
2543 */
2544int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2545{
2546 unsigned long interval_msec = params[0];
2547 unsigned long duration = params[1] * HZ / 1000;
2548 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2549 unsigned long last_jiffies;
2550 u32 last, cur;
2551 int rc;
2552
2553 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2554 return rc;
2555 cur &= 0xf;
2556
2557 last = cur;
2558 last_jiffies = jiffies;
2559
2560 while (1) {
2561 msleep(interval_msec);
2562 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2563 return rc;
2564 cur &= 0xf;
2565
2566 /* DET stable? */
2567 if (cur == last) {
2568 if (cur == 1 && time_before(jiffies, timeout))
2569 continue;
2570 if (time_after(jiffies, last_jiffies + duration))
2571 return 0;
2572 continue;
2573 }
2574
2575 /* unstable, start over */
2576 last = cur;
2577 last_jiffies = jiffies;
2578
2579 /* check timeout */
2580 if (time_after(jiffies, timeout))
2581 return -EBUSY;
2582 }
2583}
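
/*
 * For example, a hotplug debounce table of { 25, 500, 2000 } polls
 * SStatus every 25 ms, requires DET to hold the same value for
 * 500 ms, and gives up two seconds after the call (illustrative
 * values chosen for this example).
 */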
2584
2585/**
2586 * sata_phy_resume - resume SATA phy
2587 * @ap: ATA port to resume SATA phy for
2588 * @params: timing parameters { interval, duration, timeout } in msec
2589 *
2590 * Resume SATA phy of @ap and debounce it.
2591 *
2592 * LOCKING:
2593 * Kernel thread context (may sleep)
2594 *
2595 * RETURNS:
2596 * 0 on success, -errno on failure.
2597 */
2598int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2599{
2600 u32 scontrol;
2601 int rc;
2602
2603 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2604 return rc;
2605
2606 scontrol = (scontrol & 0x0f0) | 0x300;
2607
2608 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2609 return rc;
2610
2611 /* Some PHYs react badly if SStatus is pounded immediately
2612 * after resuming. Delay 200ms before debouncing.
2613 */
2614 msleep(200);
2615
2616 return sata_phy_debounce(ap, params);
2617}
2618
2619static void ata_wait_spinup(struct ata_port *ap)
2620{
2621 struct ata_eh_context *ehc = &ap->eh_context;
2622 unsigned long end, secs;
2623 int rc;
2624
2625 /* first, debounce phy if SATA */
2626 if (ap->cbl == ATA_CBL_SATA) {
2627 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2628
2629 /* if debounced successfully and offline, no need to wait */
2630 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2631 return;
2632 }
2633
2634 /* okay, let's give the drive time to spin up */
2635 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2636 secs = ((end - jiffies) + HZ - 1) / HZ;
2637
2638 if (time_after(jiffies, end))
2639 return;
2640
2641 if (secs > 5)
2642 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2643 "(%lu secs)\n", secs);
2644
2645 schedule_timeout_uninterruptible(end - jiffies);
2646}
2647
2648/**
2649 * ata_std_prereset - prepare for reset
2650 * @ap: ATA port to be reset
2651 *
2652 * @ap is about to be reset. Initialize it.
2653 *
2654 * LOCKING:
2655 * Kernel thread context (may sleep)
2656 *
2657 * RETURNS:
2658 * 0 on success, -errno otherwise.
2659 */
2660int ata_std_prereset(struct ata_port *ap)
2661{
2662 struct ata_eh_context *ehc = &ap->eh_context;
2663 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2664 int rc;
2665
2666 /* handle link resume & hotplug spinup */
2667 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2668 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2669 ehc->i.action |= ATA_EH_HARDRESET;
2670
2671 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2672 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2673 ata_wait_spinup(ap);
2674
2675 /* if we're about to do hardreset, nothing more to do */
2676 if (ehc->i.action & ATA_EH_HARDRESET)
2677 return 0;
2678
2679 /* if SATA, resume phy */
2680 if (ap->cbl == ATA_CBL_SATA) {
2681 rc = sata_phy_resume(ap, timing);
2682 if (rc && rc != -EOPNOTSUPP) {
2683 /* phy resume failed */
2684 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2685 "link for reset (errno=%d)\n", rc);
2686 return rc;
2687 }
2688 }
2689
2690 /* Wait for !BSY if the controller can wait for the first D2H
2691 * Reg FIS and we don't know that no device is attached.
2692 */
2693 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2694 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2695
2696 return 0;
2697}
2698
2699/**
2700 * ata_std_softreset - reset host port via ATA SRST
2701 * @ap: port to reset
2702 * @classes: resulting classes of attached devices
2703 *
2704 * Reset host port using ATA SRST.
2705 *
2706 * LOCKING:
2707 * Kernel thread context (may sleep)
2708 *
2709 * RETURNS:
2710 * 0 on success, -errno otherwise.
2711 */
2712int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2713{
2714 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2715 unsigned int devmask = 0, err_mask;
2716 u8 err;
2717
2718 DPRINTK("ENTER\n");
2719
2720 if (ata_port_offline(ap)) {
2721 classes[0] = ATA_DEV_NONE;
2722 goto out;
2723 }
2724
2725 /* determine if device 0/1 are present */
2726 if (ata_devchk(ap, 0))
2727 devmask |= (1 << 0);
2728 if (slave_possible && ata_devchk(ap, 1))
2729 devmask |= (1 << 1);
2730
2731 /* select device 0 again */
2732 ap->ops->dev_select(ap, 0);
2733
2734 /* issue bus reset */
2735 DPRINTK("about to softreset, devmask=%x\n", devmask);
2736 err_mask = ata_bus_softreset(ap, devmask);
2737 if (err_mask) {
2738 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2739 err_mask);
2740 return -EIO;
2741 }
2742
2743 /* determine by signature whether we have ATA or ATAPI devices */
2744 classes[0] = ata_dev_try_classify(ap, 0, &err);
2745 if (slave_possible && err != 0x81)
2746 classes[1] = ata_dev_try_classify(ap, 1, &err);
2747
2748 out:
2749 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2750 return 0;
2751}
2752
2753/**
2754 * sata_std_hardreset - reset host port via SATA phy reset
2755 * @ap: port to reset
2756 * @class: resulting class of attached device
2757 *
2758 * SATA phy-reset host port using DET bits of SControl register.
2759 *
2760 * LOCKING:
2761 * Kernel thread context (may sleep)
2762 *
2763 * RETURNS:
2764 * 0 on success, -errno otherwise.
2765 */
2766int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2767{
2768 struct ata_eh_context *ehc = &ap->eh_context;
2769 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2770 u32 scontrol;
2771 int rc;
2772
2773 DPRINTK("ENTER\n");
2774
2775 if (sata_set_spd_needed(ap)) {
2776 /* SATA spec says nothing about how to reconfigure
2777 * spd. To be on the safe side, turn off phy during
2778 * reconfiguration. This works for at least ICH7 AHCI
2779 * and Sil3124.
2780 */
2781 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2782 return rc;
2783
2784 scontrol = (scontrol & 0x0f0) | 0x304;
2785
2786 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2787 return rc;
2788
2789 sata_set_spd(ap);
2790 }
2791
2792 /* issue phy wake/reset */
2793 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2794 return rc;
2795
2796 scontrol = (scontrol & 0x0f0) | 0x301;
2797
2798 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2799 return rc;
2800
2801 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2802 * 10.4.2 says at least 1 ms.
2803 */
2804 msleep(1);
2805
2806 /* bring phy back */
2807 sata_phy_resume(ap, timing);
2808
2809 /* TODO: phy layer with polling, timeouts, etc. */
2810 if (ata_port_offline(ap)) {
2811 *class = ATA_DEV_NONE;
2812 DPRINTK("EXIT, link offline\n");
2813 return 0;
2814 }
2815
2816 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2817 ata_port_printk(ap, KERN_ERR,
2818 "COMRESET failed (device not ready)\n");
2819 return -EIO;
2820 }
2821
2822 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2823
2824 *class = ata_dev_try_classify(ap, 0, NULL);
2825
2826 DPRINTK("EXIT, class=%u\n", *class);
2827 return 0;
2828}
2829
2830/**
2831 * ata_std_postreset - standard postreset callback
2832 * @ap: the target ata_port
2833 * @classes: classes of attached devices
2834 *
2835 * This function is invoked after a successful reset. Note that
2836 * the device might have been reset more than once using
2837 * different reset methods before postreset is invoked.
2838 *
2839 * LOCKING:
2840 * Kernel thread context (may sleep)
2841 */
2842void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2843{
2844 u32 serror;
2845
2846 DPRINTK("ENTER\n");
2847
2848 /* print link status */
2849 sata_print_link_status(ap);
2850
2851 /* clear SError */
2852 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2853 sata_scr_write(ap, SCR_ERROR, serror);
2854
2855 /* re-enable interrupts */
2856 if (!ap->ops->error_handler) {
2857 /* FIXME: hack. create a hook instead */
2858 if (ap->ioaddr.ctl_addr)
2859 ata_irq_on(ap);
2860 }
2861
2862 /* is double-select really necessary? */
2863 if (classes[0] != ATA_DEV_NONE)
2864 ap->ops->dev_select(ap, 1);
2865 if (classes[1] != ATA_DEV_NONE)
2866 ap->ops->dev_select(ap, 0);
2867
2868 /* bail out if no device is present */
2869 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2870 DPRINTK("EXIT, no device\n");
2871 return;
2872 }
2873
2874 /* set up device control */
2875 if (ap->ioaddr.ctl_addr) {
2876 if (ap->flags & ATA_FLAG_MMIO)
2877 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2878 else
2879 outb(ap->ctl, ap->ioaddr.ctl_addr);
2880 }
2881
2882 DPRINTK("EXIT\n");
2883}
2884
2885/**
2886 * ata_dev_same_device - Determine whether new ID matches configured device
2887 * @dev: device to compare against
2888 * @new_class: class of the new device
2889 * @new_id: IDENTIFY page of the new device
2890 *
2891 * Compare @new_class and @new_id against @dev and determine
2892 * whether @dev is the device indicated by @new_class and
2893 * @new_id.
2894 *
2895 * LOCKING:
2896 * None.
2897 *
2898 * RETURNS:
2899 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2900 */
2901static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2902 const u16 *new_id)
2903{
2904 const u16 *old_id = dev->id;
2905 unsigned char model[2][41], serial[2][21];
2906 u64 new_n_sectors;
2907
2908 if (dev->class != new_class) {
2909 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2910 dev->class, new_class);
2911 return 0;
2912 }
2913
2914 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2915 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2916 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2917 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2918 new_n_sectors = ata_id_n_sectors(new_id);
2919
2920 if (strcmp(model[0], model[1])) {
2921 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2922 "'%s' != '%s'\n", model[0], model[1]);
2923 return 0;
2924 }
2925
2926 if (strcmp(serial[0], serial[1])) {
2927 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2928 "'%s' != '%s'\n", serial[0], serial[1]);
2929 return 0;
2930 }
2931
2932 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2933 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2934 "%llu != %llu\n",
2935 (unsigned long long)dev->n_sectors,
2936 (unsigned long long)new_n_sectors);
2937 return 0;
2938 }
2939
2940 return 1;
2941}
2942
2943/**
2944 * ata_dev_revalidate - Revalidate ATA device
2945 * @dev: device to revalidate
2946 * @post_reset: is this revalidation after reset?
2947 *
2948 * Re-read IDENTIFY page and make sure @dev is still attached to
2949 * the port.
2950 *
2951 * LOCKING:
2952 * Kernel thread context (may sleep)
2953 *
2954 * RETURNS:
2955 * 0 on success, negative errno otherwise
2956 */
2957int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2958{
2959 unsigned int class = dev->class;
2960 u16 *id = (void *)dev->ap->sector_buf;
2961 int rc;
2962
2963 if (!ata_dev_enabled(dev)) {
2964 rc = -ENODEV;
2965 goto fail;
2966 }
2967
2968 /* read ID data */
2969 rc = ata_dev_read_id(dev, &class, post_reset, id);
2970 if (rc)
2971 goto fail;
2972
2973 /* is the device still there? */
2974 if (!ata_dev_same_device(dev, class, id)) {
2975 rc = -ENODEV;
2976 goto fail;
2977 }
2978
2979 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2980
2981 /* configure device according to the new ID */
2982 rc = ata_dev_configure(dev, 0);
2983 if (rc == 0)
2984 return 0;
2985
2986 fail:
2987 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2988 return rc;
2989}
2990
2991static const char * const ata_dma_blacklist[] = {
2992 "WDC AC11000H", NULL,
2993 "WDC AC22100H", NULL,
2994 "WDC AC32500H", NULL,
2995 "WDC AC33100H", NULL,
2996 "WDC AC31600H", NULL,
2997 "WDC AC32100H", "24.09P07",
2998 "WDC AC23200L", "21.10N21",
2999 "Compaq CRD-8241B", NULL,
3000 "CRD-8400B", NULL,
3001 "CRD-8480B", NULL,
3002 "CRD-8482B", NULL,
3003 "CRD-84", NULL,
3004 "SanDisk SDP3B", NULL,
3005 "SanDisk SDP3B-64", NULL,
3006 "SANYO CD-ROM CRD", NULL,
3007 "HITACHI CDR-8", NULL,
3008 "HITACHI CDR-8335", NULL,
3009 "HITACHI CDR-8435", NULL,
3010 "Toshiba CD-ROM XM-6202B", NULL,
3011 "TOSHIBA CD-ROM XM-1702BC", NULL,
3012 "CD-532E-A", NULL,
3013 "E-IDE CD-ROM CR-840", NULL,
3014 "CD-ROM Drive/F5A", NULL,
3015 "WPI CDD-820", NULL,
3016 "SAMSUNG CD-ROM SC-148C", NULL,
3017 "SAMSUNG CD-ROM SC", NULL,
3018 "SanDisk SDP3B-64", NULL,
3019 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
3020 "_NEC DV5800A", NULL,
3021 "SAMSUNG CD-ROM SN-124", "N001"
3022};
3023
3024static int ata_strim(char *s, size_t len)
3025{
3026 len = strnlen(s, len);
3027
3028 /* ATAPI specifies that empty space is blank-filled; remove blanks */
3029 while ((len > 0) && (s[len - 1] == ' ')) {
3030 len--;
3031 s[len] = 0;
3032 }
3033 return len;
3034}
3035
3036static int ata_dma_blacklisted(const struct ata_device *dev)
3037{
3038 unsigned char model_num[40];
3039 unsigned char model_rev[16];
3040 unsigned int nlen, rlen;
3041 int i;
3042
3043 /* We don't support polling DMA.
3044 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
3045 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3046 */
3047 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3048 (dev->flags & ATA_DFLAG_CDB_INTR))
3049 return 1;
3050
3051 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3052 sizeof(model_num));
3053 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3054 sizeof(model_rev));
3055 nlen = ata_strim(model_num, sizeof(model_num));
3056 rlen = ata_strim(model_rev, sizeof(model_rev));
3057
3058 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3059 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3060 if (ata_dma_blacklist[i+1] == NULL)
3061 return 1;
3062 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
3063 return 1;
3064 }
3065 }
3066 return 0;
3067}
3068
3069/**
3070 * ata_dev_xfermask - Compute supported xfermask of the given device
3071 * @dev: Device to compute xfermask for
3072 *
3073 * Compute supported xfermask of @dev and store it in
3074 * dev->*_mask. This function is responsible for applying all
3075 * known limits including host controller limits, device
3076 * blacklist, etc...
3077 *
3078 * LOCKING:
3079 * None.
3080 */
3081static void ata_dev_xfermask(struct ata_device *dev)
3082{
3083 struct ata_port *ap = dev->ap;
3084 struct ata_host *host = ap->host;
3085 unsigned long xfer_mask;
3086
3087 /* controller modes available */
3088 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3089 ap->mwdma_mask, ap->udma_mask);
3090
3091 /* Apply cable rule here. Don't apply it early because when
3092 * we handle hot plug the cable type can itself change.
3093 */
3094 if (ap->cbl == ATA_CBL_PATA40)
3095 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3096
3097 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3098 dev->mwdma_mask, dev->udma_mask);
3099 xfer_mask &= ata_id_xfermask(dev->id);
3100
3101 /*
3102 * CFA Advanced TrueIDE timings are not allowed on a shared
3103 * cable
3104 */
3105 if (ata_dev_pair(dev)) {
3106 /* No PIO5 or PIO6 */
3107 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3108 /* No MWDMA3 or MWDMA4 */
3109 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3110 }
3111
3112 if (ata_dma_blacklisted(dev)) {
3113 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3114 ata_dev_printk(dev, KERN_WARNING,
3115 "device is on DMA blacklist, disabling DMA\n");
3116 }
3117
3118 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3119 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3120 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3121 "other device, disabling DMA\n");
3122 }
3123
3124 if (ap->ops->mode_filter)
3125 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3126
3127 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3128 &dev->mwdma_mask, &dev->udma_mask);
3129}
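
/*
 * For reference, ata_pack_xfermask()/ata_unpack_xfermask() fold the
 * three per-type masks into a single value and back, which is what
 * lets the filtering above be done with plain bit operations.  A
 * minimal sketch of the idea (the real helpers live elsewhere in
 * libata; the field layout shown is illustrative):
 *
 *	mask = pio_mask |
 *	       (mwdma_mask << ATA_SHIFT_MWDMA) |
 *	       (udma_mask << ATA_SHIFT_UDMA);
 */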
3130
3131/**
3132 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3133 * @dev: Device to which command will be sent
3134 *
3135 * Issue SET FEATURES - XFER MODE command to device @dev
3136 * on its port.
3137 *
3138 * LOCKING:
3139 * PCI/etc. bus probe sem.
3140 *
3141 * RETURNS:
3142 * 0 on success, AC_ERR_* mask otherwise.
3143 */
3144
3145static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3146{
3147 struct ata_taskfile tf;
3148 unsigned int err_mask;
3149
3150 /* set up set-features taskfile */
3151 DPRINTK("set features - xfer mode\n");
3152
3153 ata_tf_init(dev, &tf);
3154 tf.command = ATA_CMD_SET_FEATURES;
3155 tf.feature = SETFEATURES_XFER;
3156 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3157 tf.protocol = ATA_PROT_NODATA;
3158 tf.nsect = dev->xfer_mode;
3159
3160 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3161
3162 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3163 return err_mask;
3164}
3165
3166/**
3167 * ata_dev_init_params - Issue INIT DEV PARAMS command
3168 * @dev: Device to which command will be sent
3169 * @heads: Number of heads (taskfile parameter)
3170 * @sectors: Number of sectors (taskfile parameter)
3171 *
3172 * LOCKING:
3173 * Kernel thread context (may sleep)
3174 *
3175 * RETURNS:
3176 * 0 on success, AC_ERR_* mask otherwise.
3177 */
3178static unsigned int ata_dev_init_params(struct ata_device *dev,
3179 u16 heads, u16 sectors)
3180{
3181 struct ata_taskfile tf;
3182 unsigned int err_mask;
3183
3184 /* Number of sectors per track 1-255. Number of heads 1-16 */
3185 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3186 return AC_ERR_INVALID;
3187
3188 /* set up init dev params taskfile */
3189 DPRINTK("init dev params\n");
3190
3191 ata_tf_init(dev, &tf);
3192 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3193 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3194 tf.protocol = ATA_PROT_NODATA;
3195 tf.nsect = sectors;
3196 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3197
3198 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3199
3200 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3201 return err_mask;
3202}
3203
3204/**
3205 * ata_sg_clean - Unmap DMA memory associated with command
3206 * @qc: Command containing DMA memory to be released
3207 *
3208 * Unmap all mapped DMA memory associated with this command.
3209 *
3210 * LOCKING:
3211 * spin_lock_irqsave(host lock)
3212 */
3213
3214static void ata_sg_clean(struct ata_queued_cmd *qc)
3215{
3216 struct ata_port *ap = qc->ap;
3217 struct scatterlist *sg = qc->__sg;
3218 int dir = qc->dma_dir;
3219 void *pad_buf = NULL;
3220
3221 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3222 WARN_ON(sg == NULL);
3223
3224 if (qc->flags & ATA_QCFLAG_SINGLE)
3225 WARN_ON(qc->n_elem > 1);
3226
3227 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3228
3229 /* if we padded the buffer out to a 32-bit boundary, and data
3230 * xfer direction is from-device, we must copy from the
3231 * pad buffer back into the supplied buffer
3232 */
3233 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3234 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3235
3236 if (qc->flags & ATA_QCFLAG_SG) {
3237 if (qc->n_elem)
3238 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3239 /* restore last sg */
3240 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3241 if (pad_buf) {
3242 struct scatterlist *psg = &qc->pad_sgent;
3243 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3244 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3245 kunmap_atomic(addr, KM_IRQ0);
3246 }
3247 } else {
3248 if (qc->n_elem)
3249 dma_unmap_single(ap->dev,
3250 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3251 dir);
3252 /* restore sg */
3253 sg->length += qc->pad_len;
3254 if (pad_buf)
3255 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3256 pad_buf, qc->pad_len);
3257 }
3258
3259 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3260 qc->__sg = NULL;
3261}
3262
3263/**
3264 * ata_fill_sg - Fill PCI IDE PRD table
3265 * @qc: Metadata associated with taskfile to be transferred
3266 *
3267 * Fill PCI IDE PRD (scatter-gather) table with segments
3268 * associated with the current disk command.
3269 *
3270 * LOCKING:
3271 * spin_lock_irqsave(host lock)
3272 *
3273 */
3274static void ata_fill_sg(struct ata_queued_cmd *qc)
3275{
3276 struct ata_port *ap = qc->ap;
3277 struct scatterlist *sg;
3278 unsigned int idx;
3279
3280 WARN_ON(qc->__sg == NULL);
3281 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3282
3283 idx = 0;
3284 ata_for_each_sg(sg, qc) {
3285 u32 addr, offset;
3286 u32 sg_len, len;
3287
3288 /* determine if physical DMA addr spans 64K boundary.
3289 * Note h/w doesn't support 64-bit, so we unconditionally
3290 * truncate dma_addr_t to u32.
3291 */
3292 addr = (u32) sg_dma_address(sg);
3293 sg_len = sg_dma_len(sg);
3294
3295 while (sg_len) {
3296 offset = addr & 0xffff;
3297 len = sg_len;
3298 if ((offset + sg_len) > 0x10000)
3299 len = 0x10000 - offset;
3300
3301 ap->prd[idx].addr = cpu_to_le32(addr);
3302 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3303 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3304
3305 idx++;
3306 sg_len -= len;
3307 addr += len;
3308 }
3309 }
3310
3311 if (idx)
3312 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3313}
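
/*
 * Worked example of the 64K boundary split above: a segment with DMA
 * address 0x0000f000 and length 0x3000 crosses a 64K boundary and is
 * therefore emitted as two PRD entries:
 *
 *	PRD[n]   = (0x0000f000, 0x1000)	up to the boundary
 *	PRD[n+1] = (0x00010000, 0x2000)	the remainder
 */
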
3314/**
3315 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3316 * @qc: Metadata associated with taskfile to check
3317 *
3318 * Allow low-level driver to filter ATA PACKET commands, returning
3319 * a status indicating whether or not it is OK to use DMA for the
3320 * supplied PACKET command.
3321 *
3322 * LOCKING:
3323 * spin_lock_irqsave(host lock)
3324 *
3325 * RETURNS: 0 when ATAPI DMA can be used,
3326 * nonzero otherwise
3327 */
3328int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3329{
3330 struct ata_port *ap = qc->ap;
3331 int rc = 0; /* Assume ATAPI DMA is OK by default */
3332
3333 if (ap->ops->check_atapi_dma)
3334 rc = ap->ops->check_atapi_dma(qc);
3335
3336 return rc;
3337}
3338/**
3339 * ata_qc_prep - Prepare taskfile for submission
3340 * @qc: Metadata associated with taskfile to be prepared
3341 *
3342 * Prepare ATA taskfile for submission.
3343 *
3344 * LOCKING:
3345 * spin_lock_irqsave(host lock)
3346 */
3347void ata_qc_prep(struct ata_queued_cmd *qc)
3348{
3349 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3350 return;
3351
3352 ata_fill_sg(qc);
3353}
3354
3355void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3356
3357/**
3358 * ata_sg_init_one - Associate command with memory buffer
3359 * @qc: Command to be associated
3360 * @buf: Memory buffer
3361 * @buflen: Length of memory buffer, in bytes.
3362 *
3363 * Initialize the data-related elements of queued_cmd @qc
3364 * to point to a single memory buffer, @buf of byte length @buflen.
3365 *
3366 * LOCKING:
3367 * spin_lock_irqsave(host lock)
3368 */
3369
3370void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3371{
3372 struct scatterlist *sg;
3373
3374 qc->flags |= ATA_QCFLAG_SINGLE;
3375
3376 memset(&qc->sgent, 0, sizeof(qc->sgent));
3377 qc->__sg = &qc->sgent;
3378 qc->n_elem = 1;
3379 qc->orig_n_elem = 1;
3380 qc->buf_virt = buf;
3381 qc->nbytes = buflen;
3382
3383 sg = qc->__sg;
3384 sg_init_one(sg, buf, buflen);
3385}
3386
3387/**
3388 * ata_sg_init - Associate command with scatter-gather table.
3389 * @qc: Command to be associated
3390 * @sg: Scatter-gather table.
3391 * @n_elem: Number of elements in s/g table.
3392 *
3393 * Initialize the data-related elements of queued_cmd @qc
3394 * to point to a scatter-gather table @sg, containing @n_elem
3395 * elements.
3396 *
3397 * LOCKING:
3398 * spin_lock_irqsave(host lock)
3399 */
3400
3401void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3402 unsigned int n_elem)
3403{
3404 qc->flags |= ATA_QCFLAG_SG;
3405 qc->__sg = sg;
3406 qc->n_elem = n_elem;
3407 qc->orig_n_elem = n_elem;
3408}
3409
3410/**
3411 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3412 * @qc: Command with memory buffer to be mapped.
3413 *
3414 * DMA-map the memory buffer associated with queued_cmd @qc.
3415 *
3416 * LOCKING:
3417 * spin_lock_irqsave(host lock)
3418 *
3419 * RETURNS:
3420 * Zero on success, negative on error.
3421 */
3422
3423static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3424{
3425 struct ata_port *ap = qc->ap;
3426 int dir = qc->dma_dir;
3427 struct scatterlist *sg = qc->__sg;
3428 dma_addr_t dma_address;
3429 int trim_sg = 0;
3430
3431 /* we must lengthen transfers to end on a 32-bit boundary */
3432 qc->pad_len = sg->length & 3;
3433 if (qc->pad_len) {
3434 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3435 struct scatterlist *psg = &qc->pad_sgent;
3436
3437 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3438
3439 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3440
3441 if (qc->tf.flags & ATA_TFLAG_WRITE)
3442 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3443 qc->pad_len);
3444
3445 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3446 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3447 /* trim sg */
3448 sg->length -= qc->pad_len;
3449 if (sg->length == 0)
3450 trim_sg = 1;
3451
3452 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3453 sg->length, qc->pad_len);
3454 }
3455
3456 if (trim_sg) {
3457 qc->n_elem--;
3458 goto skip_map;
3459 }
3460
3461 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3462 sg->length, dir);
3463 if (dma_mapping_error(dma_address)) {
3464 /* restore sg */
3465 sg->length += qc->pad_len;
3466 return -1;
3467 }
3468
3469 sg_dma_address(sg) = dma_address;
3470 sg_dma_len(sg) = sg->length;
3471
3472skip_map:
3473 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3474 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3475
3476 return 0;
3477}
3478
3479/**
3480 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3481 * @qc: Command with scatter-gather table to be mapped.
3482 *
3483 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3484 *
3485 * LOCKING:
3486 * spin_lock_irqsave(host lock)
3487 *
3488 * RETURNS:
3489 * Zero on success, negative on error.
3490 *
3491 */
3492
3493static int ata_sg_setup(struct ata_queued_cmd *qc)
3494{
3495 struct ata_port *ap = qc->ap;
3496 struct scatterlist *sg = qc->__sg;
3497 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3498 int n_elem, pre_n_elem, dir, trim_sg = 0;
3499
3500 VPRINTK("ENTER, ata%u\n", ap->id);
3501 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3502
3503 /* we must lengthen transfers to end on a 32-bit boundary */
3504 qc->pad_len = lsg->length & 3;
3505 if (qc->pad_len) {
3506 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3507 struct scatterlist *psg = &qc->pad_sgent;
3508 unsigned int offset;
3509
3510 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3511
3512 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3513
3514 /*
3515 * psg->page/offset are used to copy to-be-written
3516 * data in this function or read data in ata_sg_clean.
3517 */
3518 offset = lsg->offset + lsg->length - qc->pad_len;
3519 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3520 psg->offset = offset_in_page(offset);
3521
3522 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3523 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3524 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3525 kunmap_atomic(addr, KM_IRQ0);
3526 }
3527
3528 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3529 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3530 /* trim last sg */
3531 lsg->length -= qc->pad_len;
3532 if (lsg->length == 0)
3533 trim_sg = 1;
3534
3535 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3536 qc->n_elem - 1, lsg->length, qc->pad_len);
3537 }
3538
3539 pre_n_elem = qc->n_elem;
3540 if (trim_sg && pre_n_elem)
3541 pre_n_elem--;
3542
3543 if (!pre_n_elem) {
3544 n_elem = 0;
3545 goto skip_map;
3546 }
3547
3548 dir = qc->dma_dir;
3549 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3550 if (n_elem < 1) {
3551 /* restore last sg */
3552 lsg->length += qc->pad_len;
3553 return -1;
3554 }
3555
3556 DPRINTK("%d sg elements mapped\n", n_elem);
3557
3558skip_map:
3559 qc->n_elem = n_elem;
3560
3561 return 0;
3562}
3563
3564/**
3565 * swap_buf_le16 - swap halves of 16-bit words in place
3566 * @buf: Buffer to swap
3567 * @buf_words: Number of 16-bit words in buffer.
3568 *
3569 * Swap halves of 16-bit words if needed to convert from
3570 * little-endian byte order to native cpu byte order, or
3571 * vice-versa.
3572 *
3573 * LOCKING:
3574 * Inherited from caller.
3575 */
3576void swap_buf_le16(u16 *buf, unsigned int buf_words)
3577{
3578#ifdef __BIG_ENDIAN
3579 unsigned int i;
3580
3581 for (i = 0; i < buf_words; i++)
3582 buf[i] = le16_to_cpu(buf[i]);
3583#endif /* __BIG_ENDIAN */
3584}
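
/*
 * Typical use, assuming id[] has just been filled by a data transfer
 * (IDENTIFY data is little-endian on the wire, so this is a no-op on
 * little-endian CPUs):
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */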
3585
3586/**
3587 * ata_mmio_data_xfer - Transfer data by MMIO
3588 * @adev: device for this I/O
3589 * @buf: data buffer
3590 * @buflen: buffer length
3591 * @write_data: read/write
3592 *
3593 * Transfer data from/to the device data register by MMIO.
3594 *
3595 * LOCKING:
3596 * Inherited from caller.
3597 */
3598
3599void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3600 unsigned int buflen, int write_data)
3601{
3602 struct ata_port *ap = adev->ap;
3603 unsigned int i;
3604 unsigned int words = buflen >> 1;
3605 u16 *buf16 = (u16 *) buf;
3606 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3607
3608 /* Transfer multiple of 2 bytes */
3609 if (write_data) {
3610 for (i = 0; i < words; i++)
3611 writew(le16_to_cpu(buf16[i]), mmio);
3612 } else {
3613 for (i = 0; i < words; i++)
3614 buf16[i] = cpu_to_le16(readw(mmio));
3615 }
3616
3617 /* Transfer trailing 1 byte, if any. */
3618 if (unlikely(buflen & 0x01)) {
3619 u16 align_buf[1] = { 0 };
3620 unsigned char *trailing_buf = buf + buflen - 1;
3621
3622 if (write_data) {
3623 memcpy(align_buf, trailing_buf, 1);
3624 writew(le16_to_cpu(align_buf[0]), mmio);
3625 } else {
3626 align_buf[0] = cpu_to_le16(readw(mmio));
3627 memcpy(trailing_buf, align_buf, 1);
3628 }
3629 }
3630}
3631
3632/**
3633 * ata_pio_data_xfer - Transfer data by PIO
3634 * @adev: device to target
3635 * @buf: data buffer
3636 * @buflen: buffer length
3637 * @write_data: read/write
3638 *
3639 * Transfer data from/to the device data register by PIO.
3640 *
3641 * LOCKING:
3642 * Inherited from caller.
3643 */
3644
3645void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3646 unsigned int buflen, int write_data)
3647{
3648 struct ata_port *ap = adev->ap;
3649 unsigned int words = buflen >> 1;
3650
3651 /* Transfer multiple of 2 bytes */
3652 if (write_data)
3653 outsw(ap->ioaddr.data_addr, buf, words);
3654 else
3655 insw(ap->ioaddr.data_addr, buf, words);
3656
3657 /* Transfer trailing 1 byte, if any. */
3658 if (unlikely(buflen & 0x01)) {
3659 u16 align_buf[1] = { 0 };
3660 unsigned char *trailing_buf = buf + buflen - 1;
3661
3662 if (write_data) {
3663 memcpy(align_buf, trailing_buf, 1);
3664 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3665 } else {
3666 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3667 memcpy(trailing_buf, align_buf, 1);
3668 }
3669 }
3670}
3671
3672/**
3673 * ata_pio_data_xfer_noirq - Transfer data by PIO
3674 * @adev: device to target
3675 * @buf: data buffer
3676 * @buflen: buffer length
3677 * @write_data: read/write
3678 *
3679 * Transfer data from/to the device data register by PIO. Do the
3680 * transfer with interrupts disabled.
3681 *
3682 * LOCKING:
3683 * Inherited from caller.
3684 */
3685
3686void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3687 unsigned int buflen, int write_data)
3688{
3689 unsigned long flags;
3690 local_irq_save(flags);
3691 ata_pio_data_xfer(adev, buf, buflen, write_data);
3692 local_irq_restore(flags);
3693}
3694
3695
3696/**
3697 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3698 * @qc: Command in progress
3699 *
3700 * Transfer ATA_SECT_SIZE bytes of data from/to the ATA device.
3701 *
3702 * LOCKING:
3703 * Inherited from caller.
3704 */
3705
3706static void ata_pio_sector(struct ata_queued_cmd *qc)
3707{
3708 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3709 struct scatterlist *sg = qc->__sg;
3710 struct ata_port *ap = qc->ap;
3711 struct page *page;
3712 unsigned int offset;
3713 unsigned char *buf;
3714
3715 if (qc->cursect == (qc->nsect - 1))
3716 ap->hsm_task_state = HSM_ST_LAST;
3717
3718 page = sg[qc->cursg].page;
3719 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3720
3721 /* get the current page and offset */
3722 page = nth_page(page, (offset >> PAGE_SHIFT));
3723 offset %= PAGE_SIZE;
3724
3725 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3726
3727 if (PageHighMem(page)) {
3728 unsigned long flags;
3729
3730 /* FIXME: use a bounce buffer */
3731 local_irq_save(flags);
3732 buf = kmap_atomic(page, KM_IRQ0);
3733
3734 /* do the actual data transfer */
3735 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3736
3737 kunmap_atomic(buf, KM_IRQ0);
3738 local_irq_restore(flags);
3739 } else {
3740 buf = page_address(page);
3741 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3742 }
3743
3744 qc->cursect++;
3745 qc->cursg_ofs++;
3746
3747 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3748 qc->cursg++;
3749 qc->cursg_ofs = 0;
3750 }
3751}
3752
3753/**
3754 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3755 * @qc: Command in progress
3756 *
3757 * Transfer one or more ATA_SECT_SIZE sectors of data from/to the
3758 * ATA device for the DRQ request.
3759 *
3760 * LOCKING:
3761 * Inherited from caller.
3762 */
3763
3764static void ata_pio_sectors(struct ata_queued_cmd *qc)
3765{
3766 if (is_multi_taskfile(&qc->tf)) {
3767 /* READ/WRITE MULTIPLE */
3768 unsigned int nsect;
3769
3770 WARN_ON(qc->dev->multi_count == 0);
3771
3772 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3773 while (nsect--)
3774 ata_pio_sector(qc);
3775 } else
3776 ata_pio_sector(qc);
3777}
3778
3779/**
3780 * atapi_send_cdb - Write CDB bytes to hardware
3781 * @ap: Port to which ATAPI device is attached.
3782 * @qc: Taskfile currently active
3783 *
3784 * When the device has indicated its readiness to accept
3785 * a CDB, this function is called to send the CDB.
3786 *
3787 * LOCKING:
3788 * caller.
3789 */
3790
3791static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3792{
3793 /* send SCSI cdb */
3794 DPRINTK("send cdb\n");
3795 WARN_ON(qc->dev->cdb_len < 12);
3796
3797 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3798 ata_altstatus(ap); /* flush */
3799
3800 switch (qc->tf.protocol) {
3801 case ATA_PROT_ATAPI:
3802 ap->hsm_task_state = HSM_ST;
3803 break;
3804 case ATA_PROT_ATAPI_NODATA:
3805 ap->hsm_task_state = HSM_ST_LAST;
3806 break;
3807 case ATA_PROT_ATAPI_DMA:
3808 ap->hsm_task_state = HSM_ST_LAST;
3809 /* initiate bmdma */
3810 ap->ops->bmdma_start(qc);
3811 break;
3812 }
3813}
3814
3815/**
3816 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3817 * @qc: Command in progress
3818 * @bytes: number of bytes
3819 *
3820 * Transfer data from/to the ATAPI device.
3821 *
3822 * LOCKING:
3823 * Inherited from caller.
3824 *
3825 */
3826
3827static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3828{
3829 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3830 struct scatterlist *sg = qc->__sg;
3831 struct ata_port *ap = qc->ap;
3832 struct page *page;
3833 unsigned char *buf;
3834 unsigned int offset, count;
3835
3836 if (qc->curbytes + bytes >= qc->nbytes)
3837 ap->hsm_task_state = HSM_ST_LAST;
3838
3839next_sg:
3840 if (unlikely(qc->cursg >= qc->n_elem)) {
3841 /*
3842 * The end of qc->sg is reached and the device expects
3843 * more data to transfer. In order not to overrun qc->sg
3844 * and to fulfill the length specified in the byte count register:
3845 * - for the read case, discard trailing data from the device
3846 * - for the write case, pad the device with zero data
3847 */
3848 u16 pad_buf[1] = { 0 };
3849 unsigned int words = bytes >> 1;
3850 unsigned int i;
3851
3852 if (words) /* warn about trailing data (bytes >= 2) */
3853 ata_dev_printk(qc->dev, KERN_WARNING,
3854 "%u bytes trailing data\n", bytes);
3855
3856 for (i = 0; i < words; i++)
3857 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3858
3859 ap->hsm_task_state = HSM_ST_LAST;
3860 return;
3861 }
3862
3863 sg = &qc->__sg[qc->cursg];
3864
3865 page = sg->page;
3866 offset = sg->offset + qc->cursg_ofs;
3867
3868 /* get the current page and offset */
3869 page = nth_page(page, (offset >> PAGE_SHIFT));
3870 offset %= PAGE_SIZE;
3871
3872 /* don't overrun current sg */
3873 count = min(sg->length - qc->cursg_ofs, bytes);
3874
3875 /* don't cross page boundaries */
3876 count = min(count, (unsigned int)PAGE_SIZE - offset);
3877
3878 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3879
3880 if (PageHighMem(page)) {
3881 unsigned long flags;
3882
3883 /* FIXME: use bounce buffer */
3884 local_irq_save(flags);
3885 buf = kmap_atomic(page, KM_IRQ0);
3886
3887 /* do the actual data transfer */
3888 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3889
3890 kunmap_atomic(buf, KM_IRQ0);
3891 local_irq_restore(flags);
3892 } else {
3893 buf = page_address(page);
3894 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3895 }
3896
3897 bytes -= count;
3898 qc->curbytes += count;
3899 qc->cursg_ofs += count;
3900
3901 if (qc->cursg_ofs == sg->length) {
3902 qc->cursg++;
3903 qc->cursg_ofs = 0;
3904 }
3905
3906 if (bytes)
3907 goto next_sg;
3908}
3909
3910/**
3911 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3912 * @qc: Command in progress
3913 *
3914 * Transfer data from/to the ATAPI device.
3915 *
3916 * LOCKING:
3917 * Inherited from caller.
3918 */
3919
3920static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3921{
3922 struct ata_port *ap = qc->ap;
3923 struct ata_device *dev = qc->dev;
3924 unsigned int ireason, bc_lo, bc_hi, bytes;
3925 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3926
3927 /* Abuse qc->result_tf for temp storage of intermediate TF
3928 * here to save some kernel stack usage.
3929 * For normal completion, qc->result_tf is not relevant. For
3930 * error, qc->result_tf is later overwritten by ata_qc_complete().
3931 * So, the correctness of qc->result_tf is not affected.
3932 */
3933 ap->ops->tf_read(ap, &qc->result_tf);
3934 ireason = qc->result_tf.nsect;
3935 bc_lo = qc->result_tf.lbam;
3936 bc_hi = qc->result_tf.lbah;
3937 bytes = (bc_hi << 8) | bc_lo;
3938
3939 /* shall be cleared to zero, indicating xfer of data */
3940 if (ireason & (1 << 0))
3941 goto err_out;
3942
3943 /* make sure transfer direction matches expected */
3944 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3945 if (do_write != i_write)
3946 goto err_out;
3947
3948 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3949
3950 __atapi_pio_bytes(qc, bytes);
3951
3952 return;
3953
3954err_out:
3955 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3956 qc->err_mask |= AC_ERR_HSM;
3957 ap->hsm_task_state = HSM_ST_ERR;
3958}
3959
3960/**
3961 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3962 * @ap: the target ata_port
3963 * @qc: qc in progress
3964 *
3965 * RETURNS:
3966 * 1 if ok in workqueue, 0 otherwise.
3967 */
3968
3969static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3970{
3971 if (qc->tf.flags & ATA_TFLAG_POLLING)
3972 return 1;
3973
3974 if (ap->hsm_task_state == HSM_ST_FIRST) {
3975 if (qc->tf.protocol == ATA_PROT_PIO &&
3976 (qc->tf.flags & ATA_TFLAG_WRITE))
3977 return 1;
3978
3979 if (is_atapi_taskfile(&qc->tf) &&
3980 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3981 return 1;
3982 }
3983
3984 return 0;
3985}
3986
3987/**
3988 * ata_hsm_qc_complete - finish a qc running on standard HSM
3989 * @qc: Command to complete
3990 * @in_wq: 1 if called from workqueue, 0 otherwise
3991 *
3992 * Finish @qc which is running on standard HSM.
3993 *
3994 * LOCKING:
3995 * If @in_wq is zero, spin_lock_irqsave(host lock).
3996 * Otherwise, none on entry and grabs host lock.
3997 */
3998static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3999{
4000 struct ata_port *ap = qc->ap;
4001 unsigned long flags;
4002
4003 if (ap->ops->error_handler) {
4004 if (in_wq) {
4005 spin_lock_irqsave(ap->lock, flags);
4006
4007 /* EH might have kicked in while host lock is
4008 * released.
4009 */
4010 qc = ata_qc_from_tag(ap, qc->tag);
4011 if (qc) {
4012 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4013 ata_irq_on(ap);
4014 ata_qc_complete(qc);
4015 } else
4016 ata_port_freeze(ap);
4017 }
4018
4019 spin_unlock_irqrestore(ap->lock, flags);
4020 } else {
4021 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4022 ata_qc_complete(qc);
4023 else
4024 ata_port_freeze(ap);
4025 }
4026 } else {
4027 if (in_wq) {
4028 spin_lock_irqsave(ap->lock, flags);
4029 ata_irq_on(ap);
4030 ata_qc_complete(qc);
4031 spin_unlock_irqrestore(ap->lock, flags);
4032 } else
4033 ata_qc_complete(qc);
4034 }
4035
4036 ata_altstatus(ap); /* flush */
4037}
4038
4039/**
4040 * ata_hsm_move - move the HSM to the next state.
4041 * @ap: the target ata_port
4042 * @qc: qc in progress
4043 * @status: current device status
4044 * @in_wq: 1 if called from workqueue, 0 otherwise
4045 *
4046 * RETURNS:
4047 * 1 when poll next status needed, 0 otherwise.
4048 */
4049int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4050 u8 status, int in_wq)
4051{
4052 unsigned long flags = 0;
4053 int poll_next;
4054
4055 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4056
4057 /* Make sure ata_qc_issue_prot() does not throw things
4058 * like DMA polling into the workqueue. Notice that
4059 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4060 */
4061 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4062
4063fsm_start:
4064 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4065 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4066
4067 switch (ap->hsm_task_state) {
4068 case HSM_ST_FIRST:
4069 /* Send first data block or PACKET CDB */
4070
4071 /* If polling, we will stay in the work queue after
4072 * sending the data. Otherwise, interrupt handler
4073 * takes over after sending the data.
4074 */
4075 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4076
4077 /* check device status */
4078 if (unlikely((status & ATA_DRQ) == 0)) {
4079 /* handle BSY=0, DRQ=0 as error */
4080 if (likely(status & (ATA_ERR | ATA_DF)))
4081 /* device stops HSM for abort/error */
4082 qc->err_mask |= AC_ERR_DEV;
4083 else
4084 /* HSM violation. Let EH handle this */
4085 qc->err_mask |= AC_ERR_HSM;
4086
4087 ap->hsm_task_state = HSM_ST_ERR;
4088 goto fsm_start;
4089 }
4090
4091 /* Device should not ask for data transfer (DRQ=1)
4092 * when it finds something wrong.
4093 * We ignore DRQ here and stop the HSM by
4094 * changing hsm_task_state to HSM_ST_ERR and
4095 * let the EH abort the command or reset the device.
4096 */
4097 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4098 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4099 ap->id, status);
4100 qc->err_mask |= AC_ERR_HSM;
4101 ap->hsm_task_state = HSM_ST_ERR;
4102 goto fsm_start;
4103 }
4104
4105 /* Send the CDB (atapi) or the first data block (ata pio out).
4106 * During the state transition, interrupt handler shouldn't
4107 * be invoked before the data transfer is complete and
4108 * hsm_task_state is changed. Hence, the following locking.
4109 */
4110 if (in_wq)
4111 spin_lock_irqsave(ap->lock, flags);
4112
4113 if (qc->tf.protocol == ATA_PROT_PIO) {
4114 /* PIO data out protocol.
4115 * send first data block.
4116 */
4117
4118 /* ata_pio_sectors() might change the state
4119 * to HSM_ST_LAST, so the state is changed here
4120 * before ata_pio_sectors().
4121 */
4122 ap->hsm_task_state = HSM_ST;
4123 ata_pio_sectors(qc);
4124 ata_altstatus(ap); /* flush */
4125 } else
4126 /* send CDB */
4127 atapi_send_cdb(ap, qc);
4128
4129 if (in_wq)
4130 spin_unlock_irqrestore(ap->lock, flags);
4131
4132 /* if polling, ata_pio_task() handles the rest.
4133 * otherwise, interrupt handler takes over from here.
4134 */
4135 break;
4136
4137 case HSM_ST:
4138 /* complete command or read/write the data register */
4139 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4140 /* ATAPI PIO protocol */
4141 if ((status & ATA_DRQ) == 0) {
4142 /* No more data to transfer or device error.
4143 * Device error will be tagged in HSM_ST_LAST.
4144 */
4145 ap->hsm_task_state = HSM_ST_LAST;
4146 goto fsm_start;
4147 }
4148
4149 /* Device should not ask for data transfer (DRQ=1)
4150 * when it finds something wrong.
4151 * We ignore DRQ here and stop the HSM by
4152 * changing hsm_task_state to HSM_ST_ERR and
4153 * let the EH abort the command or reset the device.
4154 */
4155 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4156 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4157 ap->id, status);
4158 qc->err_mask |= AC_ERR_HSM;
4159 ap->hsm_task_state = HSM_ST_ERR;
4160 goto fsm_start;
4161 }
4162
4163 atapi_pio_bytes(qc);
4164
4165 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4166 /* bad ireason reported by device */
4167 goto fsm_start;
4168
4169 } else {
4170 /* ATA PIO protocol */
4171 if (unlikely((status & ATA_DRQ) == 0)) {
4172 /* handle BSY=0, DRQ=0 as error */
4173 if (likely(status & (ATA_ERR | ATA_DF)))
4174 /* device stops HSM for abort/error */
4175 qc->err_mask |= AC_ERR_DEV;
4176 else
4177 /* HSM violation. Let EH handle this */
4178 qc->err_mask |= AC_ERR_HSM;
4179
4180 ap->hsm_task_state = HSM_ST_ERR;
4181 goto fsm_start;
4182 }
4183
4184 /* For PIO reads, some devices may ask for
4185 * data transfer (DRQ=1) along with ERR=1.
4186 * We respect DRQ here and transfer one
4187 * block of junk data before changing the
4188 * hsm_task_state to HSM_ST_ERR.
4189 *
4190 * For PIO writes, ERR=1 DRQ=1 doesn't make
4191 * sense since the data block has been
4192 * transferred to the device.
4193 */
4194 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4195 /* data might be corrupted */
4196 qc->err_mask |= AC_ERR_DEV;
4197
4198 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4199 ata_pio_sectors(qc);
4200 ata_altstatus(ap);
4201 status = ata_wait_idle(ap);
4202 }
4203
4204 if (status & (ATA_BUSY | ATA_DRQ))
4205 qc->err_mask |= AC_ERR_HSM;
4206
4207 /* ata_pio_sectors() might change the
4208 * state to HSM_ST_LAST, so the state
4209 * is changed after ata_pio_sectors().
4210 */
4211 ap->hsm_task_state = HSM_ST_ERR;
4212 goto fsm_start;
4213 }
4214
4215 ata_pio_sectors(qc);
4216
4217 if (ap->hsm_task_state == HSM_ST_LAST &&
4218 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4219 /* all data read */
4220 ata_altstatus(ap);
4221 status = ata_wait_idle(ap);
4222 goto fsm_start;
4223 }
4224 }
4225
4226 ata_altstatus(ap); /* flush */
4227 poll_next = 1;
4228 break;
4229
4230 case HSM_ST_LAST:
4231 if (unlikely(!ata_ok(status))) {
4232 qc->err_mask |= __ac_err_mask(status);
4233 ap->hsm_task_state = HSM_ST_ERR;
4234 goto fsm_start;
4235 }
4236
4237 /* no more data to transfer */
4238 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4239 ap->id, qc->dev->devno, status);
4240
4241 WARN_ON(qc->err_mask);
4242
4243 ap->hsm_task_state = HSM_ST_IDLE;
4244
4245 /* complete taskfile transaction */
4246 ata_hsm_qc_complete(qc, in_wq);
4247
4248 poll_next = 0;
4249 break;
4250
4251 case HSM_ST_ERR:
4252 /* make sure qc->err_mask is available to
4253 * know what's wrong and recover
4254 */
4255 WARN_ON(qc->err_mask == 0);
4256
4257 ap->hsm_task_state = HSM_ST_IDLE;
4258
4259 /* complete taskfile transaction */
4260 ata_hsm_qc_complete(qc, in_wq);
4261
4262 poll_next = 0;
4263 break;
4264 default:
4265 poll_next = 0;
4266 BUG();
4267 }
4268
4269 return poll_next;
4270}
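
/*
 * Summary of the transitions driven by the FSM above:
 *
 *	HSM_ST_FIRST -> HSM_ST       first data block / ATAPI data CDB sent
 *	HSM_ST_FIRST -> HSM_ST_LAST  ATAPI nodata/DMA CDB sent
 *	HSM_ST       -> HSM_ST_LAST  last data block transferred
 *	any state    -> HSM_ST_ERR   device error or HSM violation
 *	HSM_ST_LAST / HSM_ST_ERR -> HSM_ST_IDLE  qc completed
 */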
4271
4272static void ata_pio_task(void *_data)
4273{
4274 struct ata_queued_cmd *qc = _data;
4275 struct ata_port *ap = qc->ap;
4276 u8 status;
4277 int poll_next;
4278
4279fsm_start:
4280 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4281
4282 /*
4283 * This is purely heuristic. This is a fast path.
4284 * Sometimes when we enter, BSY will be cleared in
4285 * a chk-status or two. If not, the drive is probably seeking
4286 * or something. Snooze for a couple of msecs, then
4287 * chk-status again. If still busy, queue delayed work.
4288 */
4289 status = ata_busy_wait(ap, ATA_BUSY, 5);
4290 if (status & ATA_BUSY) {
4291 msleep(2);
4292 status = ata_busy_wait(ap, ATA_BUSY, 10);
4293 if (status & ATA_BUSY) {
4294 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4295 return;
4296 }
4297 }
4298
4299 /* move the HSM */
4300 poll_next = ata_hsm_move(ap, qc, status, 1);
4301
4302 /* another command or interrupt handler
4303 * may be running at this point.
4304 */
4305 if (poll_next)
4306 goto fsm_start;
4307}
4308
4309/**
4310 * ata_qc_new - Request an available ATA command, for queueing
4311 * @ap: Port from which an available command structure
4312 * is requested
4313 *
4314 * LOCKING:
4315 * None.
4316 */
4317
4318static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4319{
4320 struct ata_queued_cmd *qc = NULL;
4321 unsigned int i;
4322
4323 /* no command while frozen */
4324 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4325 return NULL;
4326
4327 /* the last tag is reserved for internal command. */
4328 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4329 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4330 qc = __ata_qc_from_tag(ap, i);
4331 break;
4332 }
4333
4334 if (qc)
4335 qc->tag = i;
4336
4337 return qc;
4338}
4339
4340/**
4341 * ata_qc_new_init - Request an available ATA command and initialize it
4342 * @dev: Device from which we request an available command structure
4343 *
4344 * LOCKING:
4345 * None.
4346 */
4347
4348struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4349{
4350 struct ata_port *ap = dev->ap;
4351 struct ata_queued_cmd *qc;
4352
4353 qc = ata_qc_new(ap);
4354 if (qc) {
4355 qc->scsicmd = NULL;
4356 qc->ap = ap;
4357 qc->dev = dev;
4358
4359 ata_qc_reinit(qc);
4360 }
4361
4362 return qc;
4363}
4364
4365/**
4366 * ata_qc_free - free unused ata_queued_cmd
4367 * @qc: Command to complete
4368 *
4369 * Designed to free unused ata_queued_cmd object
4370 * in case something prevents using it.
4371 *
4372 * LOCKING:
4373 * spin_lock_irqsave(host lock)
4374 */
4375void ata_qc_free(struct ata_queued_cmd *qc)
4376{
4377 struct ata_port *ap = qc->ap;
4378 unsigned int tag;
4379
4380 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4381
4382 qc->flags = 0;
4383 tag = qc->tag;
4384 if (likely(ata_tag_valid(tag))) {
4385 qc->tag = ATA_TAG_POISON;
4386 clear_bit(tag, &ap->qc_allocated);
4387 }
4388}
4389
4390void __ata_qc_complete(struct ata_queued_cmd *qc)
4391{
4392 struct ata_port *ap = qc->ap;
4393
4394 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4395 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4396
4397 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4398 ata_sg_clean(qc);
4399
4400 /* command should be marked inactive atomically with qc completion */
4401 if (qc->tf.protocol == ATA_PROT_NCQ)
4402 ap->sactive &= ~(1 << qc->tag);
4403 else
4404 ap->active_tag = ATA_TAG_POISON;
4405
4406 /* atapi: mark qc as inactive to prevent the interrupt handler
4407 * from completing the command twice later, before the error handler
4408 * is called. (when rc != 0 and atapi request sense is needed)
4409 */
4410 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4411 ap->qc_active &= ~(1 << qc->tag);
4412
4413 /* call completion callback */
4414 qc->complete_fn(qc);
4415}
4416
4417/**
4418 * ata_qc_complete - Complete an active ATA command
4419 * @qc: Command to complete
4421 *
4422 * Indicate to the mid and upper layers that an ATA
4423 * command has completed, with either an ok or not-ok status.
4424 *
4425 * LOCKING:
4426 * spin_lock_irqsave(host lock)
4427 */
4428void ata_qc_complete(struct ata_queued_cmd *qc)
4429{
4430 struct ata_port *ap = qc->ap;
4431
4432 /* XXX: New EH and old EH use different mechanisms to
4433 * synchronize EH with regular execution path.
4434 *
4435 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4436 * Normal execution path is responsible for not accessing a
4437 * failed qc. libata core enforces the rule by returning NULL
4438 * from ata_qc_from_tag() for failed qcs.
4439 *
4440 * Old EH depends on ata_qc_complete() nullifying completion
4441 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4442 * not synchronize with interrupt handler. Only PIO task is
4443 * taken care of.
4444 */
4445 if (ap->ops->error_handler) {
4446 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4447
4448 if (unlikely(qc->err_mask))
4449 qc->flags |= ATA_QCFLAG_FAILED;
4450
4451 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4452 if (!ata_tag_internal(qc->tag)) {
4453 /* always fill result TF for failed qc */
4454 ap->ops->tf_read(ap, &qc->result_tf);
4455 ata_qc_schedule_eh(qc);
4456 return;
4457 }
4458 }
4459
4460 /* read result TF if requested */
4461 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4462 ap->ops->tf_read(ap, &qc->result_tf);
4463
4464 __ata_qc_complete(qc);
4465 } else {
4466 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4467 return;
4468
4469 /* read result TF if failed or requested */
4470 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4471 ap->ops->tf_read(ap, &qc->result_tf);
4472
4473 __ata_qc_complete(qc);
4474 }
4475}
4476
4477/**
4478 * ata_qc_complete_multiple - Complete multiple qcs successfully
4479 * @ap: port in question
4480 * @qc_active: new qc_active mask
4481 * @finish_qc: LLDD callback invoked before completing a qc
4482 *
4483 * Complete in-flight commands. This function is meant to be
4484 * called from low-level driver's interrupt routine to complete
4485 * requests normally. ap->qc_active and @qc_active are compared
4486 * and commands are completed accordingly.
4487 *
4488 * LOCKING:
4489 * spin_lock_irqsave(host lock)
4490 *
4491 * RETURNS:
4492 * Number of completed commands on success, -errno otherwise.
4493 */
4494int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4495 void (*finish_qc)(struct ata_queued_cmd *))
4496{
4497 int nr_done = 0;
4498 u32 done_mask;
4499 int i;
4500
4501 done_mask = ap->qc_active ^ qc_active;
4502
4503 if (unlikely(done_mask & qc_active)) {
4504 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4505 "(%08x->%08x)\n", ap->qc_active, qc_active);
4506 return -EINVAL;
4507 }
4508
4509 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4510 struct ata_queued_cmd *qc;
4511
4512 if (!(done_mask & (1 << i)))
4513 continue;
4514
4515 if ((qc = ata_qc_from_tag(ap, i))) {
4516 if (finish_qc)
4517 finish_qc(qc);
4518 ata_qc_complete(qc);
4519 nr_done++;
4520 }
4521 }
4522
4523 return nr_done;
4524}
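
/*
 * A minimal sketch of how an NCQ-capable LLDD might use the helper
 * above from its interrupt handler.  example_read_active_tags() is a
 * hypothetical stand-in for reading the controller's view of the
 * still-active tag mask:
 *
 *	u32 hw_active = example_read_active_tags(ap);
 *
 *	spin_lock(ap->lock);
 *	ata_qc_complete_multiple(ap, hw_active, NULL);
 *	spin_unlock(ap->lock);
 */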
4525
4526static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4527{
4528 struct ata_port *ap = qc->ap;
4529
4530 switch (qc->tf.protocol) {
4531 case ATA_PROT_NCQ:
4532 case ATA_PROT_DMA:
4533 case ATA_PROT_ATAPI_DMA:
4534 return 1;
4535
4536 case ATA_PROT_ATAPI:
4537 case ATA_PROT_PIO:
4538 if (ap->flags & ATA_FLAG_PIO_DMA)
4539 return 1;
4540
4541 /* fall through */
4542
4543 default:
4544 return 0;
4545 }
4546
4547 /* never reached */
4548}
4549
4550/**
4551 * ata_qc_issue - issue taskfile to device
4552 * @qc: command to issue to device
4553 *
4554 * Prepare an ATA command for submission to device.
4555 * This includes mapping the data into a DMA-able
4556 * area, filling in the S/G table, and finally
4557 * writing the taskfile to hardware, starting the command.
4558 *
4559 * LOCKING:
4560 * spin_lock_irqsave(host lock)
4561 */
4562void ata_qc_issue(struct ata_queued_cmd *qc)
4563{
4564 struct ata_port *ap = qc->ap;
4565
4566 /* Make sure only one non-NCQ command is outstanding. The
4567 * check is skipped for old EH because it reuses active qc to
4568 * request ATAPI sense.
4569 */
4570 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4571
4572 if (qc->tf.protocol == ATA_PROT_NCQ) {
4573 WARN_ON(ap->sactive & (1 << qc->tag));
4574 ap->sactive |= 1 << qc->tag;
4575 } else {
4576 WARN_ON(ap->sactive);
4577 ap->active_tag = qc->tag;
4578 }
4579
4580 qc->flags |= ATA_QCFLAG_ACTIVE;
4581 ap->qc_active |= 1 << qc->tag;
4582
4583 if (ata_should_dma_map(qc)) {
4584 if (qc->flags & ATA_QCFLAG_SG) {
4585 if (ata_sg_setup(qc))
4586 goto sg_err;
4587 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4588 if (ata_sg_setup_one(qc))
4589 goto sg_err;
4590 }
4591 } else {
4592 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4593 }
4594
4595 ap->ops->qc_prep(qc);
4596
4597 qc->err_mask |= ap->ops->qc_issue(qc);
4598 if (unlikely(qc->err_mask))
4599 goto err;
4600 return;
4601
4602sg_err:
4603 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4604 qc->err_mask |= AC_ERR_SYSTEM;
4605err:
4606 ata_qc_complete(qc);
4607}
4608
4609/**
4610 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4611 * @qc: command to issue to device
4612 *
4613 * Using various libata functions and hooks, this function
4614 * starts an ATA command. ATA commands are grouped into
4615 * classes called "protocols", and issuing each type of protocol
4616 * is slightly different.
4617 *
4618 * May be used as the qc_issue() entry in ata_port_operations.
4619 *
4620 * LOCKING:
4621 * spin_lock_irqsave(host lock)
4622 *
4623 * RETURNS:
4624 * Zero on success, AC_ERR_* mask on failure
4625 */
4626
4627unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4628{
4629 struct ata_port *ap = qc->ap;
4630
4631 /* Use polling PIO if the LLD doesn't handle
4632 * interrupt-driven PIO and ATAPI CDB interrupts.
4633 */
4634 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4635 switch (qc->tf.protocol) {
4636 case ATA_PROT_PIO:
4637 case ATA_PROT_ATAPI:
4638 case ATA_PROT_ATAPI_NODATA:
4639 qc->tf.flags |= ATA_TFLAG_POLLING;
4640 break;
4641 case ATA_PROT_ATAPI_DMA:
4642 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4643 /* see ata_dma_blacklisted() */
4644 BUG();
4645 break;
4646 default:
4647 break;
4648 }
4649 }
4650
4651 /* select the device */
4652 ata_dev_select(ap, qc->dev->devno, 1, 0);
4653
4654 /* start the command */
4655 switch (qc->tf.protocol) {
4656 case ATA_PROT_NODATA:
4657 if (qc->tf.flags & ATA_TFLAG_POLLING)
4658 ata_qc_set_polling(qc);
4659
4660 ata_tf_to_host(ap, &qc->tf);
4661 ap->hsm_task_state = HSM_ST_LAST;
4662
4663 if (qc->tf.flags & ATA_TFLAG_POLLING)
4664 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4665
4666 break;
4667
4668 case ATA_PROT_DMA:
4669 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4670
4671 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4672 ap->ops->bmdma_setup(qc); /* set up bmdma */
4673 ap->ops->bmdma_start(qc); /* initiate bmdma */
4674 ap->hsm_task_state = HSM_ST_LAST;
4675 break;
4676
4677 case ATA_PROT_PIO:
4678 if (qc->tf.flags & ATA_TFLAG_POLLING)
4679 ata_qc_set_polling(qc);
4680
4681 ata_tf_to_host(ap, &qc->tf);
4682
4683 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4684 /* PIO data out protocol */
4685 ap->hsm_task_state = HSM_ST_FIRST;
4686 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4687
4688 /* always send first data block using
4689 * the ata_pio_task() codepath.
4690 */
4691 } else {
4692 /* PIO data in protocol */
4693 ap->hsm_task_state = HSM_ST;
4694
4695 if (qc->tf.flags & ATA_TFLAG_POLLING)
4696 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4697
4698 /* if polling, ata_pio_task() handles the rest.
4699 * otherwise, interrupt handler takes over from here.
4700 */
4701 }
4702
4703 break;
4704
4705 case ATA_PROT_ATAPI:
4706 case ATA_PROT_ATAPI_NODATA:
4707 if (qc->tf.flags & ATA_TFLAG_POLLING)
4708 ata_qc_set_polling(qc);
4709
4710 ata_tf_to_host(ap, &qc->tf);
4711
4712 ap->hsm_task_state = HSM_ST_FIRST;
4713
4714 /* send cdb by polling if no cdb interrupt */
4715 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4716 (qc->tf.flags & ATA_TFLAG_POLLING))
4717 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4718 break;
4719
4720 case ATA_PROT_ATAPI_DMA:
4721 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4722
4723 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4724 ap->ops->bmdma_setup(qc); /* set up bmdma */
4725 ap->hsm_task_state = HSM_ST_FIRST;
4726
4727 /* send cdb by polling if no cdb interrupt */
4728 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4729 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4730 break;
4731
4732 default:
4733 WARN_ON(1);
4734 return AC_ERR_SYSTEM;
4735 }
4736
4737 return 0;
4738}
4739
4740/**
4741 * ata_host_intr - Handle host interrupt for given (port, task)
4742 * @ap: Port on which interrupt arrived (possibly...)
4743 * @qc: Taskfile currently active in engine
4744 *
4745 * Handle host interrupt for given queued command. DMA and
4746 * interrupt-driven PIO/ATAPI commands are handled here; polled
4747 * commands are handled by ata_pio_task() with interrupts disabled (nIEN bit).
4748 *
4749 * LOCKING:
4750 * spin_lock_irqsave(host lock)
4751 *
4752 * RETURNS:
4753 * One if interrupt was handled, zero if not (shared irq).
4754 */
4755
4756inline unsigned int ata_host_intr (struct ata_port *ap,
4757 struct ata_queued_cmd *qc)
4758{
4759 u8 status, host_stat = 0;
4760
4761 VPRINTK("ata%u: protocol %d task_state %d\n",
4762 ap->id, qc->tf.protocol, ap->hsm_task_state);
4763
4764 /* Check whether we are expecting interrupt in this state */
4765 switch (ap->hsm_task_state) {
4766 case HSM_ST_FIRST:
4767 /* Some pre-ATAPI-4 devices assert INTRQ
4768 * at this state when ready to receive CDB.
4769 */
4770
4771 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4772 * The flag was turned on only for ATAPI devices, so
4773 * there is no need to check is_atapi_taskfile(&qc->tf) again.
4774 */
4775 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4776 goto idle_irq;
4777 break;
4778 case HSM_ST_LAST:
4779 if (qc->tf.protocol == ATA_PROT_DMA ||
4780 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4781 /* check status of DMA engine */
4782 host_stat = ap->ops->bmdma_status(ap);
4783 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4784
4785 /* if it's not our irq... */
4786 if (!(host_stat & ATA_DMA_INTR))
4787 goto idle_irq;
4788
4789 /* before we do anything else, clear DMA-Start bit */
4790 ap->ops->bmdma_stop(qc);
4791
4792 if (unlikely(host_stat & ATA_DMA_ERR)) {
4793 /* error when transferring data to/from memory */
4794 qc->err_mask |= AC_ERR_HOST_BUS;
4795 ap->hsm_task_state = HSM_ST_ERR;
4796 }
4797 }
4798 break;
4799 case HSM_ST:
4800 break;
4801 default:
4802 goto idle_irq;
4803 }
4804
4805 /* check altstatus */
4806 status = ata_altstatus(ap);
4807 if (status & ATA_BUSY)
4808 goto idle_irq;
4809
4810 /* check main status, clearing INTRQ */
4811 status = ata_chk_status(ap);
4812 if (unlikely(status & ATA_BUSY))
4813 goto idle_irq;
4814
4815 /* ack bmdma irq events */
4816 ap->ops->irq_clear(ap);
4817
4818 ata_hsm_move(ap, qc, status, 0);
4819 return 1; /* irq handled */
4820
4821idle_irq:
4822 ap->stats.idle_irq++;
4823
4824#ifdef ATA_IRQ_TRAP
4825 if ((ap->stats.idle_irq % 1000) == 0) {
4826 ata_irq_ack(ap, 0); /* debug trap */
4827 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4828 return 1;
4829 }
4830#endif
4831 return 0; /* irq not handled */
4832}
4833
4834/**
4835 * ata_interrupt - Default ATA host interrupt handler
4836 * @irq: irq line (unused)
4837 * @dev_instance: pointer to our ata_host information structure
4838 * @regs: unused
4839 *
4840 * Default interrupt handler for PCI IDE devices. Calls
4841 * ata_host_intr() for each port that is not disabled.
4842 *
4843 * LOCKING:
4844 * Obtains host lock during operation.
4845 *
4846 * RETURNS:
4847 * IRQ_NONE or IRQ_HANDLED.
4848 */
4849
4850irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4851{
4852 struct ata_host *host = dev_instance;
4853 unsigned int i;
4854 unsigned int handled = 0;
4855 unsigned long flags;
4856
4857 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4858 spin_lock_irqsave(&host->lock, flags);
4859
4860 for (i = 0; i < host->n_ports; i++) {
4861 struct ata_port *ap;
4862
4863 ap = host->ports[i];
4864 if (ap &&
4865 !(ap->flags & ATA_FLAG_DISABLED)) {
4866 struct ata_queued_cmd *qc;
4867
4868 qc = ata_qc_from_tag(ap, ap->active_tag);
4869 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4870 (qc->flags & ATA_QCFLAG_ACTIVE))
4871 handled |= ata_host_intr(ap, qc);
4872 }
4873 }
4874
4875 spin_unlock_irqrestore(&host->lock, flags);
4876
4877 return IRQ_RETVAL(handled);
4878}
4879
4880/**
4881 * sata_scr_valid - test whether SCRs are accessible
4882 * @ap: ATA port to test SCR accessibility for
4883 *
4884 * Test whether SCRs are accessible for @ap.
4885 *
4886 * LOCKING:
4887 * None.
4888 *
4889 * RETURNS:
4890 * 1 if SCRs are accessible, 0 otherwise.
4891 */
4892int sata_scr_valid(struct ata_port *ap)
4893{
4894 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4895}
4896
4897/**
4898 * sata_scr_read - read SCR register of the specified port
4899 * @ap: ATA port to read SCR for
4900 * @reg: SCR to read
4901 * @val: Place to store read value
4902 *
4903 * Read SCR register @reg of @ap into *@val. This function is
4904 * guaranteed to succeed if the cable type of the port is SATA
4905 * and the port implements ->scr_read.
4906 *
4907 * LOCKING:
4908 * None.
4909 *
4910 * RETURNS:
4911 * 0 on success, negative errno on failure.
4912 */
4913int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4914{
4915 if (sata_scr_valid(ap)) {
4916 *val = ap->ops->scr_read(ap, reg);
4917 return 0;
4918 }
4919 return -EOPNOTSUPP;
4920}
4921
4922/**
4923 * sata_scr_write - write SCR register of the specified port
4924 * @ap: ATA port to write SCR for
4925 * @reg: SCR to write
4926 * @val: value to write
4927 *
4928 * Write @val to SCR register @reg of @ap. This function is
4929 * guaranteed to succeed if the cable type of the port is SATA
4930 * and the port implements ->scr_read.
4931 *
4932 * LOCKING:
4933 * None.
4934 *
4935 * RETURNS:
4936 * 0 on success, negative errno on failure.
4937 */
4938int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4939{
4940 if (sata_scr_valid(ap)) {
4941 ap->ops->scr_write(ap, reg, val);
4942 return 0;
4943 }
4944 return -EOPNOTSUPP;
4945}
4946
4947/**
4948 * sata_scr_write_flush - write SCR register of the specified port and flush
4949 * @ap: ATA port to write SCR for
4950 * @reg: SCR to write
4951 * @val: value to write
4952 *
4953 * This function is identical to sata_scr_write() except that this
4954 * function performs flush after writing to the register.
4955 *
4956 * LOCKING:
4957 * None.
4958 *
4959 * RETURNS:
4960 * 0 on success, negative errno on failure.
4961 */
4962int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4963{
4964 if (sata_scr_valid(ap)) {
4965 ap->ops->scr_write(ap, reg, val);
4966 ap->ops->scr_read(ap, reg);
4967 return 0;
4968 }
4969 return -EOPNOTSUPP;
4970}
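
/*
 * Typical use of the SCR helpers: reading SError and writing the same
 * bits back clears them, since SError is write-one-to-clear:
 *
 *	u32 serror;
 *
 *	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
 *		sata_scr_write(ap, SCR_ERROR, serror);
 */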
4971
4972/**
4973 * ata_port_online - test whether the given port is online
4974 * @ap: ATA port to test
4975 *
4976 * Test whether @ap is online. Note that this function returns 0
4977 * if online status of @ap cannot be obtained, so
4978 * ata_port_online(ap) != !ata_port_offline(ap).
4979 *
4980 * LOCKING:
4981 * None.
4982 *
4983 * RETURNS:
4984 * 1 if the port online status is available and online.
4985 */
4986int ata_port_online(struct ata_port *ap)
4987{
4988 u32 sstatus;
4989
4990 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4991 return 1;
4992 return 0;
4993}
4994
4995/**
4996 * ata_port_offline - test whether the given port is offline
4997 * @ap: ATA port to test
4998 *
4999 * Test whether @ap is offline. Note that this function returns
5000 * 0 if offline status of @ap cannot be obtained, so
5001 * ata_port_online(ap) != !ata_port_offline(ap).
5002 *
5003 * LOCKING:
5004 * None.
5005 *
5006 * RETURNS:
5007 * 1 if the port offline status is available and offline.
5008 */
5009int ata_port_offline(struct ata_port *ap)
5010{
5011 u32 sstatus;
5012
5013 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5014 return 1;
5015 return 0;
5016}
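
/*
 * The 0x3 tested by the two helpers above is the DET field of SStatus
 * (bits 3:0); DET == 0x3 means a device is present and phy
 * communication is established.  For reference, the neighbouring
 * SStatus fields:
 *
 *	bits  3:0  DET  device detection / phy state
 *	bits  7:4  SPD  negotiated interface speed (1 = Gen1, 2 = Gen2)
 *	bits 11:8  IPM  interface power management state
 */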
5017
5018int ata_flush_cache(struct ata_device *dev)
5019{
5020 unsigned int err_mask;
5021 u8 cmd;
5022
5023 if (!ata_try_flush_cache(dev))
5024 return 0;
5025
5026 if (ata_id_has_flush_ext(dev->id))
5027 cmd = ATA_CMD_FLUSH_EXT;
5028 else
5029 cmd = ATA_CMD_FLUSH;
5030
5031 err_mask = ata_do_simple_cmd(dev, cmd);
5032 if (err_mask) {
5033 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5034 return -EIO;
5035 }
5036
5037 return 0;
5038}
5039
5040static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5041 unsigned int action, unsigned int ehi_flags,
5042 int wait)
5043{
5044 unsigned long flags;
5045 int i, rc;
5046
5047 for (i = 0; i < host->n_ports; i++) {
5048 struct ata_port *ap = host->ports[i];
5049
5050 /* Previous resume operation might still be in
5051 * progress. Wait for PM_PENDING to clear.
5052 */
5053 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5054 ata_port_wait_eh(ap);
5055 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5056 }
5057
5058 /* request PM ops to EH */
5059 spin_lock_irqsave(ap->lock, flags);
5060
5061 ap->pm_mesg = mesg;
5062 if (wait) {
5063 rc = 0;
5064 ap->pm_result = &rc;
5065 }
5066
5067 ap->pflags |= ATA_PFLAG_PM_PENDING;
5068 ap->eh_info.action |= action;
5069 ap->eh_info.flags |= ehi_flags;
5070
5071 ata_port_schedule_eh(ap);
5072
5073 spin_unlock_irqrestore(ap->lock, flags);
5074
5075 /* wait and check result */
5076 if (wait) {
5077 ata_port_wait_eh(ap);
5078 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5079 if (rc)
5080 return rc;
5081 }
5082 }
5083
5084 return 0;
5085}
5086
5087/**
5088 * ata_host_suspend - suspend host
5089 * @host: host to suspend
5090 * @mesg: PM message
5091 *
5092 * Suspend @host. Actual operation is performed by EH. This
5093 * function requests EH to perform PM operations and waits for EH
5094 * to finish.
5095 *
5096 * LOCKING:
5097 * Kernel thread context (may sleep).
5098 *
5099 * RETURNS:
5100 * 0 on success, -errno on failure.
5101 */
5102int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5103{
5104 int i, j, rc;
5105
5106 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5107 if (rc)
5108 goto fail;
5109
5110 /* EH is quiescent now. Fail if we have any ready device.
5111 * This happens if hotplug occurs between completion of device
5112 * suspension and here.
5113 */
5114 for (i = 0; i < host->n_ports; i++) {
5115 struct ata_port *ap = host->ports[i];
5116
5117 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5118 struct ata_device *dev = &ap->device[j];
5119
5120 if (ata_dev_ready(dev)) {
5121 ata_port_printk(ap, KERN_WARNING,
5122 "suspend failed, device %d "
5123 "still active\n", dev->devno);
5124 rc = -EBUSY;
5125 goto fail;
5126 }
5127 }
5128 }
5129
5130 host->dev->power.power_state = mesg;
5131 return 0;
5132
5133 fail:
5134 ata_host_resume(host);
5135 return rc;
5136}
5137
5138/**
5139 * ata_host_resume - resume host
5140 * @host: host to resume
5141 *
5142 * Resume @host. Actual operation is performed by EH. This
5143 * function requests EH to perform PM operations and returns.
5144 * Note that all resume operations are performed in parallel.
5145 *
5146 * LOCKING:
5147 * Kernel thread context (may sleep).
5148 */
5149void ata_host_resume(struct ata_host *host)
5150{
5151 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5152 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5153 host->dev->power.power_state = PMSG_ON;
5154}
5155
5156/**
5157 * ata_port_start - Set port up for dma.
5158 * @ap: Port to initialize
5159 *
5160 * Called just after data structures for each port are
5161 * initialized. Allocates space for PRD table.
5162 *
5163 * May be used as the port_start() entry in ata_port_operations.
5164 *
5165 * LOCKING:
5166 * Inherited from caller.
5167 */
5168
5169int ata_port_start (struct ata_port *ap)
5170{
5171 struct device *dev = ap->dev;
5172 int rc;
5173
5174 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5175 if (!ap->prd)
5176 return -ENOMEM;
5177
5178 rc = ata_pad_alloc(ap, dev);
5179 if (rc) {
5180 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5181 return rc;
5182 }
5183
5184 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5185
5186 return 0;
5187}
5188
5189
5190/**
5191 * ata_port_stop - Undo ata_port_start()
5192 * @ap: Port to shut down
5193 *
5194 * Frees the PRD table.
5195 *
5196 * May be used as the port_stop() entry in ata_port_operations.
5197 *
5198 * LOCKING:
5199 * Inherited from caller.
5200 */
5201
5202void ata_port_stop (struct ata_port *ap)
5203{
5204 struct device *dev = ap->dev;
5205
5206 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5207 ata_pad_free(ap, dev);
5208}
5209
5210void ata_host_stop (struct ata_host *host)
5211{
5212 if (host->mmio_base)
5213 iounmap(host->mmio_base);
5214}
5215
5216/**
5217 * ata_dev_init - Initialize an ata_device structure
5218 * @dev: Device structure to initialize
5219 *
5220 * Initialize @dev in preparation for probing.
5221 *
5222 * LOCKING:
5223 * Inherited from caller.
5224 */
5225void ata_dev_init(struct ata_device *dev)
5226{
5227 struct ata_port *ap = dev->ap;
5228 unsigned long flags;
5229
5230 /* SATA spd limit is bound to the first device */
5231 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5232
5233 /* High bits of dev->flags are used to record warm plug
5234 * requests which occur asynchronously. Synchronize using
5235 * host lock.
5236 */
5237 spin_lock_irqsave(ap->lock, flags);
5238 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5239 spin_unlock_irqrestore(ap->lock, flags);
5240
5241 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5242 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5243 dev->pio_mask = UINT_MAX;
5244 dev->mwdma_mask = UINT_MAX;
5245 dev->udma_mask = UINT_MAX;
5246}
5247
5248/**
5249 * ata_port_init - Initialize an ata_port structure
5250 * @ap: Structure to initialize
5251 * @host: Collection of hosts to which @ap belongs
5252 * @ent: Probe information provided by low-level driver
5253 * @port_no: Port number associated with this ata_port
5254 *
5255 * Initialize a new ata_port structure.
5256 *
5257 * LOCKING:
5258 * Inherited from caller.
5259 */
5260void ata_port_init(struct ata_port *ap, struct ata_host *host,
5261 const struct ata_probe_ent *ent, unsigned int port_no)
5262{
5263 unsigned int i;
5264
5265 ap->lock = &host->lock;
5266 ap->flags = ATA_FLAG_DISABLED;
5267 ap->id = ata_unique_id++;
5268 ap->ctl = ATA_DEVCTL_OBS;
5269 ap->host = host;
5270 ap->dev = ent->dev;
5271 ap->port_no = port_no;
5272 ap->pio_mask = ent->pio_mask;
5273 ap->mwdma_mask = ent->mwdma_mask;
5274 ap->udma_mask = ent->udma_mask;
5275 ap->flags |= ent->port_flags;
5276 ap->ops = ent->port_ops;
5277 ap->hw_sata_spd_limit = UINT_MAX;
5278 ap->active_tag = ATA_TAG_POISON;
5279 ap->last_ctl = 0xFF;
5280
5281#if defined(ATA_VERBOSE_DEBUG)
5282 /* turn on all debugging levels */
5283 ap->msg_enable = 0x00FF;
5284#elif defined(ATA_DEBUG)
5285 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5286#else
5287 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5288#endif
5289
5290 INIT_WORK(&ap->port_task, NULL, NULL);
5291 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5292 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
5293 INIT_LIST_HEAD(&ap->eh_done_q);
5294 init_waitqueue_head(&ap->eh_wait_q);
5295
5296 /* set cable type */
5297 ap->cbl = ATA_CBL_NONE;
5298 if (ap->flags & ATA_FLAG_SATA)
5299 ap->cbl = ATA_CBL_SATA;
5300
5301 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5302 struct ata_device *dev = &ap->device[i];
5303 dev->ap = ap;
5304 dev->devno = i;
5305 ata_dev_init(dev);
5306 }
5307
5308#ifdef ATA_IRQ_TRAP
5309 ap->stats.unhandled_irq = 1;
5310 ap->stats.idle_irq = 1;
5311#endif
5312
5313 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5314}
5315
5316/**
5317 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5318 * @ap: ATA port to initialize SCSI host for
5319 * @shost: SCSI host associated with @ap
5320 *
5321 * Initialize SCSI host @shost associated with ATA port @ap.
5322 *
5323 * LOCKING:
5324 * Inherited from caller.
5325 */
5326static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5327{
5328 ap->scsi_host = shost;
5329
5330 shost->unique_id = ap->id;
5331 shost->max_id = 16;
5332 shost->max_lun = 1;
5333 shost->max_channel = 1;
5334 shost->max_cmd_len = 12;
5335}
5336
5337/**
5338 * ata_port_add - Attach low-level ATA driver to system
5339 * @ent: Information provided by low-level driver
5340 * @host: Collections of ports to which we add
5341 * @port_no: Port number associated with this host
5342 *
5343 * Attach low-level ATA driver to system.
5344 *
5345 * LOCKING:
5346 * PCI/etc. bus probe sem.
5347 *
5348 * RETURNS:
5349 * New ata_port on success, NULL on error.
5350 */
5351static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5352 struct ata_host *host,
5353 unsigned int port_no)
5354{
5355 struct Scsi_Host *shost;
5356 struct ata_port *ap;
5357
5358 DPRINTK("ENTER\n");
5359
5360 if (!ent->port_ops->error_handler &&
5361 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5362 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5363 port_no);
5364 return NULL;
5365 }
5366
5367 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5368 if (!shost)
5369 return NULL;
5370
5371 shost->transportt = &ata_scsi_transport_template;
5372
5373 ap = ata_shost_to_port(shost);
5374
5375 ata_port_init(ap, host, ent, port_no);
5376 ata_port_init_shost(ap, shost);
5377
5378 return ap;
5379}
5380
5381/**
5382 * ata_host_init - Initialize a host struct
5383 * @host: host to initialize
5384 * @dev: device host is attached to
5385 * @flags: host flags
5386 * @ops: port_ops
5387 *
5388 * LOCKING:
5389 * PCI/etc. bus probe sem.
5390 *
5391 */
5392
5393void ata_host_init(struct ata_host *host, struct device *dev,
5394 unsigned long flags, const struct ata_port_operations *ops)
5395{
5396 spin_lock_init(&host->lock);
5397 host->dev = dev;
5398 host->flags = flags;
5399 host->ops = ops;
5400}
5401
5402/**
5403 * ata_device_add - Register hardware device with ATA and SCSI layers
5404 * @ent: Probe information describing hardware device to be registered
5405 *
5406 * This function processes the information provided in the probe
5407 * information struct @ent, allocates the necessary ATA and SCSI
5408 * host information structures, initializes them, and registers
5409 * everything with requisite kernel subsystems.
5410 *
5411 * This function requests irqs, probes the ATA bus, and probes
5412 * the SCSI bus.
5413 *
5414 * LOCKING:
5415 * PCI/etc. bus probe sem.
5416 *
5417 * RETURNS:
5418 * Number of ports registered. Zero on error (no ports registered).
5419 */
5420int ata_device_add(const struct ata_probe_ent *ent)
5421{
5422 unsigned int i;
5423 struct device *dev = ent->dev;
5424 struct ata_host *host;
5425 int rc;
5426
5427 DPRINTK("ENTER\n");
5428 /* alloc a container for our list of ATA ports (buses) */
5429 host = kzalloc(sizeof(struct ata_host) +
5430 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5431 if (!host)
5432 return 0;
5433
5434 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5435 host->n_ports = ent->n_ports;
5436 host->irq = ent->irq;
5437 host->irq2 = ent->irq2;
5438 host->mmio_base = ent->mmio_base;
5439 host->private_data = ent->private_data;
5440
5441 /* register each port bound to this device */
5442 for (i = 0; i < host->n_ports; i++) {
5443 struct ata_port *ap;
5444 unsigned long xfer_mode_mask;
5445 int irq_line = ent->irq;
5446
5447 ap = ata_port_add(ent, host, i);
5448 if (!ap)
5449 goto err_out;
5450
5451 host->ports[i] = ap;
5452
5453 /* dummy? */
5454 if (ent->dummy_port_mask & (1 << i)) {
5455 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5456 ap->ops = &ata_dummy_port_ops;
5457 continue;
5458 }
5459
5460 /* start port */
5461 rc = ap->ops->port_start(ap);
5462 if (rc) {
5463 host->ports[i] = NULL;
5464 scsi_host_put(ap->scsi_host);
5465 goto err_out;
5466 }
5467
5468 /* Report the secondary IRQ for the second channel in legacy mode */
5469 if (i == 1 && ent->irq2)
5470 irq_line = ent->irq2;
5471
5472 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5473 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5474 (ap->pio_mask << ATA_SHIFT_PIO);
5475
5476 /* print per-port info to dmesg */
5477 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5478 "ctl 0x%lX bmdma 0x%lX irq %d\n",
5479 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5480 ata_mode_string(xfer_mode_mask),
5481 ap->ioaddr.cmd_addr,
5482 ap->ioaddr.ctl_addr,
5483 ap->ioaddr.bmdma_addr,
5484 irq_line);
5485
5486 ata_chk_status(ap);
5487 host->ops->irq_clear(ap);
5488 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5489 }
5490
5491 /* obtain irq, which may be shared between channels */
5492 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5493 DRV_NAME, host);
5494 if (rc) {
5495 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5496 ent->irq, rc);
5497 goto err_out;
5498 }
5499
5500 /* do we have a second IRQ for the other channel, e.g. legacy mode */
5501 if (ent->irq2) {
5502 /* We will get weird core code crashes later if this is true,
5503 so trap it now */
5504 BUG_ON(ent->irq == ent->irq2);
5505
5506 rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
5507 DRV_NAME, host);
5508 if (rc) {
5509 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5510 ent->irq2, rc);
5511 goto err_out_free_irq;
5512 }
5513 }
5514
5515 /* perform each probe synchronously */
5516 DPRINTK("probe begin\n");
5517 for (i = 0; i < host->n_ports; i++) {
5518 struct ata_port *ap = host->ports[i];
5519 u32 scontrol;
5520 int rc;
5521
5522 /* init sata_spd_limit to the current value */
5523 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5524 int spd = (scontrol >> 4) & 0xf;
5525 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5526 }
5527 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5528
5529 rc = scsi_add_host(ap->scsi_host, dev);
5530 if (rc) {
5531 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5532 /* FIXME: do something useful here */
5533 /* FIXME: handle unconditional calls to
5534 * scsi_scan_host and ata_host_remove, below,
5535 * at the very least
5536 */
5537 }
5538
5539 if (ap->ops->error_handler) {
5540 struct ata_eh_info *ehi = &ap->eh_info;
5541 unsigned long flags;
5542
5543 ata_port_probe(ap);
5544
5545 /* kick EH for boot probing */
5546 spin_lock_irqsave(ap->lock, flags);
5547
5548 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5549 ehi->action |= ATA_EH_SOFTRESET;
5550 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5551
5552 ap->pflags |= ATA_PFLAG_LOADING;
5553 ata_port_schedule_eh(ap);
5554
5555 spin_unlock_irqrestore(ap->lock, flags);
5556
5557 /* wait for EH to finish */
5558 ata_port_wait_eh(ap);
5559 } else {
5560 DPRINTK("ata%u: bus probe begin\n", ap->id);
5561 rc = ata_bus_probe(ap);
5562 DPRINTK("ata%u: bus probe end\n", ap->id);
5563
5564 if (rc) {
5565 /* FIXME: do something useful here?
5566 * Current libata behavior will
5567 * tear down everything when
5568 * the module is removed
5569 * or the h/w is unplugged.
5570 */
5571 }
5572 }
5573 }
5574
5575 /* probes are done, now scan each port's disk(s) */
5576 DPRINTK("host probe begin\n");
5577 for (i = 0; i < host->n_ports; i++) {
5578 struct ata_port *ap = host->ports[i];
5579
5580 ata_scsi_scan_host(ap);
5581 }
5582
5583 dev_set_drvdata(dev, host);
5584
5585 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5586 return ent->n_ports; /* success */
5587
5588err_out_free_irq:
5589 free_irq(ent->irq, host);
5590err_out:
5591 for (i = 0; i < host->n_ports; i++) {
5592 struct ata_port *ap = host->ports[i];
5593 if (ap) {
5594 ap->ops->port_stop(ap);
5595 scsi_host_put(ap->scsi_host);
5596 }
5597 }
5598
5599 kfree(host);
5600 VPRINTK("EXIT, returning 0\n");
5601 return 0;
5602}
5603
5604/**
5605 * ata_port_detach - Detach ATA port in preparation for device removal
5606 * @ap: ATA port to be detached
5607 *
5608 * Detach all ATA devices and the associated SCSI devices of @ap;
5609 * then, remove the associated SCSI host. @ap is guaranteed to
5610 * be quiescent on return from this function.
5611 *
5612 * LOCKING:
5613 * Kernel thread context (may sleep).
5614 */
5615void ata_port_detach(struct ata_port *ap)
5616{
5617 unsigned long flags;
5618 int i;
5619
5620 if (!ap->ops->error_handler)
5621 goto skip_eh;
5622
5623 /* tell EH we're leaving & flush EH */
5624 spin_lock_irqsave(ap->lock, flags);
5625 ap->pflags |= ATA_PFLAG_UNLOADING;
5626 spin_unlock_irqrestore(ap->lock, flags);
5627
5628 ata_port_wait_eh(ap);
5629
5630 /* EH is now guaranteed to see UNLOADING, so no new device
5631 * will be attached. Disable all existing devices.
5632 */
5633 spin_lock_irqsave(ap->lock, flags);
5634
5635 for (i = 0; i < ATA_MAX_DEVICES; i++)
5636 ata_dev_disable(&ap->device[i]);
5637
5638 spin_unlock_irqrestore(ap->lock, flags);
5639
5640 /* Final freeze & EH. All in-flight commands are aborted. EH
5641 * will be skipped and retries will be terminated with bad
5642 * target.
5643 */
5644 spin_lock_irqsave(ap->lock, flags);
5645 ata_port_freeze(ap); /* won't be thawed */
5646 spin_unlock_irqrestore(ap->lock, flags);
5647
5648 ata_port_wait_eh(ap);
5649
5650 /* Flush hotplug task. The sequence is similar to
5651 * ata_port_flush_task().
5652 */
5653 flush_workqueue(ata_aux_wq);
5654 cancel_delayed_work(&ap->hotplug_task);
5655 flush_workqueue(ata_aux_wq);
5656
5657 skip_eh:
5658 /* remove the associated SCSI host */
5659 scsi_remove_host(ap->scsi_host);
5660}
5661
5662/**
5663 * ata_host_remove - PCI layer callback for device removal
5664 * @host: ATA host set that was removed
5665 *
5666 * Unregister all objects associated with this host set. Free those
5667 * objects.
5668 *
5669 * LOCKING:
5670 * Inherited from calling layer (may sleep).
5671 */
5672
5673void ata_host_remove(struct ata_host *host)
5674{
5675 unsigned int i;
5676
5677 for (i = 0; i < host->n_ports; i++)
5678 ata_port_detach(host->ports[i]);
5679
5680 free_irq(host->irq, host);
5681 if (host->irq2)
5682 free_irq(host->irq2, host);
5683
5684 for (i = 0; i < host->n_ports; i++) {
5685 struct ata_port *ap = host->ports[i];
5686
5687 ata_scsi_release(ap->scsi_host);
5688
5689 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5690 struct ata_ioports *ioaddr = &ap->ioaddr;
5691
5692 /* FIXME: Add -ac IDE pci mods to remove these special cases */
5693 if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
5694 release_region(ATA_PRIMARY_CMD, 8);
5695 else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
5696 release_region(ATA_SECONDARY_CMD, 8);
5697 }
5698
5699 scsi_host_put(ap->scsi_host);
5700 }
5701
5702 if (host->ops->host_stop)
5703 host->ops->host_stop(host);
5704
5705 kfree(host);
5706}
5707
5708/**
5709 * ata_scsi_release - SCSI layer callback hook for host unload
5710 * @host: libata host to be unloaded
5711 *
5712 * Performs all duties necessary to shut down a libata port:
5713 * kill the port kthread, disable the port, and release resources.
5714 *
5715 * LOCKING:
5716 * Inherited from SCSI layer.
5717 *
5718 * RETURNS:
5719 * One.
5720 */
5721
5722int ata_scsi_release(struct Scsi_Host *shost)
5723{
5724 struct ata_port *ap = ata_shost_to_port(shost);
5725
5726 DPRINTK("ENTER\n");
5727
5728 ap->ops->port_disable(ap);
5729 ap->ops->port_stop(ap);
5730
5731 DPRINTK("EXIT\n");
5732 return 1;
5733}
5734
5735struct ata_probe_ent *
5736ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5737{
5738 struct ata_probe_ent *probe_ent;
5739
5740 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
5741 if (!probe_ent) {
5742 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5743 kobject_name(&(dev->kobj)));
5744 return NULL;
5745 }
5746
5747 INIT_LIST_HEAD(&probe_ent->node);
5748 probe_ent->dev = dev;
5749
5750 probe_ent->sht = port->sht;
5751 probe_ent->port_flags = port->flags;
5752 probe_ent->pio_mask = port->pio_mask;
5753 probe_ent->mwdma_mask = port->mwdma_mask;
5754 probe_ent->udma_mask = port->udma_mask;
5755 probe_ent->port_ops = port->port_ops;
5756
5757 return probe_ent;
5758}
5759
5760/**
5761 * ata_std_ports - initialize ioaddr with standard port offsets.
5762 * @ioaddr: IO address structure to be initialized
5763 *
5764 * Utility function which initializes data_addr, error_addr,
5765 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5766 * device_addr, status_addr, and command_addr to standard offsets
5767 * relative to cmd_addr.
5768 *
5769 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5770 */
5771
5772void ata_std_ports(struct ata_ioports *ioaddr)
5773{
5774 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5775 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5776 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5777 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5778 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5779 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5780 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5781 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5782 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5783 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5784}
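
/*
 * Usage sketch (not part of the original file): a taskfile driver
 * normally fills in cmd_addr, ctl_addr/altstatus_addr and bmdma_addr
 * itself, then lets ata_std_ports() derive the remaining taskfile
 * register addresses.  The helper below and its argument names are
 * illustrative only.
 */
static void example_fill_ioaddr(struct ata_ioports *ioaddr,
				unsigned long cmd, unsigned long ctl,
				unsigned long bmdma)
{
	ioaddr->cmd_addr = cmd;
	ioaddr->altstatus_addr = ctl;
	ioaddr->ctl_addr = ctl;
	ioaddr->bmdma_addr = bmdma;
	ata_std_ports(ioaddr);	/* data..command derived from cmd_addr */
}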
5785
5786
5787#ifdef CONFIG_PCI
5788
5789void ata_pci_host_stop (struct ata_host *host)
5790{
5791 struct pci_dev *pdev = to_pci_dev(host->dev);
5792
5793 pci_iounmap(pdev, host->mmio_base);
5794}
5795
5796/**
5797 * ata_pci_remove_one - PCI layer callback for device removal
5798 * @pdev: PCI device that was removed
5799 *
5800 * PCI layer indicates to libata via this hook that
5801 * hot-unplug or module unload event has occurred.
5802 * Handle this by unregistering all objects associated
5803 * with this PCI device. Free those objects. Then finally
5804 * release PCI resources and disable device.
5805 *
5806 * LOCKING:
5807 * Inherited from PCI layer (may sleep).
5808 */
5809
5810void ata_pci_remove_one (struct pci_dev *pdev)
5811{
5812 struct device *dev = pci_dev_to_dev(pdev);
5813 struct ata_host *host = dev_get_drvdata(dev);
5814
5815 ata_host_remove(host);
5816
5817 pci_release_regions(pdev);
5818 pci_disable_device(pdev);
5819 dev_set_drvdata(dev, NULL);
5820}
5821
5822/* move to PCI subsystem */
5823int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5824{
5825 unsigned long tmp = 0;
5826
5827 switch (bits->width) {
5828 case 1: {
5829 u8 tmp8 = 0;
5830 pci_read_config_byte(pdev, bits->reg, &tmp8);
5831 tmp = tmp8;
5832 break;
5833 }
5834 case 2: {
5835 u16 tmp16 = 0;
5836 pci_read_config_word(pdev, bits->reg, &tmp16);
5837 tmp = tmp16;
5838 break;
5839 }
5840 case 4: {
5841 u32 tmp32 = 0;
5842 pci_read_config_dword(pdev, bits->reg, &tmp32);
5843 tmp = tmp32;
5844 break;
5845 }
5846
5847 default:
5848 return -EINVAL;
5849 }
5850
5851 tmp &= bits->mask;
5852
5853 return (tmp == bits->val) ? 1 : 0;
5854}
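
/*
 * Usage sketch (not part of the original file): LLDs use
 * pci_test_config_bits() to check a "port enable" bit in PCI config
 * space before probing a channel.  The register, mask and value
 * below are made up for the example.
 */
static const struct pci_bits example_enable_bits = {
	0x41U,		/* reg: config byte to test */
	1U,		/* width: one byte */
	0x80UL,		/* mask */
	0x80UL,		/* val: enabled when bit 7 is set */
};

static int example_port_enabled(struct pci_dev *pdev)
{
	if (!pci_test_config_bits(pdev, &example_enable_bits))
		return -ENODEV;	/* channel disabled in hardware */
	return 0;
}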
5855
5856void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5857{
5858 pci_save_state(pdev);
5859
5860 if (mesg.event == PM_EVENT_SUSPEND) {
5861 pci_disable_device(pdev);
5862 pci_set_power_state(pdev, PCI_D3hot);
5863 }
5864}
5865
5866void ata_pci_device_do_resume(struct pci_dev *pdev)
5867{
5868 pci_set_power_state(pdev, PCI_D0);
5869 pci_restore_state(pdev);
5870 pci_enable_device(pdev);
5871 pci_set_master(pdev);
5872}
5873
5874int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5875{
5876 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5877 int rc = 0;
5878
5879 rc = ata_host_suspend(host, mesg);
5880 if (rc)
5881 return rc;
5882
5883 ata_pci_device_do_suspend(pdev, mesg);
5884
5885 return 0;
5886}
5887
5888int ata_pci_device_resume(struct pci_dev *pdev)
5889{
5890 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5891
5892 ata_pci_device_do_resume(pdev);
5893 ata_host_resume(host);
5894 return 0;
5895}
5896#endif /* CONFIG_PCI */
5897
5898
5899static int __init ata_init(void)
5900{
5901 ata_probe_timeout *= HZ;
5902 ata_wq = create_workqueue("ata");
5903 if (!ata_wq)
5904 return -ENOMEM;
5905
5906 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5907 if (!ata_aux_wq) {
5908 destroy_workqueue(ata_wq);
5909 return -ENOMEM;
5910 }
5911
5912 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5913 return 0;
5914}
5915
5916static void __exit ata_exit(void)
5917{
5918 destroy_workqueue(ata_wq);
5919 destroy_workqueue(ata_aux_wq);
5920}
5921
5922module_init(ata_init);
5923module_exit(ata_exit);
5924
5925static unsigned long ratelimit_time;
5926static DEFINE_SPINLOCK(ata_ratelimit_lock);
5927
5928int ata_ratelimit(void)
5929{
5930 int rc;
5931 unsigned long flags;
5932
5933 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5934
5935 if (time_after(jiffies, ratelimit_time)) {
5936 rc = 1;
5937 ratelimit_time = jiffies + (HZ/5);
5938 } else
5939 rc = 0;
5940
5941 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5942
5943 return rc;
5944}
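
/*
 * Usage sketch (not part of the original file): ata_ratelimit()
 * grants at most one message per HZ/5 jiffies (~5 per second)
 * globally, so interrupt-path warnings cannot flood the log.  The
 * message text is illustrative.
 */
static inline void example_warn_spurious(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt ignored\n");
}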
5945
5946/**
5947 * ata_wait_register - wait until register value changes
5948 * @reg: IO-mapped register
5949 * @mask: Mask to apply to read register value
5950 * @val: Wait condition
5951 * @interval_msec: polling interval in milliseconds
5952 * @timeout_msec: timeout in milliseconds
5953 *
5954 * Waiting for some bits of register to change is a common
5955 * operation for ATA controllers. This function reads 32bit LE
5956 * IO-mapped register @reg and tests for the following condition.
5957 *
5958 * (*@reg & mask) != val
5959 *
5960 * If the condition is met, it returns; otherwise, the process is
5961 * repeated after @interval_msec until timeout.
5962 *
5963 * LOCKING:
5964 * Kernel thread context (may sleep)
5965 *
5966 * RETURNS:
5967 * The final register value.
5968 */
5969u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5970 unsigned long interval_msec,
5971 unsigned long timeout_msec)
5972{
5973 unsigned long timeout;
5974 u32 tmp;
5975
5976 tmp = ioread32(reg);
5977
5978 /* Calculate timeout _after_ the first read to make sure
5979 * preceding writes reach the controller before starting to
5980 * eat away the timeout.
5981 */
5982 timeout = jiffies + (timeout_msec * HZ) / 1000;
5983
5984 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5985 msleep(interval_msec);
5986 tmp = ioread32(reg);
5987 }
5988
5989 return tmp;
5990}
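
/*
 * Usage sketch (not part of the original file): wait up to 100ms,
 * polling every 10ms, for a controller-defined BUSY bit to clear.
 * EXAMPLE_BUSY and the register offset are hypothetical, not libata
 * symbols; ata_wait_register() returns as soon as
 * (reg & EXAMPLE_BUSY) != EXAMPLE_BUSY or the timeout expires.
 */
#define EXAMPLE_BUSY	(1 << 0)

static int example_wait_idle(void __iomem *mmio)
{
	u32 tmp = ata_wait_register(mmio + 0x04, EXAMPLE_BUSY,
				    EXAMPLE_BUSY, 10, 100);
	return (tmp & EXAMPLE_BUSY) ? -EBUSY : 0;
}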
5991
5992/*
5993 * Dummy port_ops
5994 */
5995static void ata_dummy_noret(struct ata_port *ap) { }
5996static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
5997static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
5998
5999static u8 ata_dummy_check_status(struct ata_port *ap)
6000{
6001 return ATA_DRDY;
6002}
6003
6004static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6005{
6006 return AC_ERR_SYSTEM;
6007}
6008
6009const struct ata_port_operations ata_dummy_port_ops = {
6010 .port_disable = ata_port_disable,
6011 .check_status = ata_dummy_check_status,
6012 .check_altstatus = ata_dummy_check_status,
6013 .dev_select = ata_noop_dev_select,
6014 .qc_prep = ata_noop_qc_prep,
6015 .qc_issue = ata_dummy_qc_issue,
6016 .freeze = ata_dummy_noret,
6017 .thaw = ata_dummy_noret,
6018 .error_handler = ata_dummy_noret,
6019 .post_internal_cmd = ata_dummy_qc_noret,
6020 .irq_clear = ata_dummy_noret,
6021 .port_start = ata_dummy_ret0,
6022 .port_stop = ata_dummy_noret,
6023};
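
/*
 * Usage sketch (not part of the original file): an LLD that knows a
 * channel is dead sets the corresponding bit in dummy_port_mask, and
 * ata_device_add() above substitutes ata_dummy_port_ops for that
 * port.  The channel number is illustrative.
 */
static inline void example_mark_dead_channel(struct ata_probe_ent *probe_ent)
{
	probe_ent->dummy_port_mask |= 1 << 1;	/* second channel is dummy */
}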
6024
6025/*
6026 * libata is essentially a library of internal helper functions for
6027 * low-level ATA host controller drivers. As such, the API/ABI is
6028 * likely to change as new drivers are added and updated.
6029 * Do not depend on ABI/API stability.
6030 */
6031
6032EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6033EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6034EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6035EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6036EXPORT_SYMBOL_GPL(ata_std_bios_param);
6037EXPORT_SYMBOL_GPL(ata_std_ports);
6038EXPORT_SYMBOL_GPL(ata_host_init);
6039EXPORT_SYMBOL_GPL(ata_device_add);
6040EXPORT_SYMBOL_GPL(ata_port_detach);
6041EXPORT_SYMBOL_GPL(ata_host_remove);
6042EXPORT_SYMBOL_GPL(ata_sg_init);
6043EXPORT_SYMBOL_GPL(ata_sg_init_one);
6044EXPORT_SYMBOL_GPL(ata_hsm_move);
6045EXPORT_SYMBOL_GPL(ata_qc_complete);
6046EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6047EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6048EXPORT_SYMBOL_GPL(ata_tf_load);
6049EXPORT_SYMBOL_GPL(ata_tf_read);
6050EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6051EXPORT_SYMBOL_GPL(ata_std_dev_select);
6052EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6053EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6054EXPORT_SYMBOL_GPL(ata_check_status);
6055EXPORT_SYMBOL_GPL(ata_altstatus);
6056EXPORT_SYMBOL_GPL(ata_exec_command);
6057EXPORT_SYMBOL_GPL(ata_port_start);
6058EXPORT_SYMBOL_GPL(ata_port_stop);
6059EXPORT_SYMBOL_GPL(ata_host_stop);
6060EXPORT_SYMBOL_GPL(ata_interrupt);
6061EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
6062EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
6063EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
6064EXPORT_SYMBOL_GPL(ata_qc_prep);
6065EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6066EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6067EXPORT_SYMBOL_GPL(ata_bmdma_start);
6068EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6069EXPORT_SYMBOL_GPL(ata_bmdma_status);
6070EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6071EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6072EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6073EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6074EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6075EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6076EXPORT_SYMBOL_GPL(ata_port_probe);
6077EXPORT_SYMBOL_GPL(sata_set_spd);
6078EXPORT_SYMBOL_GPL(sata_phy_debounce);
6079EXPORT_SYMBOL_GPL(sata_phy_resume);
6080EXPORT_SYMBOL_GPL(sata_phy_reset);
6081EXPORT_SYMBOL_GPL(__sata_phy_reset);
6082EXPORT_SYMBOL_GPL(ata_bus_reset);
6083EXPORT_SYMBOL_GPL(ata_std_prereset);
6084EXPORT_SYMBOL_GPL(ata_std_softreset);
6085EXPORT_SYMBOL_GPL(sata_std_hardreset);
6086EXPORT_SYMBOL_GPL(ata_std_postreset);
6087EXPORT_SYMBOL_GPL(ata_dev_revalidate);
6088EXPORT_SYMBOL_GPL(ata_dev_classify);
6089EXPORT_SYMBOL_GPL(ata_dev_pair);
6090EXPORT_SYMBOL_GPL(ata_port_disable);
6091EXPORT_SYMBOL_GPL(ata_ratelimit);
6092EXPORT_SYMBOL_GPL(ata_wait_register);
6093EXPORT_SYMBOL_GPL(ata_busy_sleep);
6094EXPORT_SYMBOL_GPL(ata_port_queue_task);
6095EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6096EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6097EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6098EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6099EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6100EXPORT_SYMBOL_GPL(ata_scsi_release);
6101EXPORT_SYMBOL_GPL(ata_host_intr);
6102EXPORT_SYMBOL_GPL(sata_scr_valid);
6103EXPORT_SYMBOL_GPL(sata_scr_read);
6104EXPORT_SYMBOL_GPL(sata_scr_write);
6105EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6106EXPORT_SYMBOL_GPL(ata_port_online);
6107EXPORT_SYMBOL_GPL(ata_port_offline);
6108EXPORT_SYMBOL_GPL(ata_host_suspend);
6109EXPORT_SYMBOL_GPL(ata_host_resume);
6110EXPORT_SYMBOL_GPL(ata_id_string);
6111EXPORT_SYMBOL_GPL(ata_id_c_string);
6112EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6113
6114EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6115EXPORT_SYMBOL_GPL(ata_timing_compute);
6116EXPORT_SYMBOL_GPL(ata_timing_merge);
6117
6118#ifdef CONFIG_PCI
6119EXPORT_SYMBOL_GPL(pci_test_config_bits);
6120EXPORT_SYMBOL_GPL(ata_pci_host_stop);
6121EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6122EXPORT_SYMBOL_GPL(ata_pci_init_one);
6123EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6124EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6125EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6126EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6127EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6128EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6129EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6130#endif /* CONFIG_PCI */
6131
6132EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6133EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6134
6135EXPORT_SYMBOL_GPL(ata_eng_timeout);
6136EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6137EXPORT_SYMBOL_GPL(ata_port_abort);
6138EXPORT_SYMBOL_GPL(ata_port_freeze);
6139EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6140EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6141EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6142EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6143EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
new file mode 100644
index 000000000000..b1b510493c2d
--- /dev/null
+++ b/drivers/ata/libata-eh.c
@@ -0,0 +1,2246 @@
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_eh.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_cmnd.h>
42#include "../scsi/scsi_transport_api.h"
43
44#include <linux/libata.h>
45
46#include "libata.h"
47
48static void __ata_port_freeze(struct ata_port *ap);
49static void ata_eh_finish(struct ata_port *ap);
50static void ata_eh_handle_port_suspend(struct ata_port *ap);
51static void ata_eh_handle_port_resume(struct ata_port *ap);
52
53static void ata_ering_record(struct ata_ering *ering, int is_io,
54 unsigned int err_mask)
55{
56 struct ata_ering_entry *ent;
57
58 WARN_ON(!err_mask);
59
60 ering->cursor++;
61 ering->cursor %= ATA_ERING_SIZE;
62
63 ent = &ering->ring[ering->cursor];
64 ent->is_io = is_io;
65 ent->err_mask = err_mask;
66 ent->timestamp = get_jiffies_64();
67}
68
69static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
70{
71 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
72 if (!ent->err_mask)
73 return NULL;
74 return ent;
75}
76
77static int ata_ering_map(struct ata_ering *ering,
78 int (*map_fn)(struct ata_ering_entry *, void *),
79 void *arg)
80{
81 int idx, rc = 0;
82 struct ata_ering_entry *ent;
83
84 idx = ering->cursor;
85 do {
86 ent = &ering->ring[idx];
87 if (!ent->err_mask)
88 break;
89 rc = map_fn(ent, arg);
90 if (rc)
91 break;
92 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
93 } while (idx != ering->cursor);
94
95 return rc;
96}
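
/*
 * Usage sketch (not part of the original file): callbacks passed to
 * ata_ering_map() see entries newest-first and may stop the walk by
 * returning non-zero.  This timeout counter is illustrative; compare
 * speed_down_needed_cb() below for a real caller.
 */
static int example_count_timeouts(struct ata_ering_entry *ent, void *arg)
{
	int *count = arg;

	if (ent->err_mask & AC_ERR_TIMEOUT)
		(*count)++;
	return 0;	/* keep walking */
}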
97
98static unsigned int ata_eh_dev_action(struct ata_device *dev)
99{
100 struct ata_eh_context *ehc = &dev->ap->eh_context;
101
102 return ehc->i.action | ehc->i.dev_action[dev->devno];
103}
104
105static void ata_eh_clear_action(struct ata_device *dev,
106 struct ata_eh_info *ehi, unsigned int action)
107{
108 int i;
109
110 if (!dev) {
111 ehi->action &= ~action;
112 for (i = 0; i < ATA_MAX_DEVICES; i++)
113 ehi->dev_action[i] &= ~action;
114 } else {
115 /* doesn't make sense for port-wide EH actions */
116 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
117
118 /* break ehi->action into ehi->dev_action */
119 if (ehi->action & action) {
120 for (i = 0; i < ATA_MAX_DEVICES; i++)
121 ehi->dev_action[i] |= ehi->action & action;
122 ehi->action &= ~action;
123 }
124
125 /* turn off the specified per-dev action */
126 ehi->dev_action[dev->devno] &= ~action;
127 }
128}
129
130/**
131 * ata_scsi_timed_out - SCSI layer time out callback
132 * @cmd: timed out SCSI command
133 *
134 * Handles SCSI layer timeout. We race with normal completion of
135 * the qc for @cmd. If the qc is already gone, we lose and let
136 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
137 * timed out and EH should be invoked. Prevent ata_qc_complete()
138 * from finishing it by setting EH_SCHEDULED and return
139 * EH_NOT_HANDLED.
140 *
141 * TODO: kill this function once old EH is gone.
142 *
143 * LOCKING:
144 * Called from timer context
145 *
146 * RETURNS:
147 * EH_HANDLED or EH_NOT_HANDLED
148 */
149enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
150{
151 struct Scsi_Host *host = cmd->device->host;
152 struct ata_port *ap = ata_shost_to_port(host);
153 unsigned long flags;
154 struct ata_queued_cmd *qc;
155 enum scsi_eh_timer_return ret;
156
157 DPRINTK("ENTER\n");
158
159 if (ap->ops->error_handler) {
160 ret = EH_NOT_HANDLED;
161 goto out;
162 }
163
164 ret = EH_HANDLED;
165 spin_lock_irqsave(ap->lock, flags);
166 qc = ata_qc_from_tag(ap, ap->active_tag);
167 if (qc) {
168 WARN_ON(qc->scsicmd != cmd);
169 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
170 qc->err_mask |= AC_ERR_TIMEOUT;
171 ret = EH_NOT_HANDLED;
172 }
173 spin_unlock_irqrestore(ap->lock, flags);
174
175 out:
176 DPRINTK("EXIT, ret=%d\n", ret);
177 return ret;
178}
179
180/**
181 * ata_scsi_error - SCSI layer error handler callback
182 * @host: SCSI host on which error occurred
183 *
184 * Handles SCSI-layer-thrown error events.
185 *
186 * LOCKING:
187 * Inherited from SCSI layer (none, can sleep)
188 *
189 * RETURNS:
190 * Zero.
191 */
192void ata_scsi_error(struct Scsi_Host *host)
193{
194 struct ata_port *ap = ata_shost_to_port(host);
195 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
196 unsigned long flags;
197
198 DPRINTK("ENTER\n");
199
200 /* synchronize with port task */
201 ata_port_flush_task(ap);
202
203 /* synchronize with host lock and sort out timeouts */
204
205 /* For new EH, all qcs are finished in one of three ways -
206 * normal completion, error completion, and SCSI timeout.
207 * Both completions can race against SCSI timeout. When normal
208 * completion wins, the qc never reaches EH. When error
209 * completion wins, the qc has ATA_QCFLAG_FAILED set.
210 *
211 * When SCSI timeout wins, things are a bit more complex.
212 * Normal or error completion can occur after the timeout but
213 * before this point. In such cases, both types of
214 * completions are honored. A scmd is determined to have
215 * timed out iff its associated qc is active and not failed.
216 */
217 if (ap->ops->error_handler) {
218 struct scsi_cmnd *scmd, *tmp;
219 int nr_timedout = 0;
220
221 spin_lock_irqsave(ap->lock, flags);
222
223 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
224 struct ata_queued_cmd *qc;
225
226 for (i = 0; i < ATA_MAX_QUEUE; i++) {
227 qc = __ata_qc_from_tag(ap, i);
228 if (qc->flags & ATA_QCFLAG_ACTIVE &&
229 qc->scsicmd == scmd)
230 break;
231 }
232
233 if (i < ATA_MAX_QUEUE) {
234 /* the scmd has an associated qc */
235 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
236 /* which hasn't failed yet, timeout */
237 qc->err_mask |= AC_ERR_TIMEOUT;
238 qc->flags |= ATA_QCFLAG_FAILED;
239 nr_timedout++;
240 }
241 } else {
242 /* Normal completion occurred after
243 * SCSI timeout but before this point.
244 * Successfully complete it.
245 */
246 scmd->retries = scmd->allowed;
247 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
248 }
249 }
250
251 * If we have timed out qcs, they belong to EH from
252 * this point but the state of the controller is
253 * unknown. Freeze the port to make sure the IRQ
254 * handler doesn't diddle with those qcs. This must
255 * be done atomically w.r.t. setting QCFLAG_FAILED.
256 */
257 if (nr_timedout)
258 __ata_port_freeze(ap);
259
260 spin_unlock_irqrestore(ap->lock, flags);
261 } else
262 spin_unlock_wait(ap->lock);
263
264 repeat:
265 /* invoke error handler */
266 if (ap->ops->error_handler) {
267 /* process port resume request */
268 ata_eh_handle_port_resume(ap);
269
270 /* fetch & clear EH info */
271 spin_lock_irqsave(ap->lock, flags);
272
273 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
274 ap->eh_context.i = ap->eh_info;
275 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
276
277 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
278 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
279
280 spin_unlock_irqrestore(ap->lock, flags);
281
282 /* invoke EH, skip if unloading or suspended */
283 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
284 ap->ops->error_handler(ap);
285 else
286 ata_eh_finish(ap);
287
288 /* process port suspend request */
289 ata_eh_handle_port_suspend(ap);
290
291 * Exception might have happened after ->error_handler
292 * recovered the port but before this point. Repeat
293 * EH in such case.
294 */
295 spin_lock_irqsave(ap->lock, flags);
296
297 if (ap->pflags & ATA_PFLAG_EH_PENDING) {
298 if (--repeat_cnt) {
299 ata_port_printk(ap, KERN_INFO,
300 "EH pending after completion, "
301 "repeating EH (cnt=%d)\n", repeat_cnt);
302 spin_unlock_irqrestore(ap->lock, flags);
303 goto repeat;
304 }
305 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
306 "tries, giving up\n", ATA_EH_MAX_REPEAT);
307 }
308
309 /* this run is complete, make sure EH info is clear */
310 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
311
312 /* Clear host_eh_scheduled while holding ap->lock such
313 * that if exception occurs after this point but
314 * before EH completion, SCSI midlayer will
315 * re-initiate EH.
316 */
317 host->host_eh_scheduled = 0;
318
319 spin_unlock_irqrestore(ap->lock, flags);
320 } else {
321 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
322 ap->ops->eng_timeout(ap);
323 }
324
325 /* finish or retry handled scmd's and clean up */
326 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
327
328 scsi_eh_flush_done_q(&ap->eh_done_q);
329
330 /* clean up */
331 spin_lock_irqsave(ap->lock, flags);
332
333 if (ap->pflags & ATA_PFLAG_LOADING)
334 ap->pflags &= ~ATA_PFLAG_LOADING;
335 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
336 queue_work(ata_aux_wq, &ap->hotplug_task);
337
338 if (ap->pflags & ATA_PFLAG_RECOVERED)
339 ata_port_printk(ap, KERN_INFO, "EH complete\n");
340
341 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
342
343 /* tell wait_eh that we're done */
344 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
345 wake_up_all(&ap->eh_wait_q);
346
347 spin_unlock_irqrestore(ap->lock, flags);
348
349 DPRINTK("EXIT\n");
350}
351
352/**
353 * ata_port_wait_eh - Wait for the currently pending EH to complete
354 * @ap: Port to wait EH for
355 *
356 * Wait until the currently pending EH is complete.
357 *
358 * LOCKING:
359 * Kernel thread context (may sleep).
360 */
361void ata_port_wait_eh(struct ata_port *ap)
362{
363 unsigned long flags;
364 DEFINE_WAIT(wait);
365
366 retry:
367 spin_lock_irqsave(ap->lock, flags);
368
369 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
370 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
371 spin_unlock_irqrestore(ap->lock, flags);
372 schedule();
373 spin_lock_irqsave(ap->lock, flags);
374 }
375 finish_wait(&ap->eh_wait_q, &wait);
376
377 spin_unlock_irqrestore(ap->lock, flags);
378
379 /* make sure SCSI EH is complete */
380 if (scsi_host_in_recovery(ap->scsi_host)) {
381 msleep(10);
382 goto retry;
383 }
384}
385
386/**
387 * ata_qc_timeout - Handle timeout of queued command
388 * @qc: Command that timed out
389 *
390 * Some part of the kernel (currently, only the SCSI layer)
391 * has noticed that the active command on @qc's port has not
392 * completed after a specified length of time. Handle this
393 * condition by disabling DMA (if necessary) and completing
394 * transactions, with error if necessary.
395 *
396 * This also handles the case of the "lost interrupt", where
397 * for some reason (possibly hardware bug, possibly driver bug)
398 * an interrupt was not delivered to the driver, even though the
399 * transaction completed successfully.
400 *
401 * TODO: kill this function once old EH is gone.
402 *
403 * LOCKING:
404 * Inherited from SCSI layer (none, can sleep)
405 */
406static void ata_qc_timeout(struct ata_queued_cmd *qc)
407{
408 struct ata_port *ap = qc->ap;
409 u8 host_stat = 0, drv_stat;
410 unsigned long flags;
411
412 DPRINTK("ENTER\n");
413
414 ap->hsm_task_state = HSM_ST_IDLE;
415
416 spin_lock_irqsave(ap->lock, flags);
417
418 switch (qc->tf.protocol) {
419
420 case ATA_PROT_DMA:
421 case ATA_PROT_ATAPI_DMA:
422 host_stat = ap->ops->bmdma_status(ap);
423
424 /* before we do anything else, clear DMA-Start bit */
425 ap->ops->bmdma_stop(qc);
426
427 /* fall through */
428
429 default:
430 ata_altstatus(ap);
431 drv_stat = ata_chk_status(ap);
432
433 /* ack bmdma irq events */
434 ap->ops->irq_clear(ap);
435
436 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
437 "stat 0x%x host_stat 0x%x\n",
438 qc->tf.command, drv_stat, host_stat);
439
440 /* complete taskfile transaction */
441 qc->err_mask |= AC_ERR_TIMEOUT;
442 break;
443 }
444
445 spin_unlock_irqrestore(ap->lock, flags);
446
447 ata_eh_qc_complete(qc);
448
449 DPRINTK("EXIT\n");
450}
451
452/**
453 * ata_eng_timeout - Handle timeout of queued command
454 * @ap: Port on which timed-out command is active
455 *
456 * Some part of the kernel (currently, only the SCSI layer)
457 * has noticed that the active command on port @ap has not
458 * completed after a specified length of time. Handle this
459 * condition by disabling DMA (if necessary) and completing
460 * transactions, with error if necessary.
461 *
462 * This also handles the case of the "lost interrupt", where
463 * for some reason (possibly hardware bug, possibly driver bug)
464 * an interrupt was not delivered to the driver, even though the
465 * transaction completed successfully.
466 *
467 * TODO: kill this function once old EH is gone.
468 *
469 * LOCKING:
470 * Inherited from SCSI layer (none, can sleep)
471 */
472void ata_eng_timeout(struct ata_port *ap)
473{
474 DPRINTK("ENTER\n");
475
476 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
477
478 DPRINTK("EXIT\n");
479}
480
481/**
482 * ata_qc_schedule_eh - schedule qc for error handling
483 * @qc: command to schedule error handling for
484 *
485 * Schedule error handling for @qc. EH will kick in as soon as
486 * other commands are drained.
487 *
488 * LOCKING:
489 * spin_lock_irqsave(host lock)
490 */
491void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
492{
493 struct ata_port *ap = qc->ap;
494
495 WARN_ON(!ap->ops->error_handler);
496
497 qc->flags |= ATA_QCFLAG_FAILED;
498 qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
499
500 /* The following will fail if timeout has already expired.
501 * ata_scsi_error() takes care of such scmds on EH entry.
502 * Note that ATA_QCFLAG_FAILED is unconditionally set after
503 * this function completes.
504 */
505 scsi_req_abort_cmd(qc->scsicmd);
506}
507
508/**
509 * ata_port_schedule_eh - schedule error handling without a qc
510 * @ap: ATA port to schedule EH for
511 *
512 * Schedule error handling for @ap. EH will kick in as soon as
513 * all commands are drained.
514 *
515 * LOCKING:
516 * spin_lock_irqsave(host lock)
517 */
518void ata_port_schedule_eh(struct ata_port *ap)
519{
520 WARN_ON(!ap->ops->error_handler);
521
522 ap->pflags |= ATA_PFLAG_EH_PENDING;
523 scsi_schedule_eh(ap->scsi_host);
524
525 DPRINTK("port EH scheduled\n");
526}
527
528/**
529 * ata_port_abort - abort all qc's on the port
530 * @ap: ATA port to abort qc's for
531 *
532 * Abort all active qc's of @ap and schedule EH.
533 *
534 * LOCKING:
535 * spin_lock_irqsave(host lock)
536 *
537 * RETURNS:
538 * Number of aborted qc's.
539 */
540int ata_port_abort(struct ata_port *ap)
541{
542 int tag, nr_aborted = 0;
543
544 WARN_ON(!ap->ops->error_handler);
545
546 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
547 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
548
549 if (qc) {
550 qc->flags |= ATA_QCFLAG_FAILED;
551 ata_qc_complete(qc);
552 nr_aborted++;
553 }
554 }
555
556 if (!nr_aborted)
557 ata_port_schedule_eh(ap);
558
559 return nr_aborted;
560}
561
562/**
563 * __ata_port_freeze - freeze port
564 * @ap: ATA port to freeze
565 *
566 * This function is called when HSM violation or some other
567 * condition disrupts normal operation of the port. Frozen port
568 * is not allowed to perform any operation until the port is
569 * thawed, which usually follows a successful reset.
570 *
571 * ap->ops->freeze() callback can be used for freezing the port
572 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
573 * port cannot be frozen hardware-wise, the interrupt handler
574 * must ack and clear interrupts unconditionally while the port
575 * is frozen.
576 *
577 * LOCKING:
578 * spin_lock_irqsave(host lock)
579 */
580static void __ata_port_freeze(struct ata_port *ap)
581{
582 WARN_ON(!ap->ops->error_handler);
583
584 if (ap->ops->freeze)
585 ap->ops->freeze(ap);
586
587 ap->pflags |= ATA_PFLAG_FROZEN;
588
589 DPRINTK("ata%u port frozen\n", ap->id);
590}
591
592/**
593 * ata_port_freeze - abort & freeze port
594 * @ap: ATA port to freeze
595 *
596 * Abort and freeze @ap.
597 *
598 * LOCKING:
599 * spin_lock_irqsave(host lock)
600 *
601 * RETURNS:
602 * Number of aborted commands.
603 */
604int ata_port_freeze(struct ata_port *ap)
605{
606 int nr_aborted;
607
608 WARN_ON(!ap->ops->error_handler);
609
610 nr_aborted = ata_port_abort(ap);
611 __ata_port_freeze(ap);
612
613 return nr_aborted;
614}
615
616/**
617 * ata_eh_freeze_port - EH helper to freeze port
618 * @ap: ATA port to freeze
619 *
620 * Freeze @ap.
621 *
622 * LOCKING:
623 * None.
624 */
625void ata_eh_freeze_port(struct ata_port *ap)
626{
627 unsigned long flags;
628
629 if (!ap->ops->error_handler)
630 return;
631
632 spin_lock_irqsave(ap->lock, flags);
633 __ata_port_freeze(ap);
634 spin_unlock_irqrestore(ap->lock, flags);
635}
636
637/**
638 * ata_eh_thaw_port - EH helper to thaw port
639 * @ap: ATA port to thaw
640 *
641 * Thaw frozen port @ap.
642 *
643 * LOCKING:
644 * None.
645 */
646void ata_eh_thaw_port(struct ata_port *ap)
647{
648 unsigned long flags;
649
650 if (!ap->ops->error_handler)
651 return;
652
653 spin_lock_irqsave(ap->lock, flags);
654
655 ap->pflags &= ~ATA_PFLAG_FROZEN;
656
657 if (ap->ops->thaw)
658 ap->ops->thaw(ap);
659
660 spin_unlock_irqrestore(ap->lock, flags);
661
662 DPRINTK("ata%u port thawed\n", ap->id);
663}
664
665static void ata_eh_scsidone(struct scsi_cmnd *scmd)
666{
667 /* nada */
668}
669
670static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
671{
672 struct ata_port *ap = qc->ap;
673 struct scsi_cmnd *scmd = qc->scsicmd;
674 unsigned long flags;
675
676 spin_lock_irqsave(ap->lock, flags);
677 qc->scsidone = ata_eh_scsidone;
678 __ata_qc_complete(qc);
679 WARN_ON(ata_tag_valid(qc->tag));
680 spin_unlock_irqrestore(ap->lock, flags);
681
682 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
683}
684
685/**
686 * ata_eh_qc_complete - Complete an active ATA command from EH
687 * @qc: Command to complete
688 *
689 * Indicate to the mid and upper layers that an ATA command has
690 * completed. To be used from EH.
691 */
692void ata_eh_qc_complete(struct ata_queued_cmd *qc)
693{
694 struct scsi_cmnd *scmd = qc->scsicmd;
695 scmd->retries = scmd->allowed;
696 __ata_eh_qc_complete(qc);
697}
698
699/**
700 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
701 * @qc: Command to retry
702 *
703 * Indicate to the mid and upper layers that an ATA command
704 * should be retried. To be used from EH.
705 *
706 * SCSI midlayer limits the number of retries to scmd->allowed.
707 * scmd->retries is decremented for commands which get retried
708 * due to unrelated failures (qc->err_mask is zero).
709 */
710void ata_eh_qc_retry(struct ata_queued_cmd *qc)
711{
712 struct scsi_cmnd *scmd = qc->scsicmd;
713 if (!qc->err_mask && scmd->retries)
714 scmd->retries--;
715 __ata_eh_qc_complete(qc);
716}
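
/*
 * Usage sketch (not part of the original file): EH code finishes
 * each failed command with one of the two helpers above.  The
 * media-error split below is illustrative policy, not something
 * libata itself mandates.
 */
static void example_finish_failed_qc(struct ata_queued_cmd *qc)
{
	if (qc->err_mask & AC_ERR_MEDIA)
		ata_eh_qc_complete(qc);	/* hard failure, report it */
	else
		ata_eh_qc_retry(qc);	/* transient, let midlayer retry */
}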
717
718/**
719 * ata_eh_detach_dev - detach ATA device
720 * @dev: ATA device to detach
721 *
722 * Detach @dev.
723 *
724 * LOCKING:
725 * None.
726 */
727static void ata_eh_detach_dev(struct ata_device *dev)
728{
729 struct ata_port *ap = dev->ap;
730 unsigned long flags;
731
732 ata_dev_disable(dev);
733
734 spin_lock_irqsave(ap->lock, flags);
735
736 dev->flags &= ~ATA_DFLAG_DETACH;
737
738 if (ata_scsi_offline_dev(dev)) {
739 dev->flags |= ATA_DFLAG_DETACHED;
740 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
741 }
742
743 /* clear per-dev EH actions */
744 ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
745 ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
746
747 spin_unlock_irqrestore(ap->lock, flags);
748}
749
750/**
751 * ata_eh_about_to_do - about to perform eh_action
752 * @ap: target ATA port
753 * @dev: target ATA dev for per-dev action (can be NULL)
754 * @action: action about to be performed
755 *
756 * Called just before performing EH actions to clear related bits
757 * in @ap->eh_info such that eh actions are not unnecessarily
758 * repeated.
759 *
760 * LOCKING:
761 * None.
762 */
763static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
764 unsigned int action)
765{
766 unsigned long flags;
767 struct ata_eh_info *ehi = &ap->eh_info;
768 struct ata_eh_context *ehc = &ap->eh_context;
769
770 spin_lock_irqsave(ap->lock, flags);
771
772 /* Reset is represented by combination of actions and EHI
773 * flags. Suck in all related bits before clearing eh_info to
774 * avoid losing requested action.
775 */
776 if (action & ATA_EH_RESET_MASK) {
777 ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
778 ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;
779
780 /* make sure all reset actions are cleared & clear EHI flags */
781 action |= ATA_EH_RESET_MASK;
782 ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
783 }
784
785 ata_eh_clear_action(dev, ehi, action);
786
787 if (!(ehc->i.flags & ATA_EHI_QUIET))
788 ap->pflags |= ATA_PFLAG_RECOVERED;
789
790 spin_unlock_irqrestore(ap->lock, flags);
791}
792
793/**
794 * ata_eh_done - EH action complete
795 * @ap: target ATA port
796 * @dev: target ATA dev for per-dev action (can be NULL)
797 * @action: action just completed
798 *
799 * Called right after performing EH actions to clear related bits
800 * in @ap->eh_context.
801 *
802 * LOCKING:
803 * None.
804 */
805static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
806 unsigned int action)
807{
808 /* if reset is complete, clear all reset actions & reset modifier */
809 if (action & ATA_EH_RESET_MASK) {
810 action |= ATA_EH_RESET_MASK;
811 ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
812 }
813
814 ata_eh_clear_action(dev, &ap->eh_context.i, action);
815}
816
817/**
818 * ata_err_string - convert err_mask to descriptive string
819 * @err_mask: error mask to convert to string
820 *
821 * Convert @err_mask to descriptive string. Errors are
822 * prioritized according to severity and only the most severe
823 * error is reported.
824 *
825 * LOCKING:
826 * None.
827 *
828 * RETURNS:
829 * Descriptive string for @err_mask
830 */
831static const char * ata_err_string(unsigned int err_mask)
832{
833 if (err_mask & AC_ERR_HOST_BUS)
834 return "host bus error";
835 if (err_mask & AC_ERR_ATA_BUS)
836 return "ATA bus error";
837 if (err_mask & AC_ERR_TIMEOUT)
838 return "timeout";
839 if (err_mask & AC_ERR_HSM)
840 return "HSM violation";
841 if (err_mask & AC_ERR_SYSTEM)
842 return "internal error";
843 if (err_mask & AC_ERR_MEDIA)
844 return "media error";
845 if (err_mask & AC_ERR_INVALID)
846 return "invalid argument";
847 if (err_mask & AC_ERR_DEV)
848 return "device error";
849 return "unknown error";
850}
851
852/**
853 * ata_read_log_page - read a specific log page
854 * @dev: target device
855 * @page: page to read
856 * @buf: buffer to store read page
857 * @sectors: number of sectors to read
858 *
859 * Read log page using READ_LOG_EXT command.
860 *
861 * LOCKING:
862 * Kernel thread context (may sleep).
863 *
864 * RETURNS:
865 * 0 on success, AC_ERR_* mask otherwise.
866 */
867static unsigned int ata_read_log_page(struct ata_device *dev,
868 u8 page, void *buf, unsigned int sectors)
869{
870 struct ata_taskfile tf;
871 unsigned int err_mask;
872
873 DPRINTK("read log page - page %d\n", page);
874
875 ata_tf_init(dev, &tf);
876 tf.command = ATA_CMD_READ_LOG_EXT;
877 tf.lbal = page;
878 tf.nsect = sectors;
879 tf.hob_nsect = sectors >> 8;
880 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
881 tf.protocol = ATA_PROT_PIO;
882
883 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
884 buf, sectors * ATA_SECT_SIZE);
885
886 DPRINTK("EXIT, err_mask=%x\n", err_mask);
887 return err_mask;
888}
889
890/**
891 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
892 * @dev: Device to read log page 10h from
893 * @tag: Resulting tag of the failed command
894 * @tf: Resulting taskfile registers of the failed command
895 *
896 * Read log page 10h to obtain NCQ error details and clear error
897 * condition.
898 *
899 * LOCKING:
900 * Kernel thread context (may sleep).
901 *
902 * RETURNS:
903 * 0 on success, -errno otherwise.
904 */
905static int ata_eh_read_log_10h(struct ata_device *dev,
906 int *tag, struct ata_taskfile *tf)
907{
908 u8 *buf = dev->ap->sector_buf;
909 unsigned int err_mask;
910 u8 csum;
911 int i;
912
913 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
914 if (err_mask)
915 return -EIO;
916
917 csum = 0;
918 for (i = 0; i < ATA_SECT_SIZE; i++)
919 csum += buf[i];
920 if (csum)
921 ata_dev_printk(dev, KERN_WARNING,
922 "invalid checksum 0x%x on log page 10h\n", csum);
923
924 if (buf[0] & 0x80)
925 return -ENOENT;
926
927 *tag = buf[0] & 0x1f;
928
929 tf->command = buf[2];
930 tf->feature = buf[3];
931 tf->lbal = buf[4];
932 tf->lbam = buf[5];
933 tf->lbah = buf[6];
934 tf->device = buf[7];
935 tf->hob_lbal = buf[8];
936 tf->hob_lbam = buf[9];
937 tf->hob_lbah = buf[10];
938 tf->nsect = buf[12];
939 tf->hob_nsect = buf[13];
940
941 return 0;
942}
943
944/**
945 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
946 * @dev: device to perform REQUEST_SENSE to
947 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
948 *
949 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
950 * CONDITION. This function is an EH helper.
951 *
952 * LOCKING:
953 * Kernel thread context (may sleep).
954 *
955 * RETURNS:
956 * 0 on success, AC_ERR_* mask on failure
957 */
958static unsigned int atapi_eh_request_sense(struct ata_device *dev,
959 unsigned char *sense_buf)
960{
961 struct ata_port *ap = dev->ap;
962 struct ata_taskfile tf;
963 u8 cdb[ATAPI_CDB_LEN];
964
965 DPRINTK("ATAPI request sense\n");
966
967 ata_tf_init(dev, &tf);
968
969 /* FIXME: is this needed? */
970 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
971
972 /* XXX: why tf_read here? */
973 ap->ops->tf_read(ap, &tf);
974
975 /* fill these in, for the case where they are -not- overwritten */
976 sense_buf[0] = 0x70;
977 sense_buf[2] = tf.feature >> 4;
978
979 memset(cdb, 0, ATAPI_CDB_LEN);
980 cdb[0] = REQUEST_SENSE;
981 cdb[4] = SCSI_SENSE_BUFFERSIZE;
982
983 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
984 tf.command = ATA_CMD_PACKET;
985
986 /* is it pointless to prefer PIO for "safety reasons"? */
987 if (ap->flags & ATA_FLAG_PIO_DMA) {
988 tf.protocol = ATA_PROT_ATAPI_DMA;
989 tf.feature |= ATAPI_PKT_DMA;
990 } else {
991 tf.protocol = ATA_PROT_ATAPI;
992 tf.lbam = (8 * 1024) & 0xff;
993 tf.lbah = (8 * 1024) >> 8;
994 }
995
996 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
997 sense_buf, SCSI_SENSE_BUFFERSIZE);
998}
999
1000/**
1001 * ata_eh_analyze_serror - analyze SError for a failed port
1002 * @ap: ATA port to analyze SError for
1003 *
1004 * Analyze SError if available and further determine cause of
1005 * failure.
1006 *
1007 * LOCKING:
1008 * None.
1009 */
1010static void ata_eh_analyze_serror(struct ata_port *ap)
1011{
1012 struct ata_eh_context *ehc = &ap->eh_context;
1013 u32 serror = ehc->i.serror;
1014 unsigned int err_mask = 0, action = 0;
1015
1016 if (serror & SERR_PERSISTENT) {
1017 err_mask |= AC_ERR_ATA_BUS;
1018 action |= ATA_EH_HARDRESET;
1019 }
1020 if (serror &
1021 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
1022 err_mask |= AC_ERR_ATA_BUS;
1023 action |= ATA_EH_SOFTRESET;
1024 }
1025 if (serror & SERR_PROTOCOL) {
1026 err_mask |= AC_ERR_HSM;
1027 action |= ATA_EH_SOFTRESET;
1028 }
1029 if (serror & SERR_INTERNAL) {
1030 err_mask |= AC_ERR_SYSTEM;
1031 action |= ATA_EH_SOFTRESET;
1032 }
1033 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
1034 ata_ehi_hotplugged(&ehc->i);
1035
1036 ehc->i.err_mask |= err_mask;
1037 ehc->i.action |= action;
1038}
1039
1040/**
1041 * ata_eh_analyze_ncq_error - analyze NCQ error
1042 * @ap: ATA port to analyze NCQ error for
1043 *
1044 * Read log page 10h, determine the offending qc and acquire
1045 * error status TF. For NCQ device errors, all an LLDD has to do
1046 * is set AC_ERR_DEV in ehi->err_mask. This function takes
1047 * care of the rest.
1048 *
1049 * LOCKING:
1050 * Kernel thread context (may sleep).
1051 */
1052static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1053{
1054 struct ata_eh_context *ehc = &ap->eh_context;
1055 struct ata_device *dev = ap->device;
1056 struct ata_queued_cmd *qc;
1057 struct ata_taskfile tf;
1058 int tag, rc;
1059
1060 /* if frozen, we can't do much */
1061 if (ap->pflags & ATA_PFLAG_FROZEN)
1062 return;
1063
1064 /* is it NCQ device error? */
1065 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1066 return;
1067
1068 /* has LLDD analyzed already? */
1069 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1070 qc = __ata_qc_from_tag(ap, tag);
1071
1072 if (!(qc->flags & ATA_QCFLAG_FAILED))
1073 continue;
1074
1075 if (qc->err_mask)
1076 return;
1077 }
1078
1079 /* okay, this error is ours */
1080 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1081 if (rc) {
1082 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
1083 "(errno=%d)\n", rc);
1084 return;
1085 }
1086
1087 if (!(ap->sactive & (1 << tag))) {
1088 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
1089 "inactive tag %d\n", tag);
1090 return;
1091 }
1092
1093 /* we've got the perpetrator, condemn it */
1094 qc = __ata_qc_from_tag(ap, tag);
1095 memcpy(&qc->result_tf, &tf, sizeof(tf));
1096 qc->err_mask |= AC_ERR_DEV;
1097 ehc->i.err_mask &= ~AC_ERR_DEV;
1098}
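/*
 * Illustrative sketch (not part of the original source): from the LLDD
 * side, an NCQ device error only needs to be flagged and handed over to
 * EH; ata_eh_analyze_ncq_error() then reads log page 10h and pins the
 * error on the offending tag.  Note that the port must not be frozen or
 * the log page can't be read; ata_port_abort() fails all active qcs and
 * schedules EH without freezing.  The interrupt-handler fragment and
 * the MY_LLDD_IRQ_DEV_ERR status bit are hypothetical; AC_ERR_DEV and
 * ata_port_abort() are real libata interfaces.
 *
 *	if (irq_stat & MY_LLDD_IRQ_DEV_ERR) {
 *		ap->eh_info.err_mask |= AC_ERR_DEV;
 *		ata_port_abort(ap);
 *	}
 */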
1099
1100/**
1101 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1102 * @qc: qc to analyze
1103 * @tf: Taskfile registers to analyze
1104 *
1105 * Analyze taskfile of @qc and further determine cause of
1106 * failure. This function also requests ATAPI sense data if
1107 * available.
1108 *
1109 * LOCKING:
1110 * Kernel thread context (may sleep).
1111 *
1112 * RETURNS:
1113 * Determined recovery action
1114 */
1115static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1116 const struct ata_taskfile *tf)
1117{
1118 unsigned int tmp, action = 0;
1119 u8 stat = tf->command, err = tf->feature;
1120
1121 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1122 qc->err_mask |= AC_ERR_HSM;
1123 return ATA_EH_SOFTRESET;
1124 }
1125
1126 if (!(qc->err_mask & AC_ERR_DEV))
1127 return 0;
1128
1129 switch (qc->dev->class) {
1130 case ATA_DEV_ATA:
1131 if (err & ATA_ICRC)
1132 qc->err_mask |= AC_ERR_ATA_BUS;
1133 if (err & ATA_UNC)
1134 qc->err_mask |= AC_ERR_MEDIA;
1135 if (err & ATA_IDNF)
1136 qc->err_mask |= AC_ERR_INVALID;
1137 break;
1138
1139 case ATA_DEV_ATAPI:
1140 tmp = atapi_eh_request_sense(qc->dev,
1141 qc->scsicmd->sense_buffer);
1142 if (!tmp) {
1143 /* ATA_QCFLAG_SENSE_VALID is used to tell
1144 * atapi_qc_complete() that sense data is
1145 * already valid.
1146 *
1147 * TODO: interpret sense data and set
1148 * appropriate err_mask.
1149 */
1150 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1151 } else
1152 qc->err_mask |= tmp;
1153 }
1154
1155 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1156 action |= ATA_EH_SOFTRESET;
1157
1158 return action;
1159}
1160
1161static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1162{
1163 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1164 return 1;
1165
1166 if (ent->is_io) {
1167 if (ent->err_mask & AC_ERR_HSM)
1168 return 1;
1169 if ((ent->err_mask &
1170 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1171 return 2;
1172 }
1173
1174 return 0;
1175}
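/*
 * Worked examples for the categorization above (illustrative only):
 *
 *	AC_ERR_TIMEOUT, any command		-> Cat-1
 *	AC_ERR_HSM, is_io == 1			-> Cat-1
 *	AC_ERR_DEV alone, is_io == 1		-> Cat-2
 *	AC_ERR_DEV | AC_ERR_MEDIA, is_io == 1	-> Cat-0 (media errors don't
 *						   implicate the link)
 */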
1176
1177struct speed_down_needed_arg {
1178 u64 since;
1179 int nr_errors[3];
1180};
1181
1182static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1183{
1184 struct speed_down_needed_arg *arg = void_arg;
1185
1186 if (ent->timestamp < arg->since)
1187 return -1;
1188
1189 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1190 return 0;
1191}
1192
1193/**
1194 * ata_eh_speed_down_needed - Determine whether speed down is necessary
1195 * @dev: Device of interest
1196 *
1197 * This function examines the error ring of @dev and determines
1198 * whether speed down is necessary. Speed down is necessary if
1199 * there have been more than 3 Cat-1 errors or 10 Cat-2
1200 * errors during the last 15 minutes.
1201 *
1202 * Cat-1 errors are ATA_BUS or TIMEOUT for any command and HSM
1203 * violations for known supported commands.
1204 *
1205 * Cat-2 errors are unclassified DEV errors for known supported
1206 * commands.
1207 *
1208 * LOCKING:
1209 * Inherited from caller.
1210 *
1211 * RETURNS:
1212 * 1 if speed down is necessary, 0 otherwise
1213 */
1214static int ata_eh_speed_down_needed(struct ata_device *dev)
1215{
1216 const u64 interval = 15LLU * 60 * HZ;
1217 static const int err_limits[3] = { -1, 3, 10 };
1218 struct speed_down_needed_arg arg;
1219 struct ata_ering_entry *ent;
1220 int err_cat;
1221 u64 j64;
1222
1223 ent = ata_ering_top(&dev->ering);
1224 if (!ent)
1225 return 0;
1226
1227 err_cat = ata_eh_categorize_ering_entry(ent);
1228 if (err_cat == 0)
1229 return 0;
1230
1231 memset(&arg, 0, sizeof(arg));
1232
1233 j64 = get_jiffies_64();
1234 if (j64 >= interval)
1235 arg.since = j64 - interval;
1236 else
1237 arg.since = 0;
1238
1239 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1240
1241 return arg.nr_errors[err_cat] > err_limits[err_cat];
1242}
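/*
 * Illustrative reading of the limits above: with err_limits[] set to
 * { -1, 3, 10 }, speed down triggers on the 4th Cat-1 error or the
 * 11th Cat-2 error recorded within the 15 minute window, and only when
 * the most recent error itself falls into that category.
 */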
1243
1244/**
1245 * ata_eh_speed_down - record error and speed down if necessary
1246 * @dev: Failed device
1247 * @is_io: Did the device fail during normal IO?
1248 * @err_mask: err_mask of the error
1249 *
1250 * Record error and examine error history to determine whether
1251 * adjusting transmission speed is necessary. It also sets
1252 * transmission limits appropriately if such adjustment is
1253 * necessary.
1254 *
1255 * LOCKING:
1256 * Kernel thread context (may sleep).
1257 *
1258 * RETURNS:
1259 * 0 on success, -errno otherwise
1260 */
1261static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1262 unsigned int err_mask)
1263{
1264 if (!err_mask)
1265 return 0;
1266
1267 /* record error and determine whether speed down is necessary */
1268 ata_ering_record(&dev->ering, is_io, err_mask);
1269
1270 if (!ata_eh_speed_down_needed(dev))
1271 return 0;
1272
1273 /* speed down SATA link speed if possible */
1274 if (sata_down_spd_limit(dev->ap) == 0)
1275 return ATA_EH_HARDRESET;
1276
1277 /* lower transfer mode */
1278 if (ata_down_xfermask_limit(dev, 0) == 0)
1279 return ATA_EH_SOFTRESET;
1280
1281 ata_dev_printk(dev, KERN_ERR,
1282 "speed down requested but no transfer mode left\n");
1283 return 0;
1284}
1285
1286/**
1287 * ata_eh_autopsy - analyze error and determine recovery action
1288 * @ap: ATA port to perform autopsy on
1289 *
1290 * Analyze why @ap failed and determine which recovery action is
1291 * needed. This function also sets more detailed AC_ERR_* values
1292 * and fills sense data for ATAPI CHECK CONDITION.
1293 *
1294 * LOCKING:
1295 * Kernel thread context (may sleep).
1296 */
1297static void ata_eh_autopsy(struct ata_port *ap)
1298{
1299 struct ata_eh_context *ehc = &ap->eh_context;
1300 unsigned int all_err_mask = 0;
1301 int tag, is_io = 0;
1302 u32 serror;
1303 int rc;
1304
1305 DPRINTK("ENTER\n");
1306
1307 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1308 return;
1309
1310 /* obtain and analyze SError */
1311 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1312 if (rc == 0) {
1313 ehc->i.serror |= serror;
1314 ata_eh_analyze_serror(ap);
1315 } else if (rc != -EOPNOTSUPP)
1316 ehc->i.action |= ATA_EH_HARDRESET;
1317
1318 /* analyze NCQ failure */
1319 ata_eh_analyze_ncq_error(ap);
1320
1321 /* any real error trumps AC_ERR_OTHER */
1322 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1323 ehc->i.err_mask &= ~AC_ERR_OTHER;
1324
1325 all_err_mask |= ehc->i.err_mask;
1326
1327 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1328 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1329
1330 if (!(qc->flags & ATA_QCFLAG_FAILED))
1331 continue;
1332
1333 /* inherit upper level err_mask */
1334 qc->err_mask |= ehc->i.err_mask;
1335
1336 /* analyze TF */
1337 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1338
1339 /* DEV errors are probably spurious in case of ATA_BUS error */
1340 if (qc->err_mask & AC_ERR_ATA_BUS)
1341 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1342 AC_ERR_INVALID);
1343
1344 /* any real error trumps unknown error */
1345 if (qc->err_mask & ~AC_ERR_OTHER)
1346 qc->err_mask &= ~AC_ERR_OTHER;
1347
1348 /* SENSE_VALID trumps dev/unknown error and revalidation */
1349 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1350 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1351 ehc->i.action &= ~ATA_EH_REVALIDATE;
1352 }
1353
1354 /* accumulate error info */
1355 ehc->i.dev = qc->dev;
1356 all_err_mask |= qc->err_mask;
1357 if (qc->flags & ATA_QCFLAG_IO)
1358 is_io = 1;
1359 }
1360
1361 /* enforce default EH actions */
1362 if (ap->pflags & ATA_PFLAG_FROZEN ||
1363 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1364 ehc->i.action |= ATA_EH_SOFTRESET;
1365 else if (all_err_mask)
1366 ehc->i.action |= ATA_EH_REVALIDATE;
1367
1368 /* if we have offending qcs and the associated failed device */
1369 if (ehc->i.dev) {
1370 /* speed down */
1371 ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
1372 all_err_mask);
1373
1374 /* perform per-dev EH action only on the offending device */
1375 ehc->i.dev_action[ehc->i.dev->devno] |=
1376 ehc->i.action & ATA_EH_PERDEV_MASK;
1377 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
1378 }
1379
1380 DPRINTK("EXIT\n");
1381}
1382
1383/**
1384 * ata_eh_report - report error handling to user
1385 * @ap: ATA port EH is going on
1386 *
1387 * Report EH to user.
1388 *
1389 * LOCKING:
1390 * None.
1391 */
1392static void ata_eh_report(struct ata_port *ap)
1393{
1394 struct ata_eh_context *ehc = &ap->eh_context;
1395 const char *frozen, *desc;
1396 int tag, nr_failed = 0;
1397
1398 desc = NULL;
1399 if (ehc->i.desc[0] != '\0')
1400 desc = ehc->i.desc;
1401
1402 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1403 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1404
1405 if (!(qc->flags & ATA_QCFLAG_FAILED))
1406 continue;
1407 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1408 continue;
1409
1410 nr_failed++;
1411 }
1412
1413 if (!nr_failed && !ehc->i.err_mask)
1414 return;
1415
1416 frozen = "";
1417 if (ap->pflags & ATA_PFLAG_FROZEN)
1418 frozen = " frozen";
1419
1420 if (ehc->i.dev) {
1421 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1422 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1423 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1424 ehc->i.action, frozen);
1425 if (desc)
1426 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1427 } else {
1428 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1429 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1430 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1431 ehc->i.action, frozen);
1432 if (desc)
1433 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1434 }
1435
1436 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1437 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1438
1439 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1440 continue;
1441
1442 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1443 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1444 qc->tag, qc->tf.command, qc->err_mask,
1445 qc->result_tf.command, qc->result_tf.feature,
1446 ata_err_string(qc->err_mask));
1447 }
1448}
1449
1450static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1451 unsigned int *classes)
1452{
1453 int i, rc;
1454
1455 for (i = 0; i < ATA_MAX_DEVICES; i++)
1456 classes[i] = ATA_DEV_UNKNOWN;
1457
1458 rc = reset(ap, classes);
1459 if (rc)
1460 return rc;
1461
1462 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
1463 * complete and convert all ATA_DEV_UNKNOWN to
1464 * ATA_DEV_NONE.
1465 */
1466 for (i = 0; i < ATA_MAX_DEVICES; i++)
1467 if (classes[i] != ATA_DEV_UNKNOWN)
1468 break;
1469
1470 if (i < ATA_MAX_DEVICES)
1471 for (i = 0; i < ATA_MAX_DEVICES; i++)
1472 if (classes[i] == ATA_DEV_UNKNOWN)
1473 classes[i] = ATA_DEV_NONE;
1474
1475 return 0;
1476}
1477
1478static int ata_eh_followup_srst_needed(int rc, int classify,
1479 const unsigned int *classes)
1480{
1481 if (rc == -EAGAIN)
1482 return 1;
1483 if (rc != 0)
1484 return 0;
1485 if (classify && classes[0] == ATA_DEV_UNKNOWN)
1486 return 1;
1487 return 0;
1488}
1489
1490static int ata_eh_reset(struct ata_port *ap, int classify,
1491 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1492 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1493{
1494 struct ata_eh_context *ehc = &ap->eh_context;
1495 unsigned int *classes = ehc->classes;
1496 int tries = ATA_EH_RESET_TRIES;
1497 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
1498 unsigned int action;
1499 ata_reset_fn_t reset;
1500 int i, did_followup_srst, rc;
1501
1502 /* about to reset */
1503 ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1504
1505 /* Determine which reset to use and record in ehc->i.action.
1506 * prereset() may examine and modify it.
1507 */
1508 action = ehc->i.action;
1509 ehc->i.action &= ~ATA_EH_RESET_MASK;
1510 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1511 !(action & ATA_EH_HARDRESET))))
1512 ehc->i.action |= ATA_EH_SOFTRESET;
1513 else
1514 ehc->i.action |= ATA_EH_HARDRESET;
1515
1516 if (prereset) {
1517 rc = prereset(ap);
1518 if (rc) {
1519 ata_port_printk(ap, KERN_ERR,
1520 "prereset failed (errno=%d)\n", rc);
1521 return rc;
1522 }
1523 }
1524
1525 /* prereset() might have modified ehc->i.action */
1526 if (ehc->i.action & ATA_EH_HARDRESET)
1527 reset = hardreset;
1528 else if (ehc->i.action & ATA_EH_SOFTRESET)
1529 reset = softreset;
1530 else {
1531 /* prereset told us not to reset, bang classes and return */
1532 for (i = 0; i < ATA_MAX_DEVICES; i++)
1533 classes[i] = ATA_DEV_NONE;
1534 return 0;
1535 }
1536
1537 /* did prereset() screw up? if so, fix up to avoid oopsing */
1538 if (!reset) {
1539 ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1540 "invalid reset type\n");
1541 if (softreset)
1542 reset = softreset;
1543 else
1544 reset = hardreset;
1545 }
1546
1547 retry:
1548 /* shut up during boot probing */
1549 if (verbose)
1550 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1551 reset == softreset ? "soft" : "hard");
1552
1553 /* mark that this EH session started with reset */
1554 ehc->i.flags |= ATA_EHI_DID_RESET;
1555
1556 rc = ata_do_reset(ap, reset, classes);
1557
1558 did_followup_srst = 0;
1559 if (reset == hardreset &&
1560 ata_eh_followup_srst_needed(rc, classify, classes)) {
1561 /* okay, let's do follow-up softreset */
1562 did_followup_srst = 1;
1563 reset = softreset;
1564
1565 if (!reset) {
1566 ata_port_printk(ap, KERN_ERR,
1567 "follow-up softreset required "
1568 "but no softreset avaliable\n");
1569 return -EINVAL;
1570 }
1571
1572 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1573 rc = ata_do_reset(ap, reset, classes);
1574
1575 if (rc == 0 && classify &&
1576 classes[0] == ATA_DEV_UNKNOWN) {
1577 ata_port_printk(ap, KERN_ERR,
1578 "classification failed\n");
1579 return -EINVAL;
1580 }
1581 }
1582
1583 if (rc && --tries) {
1584 const char *type;
1585
1586 if (reset == softreset) {
1587 if (did_followup_srst)
1588 type = "follow-up soft";
1589 else
1590 type = "soft";
1591 } else
1592 type = "hard";
1593
1594 ata_port_printk(ap, KERN_WARNING,
1595 "%sreset failed, retrying in 5 secs\n", type);
1596 ssleep(5);
1597
1598 if (reset == hardreset)
1599 sata_down_spd_limit(ap);
1600 if (hardreset)
1601 reset = hardreset;
1602 goto retry;
1603 }
1604
1605 if (rc == 0) {
1606 /* After the reset, the device state is PIO 0 and the
1607 * controller state is undefined. Record the mode.
1608 */
1609 for (i = 0; i < ATA_MAX_DEVICES; i++)
1610 ap->device[i].pio_mode = XFER_PIO_0;
1611
1612 if (postreset)
1613 postreset(ap, classes);
1614
1615 /* reset successful, schedule revalidation */
1616 ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1617 ehc->i.action |= ATA_EH_REVALIDATE;
1618 }
1619
1620 return rc;
1621}
1622
1623static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1624 struct ata_device **r_failed_dev)
1625{
1626 struct ata_eh_context *ehc = &ap->eh_context;
1627 struct ata_device *dev;
1628 unsigned long flags;
1629 int i, rc = 0;
1630
1631 DPRINTK("ENTER\n");
1632
1633 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1634 unsigned int action;
1635
1636 dev = &ap->device[i];
1637 action = ata_eh_dev_action(dev);
1638
1639 if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
1640 if (ata_port_offline(ap)) {
1641 rc = -EIO;
1642 break;
1643 }
1644
1645 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1646 rc = ata_dev_revalidate(dev,
1647 ehc->i.flags & ATA_EHI_DID_RESET);
1648 if (rc)
1649 break;
1650
1651 ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1652
1653 /* schedule the scsi_rescan_device() here */
1654 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1655 } else if (dev->class == ATA_DEV_UNKNOWN &&
1656 ehc->tries[dev->devno] &&
1657 ata_class_enabled(ehc->classes[dev->devno])) {
1658 dev->class = ehc->classes[dev->devno];
1659
1660 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1661 if (rc == 0)
1662 rc = ata_dev_configure(dev, 1);
1663
1664 if (rc) {
1665 dev->class = ATA_DEV_UNKNOWN;
1666 break;
1667 }
1668
1669 spin_lock_irqsave(ap->lock, flags);
1670 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1671 spin_unlock_irqrestore(ap->lock, flags);
1672 }
1673 }
1674
1675 if (rc)
1676 *r_failed_dev = dev;
1677
1678 DPRINTK("EXIT\n");
1679 return rc;
1680}
1681
1682/**
1683 * ata_eh_suspend - handle suspend EH action
1684 * @ap: target host port
1685 * @r_failed_dev: result parameter to indicate failing device
1686 *
1687 * Handle suspend EH action. Disk devices are spun down and
1688 * other types of devices are just marked suspended. Once
1689 * suspended, no EH action to the device is allowed until it is
1690 * resumed.
1691 *
1692 * LOCKING:
1693 * Kernel thread context (may sleep).
1694 *
1695 * RETURNS:
1696 * 0 on success, -errno otherwise
1697 */
1698static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
1699{
1700 struct ata_device *dev;
1701 int i, rc = 0;
1702
1703 DPRINTK("ENTER\n");
1704
1705 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1706 unsigned long flags;
1707 unsigned int action, err_mask;
1708
1709 dev = &ap->device[i];
1710 action = ata_eh_dev_action(dev);
1711
1712 if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
1713 continue;
1714
1715 WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
1716
1717 ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
1718
1719 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1720 /* flush cache */
1721 rc = ata_flush_cache(dev);
1722 if (rc)
1723 break;
1724
1725 /* spin down */
1726 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
1727 if (err_mask) {
1728 ata_dev_printk(dev, KERN_ERR, "failed to "
1729 "spin down (err_mask=0x%x)\n",
1730 err_mask);
1731 rc = -EIO;
1732 break;
1733 }
1734 }
1735
1736 spin_lock_irqsave(ap->lock, flags);
1737 dev->flags |= ATA_DFLAG_SUSPENDED;
1738 spin_unlock_irqrestore(ap->lock, flags);
1739
1740 ata_eh_done(ap, dev, ATA_EH_SUSPEND);
1741 }
1742
1743 if (rc)
1744 *r_failed_dev = dev;
1745
1746 DPRINTK("EXIT\n");
1747 return rc;
1748}
1749
1750/**
1751 * ata_eh_prep_resume - prep for resume EH action
1752 * @ap: target host port
1753 *
1754 * Clear SUSPENDED in preparation for scheduled resume actions.
1755 * This allows other parts of EH to access the devices being
1756 * resumed.
1757 *
1758 * LOCKING:
1759 * Kernel thread context (may sleep).
1760 */
1761static void ata_eh_prep_resume(struct ata_port *ap)
1762{
1763 struct ata_device *dev;
1764 unsigned long flags;
1765 int i;
1766
1767 DPRINTK("ENTER\n");
1768
1769 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1770 unsigned int action;
1771
1772 dev = &ap->device[i];
1773 action = ata_eh_dev_action(dev);
1774
1775 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1776 continue;
1777
1778 spin_lock_irqsave(ap->lock, flags);
1779 dev->flags &= ~ATA_DFLAG_SUSPENDED;
1780 spin_unlock_irqrestore(ap->lock, flags);
1781 }
1782
1783 DPRINTK("EXIT\n");
1784}
1785
1786/**
1787 * ata_eh_resume - handle resume EH action
1788 * @ap: target host port
1789 * @r_failed_dev: result parameter to indicate failing device
1790 *
1791 * Handle resume EH action. Target devices are already reset and
1792 * revalidated. Spinning up is the only operation left.
1793 *
1794 * LOCKING:
1795 * Kernel thread context (may sleep).
1796 *
1797 * RETURNS:
1798 * 0 on success, -errno otherwise
1799 */
1800static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
1801{
1802 struct ata_device *dev;
1803 int i, rc = 0;
1804
1805 DPRINTK("ENTER\n");
1806
1807 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1808 unsigned int action, err_mask;
1809
1810 dev = &ap->device[i];
1811 action = ata_eh_dev_action(dev);
1812
1813 if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1814 continue;
1815
1816 ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
1817
1818 if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1819 err_mask = ata_do_simple_cmd(dev,
1820 ATA_CMD_IDLEIMMEDIATE);
1821 if (err_mask) {
1822 ata_dev_printk(dev, KERN_ERR, "failed to "
1823 "spin up (err_mask=0x%x)\n",
1824 err_mask);
1825 rc = -EIO;
1826 break;
1827 }
1828 }
1829
1830 ata_eh_done(ap, dev, ATA_EH_RESUME);
1831 }
1832
1833 if (rc)
1834 *r_failed_dev = dev;
1835
1836 DPRINTK("EXIT\n");
1837 return rc;
1838}
1839
1840static int ata_port_nr_enabled(struct ata_port *ap)
1841{
1842 int i, cnt = 0;
1843
1844 for (i = 0; i < ATA_MAX_DEVICES; i++)
1845 if (ata_dev_enabled(&ap->device[i]))
1846 cnt++;
1847 return cnt;
1848}
1849
1850static int ata_port_nr_vacant(struct ata_port *ap)
1851{
1852 int i, cnt = 0;
1853
1854 for (i = 0; i < ATA_MAX_DEVICES; i++)
1855 if (ap->device[i].class == ATA_DEV_UNKNOWN)
1856 cnt++;
1857 return cnt;
1858}
1859
1860static int ata_eh_skip_recovery(struct ata_port *ap)
1861{
1862 struct ata_eh_context *ehc = &ap->eh_context;
1863 int i;
1864
1865 /* skip if all possible devices are suspended */
1866 for (i = 0; i < ata_port_max_devices(ap); i++) {
1867 struct ata_device *dev = &ap->device[i];
1868
1869 if (!(dev->flags & ATA_DFLAG_SUSPENDED))
1870 break;
1871 }
1872
1873 if (i == ata_port_max_devices(ap))
1874 return 1;
1875
1876 /* thaw frozen port, resume link and recover failed devices */
1877 if ((ap->pflags & ATA_PFLAG_FROZEN) ||
1878 (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
1879 return 0;
1880
1881 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1882 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1883 struct ata_device *dev = &ap->device[i];
1884
1885 if (dev->class == ATA_DEV_UNKNOWN &&
1886 ehc->classes[dev->devno] != ATA_DEV_NONE)
1887 return 0;
1888 }
1889
1890 return 1;
1891}
1892
1893/**
1894 * ata_eh_recover - recover host port after error
1895 * @ap: host port to recover
1896 * @prereset: prereset method (can be NULL)
1897 * @softreset: softreset method (can be NULL)
1898 * @hardreset: hardreset method (can be NULL)
1899 * @postreset: postreset method (can be NULL)
1900 *
1901 * This is the alpha and omega, eum and yang, heart and soul of
1902 * libata exception handling. On entry, actions required to
1903 * recover the port and hotplug requests are recorded in
1904 * eh_context. This function executes all the operations with
1905 * appropriate retries and fallbacks to resurrect failed
1906 * devices, detach goners and greet newcomers.
1907 *
1908 * LOCKING:
1909 * Kernel thread context (may sleep).
1910 *
1911 * RETURNS:
1912 * 0 on success, -errno on failure.
1913 */
1914static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1915 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1916 ata_postreset_fn_t postreset)
1917{
1918 struct ata_eh_context *ehc = &ap->eh_context;
1919 struct ata_device *dev;
1920 int down_xfermask, i, rc;
1921
1922 DPRINTK("ENTER\n");
1923
1924 /* prep for recovery */
1925 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1926 dev = &ap->device[i];
1927
1928 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1929
1930 /* process hotplug request */
1931 if (dev->flags & ATA_DFLAG_DETACH)
1932 ata_eh_detach_dev(dev);
1933
1934 if (!ata_dev_enabled(dev) &&
1935 ((ehc->i.probe_mask & (1 << dev->devno)) &&
1936 !(ehc->did_probe_mask & (1 << dev->devno)))) {
1937 ata_eh_detach_dev(dev);
1938 ata_dev_init(dev);
1939 ehc->did_probe_mask |= (1 << dev->devno);
1940 ehc->i.action |= ATA_EH_SOFTRESET;
1941 }
1942 }
1943
1944 retry:
1945 down_xfermask = 0;
1946 rc = 0;
1947
1948 /* if UNLOADING, finish immediately */
1949 if (ap->pflags & ATA_PFLAG_UNLOADING)
1950 goto out;
1951
1952 /* prep for resume */
1953 ata_eh_prep_resume(ap);
1954
1955 /* skip EH if possible. */
1956 if (ata_eh_skip_recovery(ap))
1957 ehc->i.action = 0;
1958
1959 for (i = 0; i < ATA_MAX_DEVICES; i++)
1960 ehc->classes[i] = ATA_DEV_UNKNOWN;
1961
1962 /* reset */
1963 if (ehc->i.action & ATA_EH_RESET_MASK) {
1964 ata_eh_freeze_port(ap);
1965
1966 rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
1967 softreset, hardreset, postreset);
1968 if (rc) {
1969 ata_port_printk(ap, KERN_ERR,
1970 "reset failed, giving up\n");
1971 goto out;
1972 }
1973
1974 ata_eh_thaw_port(ap);
1975 }
1976
1977 /* revalidate existing devices and attach new ones */
1978 rc = ata_eh_revalidate_and_attach(ap, &dev);
1979 if (rc)
1980 goto dev_fail;
1981
1982 /* resume devices */
1983 rc = ata_eh_resume(ap, &dev);
1984 if (rc)
1985 goto dev_fail;
1986
1987 /* configure transfer mode if the port has been reset */
1988 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1989 rc = ata_set_mode(ap, &dev);
1990 if (rc) {
1991 down_xfermask = 1;
1992 goto dev_fail;
1993 }
1994 }
1995
1996 /* suspend devices */
1997 rc = ata_eh_suspend(ap, &dev);
1998 if (rc)
1999 goto dev_fail;
2000
2001 goto out;
2002
2003 dev_fail:
2004 switch (rc) {
2005 case -ENODEV:
2006 /* device missing, schedule probing */
2007 ehc->i.probe_mask |= (1 << dev->devno);
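		/* fall through */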
2008 case -EINVAL:
2009 ehc->tries[dev->devno] = 0;
2010 break;
2011 case -EIO:
2012 sata_down_spd_limit(ap);
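		/* fall through */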
2013 default:
2014 ehc->tries[dev->devno]--;
2015 if (down_xfermask &&
2016 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
2017 ehc->tries[dev->devno] = 0;
2018 }
2019
2020 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
2021 /* disable device if it has used up all its chances */
2022 ata_dev_disable(dev);
2023
2024 /* detach if offline */
2025 if (ata_port_offline(ap))
2026 ata_eh_detach_dev(dev);
2027
2028 /* probe if requested */
2029 if ((ehc->i.probe_mask & (1 << dev->devno)) &&
2030 !(ehc->did_probe_mask & (1 << dev->devno))) {
2031 ata_eh_detach_dev(dev);
2032 ata_dev_init(dev);
2033
2034 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
2035 ehc->did_probe_mask |= (1 << dev->devno);
2036 ehc->i.action |= ATA_EH_SOFTRESET;
2037 }
2038 } else {
2039 /* soft didn't work? be haaaaard */
2040 if (ehc->i.flags & ATA_EHI_DID_RESET)
2041 ehc->i.action |= ATA_EH_HARDRESET;
2042 else
2043 ehc->i.action |= ATA_EH_SOFTRESET;
2044 }
2045
2046 if (ata_port_nr_enabled(ap)) {
2047 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
2048 "devices, retrying in 5 secs\n");
2049 ssleep(5);
2050 } else {
2051 /* no device left, repeat fast */
2052 msleep(500);
2053 }
2054
2055 goto retry;
2056
2057 out:
2058 if (rc) {
2059 for (i = 0; i < ATA_MAX_DEVICES; i++)
2060 ata_dev_disable(&ap->device[i]);
2061 }
2062
2063 DPRINTK("EXIT, rc=%d\n", rc);
2064 return rc;
2065}
2066
2067/**
2068 * ata_eh_finish - finish up EH
2069 * @ap: host port to finish EH for
2070 *
2071 * Recovery is complete. Clean up EH states and retry or finish
2072 * failed qcs.
2073 *
2074 * LOCKING:
2075 * None.
2076 */
2077static void ata_eh_finish(struct ata_port *ap)
2078{
2079 int tag;
2080
2081 /* retry or finish qcs */
2082 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2083 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2084
2085 if (!(qc->flags & ATA_QCFLAG_FAILED))
2086 continue;
2087
2088 if (qc->err_mask) {
2089 /* FIXME: Once EH migration is complete,
2090 * generate sense data in this function,
2091 * considering both err_mask and tf.
2092 */
2093 if (qc->err_mask & AC_ERR_INVALID)
2094 ata_eh_qc_complete(qc);
2095 else
2096 ata_eh_qc_retry(qc);
2097 } else {
2098 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
2099 ata_eh_qc_complete(qc);
2100 } else {
2101 /* feed zero TF to sense generation */
2102 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
2103 ata_eh_qc_retry(qc);
2104 }
2105 }
2106 }
2107}
2108
2109/**
2110 * ata_do_eh - do standard error handling
2111 * @ap: host port to handle error for
2112 * @prereset: prereset method (can be NULL)
2113 * @softreset: softreset method (can be NULL)
2114 * @hardreset: hardreset method (can be NULL)
2115 * @postreset: postreset method (can be NULL)
2116 *
2117 * Perform standard error handling sequence.
2118 *
2119 * LOCKING:
2120 * Kernel thread context (may sleep).
2121 */
2122void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
2123 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2124 ata_postreset_fn_t postreset)
2125{
2126 ata_eh_autopsy(ap);
2127 ata_eh_report(ap);
2128 ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
2129 ata_eh_finish(ap);
2130}
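/*
 * Usage sketch (illustrative, not part of the original source): a
 * new-style LLDD typically points its ->error_handler at a thin wrapper
 * around ata_do_eh(), passing the reset methods it implements and NULL
 * for the rest.  my_error_handler and my_softreset are hypothetical;
 * ata_std_prereset(), sata_std_hardreset() and ata_std_postreset() are
 * the stock libata helpers.
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, my_softreset,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 */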
2131
2132/**
2133 * ata_eh_handle_port_suspend - perform port suspend operation
2134 * @ap: port to suspend
2135 *
2136 * Suspend @ap.
2137 *
2138 * LOCKING:
2139 * Kernel thread context (may sleep).
2140 */
2141static void ata_eh_handle_port_suspend(struct ata_port *ap)
2142{
2143 unsigned long flags;
2144 int rc = 0;
2145
2146 /* are we suspending? */
2147 spin_lock_irqsave(ap->lock, flags);
2148 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2149 ap->pm_mesg.event == PM_EVENT_ON) {
2150 spin_unlock_irqrestore(ap->lock, flags);
2151 return;
2152 }
2153 spin_unlock_irqrestore(ap->lock, flags);
2154
2155 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
2156
2157 /* suspend */
2158 ata_eh_freeze_port(ap);
2159
2160 if (ap->ops->port_suspend)
2161 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
2162
2163 /* report result */
2164 spin_lock_irqsave(ap->lock, flags);
2165
2166 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
2167 if (rc == 0)
2168 ap->pflags |= ATA_PFLAG_SUSPENDED;
2169 else
2170 ata_port_schedule_eh(ap);
2171
2172 if (ap->pm_result) {
2173 *ap->pm_result = rc;
2174 ap->pm_result = NULL;
2175 }
2176
2177 spin_unlock_irqrestore(ap->lock, flags);
2178
2179 return;
2180}
2181
2182/**
2183 * ata_eh_handle_port_resume - perform port resume operation
2184 * @ap: port to resume
2185 *
2186 * Resume @ap.
2187 *
2188 * This function also waits up to one second until all devices
2189 * hanging off this port request resume EH action. This is to
2190 * prevent invoking EH, and thus resetting, multiple times on resume.
2191 *
2192 * On DPM resume, where some devices might not be resumed
2193 * together, this may delay port resume by up to one second, but
2194 * such DPM resumes are rare and a one-second delay isn't too bad.
2195 *
2196 * LOCKING:
2197 * Kernel thread context (may sleep).
2198 */
2199static void ata_eh_handle_port_resume(struct ata_port *ap)
2200{
2201 unsigned long timeout;
2202 unsigned long flags;
2203 int i, rc = 0;
2204
2205 /* are we resuming? */
2206 spin_lock_irqsave(ap->lock, flags);
2207 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2208 ap->pm_mesg.event != PM_EVENT_ON) {
2209 spin_unlock_irqrestore(ap->lock, flags);
2210 return;
2211 }
2212 spin_unlock_irqrestore(ap->lock, flags);
2213
2214 /* spurious? */
2215 if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
2216 goto done;
2217
2218 if (ap->ops->port_resume)
2219 rc = ap->ops->port_resume(ap);
2220
2221 /* give devices time to request EH */
2222 timeout = jiffies + HZ; /* 1s max */
2223 while (1) {
2224 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2225 struct ata_device *dev = &ap->device[i];
2226 unsigned int action = ata_eh_dev_action(dev);
2227
2228 if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
2229 !(action & ATA_EH_RESUME))
2230 break;
2231 }
2232
2233 if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
2234 break;
2235 msleep(10);
2236 }
2237
2238 done:
2239 spin_lock_irqsave(ap->lock, flags);
2240 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
2241 if (ap->pm_result) {
2242 *ap->pm_result = rc;
2243 ap->pm_result = NULL;
2244 }
2245 spin_unlock_irqrestore(ap->lock, flags);
2246}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
new file mode 100644
index 000000000000..3986ec8741b4
--- /dev/null
+++ b/drivers/ata/libata-scsi.c
@@ -0,0 +1,3322 @@
1/*
2 * libata-scsi.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from
31 * - http://www.t10.org/
32 * - http://www.t13.org/
33 *
34 */
35
36#include <linux/kernel.h>
37#include <linux/blkdev.h>
38#include <linux/spinlock.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_eh.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_tcq.h>
45#include <scsi/scsi_transport.h>
46#include <linux/libata.h>
47#include <linux/hdreg.h>
48#include <asm/uaccess.h>
49
50#include "libata.h"
51
52#define SECTOR_SIZE 512
53
54typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
55
56static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
57 const struct scsi_device *scsidev);
58static struct ata_device * ata_scsi_find_dev(struct ata_port *ap,
59 const struct scsi_device *scsidev);
60static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
61 unsigned int id, unsigned int lun);
62
63
64#define RW_RECOVERY_MPAGE 0x1
65#define RW_RECOVERY_MPAGE_LEN 12
66#define CACHE_MPAGE 0x8
67#define CACHE_MPAGE_LEN 20
68#define CONTROL_MPAGE 0xa
69#define CONTROL_MPAGE_LEN 12
70#define ALL_MPAGES 0x3f
71#define ALL_SUB_MPAGES 0xff
72
73
74static const u8 def_rw_recovery_mpage[] = {
75 RW_RECOVERY_MPAGE,
76 RW_RECOVERY_MPAGE_LEN - 2,
77 (1 << 7) | /* AWRE, sat-r06 says it shall be 0 */
78 (1 << 6), /* ARRE (auto read reallocation) */
79 0, /* read retry count */
80 0, 0, 0, 0,
81 0, /* write retry count */
82 0, 0, 0
83};
84
85static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
86 CACHE_MPAGE,
87 CACHE_MPAGE_LEN - 2,
88 0, /* contains WCE, needs to be 0 for logic */
89 0, 0, 0, 0, 0, 0, 0, 0, 0,
90 0, /* contains DRA, needs to be 0 for logic */
91 0, 0, 0, 0, 0, 0, 0
92};
93
94static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
95 CONTROL_MPAGE,
96 CONTROL_MPAGE_LEN - 2,
97 2, /* DSENSE=0, GLTSD=1 */
98 0, /* [QAM+QERR may be 1, see 05-359r1] */
99 0, 0, 0, 0, 0xff, 0xff,
100 0, 30 /* extended self test time, see 05-359r1 */
101};
102
103/*
104 * libata transport template. libata doesn't do real transport stuff.
105 * It just needs the eh_timed_out hook.
106 */
107struct scsi_transport_template ata_scsi_transport_template = {
108 .eh_strategy_handler = ata_scsi_error,
109 .eh_timed_out = ata_scsi_timed_out,
110 .user_scan = ata_scsi_user_scan,
111};
112
113
114static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
115 void (*done)(struct scsi_cmnd *))
116{
117 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
118 /* "Invalid field in cbd" */
119 done(cmd);
120}
121
122/**
123 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
124 * @sdev: SCSI device for which BIOS geometry is to be determined
125 * @bdev: block device associated with @sdev
126 * @capacity: capacity of SCSI device
127 * @geom: location to which geometry will be output
128 *
129 * Generic bios head/sector/cylinder calculator
130 * used by sd. Most BIOSes nowadays expect an XXX/255/63 (CHS)
131 * mapping. Some situations may arise where the disk is not
132 * bootable if this is not used.
133 *
134 * LOCKING:
135 * Defined by the SCSI layer. We don't really care.
136 *
137 * RETURNS:
138 * Zero.
139 */
140int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
141 sector_t capacity, int geom[])
142{
143 geom[0] = 255;
144 geom[1] = 63;
145 sector_div(capacity, 255*63);
146 geom[2] = capacity;
147
148 return 0;
149}
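/*
 * Worked example (illustrative): a disk with capacity == 78125000
 * sectors (roughly 40 GB) is reported as 255 heads and 63 sectors per
 * track, with 78125000 / (255 * 63) = 4863 cylinders, i.e.
 * geom[] = { 255, 63, 4863 }.
 */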
150
151/**
152 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
153 * @scsidev: Device to which we are issuing command
154 * @arg: User provided data for issuing command
155 *
156 * LOCKING:
157 * Defined by the SCSI layer. We don't really care.
158 *
159 * RETURNS:
160 * Zero on success, negative errno on error.
161 */
162
163int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
164{
165 int rc = 0;
166 u8 scsi_cmd[MAX_COMMAND_SIZE];
167 u8 args[4], *argbuf = NULL;
168 int argsize = 0;
169 struct scsi_sense_hdr sshdr;
170 enum dma_data_direction data_dir;
171
172 if (arg == NULL)
173 return -EINVAL;
174
175 if (copy_from_user(args, arg, sizeof(args)))
176 return -EFAULT;
177
178 memset(scsi_cmd, 0, sizeof(scsi_cmd));
179
180 if (args[3]) {
181 argsize = SECTOR_SIZE * args[3];
182 argbuf = kmalloc(argsize, GFP_KERNEL);
183 if (argbuf == NULL) {
184 rc = -ENOMEM;
185 goto error;
186 }
187
188 scsi_cmd[1] = (4 << 1); /* PIO Data-in */
189 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
190 block count in sector count field */
191 data_dir = DMA_FROM_DEVICE;
192 } else {
193 scsi_cmd[1] = (3 << 1); /* Non-data */
194 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
195 data_dir = DMA_NONE;
196 }
197
198 scsi_cmd[0] = ATA_16;
199
200 scsi_cmd[4] = args[2];
201 if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */
202 scsi_cmd[6] = args[3];
203 scsi_cmd[8] = args[1];
204 scsi_cmd[10] = 0x4f;
205 scsi_cmd[12] = 0xc2;
206 } else {
207 scsi_cmd[6] = args[1];
208 }
209 scsi_cmd[14] = args[0];
210
211 /* Good values for timeout and retries? Values below
212 from scsi_ioctl_send_command() for default case... */
213 if (scsi_execute_req(scsidev, scsi_cmd, data_dir, argbuf, argsize,
214 &sshdr, (10*HZ), 5)) {
215 rc = -EIO;
216 goto error;
217 }
218
219 /* Need code to retrieve data from check condition? */
220
221 if ((argbuf)
222 && copy_to_user(arg + sizeof(args), argbuf, argsize))
223 rc = -EFAULT;
224error:
225 kfree(argbuf);
226 return rc;
227}
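/*
 * Userspace sketch (illustrative, not part of the original source):
 * HDIO_DRIVE_CMD takes a four-byte header -- args[0] is the ATA
 * command, args[1] and args[2] carry command-specific parameters, and
 * args[3] is the number of 512-byte sectors to read back -- optionally
 * followed by the data buffer.  Issuing IDENTIFY DEVICE (0xec) could
 * look like this, with fd an open file descriptor on the disk node:
 *
 *	unsigned char args[4 + 512] = { 0xec, 0, 0, 1 };
 *
 *	if (ioctl(fd, HDIO_DRIVE_CMD, args))
 *		perror("HDIO_DRIVE_CMD");
 */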
228
229/**
230 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
231 * @scsidev: Device to which we are issuing command
232 * @arg: User provided data for issuing command
233 *
234 * LOCKING:
235 * Defined by the SCSI layer. We don't really care.
236 *
237 * RETURNS:
238 * Zero on success, negative errno on error.
239 */
240int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
241{
242 int rc = 0;
243 u8 scsi_cmd[MAX_COMMAND_SIZE];
244 u8 args[7];
245 struct scsi_sense_hdr sshdr;
246
247 if (arg == NULL)
248 return -EINVAL;
249
250 if (copy_from_user(args, arg, sizeof(args)))
251 return -EFAULT;
252
253 memset(scsi_cmd, 0, sizeof(scsi_cmd));
254 scsi_cmd[0] = ATA_16;
255 scsi_cmd[1] = (3 << 1); /* Non-data */
256 /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
257 scsi_cmd[4] = args[1];
258 scsi_cmd[6] = args[2];
259 scsi_cmd[8] = args[3];
260 scsi_cmd[10] = args[4];
261 scsi_cmd[12] = args[5];
262 scsi_cmd[14] = args[0];
263
264 /* Good values for timeout and retries? Values below
265 from scsi_ioctl_send_command() for default case... */
266 if (scsi_execute_req(scsidev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
267 (10*HZ), 5))
268 rc = -EIO;
269
270 /* Need code to retrieve data from check condition? */
271 return rc;
272}
273
274int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
275{
276 int val = -EINVAL, rc = -EINVAL;
277
278 switch (cmd) {
279 case ATA_IOC_GET_IO32:
280 val = 0;
281 if (copy_to_user(arg, &val, 1))
282 return -EFAULT;
283 return 0;
284
285 case ATA_IOC_SET_IO32:
286 val = (unsigned long) arg;
287 if (val != 0)
288 return -EINVAL;
289 return 0;
290
291 case HDIO_DRIVE_CMD:
292 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
293 return -EACCES;
294 return ata_cmd_ioctl(scsidev, arg);
295
296 case HDIO_DRIVE_TASK:
297 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
298 return -EACCES;
299 return ata_task_ioctl(scsidev, arg);
300
301 default:
302 rc = -ENOTTY;
303 break;
304 }
305
306 return rc;
307}
308
309/**
310 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
311 * @dev: ATA device to which the new command is attached
312 * @cmd: SCSI command that originated this ATA command
313 * @done: SCSI command completion function
314 *
315 * Obtain a reference to an unused ata_queued_cmd structure,
316 * which is the basic libata structure representing a single
317 * ATA command sent to the hardware.
318 *
319 * If a command was available, fill in the SCSI-specific
320 * portions of the structure with information on the
321 * current command.
322 *
323 * LOCKING:
324 * spin_lock_irqsave(host lock)
325 *
326 * RETURNS:
327 * Command allocated, or %NULL if none available.
328 */
329struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
330 struct scsi_cmnd *cmd,
331 void (*done)(struct scsi_cmnd *))
332{
333 struct ata_queued_cmd *qc;
334
335 qc = ata_qc_new_init(dev);
336 if (qc) {
337 qc->scsicmd = cmd;
338 qc->scsidone = done;
339
340 if (cmd->use_sg) {
341 qc->__sg = (struct scatterlist *) cmd->request_buffer;
342 qc->n_elem = cmd->use_sg;
343 } else {
344 qc->__sg = &qc->sgent;
345 qc->n_elem = 1;
346 }
347 } else {
348 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
349 done(cmd);
350 }
351
352 return qc;
353}
354
355/**
356 * ata_dump_status - user friendly display of error info
357 * @id: id of the port in question
358 * @tf: ptr to filled out taskfile
359 *
360 * Decode and dump the ATA error/status registers for the user so
361 * that they have some idea what really happened at the
362 * non-make-believe layer.
363 *
364 * LOCKING:
365 * inherited from caller
366 */
367void ata_dump_status(unsigned id, struct ata_taskfile *tf)
368{
369 u8 stat = tf->command, err = tf->feature;
370
371 printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
372 if (stat & ATA_BUSY) {
373 printk("Busy }\n"); /* Data is not valid in this case */
374 } else {
375 if (stat & 0x40) printk("DriveReady ");
376 if (stat & 0x20) printk("DeviceFault ");
377 if (stat & 0x10) printk("SeekComplete ");
378 if (stat & 0x08) printk("DataRequest ");
379 if (stat & 0x04) printk("CorrectedError ");
380 if (stat & 0x02) printk("Index ");
381 if (stat & 0x01) printk("Error ");
382 printk("}\n");
383
384 if (err) {
385 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
386 if (err & 0x04) printk("DriveStatusError ");
387 if (err & 0x80) {
388 if (err & 0x04) printk("BadCRC ");
389 else printk("Sector ");
390 }
391 if (err & 0x40) printk("UncorrectableError ");
392 if (err & 0x10) printk("SectorIdNotFound ");
393 if (err & 0x02) printk("TrackZeroNotFound ");
394 if (err & 0x01) printk("AddrMarkNotFound ");
395 printk("}\n");
396 }
397 }
398}
399
400/**
401 * ata_scsi_device_suspend - suspend ATA device associated with sdev
402 * @sdev: the SCSI device to suspend
403 * @mesg: target power management message
404 *
405 * Request suspend EH action on the ATA device associated with
406 * @sdev and wait for the operation to complete.
407 *
408 * LOCKING:
409 * Kernel thread context (may sleep).
410 *
411 * RETURNS:
412 * 0 on success, -errno otherwise.
413 */
414int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t mesg)
415{
416 struct ata_port *ap = ata_shost_to_port(sdev->host);
417 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
418 unsigned long flags;
419 unsigned int action;
420 int rc = 0;
421
422 if (!dev)
423 goto out;
424
425 spin_lock_irqsave(ap->lock, flags);
426
427 /* wait for the previous resume to complete */
428 while (dev->flags & ATA_DFLAG_SUSPENDED) {
429 spin_unlock_irqrestore(ap->lock, flags);
430 ata_port_wait_eh(ap);
431 spin_lock_irqsave(ap->lock, flags);
432 }
433
434 /* if @sdev is already detached, nothing to do */
435 if (sdev->sdev_state == SDEV_OFFLINE ||
436 sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
437 goto out_unlock;
438
439 /* request suspend */
440 action = ATA_EH_SUSPEND;
441 if (mesg.event != PM_EVENT_SUSPEND)
442 action |= ATA_EH_PM_FREEZE;
443 ap->eh_info.dev_action[dev->devno] |= action;
444 ap->eh_info.flags |= ATA_EHI_QUIET;
445 ata_port_schedule_eh(ap);
446
447 spin_unlock_irqrestore(ap->lock, flags);
448
449 /* wait for EH to do the job */
450 ata_port_wait_eh(ap);
451
452 spin_lock_irqsave(ap->lock, flags);
453
454 /* If @sdev is still attached but the associated ATA device
455 * isn't suspended, the operation failed.
456 */
457 if (sdev->sdev_state != SDEV_OFFLINE &&
458 sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL &&
459 !(dev->flags & ATA_DFLAG_SUSPENDED))
460 rc = -EIO;
461
462 out_unlock:
463 spin_unlock_irqrestore(ap->lock, flags);
464 out:
465 if (rc == 0)
466 sdev->sdev_gendev.power.power_state = mesg;
467 return rc;
468}
469
470/**
471 * ata_scsi_device_resume - resume ATA device associated with sdev
472 * @sdev: the SCSI device to resume
473 *
474 * Request resume EH action on the ATA device associated with
475 * @sdev and return immediately. This enables parallel
476 * wakeup/spinup of devices.
477 *
478 * LOCKING:
479 * Kernel thread context (may sleep).
480 *
481 * RETURNS:
482 * 0.
483 */
484int ata_scsi_device_resume(struct scsi_device *sdev)
485{
486 struct ata_port *ap = ata_shost_to_port(sdev->host);
487 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
488 struct ata_eh_info *ehi = &ap->eh_info;
489 unsigned long flags;
490 unsigned int action;
491
492 if (!dev)
493 goto out;
494
495 spin_lock_irqsave(ap->lock, flags);
496
497 /* if @sdev is already detached, nothing to do */
498 if (sdev->sdev_state == SDEV_OFFLINE ||
499 sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
500 goto out_unlock;
501
502 /* request resume */
503 action = ATA_EH_RESUME;
504 if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND)
505 __ata_ehi_hotplugged(ehi);
506 else
507 action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET;
508 ehi->dev_action[dev->devno] |= action;
509
510 /* We don't want autopsy and verbose EH messages. Disable
511 * those if we're the only device on this link.
512 */
513 if (ata_port_max_devices(ap) == 1)
514 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
515
516 ata_port_schedule_eh(ap);
517
518 out_unlock:
519 spin_unlock_irqrestore(ap->lock, flags);
520 out:
521 sdev->sdev_gendev.power.power_state = PMSG_ON;
522 return 0;
523}
524
525/**
526 * ata_to_sense_error - convert ATA error to SCSI error
527 * @id: ATA device number
528 * @drv_stat: value contained in ATA status register
529 * @drv_err: value contained in ATA error register
530 * @sk: the sense key we'll fill out
531 * @asc: the additional sense code we'll fill out
532 * @ascq: the additional sense code qualifier we'll fill out
533 * @verbose: be verbose
534 *
535 * Converts an ATA error into a SCSI error. Fill out pointers to
536 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
537 * format sense blocks.
538 *
539 * LOCKING:
540 * spin_lock_irqsave(host lock)
541 */
542void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
543 u8 *ascq, int verbose)
544{
545 int i;
546
547 /* Based on the 3ware driver translation table */
548 static const unsigned char sense_table[][4] = {
549 /* BBD|ECC|ID|MAR */
550 {0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
551 /* BBD|ECC|ID */
552 {0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
553 /* ECC|MC|MARK */
554 {0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error
555 /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */
556 {0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error
557 /* MC|ID|ABRT|TRK0|MARK */
558 {0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready
559 /* MCR|MARK */
560 {0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready
561 /* Bad address mark */
562 {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field
563 /* TRK0 */
564 {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error
565 /* Abort & !ICRC */
566 {0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command
567 /* Media change request */
568 {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline
569 /* SRV */
570 {0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found
571 /* Media change */
572 {0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline
573 /* ECC */
574 {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error
575 /* BBD - block marked bad */
576 {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error
577 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
578 };
579 static const unsigned char stat_table[][4] = {
580 /* Must be first because BUSY means no other bits valid */
581 {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now
582 {0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault
583 {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now
584 {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered
585 {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
586 };
587
588 /*
589 * Is this an error we can process/parse
590 */
591 if (drv_stat & ATA_BUSY) {
592 drv_err = 0; /* Ignore the err bits, they're invalid */
593 }
594
595 if (drv_err) {
596 /* Look for drv_err */
597 for (i = 0; sense_table[i][0] != 0xFF; i++) {
598 /* Look for best matches first */
599 if ((sense_table[i][0] & drv_err) ==
600 sense_table[i][0]) {
601 *sk = sense_table[i][1];
602 *asc = sense_table[i][2];
603 *ascq = sense_table[i][3];
604 goto translate_done;
605 }
606 }
607 /* No immediate match */
608 if (verbose)
609 printk(KERN_WARNING "ata%u: no sense translation for "
610 "error 0x%02x\n", id, drv_err);
611 }
612
613 /* Fall back to interpreting status bits */
614 for (i = 0; stat_table[i][0] != 0xFF; i++) {
615 if (stat_table[i][0] & drv_stat) {
616 *sk = stat_table[i][1];
617 *asc = stat_table[i][2];
618 *ascq = stat_table[i][3];
619 goto translate_done;
620 }
621 }
622 /* No error? Undecoded? */
623 if (verbose)
624 printk(KERN_WARNING "ata%u: no sense translation for "
625 "status: 0x%02x\n", id, drv_stat);
626
627 /* We need a sensible error return here, which is tricky, and one
628 that won't cause people to do things like return a disk wrongly */
629 *sk = ABORTED_COMMAND;
630 *asc = 0x00;
631 *ascq = 0x00;
632
633 translate_done:
634 if (verbose)
635 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
636 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
637 id, drv_stat, drv_err, *sk, *asc, *ascq);
638 return;
639}
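/*
 * Worked example (illustrative): an uncorrectable media error reported
 * as status 0x51 (DRDY|ERR) with error 0x40 (UNC) matches the ECC row
 * of sense_table[] and is translated to SK/ASC/ASCQ =
 * MEDIUM_ERROR/0x11/0x04 (unrecovered read error).
 */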
640
641/*
642 * ata_gen_ata_desc_sense - Generate check condition sense block.
643 * @qc: Command that completed.
644 *
645 * This function is specific to the ATA descriptor format sense
646 * block specified for the ATA pass through commands. Regardless
647 * of whether the command errored or not, return a sense
648 * block. Copy all controller registers into the sense
649 * block. Clear sense key, ASC & ASCQ if there is no error.
650 *
651 * LOCKING:
652 * spin_lock_irqsave(host lock)
653 */
654void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
655{
656 struct scsi_cmnd *cmd = qc->scsicmd;
657 struct ata_taskfile *tf = &qc->result_tf;
658 unsigned char *sb = cmd->sense_buffer;
659 unsigned char *desc = sb + 8;
660 int verbose = qc->ap->ops->error_handler == NULL;
661
662 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
663
664 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
665
666 /*
667 * Use ata_to_sense_error() to map status register bits
668 * onto sense key, asc & ascq.
669 */
670 if (qc->err_mask ||
671 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
672 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
673 &sb[1], &sb[2], &sb[3], verbose);
674 sb[1] &= 0x0f;
675 }
676
677 /*
678 * Sense data is current and format is descriptor.
679 */
680 sb[0] = 0x72;
681
682 desc[0] = 0x09;
683
684 /*
685 * Set length of additional sense data.
686 * Since we only populate descriptor 0, the total
687 * length is the same (fixed) length as descriptor 0.
688 */
689 desc[1] = sb[7] = 14;
690
691 /*
692 * Copy registers into sense buffer.
693 */
694 desc[2] = 0x00;
695 desc[3] = tf->feature; /* == error reg */
696 desc[5] = tf->nsect;
697 desc[7] = tf->lbal;
698 desc[9] = tf->lbam;
699 desc[11] = tf->lbah;
700 desc[12] = tf->device;
701 desc[13] = tf->command; /* == status reg */
702
703 /*
704 * Fill in Extend bit, and the high order bytes
705 * if applicable.
706 */
707 if (tf->flags & ATA_TFLAG_LBA48) {
708 desc[2] |= 0x01;
709 desc[4] = tf->hob_nsect;
710 desc[6] = tf->hob_lbal;
711 desc[8] = tf->hob_lbam;
712 desc[10] = tf->hob_lbah;
713 }
714}
715
716/**
717 * ata_gen_fixed_sense - generate a SCSI fixed sense block
718 * @qc: Command that we are erroring out
719 *
720 * Leverage ata_to_sense_error() to give us the codes. Fit our
721 * LBA in here if there's room.
722 *
723 * LOCKING:
724 * inherited from caller
725 */
726void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
727{
728 struct scsi_cmnd *cmd = qc->scsicmd;
729 struct ata_taskfile *tf = &qc->result_tf;
730 unsigned char *sb = cmd->sense_buffer;
731 int verbose = qc->ap->ops->error_handler == NULL;
732
733 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
734
735 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
736
737 /*
738 * Use ata_to_sense_error() to map status register bits
739 * onto sense key, asc & ascq.
740 */
741 if (qc->err_mask ||
742 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
743 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
744 &sb[2], &sb[12], &sb[13], verbose);
745 sb[2] &= 0x0f;
746 }
747
748 sb[0] = 0x70;
749 sb[7] = 0x0a;
750
751 if (tf->flags & ATA_TFLAG_LBA48) {
752 /* TODO: find solution for LBA48 descriptors */
753 }
754
755 else if (tf->flags & ATA_TFLAG_LBA) {
756 /* A small (28b) LBA will fit in the 32b info field */
757 sb[0] |= 0x80; /* set valid bit */
758 sb[3] = tf->device & 0x0f;
759 sb[4] = tf->lbah;
760 sb[5] = tf->lbam;
761 sb[6] = tf->lbal;
762 }
763
764 else {
765 /* TODO: C/H/S */
766 }
767}
768
769static void ata_scsi_sdev_config(struct scsi_device *sdev)
770{
771 sdev->use_10_for_rw = 1;
772 sdev->use_10_for_ms = 1;
773}
774
775static void ata_scsi_dev_config(struct scsi_device *sdev,
776 struct ata_device *dev)
777{
778 unsigned int max_sectors;
779
780 /* TODO: 2048 is an arbitrary number, not the
781 * hardware maximum. This should be increased to
782 * 65534 when Jens Axboe's patch for dynamically
783 * determining max_sectors is merged.
784 */
785 max_sectors = ATA_MAX_SECTORS;
786 if (dev->flags & ATA_DFLAG_LBA48)
787 max_sectors = ATA_MAX_SECTORS_LBA48;
788 if (dev->max_sectors)
789 max_sectors = dev->max_sectors;
790
791 blk_queue_max_sectors(sdev->request_queue, max_sectors);
792
793 /*
794 * SATA DMA transfers must be multiples of 4 byte, so
795 * we need to pad ATAPI transfers using an extra sg.
796 * Decrement max hw segments accordingly.
797 */
798 if (dev->class == ATA_DEV_ATAPI) {
799 request_queue_t *q = sdev->request_queue;
800 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
801 }
802
803 if (dev->flags & ATA_DFLAG_NCQ) {
804 int depth;
805
806 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
807 depth = min(ATA_MAX_QUEUE - 1, depth);
808 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
809 }
810}
811
812/**
813 * ata_scsi_slave_config - Set SCSI device attributes
814 * @sdev: SCSI device to examine
815 *
816 * This is called before we actually start reading
817 * and writing to the device, to configure certain
818 * SCSI mid-layer behaviors.
819 *
820 * LOCKING:
821 * Defined by SCSI layer. We don't really care.
822 */
823
824int ata_scsi_slave_config(struct scsi_device *sdev)
825{
826 struct ata_port *ap = ata_shost_to_port(sdev->host);
827 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
828
829 ata_scsi_sdev_config(sdev);
830
831 blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
832
833 if (dev)
834 ata_scsi_dev_config(sdev, dev);
835
836 return 0; /* scsi layer doesn't check return value, sigh */
837}
838
839/**
840 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
841 * @sdev: SCSI device to be destroyed
842 *
843 * @sdev is about to be destroyed for hot/warm unplugging. If
844 * this unplugging was initiated by libata as indicated by NULL
845 * dev->sdev, this function doesn't have to do anything.
846 * Otherwise, SCSI layer initiated warm-unplug is in progress.
847 * Clear dev->sdev, schedule the device for ATA detach and invoke
848 * EH.
849 *
850 * LOCKING:
851 * Defined by SCSI layer. We don't really care.
852 */
853void ata_scsi_slave_destroy(struct scsi_device *sdev)
854{
855 struct ata_port *ap = ata_shost_to_port(sdev->host);
856 unsigned long flags;
857 struct ata_device *dev;
858
859 if (!ap->ops->error_handler)
860 return;
861
862 spin_lock_irqsave(ap->lock, flags);
863 dev = __ata_scsi_find_dev(ap, sdev);
864 if (dev && dev->sdev) {
865 /* SCSI device already in CANCEL state, no need to offline it */
866 dev->sdev = NULL;
867 dev->flags |= ATA_DFLAG_DETACH;
868 ata_port_schedule_eh(ap);
869 }
870 spin_unlock_irqrestore(ap->lock, flags);
871}
872
873/**
874 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
875 * @sdev: SCSI device to configure queue depth for
876 * @queue_depth: new queue depth
877 *
878 * This is libata standard hostt->change_queue_depth callback.
879 * SCSI will call into this callback when user tries to set queue
880 * depth via sysfs.
881 *
882 * LOCKING:
883 * SCSI layer (we don't care)
884 *
885 * RETURNS:
886 * Newly configured queue depth.
887 */
888int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
889{
890 struct ata_port *ap = ata_shost_to_port(sdev->host);
891 struct ata_device *dev;
892 int max_depth;
893
894 if (queue_depth < 1)
895 return sdev->queue_depth;
896
897 dev = ata_scsi_find_dev(ap, sdev);
898 if (!dev || !ata_dev_enabled(dev))
899 return sdev->queue_depth;
900
901 max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
902 max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
903 if (queue_depth > max_depth)
904 queue_depth = max_depth;
905
906 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
907 return queue_depth;
908}
909
910/**
911 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
912 * @qc: Storage for translated ATA taskfile
913 * @scsicmd: SCSI command to translate
914 *
915 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
916 * (to start). Perhaps these commands should be preceded by
917 * CHECK POWER MODE to see what power mode the device is already in.
918 * [See SAT revision 5 at www.t10.org]
919 *
920 * LOCKING:
921 * spin_lock_irqsave(host lock)
922 *
923 * RETURNS:
924 * Zero on success, non-zero on error.
925 */
926
927static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
928 const u8 *scsicmd)
929{
930 struct ata_taskfile *tf = &qc->tf;
931
932 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
933 tf->protocol = ATA_PROT_NODATA;
934 if (scsicmd[1] & 0x1) {
935 ; /* ignore IMMED bit, violates sat-r05 */
936 }
937 if (scsicmd[4] & 0x2)
938 goto invalid_fld; /* LOEJ bit set not supported */
939 if (((scsicmd[4] >> 4) & 0xf) != 0)
940 goto invalid_fld; /* power conditions not supported */
941 if (scsicmd[4] & 0x1) {
942 tf->nsect = 1; /* 1 sector, lba=0 */
943
944 if (qc->dev->flags & ATA_DFLAG_LBA) {
945 tf->flags |= ATA_TFLAG_LBA;
946
947 tf->lbah = 0x0;
948 tf->lbam = 0x0;
949 tf->lbal = 0x0;
950 tf->device |= ATA_LBA;
951 } else {
952 /* CHS */
953 tf->lbal = 0x1; /* sect */
954 tf->lbam = 0x0; /* cyl low */
955 tf->lbah = 0x0; /* cyl high */
956 }
957
958 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
959 } else {
960 tf->nsect = 0; /* time period value (0 implies now) */
961 tf->command = ATA_CMD_STANDBY;
962 /* Consider: ATA STANDBY IMMEDIATE command */
963 }
964 /*
965 * Standby and Idle condition timers could be implemented but that
966 * would require libata to implement the Power condition mode page
967 * and allow the user to change it. Changing mode pages requires
968 * MODE SELECT to be implemented.
969 */
970
971 return 0;
972
973invalid_fld:
974 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
975 /* "Invalid field in cdb" */
976 return 1;
977}
978
979
980/**
981 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
982 * @qc: Storage for translated ATA taskfile
983 * @scsicmd: SCSI command to translate (ignored)
984 *
985 * Sets up an ATA taskfile to issue FLUSH CACHE or
986 * FLUSH CACHE EXT.
987 *
988 * LOCKING:
989 * spin_lock_irqsave(host lock)
990 *
991 * RETURNS:
992 * Zero on success, non-zero on error.
993 */
994
995static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
996{
997 struct ata_taskfile *tf = &qc->tf;
998
999 tf->flags |= ATA_TFLAG_DEVICE;
1000 tf->protocol = ATA_PROT_NODATA;
1001
1002 if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
1003 (ata_id_has_flush_ext(qc->dev->id)))
1004 tf->command = ATA_CMD_FLUSH_EXT;
1005 else
1006 tf->command = ATA_CMD_FLUSH;
1007
1008 return 0;
1009}
1010
1011/**
1012 * scsi_6_lba_len - Get LBA and transfer length
1013 * @scsicmd: SCSI command to translate
1014 *
1015 * Calculate LBA and transfer length for 6-byte commands.
1016 *
1017 * RETURNS:
1018 * @plba: the LBA
1019 * @plen: the transfer length
1020 */
1021
1022static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1023{
1024 u64 lba = 0;
1025 u32 len = 0;
1026
1027 VPRINTK("six-byte command\n");
1028
1029 lba |= ((u64)scsicmd[2]) << 8;
1030 lba |= ((u64)scsicmd[3]);
1031
1032 len |= ((u32)scsicmd[4]);
1033
1034 *plba = lba;
1035 *plen = len;
1036}
1037
1038/**
1039 * scsi_10_lba_len - Get LBA and transfer length
1040 * @scsicmd: SCSI command to translate
1041 *
1042 * Calculate LBA and transfer length for 10-byte commands.
1043 *
1044 * RETURNS:
1045 * @plba: the LBA
1046 * @plen: the transfer length
1047 */
1048
1049static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1050{
1051 u64 lba = 0;
1052 u32 len = 0;
1053
1054 VPRINTK("ten-byte command\n");
1055
1056 lba |= ((u64)scsicmd[2]) << 24;
1057 lba |= ((u64)scsicmd[3]) << 16;
1058 lba |= ((u64)scsicmd[4]) << 8;
1059 lba |= ((u64)scsicmd[5]);
1060
1061 len |= ((u32)scsicmd[7]) << 8;
1062 len |= ((u32)scsicmd[8]);
1063
1064 *plba = lba;
1065 *plen = len;
1066}
1067
1068/**
1069 * scsi_16_lba_len - Get LBA and transfer length
1070 * @scsicmd: SCSI command to translate
1071 *
1072 * Calculate LBA and transfer length for 16-byte commands.
1073 *
1074 * RETURNS:
1075 * @plba: the LBA
1076 * @plen: the transfer length
1077 */
1078
1079static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
1080{
1081 u64 lba = 0;
1082 u32 len = 0;
1083
1084 VPRINTK("sixteen-byte command\n");
1085
1086 lba |= ((u64)scsicmd[2]) << 56;
1087 lba |= ((u64)scsicmd[3]) << 48;
1088 lba |= ((u64)scsicmd[4]) << 40;
1089 lba |= ((u64)scsicmd[5]) << 32;
1090 lba |= ((u64)scsicmd[6]) << 24;
1091 lba |= ((u64)scsicmd[7]) << 16;
1092 lba |= ((u64)scsicmd[8]) << 8;
1093 lba |= ((u64)scsicmd[9]);
1094
1095 len |= ((u32)scsicmd[10]) << 24;
1096 len |= ((u32)scsicmd[11]) << 16;
1097 len |= ((u32)scsicmd[12]) << 8;
1098 len |= ((u32)scsicmd[13]);
1099
1100 *plba = lba;
1101 *plen = len;
1102}
1103
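/*
 * Editor's note: a hedged usage sketch for the extraction helpers
 * above.  For a READ(10) CDB such as { 0x28, 0, 0x00, 0x12, 0x34,
 * 0x56, 0, 0x00, 0x08, 0 } the 10-byte rules yield LBA 0x123456 and
 * a transfer length of 8 blocks.  The helper below simply mirrors
 * scsi_10_lba_len() so the example is self-contained.
 */
#include <assert.h>
#include <stdint.h>

static void sketch_10_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	*lba = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
	       ((uint64_t)cdb[4] << 8) | (uint64_t)cdb[5];
	*len = ((uint32_t)cdb[7] << 8) | (uint32_t)cdb[8];
}

int main(void)
{
	const uint8_t cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0x00, 0x08, 0 };
	uint64_t lba;
	uint32_t len;

	sketch_10_lba_len(cdb, &lba, &len);
	assert(lba == 0x123456 && len == 8);
	return 0;
}
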
1104/**
1105 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
1106 * @qc: Storage for translated ATA taskfile
1107 * @scsicmd: SCSI command to translate
1108 *
1109 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
1110 *
1111 * LOCKING:
1112 * spin_lock_irqsave(host lock)
1113 *
1114 * RETURNS:
1115 * Zero on success, non-zero on error.
1116 */
1117
1118static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1119{
1120 struct ata_taskfile *tf = &qc->tf;
1121 struct ata_device *dev = qc->dev;
1122 u64 dev_sectors = qc->dev->n_sectors;
1123 u64 block;
1124 u32 n_block;
1125
1126 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1127 tf->protocol = ATA_PROT_NODATA;
1128
1129 if (scsicmd[0] == VERIFY)
1130 scsi_10_lba_len(scsicmd, &block, &n_block);
1131 else if (scsicmd[0] == VERIFY_16)
1132 scsi_16_lba_len(scsicmd, &block, &n_block);
1133 else
1134 goto invalid_fld;
1135
1136 if (!n_block)
1137 goto nothing_to_do;
1138 if (block >= dev_sectors)
1139 goto out_of_range;
1140 if ((block + n_block) > dev_sectors)
1141 goto out_of_range;
1142
1143 if (dev->flags & ATA_DFLAG_LBA) {
1144 tf->flags |= ATA_TFLAG_LBA;
1145
1146 if (lba_28_ok(block, n_block)) {
1147 /* use LBA28 */
1148 tf->command = ATA_CMD_VERIFY;
1149 tf->device |= (block >> 24) & 0xf;
1150 } else if (lba_48_ok(block, n_block)) {
1151 if (!(dev->flags & ATA_DFLAG_LBA48))
1152 goto out_of_range;
1153
1154 /* use LBA48 */
1155 tf->flags |= ATA_TFLAG_LBA48;
1156 tf->command = ATA_CMD_VERIFY_EXT;
1157
1158 tf->hob_nsect = (n_block >> 8) & 0xff;
1159
1160 tf->hob_lbah = (block >> 40) & 0xff;
1161 tf->hob_lbam = (block >> 32) & 0xff;
1162 tf->hob_lbal = (block >> 24) & 0xff;
1163 } else
1164 /* request too large even for LBA48 */
1165 goto out_of_range;
1166
1167 tf->nsect = n_block & 0xff;
1168
1169 tf->lbah = (block >> 16) & 0xff;
1170 tf->lbam = (block >> 8) & 0xff;
1171 tf->lbal = block & 0xff;
1172
1173 tf->device |= ATA_LBA;
1174 } else {
1175 /* CHS */
1176 u32 sect, head, cyl, track;
1177
1178 if (!lba_28_ok(block, n_block))
1179 goto out_of_range;
1180
1181 /* Convert LBA to CHS */
1182 track = (u32)block / dev->sectors;
1183 cyl = track / dev->heads;
1184 head = track % dev->heads;
1185 sect = (u32)block % dev->sectors + 1;
1186
1187 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1188 (u32)block, track, cyl, head, sect);
1189
1190 /* Check whether the converted CHS can fit.
1191 Cylinder: 0-65535
1192 Head: 0-15
1193 Sector: 1-255 */
1194 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1195 goto out_of_range;
1196
1197 tf->command = ATA_CMD_VERIFY;
1198 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1199 tf->lbal = sect;
1200 tf->lbam = cyl;
1201 tf->lbah = cyl >> 8;
1202 tf->device |= head;
1203 }
1204
1205 return 0;
1206
1207invalid_fld:
1208 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1209 /* "Invalid field in cdb" */
1210 return 1;
1211
1212out_of_range:
1213 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1214 /* "Logical Block Address out of range" */
1215 return 1;
1216
1217nothing_to_do:
1218 qc->scsicmd->result = SAM_STAT_GOOD;
1219 return 1;
1220}
1221
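/*
 * Editor's note: standalone sketch of the LBA-to-CHS conversion used
 * in the CHS branch above, with one worked example.  For a geometry
 * of 16 heads and 63 sectors/track, LBA 2000 lands on track 31, which
 * maps to cylinder 1, head 15, sector 48 (sectors are 1-based).
 */
#include <assert.h>
#include <stdint.h>

static void lba_to_chs(uint32_t lba, uint32_t heads, uint32_t sectors,
		       uint32_t *cyl, uint32_t *head, uint32_t *sect)
{
	uint32_t track = lba / sectors;

	*cyl  = track / heads;
	*head = track % heads;
	*sect = lba % sectors + 1;
}

int main(void)
{
	uint32_t cyl, head, sect;

	lba_to_chs(2000, 16, 63, &cyl, &head, &sect);
	assert(cyl == 1 && head == 15 && sect == 48);
	return 0;
}
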
1222/**
1223 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1224 * @qc: Storage for translated ATA taskfile
1225 * @scsicmd: SCSI command to translate
1226 *
1227 * Converts any of six SCSI read/write commands into the
1228 * ATA counterpart, including starting sector (LBA),
1229 * sector count, and taking into account the device's LBA48
1230 * support.
1231 *
1232 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
1233 * %WRITE_16 are currently supported.
1234 *
1235 * LOCKING:
1236 * spin_lock_irqsave(host lock)
1237 *
1238 * RETURNS:
1239 * Zero on success, non-zero on error.
1240 */
1241
1242static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1243{
1244 struct ata_taskfile *tf = &qc->tf;
1245 struct ata_device *dev = qc->dev;
1246 u64 block;
1247 u32 n_block;
1248
1249 qc->flags |= ATA_QCFLAG_IO;
1250 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1251
1252 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
1253 scsicmd[0] == WRITE_16)
1254 tf->flags |= ATA_TFLAG_WRITE;
1255
1256 /* Calculate the SCSI LBA, transfer length and FUA. */
1257 switch (scsicmd[0]) {
1258 case READ_10:
1259 case WRITE_10:
1260 scsi_10_lba_len(scsicmd, &block, &n_block);
1261 if (unlikely(scsicmd[1] & (1 << 3)))
1262 tf->flags |= ATA_TFLAG_FUA;
1263 break;
1264 case READ_6:
1265 case WRITE_6:
1266 scsi_6_lba_len(scsicmd, &block, &n_block);
1267
1268 /* for 6-byte r/w commands, transfer length 0
1269 * means 256 blocks of data, not 0 blocks.
1270 */
1271 if (!n_block)
1272 n_block = 256;
1273 break;
1274 case READ_16:
1275 case WRITE_16:
1276 scsi_16_lba_len(scsicmd, &block, &n_block);
1277 if (unlikely(scsicmd[1] & (1 << 3)))
1278 tf->flags |= ATA_TFLAG_FUA;
1279 break;
1280 default:
1281 DPRINTK("no-byte command\n");
1282 goto invalid_fld;
1283 }
1284
1285 /* Check and compose ATA command */
1286 if (!n_block)
1287 /* For 10-byte and 16-byte SCSI R/W commands, transfer
1288 * length 0 means transfer 0 blocks of data.
1289 * However, for ATA R/W commands, sector count 0 means
1290 * 256 or 65536 sectors, not 0 sectors as in SCSI.
1291 *
1292 * WARNING: one or two older ATA drives treat 0 as 0...
1293 */
1294 goto nothing_to_do;
1295
1296 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
1297 /* yay, NCQ */
1298 if (!lba_48_ok(block, n_block))
1299 goto out_of_range;
1300
1301 tf->protocol = ATA_PROT_NCQ;
1302 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1303
1304 if (tf->flags & ATA_TFLAG_WRITE)
1305 tf->command = ATA_CMD_FPDMA_WRITE;
1306 else
1307 tf->command = ATA_CMD_FPDMA_READ;
1308
1309 qc->nsect = n_block;
1310
1311 tf->nsect = qc->tag << 3;
1312 tf->hob_feature = (n_block >> 8) & 0xff;
1313 tf->feature = n_block & 0xff;
1314
1315 tf->hob_lbah = (block >> 40) & 0xff;
1316 tf->hob_lbam = (block >> 32) & 0xff;
1317 tf->hob_lbal = (block >> 24) & 0xff;
1318 tf->lbah = (block >> 16) & 0xff;
1319 tf->lbam = (block >> 8) & 0xff;
1320 tf->lbal = block & 0xff;
1321
1322 tf->device = 1 << 6;
1323 if (tf->flags & ATA_TFLAG_FUA)
1324 tf->device |= 1 << 7;
1325 } else if (dev->flags & ATA_DFLAG_LBA) {
1326 tf->flags |= ATA_TFLAG_LBA;
1327
1328 if (lba_28_ok(block, n_block)) {
1329 /* use LBA28 */
1330 tf->device |= (block >> 24) & 0xf;
1331 } else if (lba_48_ok(block, n_block)) {
1332 if (!(dev->flags & ATA_DFLAG_LBA48))
1333 goto out_of_range;
1334
1335 /* use LBA48 */
1336 tf->flags |= ATA_TFLAG_LBA48;
1337
1338 tf->hob_nsect = (n_block >> 8) & 0xff;
1339
1340 tf->hob_lbah = (block >> 40) & 0xff;
1341 tf->hob_lbam = (block >> 32) & 0xff;
1342 tf->hob_lbal = (block >> 24) & 0xff;
1343 } else
1344 /* request too large even for LBA48 */
1345 goto out_of_range;
1346
1347 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1348 goto invalid_fld;
1349
1350 qc->nsect = n_block;
1351 tf->nsect = n_block & 0xff;
1352
1353 tf->lbah = (block >> 16) & 0xff;
1354 tf->lbam = (block >> 8) & 0xff;
1355 tf->lbal = block & 0xff;
1356
1357 tf->device |= ATA_LBA;
1358 } else {
1359 /* CHS */
1360 u32 sect, head, cyl, track;
1361
1362 /* The request -may- be too large for CHS addressing. */
1363 if (!lba_28_ok(block, n_block))
1364 goto out_of_range;
1365
1366 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1367 goto invalid_fld;
1368
1369 /* Convert LBA to CHS */
1370 track = (u32)block / dev->sectors;
1371 cyl = track / dev->heads;
1372 head = track % dev->heads;
1373 sect = (u32)block % dev->sectors + 1;
1374
1375 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1376 (u32)block, track, cyl, head, sect);
1377
1378 /* Check whether the converted CHS can fit.
1379 Cylinder: 0-65535
1380 Head: 0-15
1381 Sector: 1-255 */
1382 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1383 goto out_of_range;
1384
1385 qc->nsect = n_block;
1386 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1387 tf->lbal = sect;
1388 tf->lbam = cyl;
1389 tf->lbah = cyl >> 8;
1390 tf->device |= head;
1391 }
1392
1393 return 0;
1394
1395invalid_fld:
1396 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1397 /* "Invalid field in cdb" */
1398 return 1;
1399
1400out_of_range:
1401 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
1402 /* "Logical Block Address out of range" */
1403 return 1;
1404
1405nothing_to_do:
1406 qc->scsicmd->result = SAM_STAT_GOOD;
1407 return 1;
1408}
1409
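/*
 * Editor's note: minimal sketch of the FPDMA (NCQ) register packing
 * performed in the NCQ branch above -- the queue tag goes in bits 7:3
 * of the sector count register, the transfer length in the feature
 * registers, and FUA in bit 7 of the device register.  The struct and
 * field names here are illustrative, not libata types.
 */
#include <stdint.h>

struct ncq_regs {
	uint8_t nsect, feature, hob_feature, device;
};

static void pack_ncq(unsigned int tag, uint32_t n_block, int fua,
		     struct ncq_regs *r)
{
	r->nsect       = tag << 3;
	r->feature     = n_block & 0xff;
	r->hob_feature = (n_block >> 8) & 0xff;
	r->device      = (1 << 6) | (fua ? (1 << 7) : 0);
}
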
1410static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1411{
1412 struct scsi_cmnd *cmd = qc->scsicmd;
1413 u8 *cdb = cmd->cmnd;
1414 int need_sense = (qc->err_mask != 0);
1415
1416 /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
1417 * schedule an EH_REVALIDATE operation to update the IDENTIFY DEVICE
1418 * cache.
1419 */
1420 if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
1421 ((qc->tf.feature == SETFEATURES_WC_ON) ||
1422 (qc->tf.feature == SETFEATURES_WC_OFF))) {
1423 qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
1424 ata_port_schedule_eh(qc->ap);
1425 }
1426
1427 /* For ATA pass thru (SAT) commands, generate a sense block if
1428 * the user mandated it or if there's an error. Note that if we
1429 * generate one because the user forced us to, a check condition
1430 * is generated and the ATA register values are returned
1431 * whether the command completed successfully or not. If there
1432 * was no error, SK, ASC and ASCQ will all be zero.
1433 */
1434 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1435 ((cdb[2] & 0x20) || need_sense)) {
1436 ata_gen_ata_desc_sense(qc);
1437 } else {
1438 if (!need_sense) {
1439 cmd->result = SAM_STAT_GOOD;
1440 } else {
1441 /* TODO: decide which descriptor format to use
1442 * for 48b LBA devices and call that here
1443 * instead of the fixed desc, which is only
1444 * good for smaller LBA (and maybe CHS?)
1445 * devices.
1446 */
1447 ata_gen_fixed_sense(qc);
1448 }
1449 }
1450
1451 if (need_sense && !qc->ap->ops->error_handler)
1452 ata_dump_status(qc->ap->id, &qc->result_tf);
1453
1454 qc->scsidone(cmd);
1455
1456 ata_qc_free(qc);
1457}
1458
1459/**
1460 * ata_scmd_need_defer - Check whether we need to defer scmd
1461 * @dev: ATA device to which the command is addressed
1462 * @is_io: Is the command IO (and thus possibly NCQ)?
1463 *
1464 * NCQ and non-NCQ commands cannot run together. As the upper layer
1465 * only knows the queue depth, we are responsible for maintaining
1466 * exclusion. This function checks whether a new command can be
1467 * issued to @dev.
1468 *
1469 * LOCKING:
1470 * spin_lock_irqsave(host lock)
1471 *
1472 * RETURNS:
1473 * 1 if deferring is needed, 0 otherwise.
1474 */
1475static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1476{
1477 struct ata_port *ap = dev->ap;
1478
1479 if (!(dev->flags & ATA_DFLAG_NCQ))
1480 return 0;
1481
1482 if (is_io) {
1483 if (!ata_tag_valid(ap->active_tag))
1484 return 0;
1485 } else {
1486 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1487 return 0;
1488 }
1489 return 1;
1490}
1491
1492/**
1493 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1494 * @dev: ATA device to which the command is addressed
1495 * @cmd: SCSI command to execute
1496 * @done: SCSI command completion function
1497 * @xlat_func: Actor which translates @cmd to an ATA taskfile
1498 *
1499 * Our ->queuecommand() function has decided that the SCSI
1500 * command issued can be directly translated into an ATA
1501 * command, rather than handled internally.
1502 *
1503 * This function sets up an ata_queued_cmd structure for the
1504 * SCSI command, and sends that ata_queued_cmd to the hardware.
1505 *
1506 * The xlat_func argument (actor) returns 0 if it is ready to execute
1507 * the ATA command, else 1 to finish translation. If 1 is returned
1508 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1509 * to be set reflecting an error condition or clean (early)
1510 * termination.
1511 *
1512 * LOCKING:
1513 * spin_lock_irqsave(host lock)
1514 *
1515 * RETURNS:
1516 * 0 on success, SCSI_MLQUEUE_DEVICE_BUSY if the command
1517 * needs to be deferred.
1518 */
1519static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1520 void (*done)(struct scsi_cmnd *),
1521 ata_xlat_func_t xlat_func)
1522{
1523 struct ata_queued_cmd *qc;
1524 u8 *scsicmd = cmd->cmnd;
1525 int is_io = xlat_func == ata_scsi_rw_xlat;
1526
1527 VPRINTK("ENTER\n");
1528
1529 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1530 goto defer;
1531
1532 qc = ata_scsi_qc_new(dev, cmd, done);
1533 if (!qc)
1534 goto err_mem;
1535
1536 /* data is present; dma-map it */
1537 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1538 cmd->sc_data_direction == DMA_TO_DEVICE) {
1539 if (unlikely(cmd->request_bufflen < 1)) {
1540 ata_dev_printk(dev, KERN_WARNING,
1541 "WARNING: zero len r/w req\n");
1542 goto err_did;
1543 }
1544
1545 if (cmd->use_sg)
1546 ata_sg_init(qc, cmd->request_buffer, cmd->use_sg);
1547 else
1548 ata_sg_init_one(qc, cmd->request_buffer,
1549 cmd->request_bufflen);
1550
1551 qc->dma_dir = cmd->sc_data_direction;
1552 }
1553
1554 qc->complete_fn = ata_scsi_qc_complete;
1555
1556 if (xlat_func(qc, scsicmd))
1557 goto early_finish;
1558
1559 /* select device, send command to hardware */
1560 ata_qc_issue(qc);
1561
1562 VPRINTK("EXIT\n");
1563 return 0;
1564
1565early_finish:
1566 ata_qc_free(qc);
1567 done(cmd);
1568 DPRINTK("EXIT - early finish (good or error)\n");
1569 return 0;
1570
1571err_did:
1572 ata_qc_free(qc);
1573err_mem:
1574 cmd->result = (DID_ERROR << 16);
1575 done(cmd);
1576 DPRINTK("EXIT - internal\n");
1577 return 0;
1578
1579defer:
1580 DPRINTK("EXIT - defer\n");
1581 return SCSI_MLQUEUE_DEVICE_BUSY;
1582}
1583
1584/**
1585 * ata_scsi_rbuf_get - Map response buffer.
1586 * @cmd: SCSI command containing buffer to be mapped.
1587 * @buf_out: Pointer to mapped area.
1588 *
1589 * Maps buffer contained within SCSI command @cmd.
1590 *
1591 * LOCKING:
1592 * spin_lock_irqsave(host lock)
1593 *
1594 * RETURNS:
1595 * Length of response buffer.
1596 */
1597
1598static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
1599{
1600 u8 *buf;
1601 unsigned int buflen;
1602
1603 if (cmd->use_sg) {
1604 struct scatterlist *sg;
1605
1606 sg = (struct scatterlist *) cmd->request_buffer;
1607 buf = kmap_atomic(sg->page, KM_USER0) + sg->offset;
1608 buflen = sg->length;
1609 } else {
1610 buf = cmd->request_buffer;
1611 buflen = cmd->request_bufflen;
1612 }
1613
1614 *buf_out = buf;
1615 return buflen;
1616}
1617
1618/**
1619 * ata_scsi_rbuf_put - Unmap response buffer.
1620 * @cmd: SCSI command containing buffer to be unmapped.
1621 * @buf: buffer to unmap
1622 *
1623 * Unmaps response buffer contained within @cmd.
1624 *
1625 * LOCKING:
1626 * spin_lock_irqsave(host lock)
1627 */
1628
1629static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
1630{
1631 if (cmd->use_sg) {
1632 struct scatterlist *sg;
1633
1634 sg = (struct scatterlist *) cmd->request_buffer;
1635 kunmap_atomic(buf - sg->offset, KM_USER0);
1636 }
1637}
1638
1639/**
1640 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
1641 * @args: device IDENTIFY data / SCSI command of interest.
1642 * @actor: Callback hook for desired SCSI command simulator
1643 *
1644 * Takes care of the hard work of simulating a SCSI command...
1645 * Mapping the response buffer, calling the command's handler,
1646 * and handling the handler's return value. This return value
1647 * indicates whether the handler wishes the SCSI command to be
1648 * completed successfully (0), or not (in which case cmd->result
1649 * and sense buffer are assumed to be set).
1650 *
1651 * LOCKING:
1652 * spin_lock_irqsave(host lock)
1653 */
1654
1655void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1656 unsigned int (*actor) (struct ata_scsi_args *args,
1657 u8 *rbuf, unsigned int buflen))
1658{
1659 u8 *rbuf;
1660 unsigned int buflen, rc;
1661 struct scsi_cmnd *cmd = args->cmd;
1662
1663 buflen = ata_scsi_rbuf_get(cmd, &rbuf);
1664 memset(rbuf, 0, buflen);
1665 rc = actor(args, rbuf, buflen);
1666 ata_scsi_rbuf_put(cmd, rbuf);
1667
1668 if (rc == 0)
1669 cmd->result = SAM_STAT_GOOD;
1670 args->done(cmd);
1671}
1672
1673/**
1674 * ata_scsiop_inq_std - Simulate INQUIRY command
1675 * @args: device IDENTIFY data / SCSI command of interest.
1676 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1677 * @buflen: Response buffer length.
1678 *
1679 * Returns standard device identification data associated
1680 * with non-VPD INQUIRY command output.
1681 *
1682 * LOCKING:
1683 * spin_lock_irqsave(host lock)
1684 */
1685
1686unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1687 unsigned int buflen)
1688{
1689 u8 hdr[] = {
1690 TYPE_DISK,
1691 0,
1692 0x5, /* claim SPC-3 version compatibility */
1693 2,
1694 95 - 4
1695 };
1696
1697 /* set scsi removable (RMB) bit per ata bit */
1698 if (ata_id_removeable(args->id))
1699 hdr[1] |= (1 << 7);
1700
1701 VPRINTK("ENTER\n");
1702
1703 memcpy(rbuf, hdr, sizeof(hdr));
1704
1705 if (buflen > 35) {
1706 memcpy(&rbuf[8], "ATA     ", 8);
1707 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1708 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1709 if (rbuf[32] == 0 || rbuf[32] == ' ')
1710 memcpy(&rbuf[32], "n/a ", 4);
1711 }
1712
1713 if (buflen > 63) {
1714 const u8 versions[] = {
1715 0x60, /* SAM-3 (no version claimed) */
1716
1717 0x03,
1718 0x20, /* SBC-2 (no version claimed) */
1719
1720 0x02,
1721 0x60 /* SPC-3 (no version claimed) */
1722 };
1723
1724 memcpy(rbuf + 59, versions, sizeof(versions));
1725 }
1726
1727 return 0;
1728}
1729
1730/**
1731 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1732 * @args: device IDENTIFY data / SCSI command of interest.
1733 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1734 * @buflen: Response buffer length.
1735 *
1736 * Returns list of inquiry VPD pages available.
1737 *
1738 * LOCKING:
1739 * spin_lock_irqsave(host lock)
1740 */
1741
1742unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
1743 unsigned int buflen)
1744{
1745 const u8 pages[] = {
1746 0x00, /* page 0x00, this page */
1747 0x80, /* page 0x80, unit serial no page */
1748 0x83 /* page 0x83, device ident page */
1749 };
1750 rbuf[3] = sizeof(pages); /* number of supported VPD pages */
1751
1752 if (buflen > 6)
1753 memcpy(rbuf + 4, pages, sizeof(pages));
1754
1755 return 0;
1756}
1757
1758/**
1759 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1760 * @args: device IDENTIFY data / SCSI command of interest.
1761 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1762 * @buflen: Response buffer length.
1763 *
1764 * Returns ATA device serial number.
1765 *
1766 * LOCKING:
1767 * spin_lock_irqsave(host lock)
1768 */
1769
1770unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1771 unsigned int buflen)
1772{
1773 const u8 hdr[] = {
1774 0,
1775 0x80, /* this page code */
1776 0,
1777 ATA_SERNO_LEN, /* page len */
1778 };
1779 memcpy(rbuf, hdr, sizeof(hdr));
1780
1781 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1782 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1783 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1784
1785 return 0;
1786}
1787
1788/**
1789 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1790 * @args: device IDENTIFY data / SCSI command of interest.
1791 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1792 * @buflen: Response buffer length.
1793 *
1794 * Yields two logical unit device identification designators:
1795 * - vendor specific ASCII containing the ATA serial number
1796 * - SAT defined "t10 vendor id based" containing ASCII vendor
1797 * name ("ATA "), model and serial numbers.
1798 *
1799 * LOCKING:
1800 * spin_lock_irqsave(host lock)
1801 */
1802
1803unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1804 unsigned int buflen)
1805{
1806 int num;
1807 const int sat_model_serial_desc_len = 68;
1808 const int ata_model_byte_len = 40;
1809
1810 rbuf[1] = 0x83; /* this page code */
1811 num = 4;
1812
1813 if (buflen > (ATA_SERNO_LEN + num + 3)) {
1814 /* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
1815 rbuf[num + 0] = 2;
1816 rbuf[num + 3] = ATA_SERNO_LEN;
1817 num += 4;
1818 ata_id_string(args->id, (unsigned char *) rbuf + num,
1819 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1820 num += ATA_SERNO_LEN;
1821 }
1822 if (buflen > (sat_model_serial_desc_len + num + 3)) {
1823 /* SAT defined lu model and serial numbers descriptor */
1824 /* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
1825 rbuf[num + 0] = 2;
1826 rbuf[num + 1] = 1;
1827 rbuf[num + 3] = sat_model_serial_desc_len;
1828 num += 4;
1829 memcpy(rbuf + num, "ATA     ", 8);
1830 num += 8;
1831 ata_id_string(args->id, (unsigned char *) rbuf + num,
1832 ATA_ID_PROD_OFS, ata_model_byte_len);
1833 num += ata_model_byte_len;
1834 ata_id_string(args->id, (unsigned char *) rbuf + num,
1835 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1836 num += ATA_SERNO_LEN;
1837 }
1838 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
1839 return 0;
1840}
1841
1842/**
1843 * ata_scsiop_noop - Command handler that simply returns success.
1844 * @args: device IDENTIFY data / SCSI command of interest.
1845 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1846 * @buflen: Response buffer length.
1847 *
1848 * No operation. Simply returns success to caller, to indicate
1849 * that the caller should successfully complete this SCSI command.
1850 *
1851 * LOCKING:
1852 * spin_lock_irqsave(host lock)
1853 */
1854
1855unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
1856 unsigned int buflen)
1857{
1858 VPRINTK("ENTER\n");
1859 return 0;
1860}
1861
1862/**
1863 * ata_msense_push - Push data onto MODE SENSE data output buffer
1864 * @ptr_io: (input/output) Location to store more output data
1865 * @last: End of output data buffer
1866 * @buf: Pointer to BLOB being added to output buffer
1867 * @buflen: Length of BLOB
1868 *
1869 * Store MODE SENSE data on an output buffer.
1870 *
1871 * LOCKING:
1872 * None.
1873 */
1874
1875static void ata_msense_push(u8 **ptr_io, const u8 *last,
1876 const u8 *buf, unsigned int buflen)
1877{
1878 u8 *ptr = *ptr_io;
1879
1880 if ((ptr + buflen - 1) > last)
1881 return;
1882
1883 memcpy(ptr, buf, buflen);
1884
1885 ptr += buflen;
1886
1887 *ptr_io = ptr;
1888}
1889
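/*
 * Editor's note: usage sketch for the bounded-append pattern above.
 * A blob that would overrun 'last' is dropped silently, while callers
 * (see ata_scsiop_mode_sense() below) still add the full page length
 * to output_len -- so the reported mode data length describes what
 * would have fit.  Standalone mock for illustration:
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void push(uint8_t **p, const uint8_t *last, const uint8_t *b,
		 unsigned int n)
{
	if (*p + n - 1 > last)			/* would overrun: drop silently */
		return;
	memcpy(*p, b, n);
	*p += n;
}

int main(void)
{
	uint8_t buf[4], *p = buf;
	const uint8_t *last = buf + sizeof(buf) - 1;
	const uint8_t blob[3] = { 1, 2, 3 };

	push(&p, last, blob, sizeof(blob));	/* fits: copied */
	push(&p, last, blob, sizeof(blob));	/* needs 3, 1 left: dropped */
	assert(p == buf + 3);
	return 0;
}
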
1890/**
1891 * ata_msense_caching - Simulate MODE SENSE caching info page
1892 * @id: device IDENTIFY data
1893 * @ptr_io: (input/output) Location to store more output data
1894 * @last: End of output data buffer
1895 *
1896 * Generate a caching info page, which conditionally indicates
1897 * write caching to the SCSI layer, depending on device
1898 * capabilities.
1899 *
1900 * LOCKING:
1901 * None.
1902 */
1903
1904static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
1905 const u8 *last)
1906{
1907 u8 page[CACHE_MPAGE_LEN];
1908
1909 memcpy(page, def_cache_mpage, sizeof(page));
1910 if (ata_id_wcache_enabled(id))
1911 page[2] |= (1 << 2); /* write cache enable */
1912 if (!ata_id_rahead_enabled(id))
1913 page[12] |= (1 << 5); /* disable read ahead */
1914
1915 ata_msense_push(ptr_io, last, page, sizeof(page));
1916 return sizeof(page);
1917}
1918
1919/**
1920 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
1922 * @ptr_io: (input/output) Location to store more output data
1923 * @last: End of output data buffer
1924 *
1925 * Generate a generic MODE SENSE control mode page.
1926 *
1927 * LOCKING:
1928 * None.
1929 */
1930
1931static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
1932{
1933 ata_msense_push(ptr_io, last, def_control_mpage,
1934 sizeof(def_control_mpage));
1935 return sizeof(def_control_mpage);
1936}
1937
1938/**
1939 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
1941 * @ptr_io: (input/output) Location to store more output data
1942 * @last: End of output data buffer
1943 *
1944 * Generate a generic MODE SENSE r/w error recovery page.
1945 *
1946 * LOCKING:
1947 * None.
1948 */
1949
1950static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1951{
1952
1953 ata_msense_push(ptr_io, last, def_rw_recovery_mpage,
1954 sizeof(def_rw_recovery_mpage));
1955 return sizeof(def_rw_recovery_mpage);
1956}
1957
1958/*
1959 * We can turn this into a real blacklist if it's needed; for now, just
1960 * blacklist any Maxtor BANC1G10 revision firmware.
1961 */
1962static int ata_dev_supports_fua(u16 *id)
1963{
1964 unsigned char model[41], fw[9];
1965
1966 if (!libata_fua)
1967 return 0;
1968 if (!ata_id_has_fua(id))
1969 return 0;
1970
1971 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1972 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1973
1974 if (strcmp(model, "Maxtor"))
1975 return 1;
1976 if (strcmp(fw, "BANC1G10"))
1977 return 1;
1978
1979 return 0; /* blacklisted */
1980}
1981
1982/**
1983 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
1984 * @args: device IDENTIFY data / SCSI command of interest.
1985 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1986 * @buflen: Response buffer length.
1987 *
1988 * Simulate MODE SENSE commands. Assume this is invoked for direct
1989 * access devices (e.g. disks) only. There should be no block
1990 * descriptor for other device types.
1991 *
1992 * LOCKING:
1993 * spin_lock_irqsave(host lock)
1994 */
1995
1996unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1997 unsigned int buflen)
1998{
1999 struct ata_device *dev = args->dev;
2000 u8 *scsicmd = args->cmd->cmnd, *p, *last;
2001 const u8 sat_blk_desc[] = {
2002 0, 0, 0, 0, /* number of blocks: sat unspecified */
2003 0,
2004 0, 0x2, 0x0 /* block length: 512 bytes */
2005 };
2006 u8 pg, spg;
2007 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
2008 u8 dpofua;
2009
2010 VPRINTK("ENTER\n");
2011
2012 six_byte = (scsicmd[0] == MODE_SENSE);
2013 ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == ebd */
2014 /*
2015 * LLBA bit in msense(10) ignored (compliant)
2016 */
2017
2018 page_control = scsicmd[2] >> 6;
2019 switch (page_control) {
2020 case 0: /* current */
2021 break; /* supported */
2022 case 3: /* saved */
2023 goto saving_not_supp;
2024 case 1: /* changeable */
2025 case 2: /* defaults */
2026 default:
2027 goto invalid_fld;
2028 }
2029
2030 if (six_byte) {
2031 output_len = 4 + (ebd ? 8 : 0);
2032 alloc_len = scsicmd[4];
2033 } else {
2034 output_len = 8 + (ebd ? 8 : 0);
2035 alloc_len = (scsicmd[7] << 8) + scsicmd[8];
2036 }
2037 minlen = (alloc_len < buflen) ? alloc_len : buflen;
2038
2039 p = rbuf + output_len;
2040 last = rbuf + minlen - 1;
2041
2042 pg = scsicmd[2] & 0x3f;
2043 spg = scsicmd[3];
2044 /*
2045 * No mode subpages supported (yet) but asking for _all_
2046 * subpages may be valid
2047 */
2048 if (spg && (spg != ALL_SUB_MPAGES))
2049 goto invalid_fld;
2050
2051 switch(pg) {
2052 case RW_RECOVERY_MPAGE:
2053 output_len += ata_msense_rw_recovery(&p, last);
2054 break;
2055
2056 case CACHE_MPAGE:
2057 output_len += ata_msense_caching(args->id, &p, last);
2058 break;
2059
2060 case CONTROL_MPAGE: {
2061 output_len += ata_msense_ctl_mode(&p, last);
2062 break;
2063 }
2064
2065 case ALL_MPAGES:
2066 output_len += ata_msense_rw_recovery(&p, last);
2067 output_len += ata_msense_caching(args->id, &p, last);
2068 output_len += ata_msense_ctl_mode(&p, last);
2069 break;
2070
2071 default: /* invalid page code */
2072 goto invalid_fld;
2073 }
2074
2075 if (minlen < 1)
2076 return 0;
2077
2078 dpofua = 0;
2079 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
2080 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
2081 dpofua = 1 << 4;
2082
2083 if (six_byte) {
2084 output_len--;
2085 rbuf[0] = output_len;
2086 if (minlen > 2)
2087 rbuf[2] |= dpofua;
2088 if (ebd) {
2089 if (minlen > 3)
2090 rbuf[3] = sizeof(sat_blk_desc);
2091 if (minlen > 11)
2092 memcpy(rbuf + 4, sat_blk_desc,
2093 sizeof(sat_blk_desc));
2094 }
2095 } else {
2096 output_len -= 2;
2097 rbuf[0] = output_len >> 8;
2098 if (minlen > 1)
2099 rbuf[1] = output_len;
2100 if (minlen > 3)
2101 rbuf[3] |= dpofua;
2102 if (ebd) {
2103 if (minlen > 7)
2104 rbuf[7] = sizeof(sat_blk_desc);
2105 if (minlen > 15)
2106 memcpy(rbuf + 8, sat_blk_desc,
2107 sizeof(sat_blk_desc));
2108 }
2109 }
2110 return 0;
2111
2112invalid_fld:
2113 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
2114 /* "Invalid field in cdb" */
2115 return 1;
2116
2117saving_not_supp:
2118 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
2119 /* "Saving parameters not supported" */
2120 return 1;
2121}
2122
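/*
 * Editor's note: sketch of the two MODE SENSE header layouts filled in
 * at the end of the function above.  The mode data length field does
 * not count itself, hence the -1/-2 adjustments; DPOFUA sits in byte 2
 * of the 6-byte header and byte 3 of the 10-byte header.  Helper names
 * are illustrative.
 */
#include <stdint.h>

static void msense6_hdr(uint8_t *rbuf, unsigned int total_len,
			uint8_t dpofua, uint8_t blk_desc_len)
{
	rbuf[0] = total_len - 1;		/* mode data length */
	rbuf[2] |= dpofua;
	rbuf[3] = blk_desc_len;
}

static void msense10_hdr(uint8_t *rbuf, unsigned int total_len,
			 uint8_t dpofua, uint8_t blk_desc_len)
{
	rbuf[0] = (total_len - 2) >> 8;		/* big-endian length */
	rbuf[1] = (total_len - 2) & 0xff;
	rbuf[3] |= dpofua;
	rbuf[7] = blk_desc_len;
}
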
2123/**
2124 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2125 * @args: device IDENTIFY data / SCSI command of interest.
2126 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2127 * @buflen: Response buffer length.
2128 *
2129 * Simulate READ CAPACITY commands.
2130 *
2131 * LOCKING:
2132 * spin_lock_irqsave(host lock)
2133 */
2134
2135unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2136 unsigned int buflen)
2137{
2138 u64 n_sectors;
2139 u32 tmp;
2140
2141 VPRINTK("ENTER\n");
2142
2143 if (ata_id_has_lba(args->id)) {
2144 if (ata_id_has_lba48(args->id))
2145 n_sectors = ata_id_u64(args->id, 100);
2146 else
2147 n_sectors = ata_id_u32(args->id, 60);
2148 } else {
2149 /* CHS default translation */
2150 n_sectors = args->id[1] * args->id[3] * args->id[6];
2151
2152 if (ata_id_current_chs_valid(args->id))
2153 /* CHS current translation */
2154 n_sectors = ata_id_u32(args->id, 57);
2155 }
2156
2157 n_sectors--; /* ATA TotalUserSectors - 1 */
2158
2159 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2160 if (n_sectors >= 0xffffffffULL)
2161 tmp = 0xffffffff; /* Return max count on overflow */
2162 else
2163 tmp = n_sectors;
2164
2165 /* sector count, 32-bit */
2166 rbuf[0] = tmp >> (8 * 3);
2167 rbuf[1] = tmp >> (8 * 2);
2168 rbuf[2] = tmp >> (8 * 1);
2169 rbuf[3] = tmp;
2170
2171 /* sector size */
2172 tmp = ATA_SECT_SIZE;
2173 rbuf[6] = tmp >> 8;
2174 rbuf[7] = tmp;
2175
2176 } else {
2177 /* sector count, 64-bit */
2178 tmp = n_sectors >> (8 * 4);
2179 rbuf[2] = tmp >> (8 * 3);
2180 rbuf[3] = tmp >> (8 * 2);
2181 rbuf[4] = tmp >> (8 * 1);
2182 rbuf[5] = tmp;
2183 tmp = n_sectors;
2184 rbuf[6] = tmp >> (8 * 3);
2185 rbuf[7] = tmp >> (8 * 2);
2186 rbuf[8] = tmp >> (8 * 1);
2187 rbuf[9] = tmp;
2188
2189 /* sector size */
2190 tmp = ATA_SECT_SIZE;
2191 rbuf[12] = tmp >> 8;
2192 rbuf[13] = tmp;
2193 }
2194
2195 return 0;
2196}
2197
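/*
 * Editor's note: standalone sketch of the READ CAPACITY(10) response
 * encoding above -- a big-endian "last LBA" (clamped to 0xffffffff for
 * devices of 2 TiB and up at 512-byte sectors) followed by a
 * big-endian block length.  Assumes rbuf was zeroed by the caller.
 */
#include <stdint.h>

static void read_cap10(uint64_t n_sectors, uint8_t *rbuf)
{
	uint64_t last = n_sectors - 1;		/* ATA TotalUserSectors - 1 */
	uint32_t tmp = (last >= 0xffffffffULL) ? 0xffffffff : (uint32_t)last;

	rbuf[0] = tmp >> 24;			/* returned LBA, MSB first */
	rbuf[1] = tmp >> 16;
	rbuf[2] = tmp >> 8;
	rbuf[3] = tmp;
	rbuf[6] = 0x02;				/* block length 512 (0x200) */
	rbuf[7] = 0x00;
}
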
2198/**
2199 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2200 * @args: device IDENTIFY data / SCSI command of interest.
2201 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2202 * @buflen: Response buffer length.
2203 *
2204 * Simulate REPORT LUNS command.
2205 *
2206 * LOCKING:
2207 * spin_lock_irqsave(host lock)
2208 */
2209
2210unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
2211 unsigned int buflen)
2212{
2213 VPRINTK("ENTER\n");
2214 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
2215
2216 return 0;
2217}
2218
2219/**
2220 * ata_scsi_set_sense - Set SCSI sense data and status
2221 * @cmd: SCSI request to be handled
2222 * @sk: SCSI-defined sense key
2223 * @asc: SCSI-defined additional sense code
2224 * @ascq: SCSI-defined additional sense code qualifier
2225 *
2226 * Helper function that builds a valid fixed-format sense block with
2227 * a current response code and the given sense key (sk), additional
2228 * sense code (asc) and additional sense code qualifier (ascq), with
2229 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
2230 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result.
2231 *
2232 * LOCKING:
2233 * Not required
2234 */
2235
2236void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
2237{
2238 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
2239
2240 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
2241 cmd->sense_buffer[2] = sk;
2242 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
2243 cmd->sense_buffer[12] = asc;
2244 cmd->sense_buffer[13] = ascq;
2245}
2246
2247/**
2248 * ata_scsi_badcmd - End a SCSI request with an error
2249 * @cmd: SCSI request to be handled
2250 * @done: SCSI command completion function
2251 * @asc: SCSI-defined additional sense code
2252 * @ascq: SCSI-defined additional sense code qualifier
2253 *
2254 * Helper function that completes a SCSI command with
2255 * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST
2256 * and the specified additional sense codes.
2257 *
2258 * LOCKING:
2259 * spin_lock_irqsave(host lock)
2260 */
2261
2262void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
2263{
2264 DPRINTK("ENTER\n");
2265 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
2266
2267 done(cmd);
2268}
2269
2270static void atapi_sense_complete(struct ata_queued_cmd *qc)
2271{
2272 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2273 /* FIXME: not quite right; we don't want the
2274 * translation of taskfile registers into
2275 * sense descriptors, since that's only
2276 * correct for ATA, not ATAPI
2277 */
2278 ata_gen_ata_desc_sense(qc);
2279 }
2280
2281 qc->scsidone(qc->scsicmd);
2282 ata_qc_free(qc);
2283}
2284
2285/* is it pointless to prefer PIO for "safety reasons"? */
2286static inline int ata_pio_use_silly(struct ata_port *ap)
2287{
2288 return (ap->flags & ATA_FLAG_PIO_DMA);
2289}
2290
2291static void atapi_request_sense(struct ata_queued_cmd *qc)
2292{
2293 struct ata_port *ap = qc->ap;
2294 struct scsi_cmnd *cmd = qc->scsicmd;
2295
2296 DPRINTK("ATAPI request sense\n");
2297
2298 /* FIXME: is this needed? */
2299 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2300
2301 ap->ops->tf_read(ap, &qc->tf);
2302
2303 /* fill these in, for the case where they are -not- overwritten */
2304 cmd->sense_buffer[0] = 0x70;
2305 cmd->sense_buffer[2] = qc->tf.feature >> 4;
2306
2307 ata_qc_reinit(qc);
2308
2309 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2310 qc->dma_dir = DMA_FROM_DEVICE;
2311
2312 memset(&qc->cdb, 0, qc->dev->cdb_len);
2313 qc->cdb[0] = REQUEST_SENSE;
2314 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2315
2316 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2317 qc->tf.command = ATA_CMD_PACKET;
2318
2319 if (ata_pio_use_silly(ap)) {
2320 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2321 qc->tf.feature |= ATAPI_PKT_DMA;
2322 } else {
2323 qc->tf.protocol = ATA_PROT_ATAPI;
2324 qc->tf.lbam = (8 * 1024) & 0xff;
2325 qc->tf.lbah = (8 * 1024) >> 8;
2326 }
2327 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2328
2329 qc->complete_fn = atapi_sense_complete;
2330
2331 ata_qc_issue(qc);
2332
2333 DPRINTK("EXIT\n");
2334}
2335
2336static void atapi_qc_complete(struct ata_queued_cmd *qc)
2337{
2338 struct scsi_cmnd *cmd = qc->scsicmd;
2339 unsigned int err_mask = qc->err_mask;
2340
2341 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2342
2343 /* handle completion from new EH */
2344 if (unlikely(qc->ap->ops->error_handler &&
2345 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2346
2347 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2348 /* FIXME: not quite right; we don't want the
2349 * translation of taskfile registers into
2350 * sense descriptors, since that's only
2351 * correct for ATA, not ATAPI
2352 */
2353 ata_gen_ata_desc_sense(qc);
2354 }
2355
2356 /* SCSI EH automatically locks door if sdev->locked is
2357 * set. Sometimes door lock request continues to
2358 * fail, for example, when no media is present. This
2359 * creates a loop - SCSI EH issues door lock which
2360 * fails and gets invoked again to acquire sense data
2361 * for the failed command.
2362 *
2363 * If door lock fails, always clear sdev->locked to
2364 * avoid this infinite loop.
2365 */
2366 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
2367 qc->dev->sdev->locked = 0;
2368
2369 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2370 qc->scsidone(cmd);
2371 ata_qc_free(qc);
2372 return;
2373 }
2374
2375 /* successful completion or old EH failure path */
2376 if (unlikely(err_mask & AC_ERR_DEV)) {
2377 cmd->result = SAM_STAT_CHECK_CONDITION;
2378 atapi_request_sense(qc);
2379 return;
2380 } else if (unlikely(err_mask)) {
2381 /* FIXME: not quite right; we don't want the
2382 * translation of taskfile registers into
2383 * sense descriptors, since that's only
2384 * correct for ATA, not ATAPI
2385 */
2386 ata_gen_ata_desc_sense(qc);
2387 } else {
2388 u8 *scsicmd = cmd->cmnd;
2389
2390 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
2391 u8 *buf = NULL;
2392 unsigned int buflen;
2393
2394 buflen = ata_scsi_rbuf_get(cmd, &buf);
2395
2396 /* ATAPI devices typically report zero for their SCSI version,
2397 * and sometimes deviate from the spec WRT response data
2398 * format. If the SCSI version is reported as zero, as is typical,
2399 * then we make the following fixups: 1) Fake MMC-5 version,
2400 * to indicate to the Linux scsi midlayer this is a modern
2401 * device. 2) Ensure response data format / ATAPI information
2402 * are always correct.
2403 */
2404 if (buf[2] == 0) {
2405 buf[2] = 0x5;
2406 buf[3] = 0x32;
2407 }
2408
2409 ata_scsi_rbuf_put(cmd, buf);
2410 }
2411
2412 cmd->result = SAM_STAT_GOOD;
2413 }
2414
2415 qc->scsidone(cmd);
2416 ata_qc_free(qc);
2417}
2418/**
2419 * atapi_xlat - Initialize PACKET taskfile
2420 * @qc: command structure to be initialized
2421 * @scsicmd: SCSI CDB associated with this PACKET command
2422 *
2423 * LOCKING:
2424 * spin_lock_irqsave(host lock)
2425 *
2426 * RETURNS:
2427 * Zero on success, non-zero on failure.
2428 */
2429
2430static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2431{
2432 struct scsi_cmnd *cmd = qc->scsicmd;
2433 struct ata_device *dev = qc->dev;
2434 int using_pio = (dev->flags & ATA_DFLAG_PIO);
2435 int nodata = (cmd->sc_data_direction == DMA_NONE);
2436
2437 if (!using_pio)
2438 /* Check whether ATAPI DMA is safe */
2439 if (ata_check_atapi_dma(qc))
2440 using_pio = 1;
2441
2442 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2443
2444 qc->complete_fn = atapi_qc_complete;
2445
2446 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2447 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2448 qc->tf.flags |= ATA_TFLAG_WRITE;
2449 DPRINTK("direction: write\n");
2450 }
2451
2452 qc->tf.command = ATA_CMD_PACKET;
2453
2454 /* no data, or PIO data xfer */
2455 if (using_pio || nodata) {
2456 if (nodata)
2457 qc->tf.protocol = ATA_PROT_ATAPI_NODATA;
2458 else
2459 qc->tf.protocol = ATA_PROT_ATAPI;
2460 qc->tf.lbam = (8 * 1024) & 0xff;
2461 qc->tf.lbah = (8 * 1024) >> 8;
2462 }
2463
2464 /* DMA data xfer */
2465 else {
2466 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2467 qc->tf.feature |= ATAPI_PKT_DMA;
2468
2469 if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE))
2470 /* some SATA bridges need us to indicate data xfer direction */
2471 qc->tf.feature |= ATAPI_DMADIR;
2472 }
2473
2474 qc->nbytes = cmd->request_bufflen;
2475
2476 return 0;
2477}
2478
2479static struct ata_device * ata_find_dev(struct ata_port *ap, int id)
2480{
2481 if (likely(id < ATA_MAX_DEVICES))
2482 return &ap->device[id];
2483 return NULL;
2484}
2485
2486static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
2487 const struct scsi_device *scsidev)
2488{
2489 /* skip commands not addressed to targets we simulate */
2490 if (unlikely(scsidev->channel || scsidev->lun))
2491 return NULL;
2492
2493 return ata_find_dev(ap, scsidev->id);
2494}
2495
2496/**
2497 * ata_scsi_dev_enabled - determine if device is enabled
2498 * @dev: ATA device
2499 *
2500 * Determine if commands should be sent to the specified device.
2501 *
2502 * LOCKING:
2503 * spin_lock_irqsave(host lock)
2504 *
2505 * RETURNS:
2506 * 0 if commands are not allowed / 1 if commands are allowed
2507 */
2508
2509static int ata_scsi_dev_enabled(struct ata_device *dev)
2510{
2511 if (unlikely(!ata_dev_enabled(dev)))
2512 return 0;
2513
2514 if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) {
2515 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2516 ata_dev_printk(dev, KERN_WARNING,
2517 "WARNING: ATAPI is %s, device ignored.\n",
2518 atapi_enabled ? "not supported with this driver" : "disabled");
2519 return 0;
2520 }
2521 }
2522
2523 return 1;
2524}
2525
2526/**
2527 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2528 * @ap: ATA port to which the device is attached
2529 * @scsidev: SCSI device from which we derive the ATA device
2530 *
2531 * Given various information provided in struct scsi_cmnd,
2532 * map that onto an ATA bus, and using that mapping
2533 * determine which ata_device is associated with the
2534 * SCSI command to be sent.
2535 *
2536 * LOCKING:
2537 * spin_lock_irqsave(host lock)
2538 *
2539 * RETURNS:
2540 * Associated ATA device, or %NULL if not found.
2541 */
2542static struct ata_device *
2543ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2544{
2545 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2546
2547 if (unlikely(!dev || !ata_scsi_dev_enabled(dev)))
2548 return NULL;
2549
2550 return dev;
2551}
2552
2553/**
2554 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2555 * @byte1: Byte 1 from pass-thru CDB.
2556 *
2557 * RETURNS:
2558 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
2559 */
2560static u8
2561ata_scsi_map_proto(u8 byte1)
2562{
2563 switch((byte1 & 0x1e) >> 1) {
2564 case 3: /* Non-data */
2565 return ATA_PROT_NODATA;
2566
2567 case 6: /* DMA */
2568 return ATA_PROT_DMA;
2569
2570 case 4: /* PIO Data-in */
2571 case 5: /* PIO Data-out */
2572 return ATA_PROT_PIO;
2573
2574 case 10: /* Device Reset */
2575 case 0: /* Hard Reset */
2576 case 1: /* SRST */
2577 case 2: /* Bus Idle */
2578 case 7: /* Packet */
2579 case 8: /* DMA Queued */
2580 case 9: /* Device Diagnostic */
2581 case 11: /* UDMA Data-in */
2582 case 12: /* UDMA Data-Out */
2583 case 13: /* FPDMA */
2584 default: /* Reserved */
2585 break;
2586 }
2587
2588 return ATA_PROT_UNKNOWN;
2589}
2590
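/*
 * Editor's note: quick self-check of the protocol-field extraction
 * above.  Bits 4:1 of CDB byte 1 carry the pass-thru protocol value:
 * 3 = non-data, 4/5 = PIO in/out, 6 = DMA; everything else maps to
 * ATA_PROT_UNKNOWN in this version of the code.
 */
#include <assert.h>
#include <stdint.h>

static unsigned int proto_field(uint8_t byte1)
{
	return (byte1 & 0x1e) >> 1;
}

int main(void)
{
	assert(proto_field(0x06) == 3);		/* non-data */
	assert(proto_field(0x08) == 4);		/* PIO data-in */
	assert(proto_field(0x0c) == 6);		/* DMA */
	return 0;
}
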
2591/**
2592 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
2593 * @qc: command structure to be initialized
2594 * @scsicmd: SCSI command to convert
2595 *
2596 * Handles either 12 or 16-byte versions of the CDB.
2597 *
2598 * RETURNS:
2599 * Zero on success, non-zero on failure.
2600 */
2601static unsigned int
2602ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2603{
2604 struct ata_taskfile *tf = &(qc->tf);
2605 struct scsi_cmnd *cmd = qc->scsicmd;
2606 struct ata_device *dev = qc->dev;
2607
2608 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
2609 goto invalid_fld;
2610
2611 /* We may not issue DMA commands if no DMA mode is set */
2612 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2613 goto invalid_fld;
2614
2615 if (scsicmd[1] & 0xe0)
2616 /* PIO multi not supported yet */
2617 goto invalid_fld;
2618
2619 /*
2620 * 12 and 16 byte CDBs use different offsets to
2621 * provide the various register values.
2622 */
2623 if (scsicmd[0] == ATA_16) {
2624 /*
2625 * 16-byte CDB - may contain extended commands.
2626 *
2627 * If that is the case, copy the upper byte register values.
2628 */
2629 if (scsicmd[1] & 0x01) {
2630 tf->hob_feature = scsicmd[3];
2631 tf->hob_nsect = scsicmd[5];
2632 tf->hob_lbal = scsicmd[7];
2633 tf->hob_lbam = scsicmd[9];
2634 tf->hob_lbah = scsicmd[11];
2635 tf->flags |= ATA_TFLAG_LBA48;
2636 } else
2637 tf->flags &= ~ATA_TFLAG_LBA48;
2638
2639 /*
2640 * Always copy low byte, device and command registers.
2641 */
2642 tf->feature = scsicmd[4];
2643 tf->nsect = scsicmd[6];
2644 tf->lbal = scsicmd[8];
2645 tf->lbam = scsicmd[10];
2646 tf->lbah = scsicmd[12];
2647 tf->device = scsicmd[13];
2648 tf->command = scsicmd[14];
2649 } else {
2650 /*
2651 * 12-byte CDB - incapable of extended commands.
2652 */
2653 tf->flags &= ~ATA_TFLAG_LBA48;
2654
2655 tf->feature = scsicmd[3];
2656 tf->nsect = scsicmd[4];
2657 tf->lbal = scsicmd[5];
2658 tf->lbam = scsicmd[6];
2659 tf->lbah = scsicmd[7];
2660 tf->device = scsicmd[8];
2661 tf->command = scsicmd[9];
2662 }
2663 /*
2664 * If slave is possible, enforce correct master/slave bit
2665 */
2666 if (qc->ap->flags & ATA_FLAG_SLAVE_POSS)
2667 tf->device = qc->dev->devno ?
2668 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
2669
2670 /*
2671 * Filter SET_FEATURES - XFER MODE command -- otherwise,
2672 * SET_FEATURES - XFER MODE must be preceded/succeeded
2673 * by an update to hardware-specific registers for each
2674 * controller (i.e. the reason for ->set_piomode(),
2675 * ->set_dmamode(), and ->post_set_mode() hooks).
2676 */
2677 if ((tf->command == ATA_CMD_SET_FEATURES)
2678 && (tf->feature == SETFEATURES_XFER))
2679 goto invalid_fld;
2680
2681 /*
2682 * Set flags so that all registers will be written,
2683 * and pass on write indication (used for PIO/DMA
2684 * setup.)
2685 */
2686 tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);
2687
2688 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2689 tf->flags |= ATA_TFLAG_WRITE;
2690
2691 /*
2692 * Set transfer length.
2693 *
2694 * TODO: find out if we need to do more here to
2695 * cover scatter/gather case.
2696 */
2697 qc->nsect = cmd->request_bufflen / ATA_SECT_SIZE;
2698
2699 /* request result TF */
2700 qc->flags |= ATA_QCFLAG_RESULT_TF;
2701
2702 return 0;
2703
2704 invalid_fld:
2705 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x00);
2706 /* "Invalid field in cdb" */
2707 return 1;
2708}
2709
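/*
 * Editor's note: illustrative ATA_16 CDB layout matching the parsing
 * above.  Odd bytes 3..11 carry the HOB (extended) register values
 * when the extend bit (byte 1, bit 0) is set; even bytes 4..12 plus
 * bytes 13 and 14 carry the low registers, device and command.  The
 * example builds a non-data CHECK POWER MODE (0xe5) pass-thru; the
 * helper name is hypothetical.
 */
#include <stdint.h>
#include <string.h>

static void build_ata16_nodata(uint8_t *cdb, uint8_t ata_cmd)
{
	memset(cdb, 0, 16);
	cdb[0]  = 0x85;			/* ATA PASS-THROUGH (16) opcode */
	cdb[1]  = 3 << 1;		/* protocol: non-data, no extend */
	cdb[14] = ata_cmd;		/* ATA command register */
}
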
2710/**
2711 * ata_get_xlat_func - check if SCSI to ATA translation is possible
2712 * @dev: ATA device
2713 * @cmd: SCSI command opcode to consider
2714 *
2715 * Look up the SCSI command given, and determine whether the
2716 * SCSI command is to be translated or simulated.
2717 *
2718 * RETURNS:
2719 * Pointer to translation function if possible, %NULL if not.
2720 */
2721
2722static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
2723{
2724 switch (cmd) {
2725 case READ_6:
2726 case READ_10:
2727 case READ_16:
2728
2729 case WRITE_6:
2730 case WRITE_10:
2731 case WRITE_16:
2732 return ata_scsi_rw_xlat;
2733
2734 case SYNCHRONIZE_CACHE:
2735 if (ata_try_flush_cache(dev))
2736 return ata_scsi_flush_xlat;
2737 break;
2738
2739 case VERIFY:
2740 case VERIFY_16:
2741 return ata_scsi_verify_xlat;
2742
2743 case ATA_12:
2744 case ATA_16:
2745 return ata_scsi_pass_thru;
2746
2747 case START_STOP:
2748 return ata_scsi_start_stop_xlat;
2749 }
2750
2751 return NULL;
2752}
2753
2754/**
2755 * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
2756 * @ap: ATA port to which the command was being sent
2757 * @cmd: SCSI command to dump
2758 *
2759 * Prints the contents of a SCSI command via printk().
2760 */
2761
2762static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2763 struct scsi_cmnd *cmd)
2764{
2765#ifdef ATA_DEBUG
2766 struct scsi_device *scsidev = cmd->device;
2767 u8 *scsicmd = cmd->cmnd;
2768
2769 DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
2770 ap->id,
2771 scsidev->channel, scsidev->id, scsidev->lun,
2772 scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
2773 scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
2774 scsicmd[8]);
2775#endif
2776}
2777
2778static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2779 void (*done)(struct scsi_cmnd *),
2780 struct ata_device *dev)
2781{
2782 int rc = 0;
2783
2784 if (dev->class == ATA_DEV_ATA) {
2785 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2786 cmd->cmnd[0]);
2787
2788 if (xlat_func)
2789 rc = ata_scsi_translate(dev, cmd, done, xlat_func);
2790 else
2791 ata_scsi_simulate(dev, cmd, done);
2792 } else
2793 rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
2794
2795 return rc;
2796}
2797
2798/**
2799 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
2800 * @cmd: SCSI command to be sent
2801 * @done: Completion function, called when command is complete
2802 *
2803 * In some cases, this function translates SCSI commands into
2804 * ATA taskfiles, and queues the taskfiles to be sent to
2805 * hardware. In other cases, this function simulates a
2806 * SCSI device by evaluating and responding to certain
2807 * SCSI commands. This creates the overall effect of
2808 * ATA and ATAPI devices appearing as SCSI devices.
2809 *
2810 * LOCKING:
2811 * Releases scsi-layer-held lock, and obtains host lock.
2812 *
2813 * RETURNS:
2814 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
2815 * 0 otherwise.
2816 */
2817int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2818{
2819 struct ata_port *ap;
2820 struct ata_device *dev;
2821 struct scsi_device *scsidev = cmd->device;
2822 struct Scsi_Host *shost = scsidev->host;
2823 int rc = 0;
2824
2825 ap = ata_shost_to_port(shost);
2826
2827 spin_unlock(shost->host_lock);
2828 spin_lock(ap->lock);
2829
2830 ata_scsi_dump_cdb(ap, cmd);
2831
2832 dev = ata_scsi_find_dev(ap, scsidev);
2833 if (likely(dev))
2834 rc = __ata_scsi_queuecmd(cmd, done, dev);
2835 else {
2836 cmd->result = (DID_BAD_TARGET << 16);
2837 done(cmd);
2838 }
2839
2840 spin_unlock(ap->lock);
2841 spin_lock(shost->host_lock);
2842 return rc;
2843}
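
/* Editor's sketch, not part of this patch: a hypothetical LLDD routes
 * SCSI commands to the function above simply by naming it in its
 * scsi_host_template; foo_sht and its values are illustrative (the
 * adma_ata_sht later in this series is a real instance).
 */
static struct scsi_host_template foo_sht = {
	.module		= THIS_MODULE,
	.name		= "foo",
	.ioctl		= ata_scsi_ioctl,
	.queuecommand	= ata_scsi_queuecmd,	/* hands cmds to libata */
	.can_queue	= ATA_DEF_QUEUE,
	.this_id	= ATA_SHT_THIS_ID,
	.sg_tablesize	= LIBATA_MAX_PRD,
	.cmd_per_lun	= ATA_SHT_CMD_PER_LUN,
	.emulated	= ATA_SHT_EMULATED,
	.proc_name	= "foo",
};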
2844
2845/**
2846 * ata_scsi_simulate - simulate SCSI command on ATA device
2847 * @dev: the target device
2848 * @cmd: SCSI command being sent to device.
2849 * @done: SCSI command completion function.
2850 *
2851 * Interprets and directly executes a select list of SCSI commands
2852 * that can be handled internally.
2853 *
2854 * LOCKING:
2855 * spin_lock_irqsave(host lock)
2856 */
2857
2858void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2859 void (*done)(struct scsi_cmnd *))
2860{
2861 struct ata_scsi_args args;
2862 const u8 *scsicmd = cmd->cmnd;
2863
2864 args.dev = dev;
2865 args.id = dev->id;
2866 args.cmd = cmd;
2867 args.done = done;
2868
2869 switch(scsicmd[0]) {
2870	/* no-ops, complete with success */
2871 case SYNCHRONIZE_CACHE:
2872 case REZERO_UNIT:
2873 case SEEK_6:
2874 case SEEK_10:
2875 case TEST_UNIT_READY:
2876 case FORMAT_UNIT: /* FIXME: correct? */
2877 case SEND_DIAGNOSTIC: /* FIXME: correct? */
2878 ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
2879 break;
2880
2881 case INQUIRY:
2882 if (scsicmd[1] & 2) /* is CmdDt set? */
2883 ata_scsi_invalid_field(cmd, done);
2884 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
2885 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
2886 else if (scsicmd[2] == 0x00)
2887 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
2888 else if (scsicmd[2] == 0x80)
2889 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
2890 else if (scsicmd[2] == 0x83)
2891 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
2892 else
2893 ata_scsi_invalid_field(cmd, done);
2894 break;
2895
2896 case MODE_SENSE:
2897 case MODE_SENSE_10:
2898 ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
2899 break;
2900
2901 case MODE_SELECT: /* unconditionally return */
2902 case MODE_SELECT_10: /* bad-field-in-cdb */
2903 ata_scsi_invalid_field(cmd, done);
2904 break;
2905
2906 case READ_CAPACITY:
2907 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
2908 break;
2909
2910 case SERVICE_ACTION_IN:
2911 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
2912 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
2913 else
2914 ata_scsi_invalid_field(cmd, done);
2915 break;
2916
2917 case REPORT_LUNS:
2918 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
2919 break;
2920
2921 /* mandatory commands we haven't implemented yet */
2922 case REQUEST_SENSE:
2923
2924 /* all other commands */
2925 default:
2926 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
2927 /* "Invalid command operation code" */
2928 done(cmd);
2929 break;
2930 }
2931}
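
/* Editor's sketch, not part of this patch: the actor contract used by
 * ata_scsi_rbuf_fill() above.  In this version the response buffer
 * arrives zeroed; a hypothetical actor only fills in its bytes and
 * returns 0 on success, while buffer mapping and command completion
 * are handled by ata_scsi_rbuf_fill() itself.
 */
static unsigned int foo_scsiop_example(struct ata_scsi_args *args, u8 *rbuf,
				       unsigned int buflen)
{
	if (buflen >= 1)	/* hypothetical single-byte payload */
		rbuf[0] = (args->dev->class == ATA_DEV_ATA) ? 0x00 : 0x05;
	return 0;
}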
2932
2933void ata_scsi_scan_host(struct ata_port *ap)
2934{
2935 unsigned int i;
2936
2937 if (ap->flags & ATA_FLAG_DISABLED)
2938 return;
2939
2940 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2941 struct ata_device *dev = &ap->device[i];
2942 struct scsi_device *sdev;
2943
2944 if (!ata_dev_enabled(dev) || dev->sdev)
2945 continue;
2946
2947 sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL);
2948 if (!IS_ERR(sdev)) {
2949 dev->sdev = sdev;
2950 scsi_device_put(sdev);
2951 }
2952 }
2953}
2954
2955/**
2956 * ata_scsi_offline_dev - offline attached SCSI device
2957 * @dev: ATA device to offline attached SCSI device for
2958 *
2959 * This function is called from ata_eh_hotplug() and is responsible
2960 * for taking the SCSI device attached to @dev offline.  This
2961 * function is called with the host lock held, which protects dev->sdev
2962 * against clearing.
2963 *
2964 * LOCKING:
2965 * spin_lock_irqsave(host lock)
2966 *
2967 * RETURNS:
2968 * 1 if attached SCSI device exists, 0 otherwise.
2969 */
2970int ata_scsi_offline_dev(struct ata_device *dev)
2971{
2972 if (dev->sdev) {
2973 scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
2974 return 1;
2975 }
2976 return 0;
2977}
2978
2979/**
2980 * ata_scsi_remove_dev - remove attached SCSI device
2981 * @dev: ATA device to remove attached SCSI device for
2982 *
2983 * This function is called from ata_eh_scsi_hotplug() and
2984 * is responsible for removing the SCSI device attached to @dev.
2985 *
2986 * LOCKING:
2987 * Kernel thread context (may sleep).
2988 */
2989static void ata_scsi_remove_dev(struct ata_device *dev)
2990{
2991 struct ata_port *ap = dev->ap;
2992 struct scsi_device *sdev;
2993 unsigned long flags;
2994
2995 /* Alas, we need to grab scan_mutex to ensure SCSI device
2996 * state doesn't change underneath us and thus
2997 * scsi_device_get() always succeeds.  The mutex locking could
2998 * be removed if there were a __scsi_device_get() interface that
2999 * incremented the reference count regardless of device state.
3000 */
3001 mutex_lock(&ap->scsi_host->scan_mutex);
3002 spin_lock_irqsave(ap->lock, flags);
3003
3004 /* clearing dev->sdev is protected by host lock */
3005 sdev = dev->sdev;
3006 dev->sdev = NULL;
3007
3008 if (sdev) {
3009 /* If user initiated unplug races with us, sdev can go
3010 * away underneath us after the host lock and
3011 * scan_mutex are released. Hold onto it.
3012 */
3013 if (scsi_device_get(sdev) == 0) {
3014 /* The following ensures the attached sdev is
3015 * offline on return from ata_scsi_offline_dev()
3016			 * regardless of whether it wins or loses the race
3017			 * against this function.
3018 */
3019 scsi_device_set_state(sdev, SDEV_OFFLINE);
3020 } else {
3021 WARN_ON(1);
3022 sdev = NULL;
3023 }
3024 }
3025
3026 spin_unlock_irqrestore(ap->lock, flags);
3027 mutex_unlock(&ap->scsi_host->scan_mutex);
3028
3029 if (sdev) {
3030 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
3031 sdev->sdev_gendev.bus_id);
3032
3033 scsi_remove_device(sdev);
3034 scsi_device_put(sdev);
3035 }
3036}
3037
3038/**
3039 * ata_scsi_hotplug - SCSI part of hotplug
3040 * @data: Pointer to ATA port to perform SCSI hotplug on
3041 *
3042 * Perform SCSI part of hotplug. It's executed from a separate
3043 * workqueue after EH completes. This is necessary because SCSI
3044 * hot plugging requires working EH, and hot unplugging is
3045 * synchronized with hot plugging via a mutex.
3046 *
3047 * LOCKING:
3048 * Kernel thread context (may sleep).
3049 */
3050void ata_scsi_hotplug(void *data)
3051{
3052 struct ata_port *ap = data;
3053 int i;
3054
3055 if (ap->pflags & ATA_PFLAG_UNLOADING) {
3056 DPRINTK("ENTER/EXIT - unloading\n");
3057 return;
3058 }
3059
3060 DPRINTK("ENTER\n");
3061
3062 /* unplug detached devices */
3063 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3064 struct ata_device *dev = &ap->device[i];
3065 unsigned long flags;
3066
3067 if (!(dev->flags & ATA_DFLAG_DETACHED))
3068 continue;
3069
3070 spin_lock_irqsave(ap->lock, flags);
3071 dev->flags &= ~ATA_DFLAG_DETACHED;
3072 spin_unlock_irqrestore(ap->lock, flags);
3073
3074 ata_scsi_remove_dev(dev);
3075 }
3076
3077 /* scan for new ones */
3078 ata_scsi_scan_host(ap);
3079
3080 /* If we scanned while EH was in progress, scan would have
3081 * failed silently. Requeue if there are enabled but
3082 * unattached devices.
3083 */
3084 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3085 struct ata_device *dev = &ap->device[i];
3086 if (ata_dev_enabled(dev) && !dev->sdev) {
3087 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ);
3088 break;
3089 }
3090 }
3091
3092 DPRINTK("EXIT\n");
3093}
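
/* Editor's note, an assumption about code outside this hunk: the
 * function above runs from ap->hotplug_task, which the core is
 * expected to set up with this era's three-argument INIT_WORK,
 * roughly:
 *
 *	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
 */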
3094
3095/**
3096 * ata_scsi_user_scan - indication for user-initiated bus scan
3097 * @shost: SCSI host to scan
3098 * @channel: Channel to scan
3099 * @id: ID to scan
3100 * @lun: LUN to scan
3101 *
3102 * This function is called when the user explicitly requests a bus
3103 * scan.  It sets the probe pending flag and invokes EH.
3104 *
3105 * LOCKING:
3106 * SCSI layer (we don't care)
3107 *
3108 * RETURNS:
3109 * Zero.
3110 */
3111static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3112 unsigned int id, unsigned int lun)
3113{
3114 struct ata_port *ap = ata_shost_to_port(shost);
3115 unsigned long flags;
3116 int rc = 0;
3117
3118 if (!ap->ops->error_handler)
3119 return -EOPNOTSUPP;
3120
3121 if ((channel != SCAN_WILD_CARD && channel != 0) ||
3122 (lun != SCAN_WILD_CARD && lun != 0))
3123 return -EINVAL;
3124
3125 spin_lock_irqsave(ap->lock, flags);
3126
3127 if (id == SCAN_WILD_CARD) {
3128 ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
3129 ap->eh_info.action |= ATA_EH_SOFTRESET;
3130 } else {
3131 struct ata_device *dev = ata_find_dev(ap, id);
3132
3133 if (dev) {
3134 ap->eh_info.probe_mask |= 1 << dev->devno;
3135 ap->eh_info.action |= ATA_EH_SOFTRESET;
3136 ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
3137 } else
3138 rc = -EINVAL;
3139 }
3140
3141 if (rc == 0)
3142 ata_port_schedule_eh(ap);
3143
3144 spin_unlock_irqrestore(ap->lock, flags);
3145
3146 return rc;
3147}
3148
3149/**
3150 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
3151 * @data: Pointer to ATA port to perform scsi_rescan_device() on
3152 *
3153 * After ATA pass thru (SAT) commands are executed successfully,
3154 * libata needs to propagate the changes to the SCSI layer.  This
3155 * function must be executed from ata_aux_wq so that sdev
3156 * attach/detach doesn't race with the rescan.
3157 *
3158 * LOCKING:
3159 * Kernel thread context (may sleep).
3160 */
3161void ata_scsi_dev_rescan(void *data)
3162{
3163 struct ata_port *ap = data;
3164 struct ata_device *dev;
3165 unsigned int i;
3166
3167 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3168 dev = &ap->device[i];
3169
3170 if (ata_dev_enabled(dev) && dev->sdev)
3171 scsi_rescan_device(&(dev->sdev->sdev_gendev));
3172 }
3173}
3174
3175/**
3176 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
3177 * @host: ATA host that the allocated port will belong to
3178 * @port_info: Information from low-level host driver
3179 * @shost: SCSI host that the scsi device is attached to
3180 *
3181 * LOCKING:
3182 * PCI/etc. bus probe sem.
3183 *
3184 * RETURNS:
3185 * ata_port pointer on success / NULL on failure.
3186 */
3187
3188struct ata_port *ata_sas_port_alloc(struct ata_host *host,
3189 struct ata_port_info *port_info,
3190 struct Scsi_Host *shost)
3191{
3192 struct ata_port *ap = kzalloc(sizeof(*ap), GFP_KERNEL);
3193 struct ata_probe_ent *ent;
3194
3195 if (!ap)
3196 return NULL;
3197
3198 ent = ata_probe_ent_alloc(host->dev, port_info);
3199 if (!ent) {
3200 kfree(ap);
3201 return NULL;
3202 }
3203
3204 ata_port_init(ap, host, ent, 0);
3205 ap->lock = shost->host_lock;
3206 kfree(ent);
3207 return ap;
3208}
3209EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3210
3211/**
3212 * ata_sas_port_start - Set port up for dma.
3213 * @ap: Port to initialize
3214 *
3215 * Called just after data structures for each port are
3216 * initialized. Allocates DMA pad.
3217 *
3218 * May be used as the port_start() entry in ata_port_operations.
3219 *
3220 * LOCKING:
3221 * Inherited from caller.
3222 */
3223int ata_sas_port_start(struct ata_port *ap)
3224{
3225 return ata_pad_alloc(ap, ap->dev);
3226}
3227EXPORT_SYMBOL_GPL(ata_sas_port_start);
3228
3229/**
3230 * ata_sas_port_stop - Undo ata_sas_port_start()
3231 * @ap: Port to shut down
3232 *
3233 * Frees the DMA pad.
3234 *
3235 * May be used as the port_stop() entry in ata_port_operations.
3236 *
3237 * LOCKING:
3238 * Inherited from caller.
3239 */
3240
3241void ata_sas_port_stop(struct ata_port *ap)
3242{
3243 ata_pad_free(ap, ap->dev);
3244}
3245EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3246
3247/**
3248 * ata_sas_port_init - Initialize a SATA device
3249 * @ap: SATA port to initialize
3250 *
3251 * LOCKING:
3252 * PCI/etc. bus probe sem.
3253 *
3254 * RETURNS:
3255 * Zero on success, non-zero on error.
3256 */
3257
3258int ata_sas_port_init(struct ata_port *ap)
3259{
3260 int rc = ap->ops->port_start(ap);
3261
3262 if (!rc)
3263 rc = ata_bus_probe(ap);
3264
3265 return rc;
3266}
3267EXPORT_SYMBOL_GPL(ata_sas_port_init);
3268
3269/**
3270 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
3271 * @ap: SATA port to destroy
3272 *
3273 */
3274
3275void ata_sas_port_destroy(struct ata_port *ap)
3276{
3277 ap->ops->port_stop(ap);
3278 kfree(ap);
3279}
3280EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
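
/* Editor's sketch, not part of this patch: the intended call sequence
 * for the ata_sas_* helpers above, as a hypothetical SAS LLDD might
 * use them; foo_probe_one() and its arguments are invented for
 * illustration.
 */
static int foo_probe_one(struct ata_host *host, struct ata_port_info *pi,
			 struct Scsi_Host *shost)
{
	struct ata_port *ap = ata_sas_port_alloc(host, pi, shost);
	int rc;

	if (!ap)
		return -ENOMEM;

	rc = ata_sas_port_init(ap);	/* starts the port, probes devices */
	if (rc)
		ata_sas_port_destroy(ap);	/* stops the port, frees it */
	return rc;
}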
3281
3282/**
3283 * ata_sas_slave_configure - Default slave_config routine for libata devices
3284 * @sdev: SCSI device to configure
3285 * @ap: ATA port to which SCSI device is attached
3286 *
3287 * RETURNS:
3288 * Zero.
3289 */
3290
3291int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
3292{
3293 ata_scsi_sdev_config(sdev);
3294 ata_scsi_dev_config(sdev, ap->device);
3295 return 0;
3296}
3297EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
3298
3299/**
3300 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
3301 * @cmd: SCSI command to be sent
3302 * @done: Completion function, called when command is complete
3303 * @ap: ATA port to which the command is being sent
3304 *
3305 * RETURNS:
3306 * Zero.
3307 */
3308
3309int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
3310 struct ata_port *ap)
3311{
3312 ata_scsi_dump_cdb(ap, cmd);
3313
3314 if (likely(ata_scsi_dev_enabled(ap->device)))
3315 __ata_scsi_queuecmd(cmd, done, ap->device);
3316 else {
3317 cmd->result = (DID_BAD_TARGET << 16);
3318 done(cmd);
3319 }
3320 return 0;
3321}
3322EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
new file mode 100644
index 000000000000..760502859821
--- /dev/null
+++ b/drivers/ata/libata-sff.c
@@ -0,0 +1,1109 @@
1/*
2 * libata-sff.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/pci.h>
37#include <linux/libata.h>
38
39#include "libata.h"
40
41/**
42 * ata_tf_load_pio - send taskfile registers to host controller
43 * @ap: Port to which output is sent
44 * @tf: ATA taskfile register set
45 *
46 * Outputs ATA taskfile to standard ATA host controller.
47 *
48 * LOCKING:
49 * Inherited from caller.
50 */
51
52static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
53{
54 struct ata_ioports *ioaddr = &ap->ioaddr;
55 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
56
57 if (tf->ctl != ap->last_ctl) {
58 outb(tf->ctl, ioaddr->ctl_addr);
59 ap->last_ctl = tf->ctl;
60 ata_wait_idle(ap);
61 }
62
63 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
64 outb(tf->hob_feature, ioaddr->feature_addr);
65 outb(tf->hob_nsect, ioaddr->nsect_addr);
66 outb(tf->hob_lbal, ioaddr->lbal_addr);
67 outb(tf->hob_lbam, ioaddr->lbam_addr);
68 outb(tf->hob_lbah, ioaddr->lbah_addr);
69 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
70 tf->hob_feature,
71 tf->hob_nsect,
72 tf->hob_lbal,
73 tf->hob_lbam,
74 tf->hob_lbah);
75 }
76
77 if (is_addr) {
78 outb(tf->feature, ioaddr->feature_addr);
79 outb(tf->nsect, ioaddr->nsect_addr);
80 outb(tf->lbal, ioaddr->lbal_addr);
81 outb(tf->lbam, ioaddr->lbam_addr);
82 outb(tf->lbah, ioaddr->lbah_addr);
83 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
84 tf->feature,
85 tf->nsect,
86 tf->lbal,
87 tf->lbam,
88 tf->lbah);
89 }
90
91 if (tf->flags & ATA_TFLAG_DEVICE) {
92 outb(tf->device, ioaddr->device_addr);
93 VPRINTK("device 0x%X\n", tf->device);
94 }
95
96 ata_wait_idle(ap);
97}
98
99/**
100 * ata_tf_load_mmio - send taskfile registers to host controller
101 * @ap: Port to which output is sent
102 * @tf: ATA taskfile register set
103 *
104 * Outputs ATA taskfile to standard ATA host controller using MMIO.
105 *
106 * LOCKING:
107 * Inherited from caller.
108 */
109
110static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
111{
112 struct ata_ioports *ioaddr = &ap->ioaddr;
113 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
114
115 if (tf->ctl != ap->last_ctl) {
116 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
117 ap->last_ctl = tf->ctl;
118 ata_wait_idle(ap);
119 }
120
121 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
122 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
123 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
124 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
125 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
126 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
127 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
128 tf->hob_feature,
129 tf->hob_nsect,
130 tf->hob_lbal,
131 tf->hob_lbam,
132 tf->hob_lbah);
133 }
134
135 if (is_addr) {
136 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
137 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
138 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
139 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
140 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
141 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
142 tf->feature,
143 tf->nsect,
144 tf->lbal,
145 tf->lbam,
146 tf->lbah);
147 }
148
149 if (tf->flags & ATA_TFLAG_DEVICE) {
150 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
151 VPRINTK("device 0x%X\n", tf->device);
152 }
153
154 ata_wait_idle(ap);
155}
156
157
158/**
159 * ata_tf_load - send taskfile registers to host controller
160 * @ap: Port to which output is sent
161 * @tf: ATA taskfile register set
162 *
163 * Outputs ATA taskfile to standard ATA host controller using MMIO
164 * or PIO as indicated by the ATA_FLAG_MMIO flag.
165 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
166 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
167 * hob_lbal, hob_lbam, and hob_lbah.
168 *
169 * This function waits for idle (!BUSY and !DRQ) after writing
170 * registers. If the control register has a new value, this
171 * function also waits for idle after writing control and before
172 * writing the remaining registers.
173 *
174 * May be used as the tf_load() entry in ata_port_operations.
175 *
176 * LOCKING:
177 * Inherited from caller.
178 */
179void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
180{
181 if (ap->flags & ATA_FLAG_MMIO)
182 ata_tf_load_mmio(ap, tf);
183 else
184 ata_tf_load_pio(ap, tf);
185}
186
187/**
188 * ata_exec_command_pio - issue ATA command to host controller
189 * @ap: port to which command is being issued
190 * @tf: ATA taskfile register set
191 *
192 * Issues PIO write to ATA command register, with proper
193 * synchronization with interrupt handler / other threads.
194 *
195 * LOCKING:
196 * spin_lock_irqsave(host lock)
197 */
198
199static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
200{
201 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
202
203 outb(tf->command, ap->ioaddr.command_addr);
204 ata_pause(ap);
205}
206
207
208/**
209 * ata_exec_command_mmio - issue ATA command to host controller
210 * @ap: port to which command is being issued
211 * @tf: ATA taskfile register set
212 *
213 * Issues MMIO write to ATA command register, with proper
214 * synchronization with interrupt handler / other threads.
215 *
216 * FIXME: missing write posting for 400nS delay enforcement
217 *
218 * LOCKING:
219 * spin_lock_irqsave(host lock)
220 */
221
222static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
223{
224 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
225
226 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
227 ata_pause(ap);
228}
229
230
231/**
232 * ata_exec_command - issue ATA command to host controller
233 * @ap: port to which command is being issued
234 * @tf: ATA taskfile register set
235 *
236 * Issues PIO/MMIO write to ATA command register, with proper
237 * synchronization with interrupt handler / other threads.
238 *
239 * LOCKING:
240 * spin_lock_irqsave(host lock)
241 */
242void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
243{
244 if (ap->flags & ATA_FLAG_MMIO)
245 ata_exec_command_mmio(ap, tf);
246 else
247 ata_exec_command_pio(ap, tf);
248}
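
/* Editor's sketch, not part of this patch: a hypothetical caller builds
 * an LBA28 READ SECTOR(S) taskfile and hands it to the tf_load() and
 * exec_command() hooks implemented above; the field values are
 * illustrative only.
 */
static void foo_issue_pio_read(struct ata_port *ap)
{
	struct ata_taskfile tf;

	ata_tf_init(&ap->device[0], &tf);
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PIO_READ;
	tf.nsect = 1;				/* one sector ... */
	tf.lbal = tf.lbam = tf.lbah = 0;	/* ... at LBA 0 */
	tf.device |= ATA_LBA;

	ap->ops->tf_load(ap, &tf);		/* writes shadow registers */
	ap->ops->exec_command(ap, &tf);		/* writes command register */
}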
249
250/**
251 * ata_tf_read_pio - input device's ATA taskfile shadow registers
252 * @ap: Port from which input is read
253 * @tf: ATA taskfile register set for storing input
254 *
255 * Reads ATA taskfile registers for currently-selected device
256 * into @tf.
257 *
258 * LOCKING:
259 * Inherited from caller.
260 */
261
262static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
263{
264 struct ata_ioports *ioaddr = &ap->ioaddr;
265
266 tf->command = ata_check_status(ap);
267 tf->feature = inb(ioaddr->error_addr);
268 tf->nsect = inb(ioaddr->nsect_addr);
269 tf->lbal = inb(ioaddr->lbal_addr);
270 tf->lbam = inb(ioaddr->lbam_addr);
271 tf->lbah = inb(ioaddr->lbah_addr);
272 tf->device = inb(ioaddr->device_addr);
273
274 if (tf->flags & ATA_TFLAG_LBA48) {
275 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
276 tf->hob_feature = inb(ioaddr->error_addr);
277 tf->hob_nsect = inb(ioaddr->nsect_addr);
278 tf->hob_lbal = inb(ioaddr->lbal_addr);
279 tf->hob_lbam = inb(ioaddr->lbam_addr);
280 tf->hob_lbah = inb(ioaddr->lbah_addr);
281 }
282}
283
284/**
285 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
286 * @ap: Port from which input is read
287 * @tf: ATA taskfile register set for storing input
288 *
289 * Reads ATA taskfile registers for currently-selected device
290 * into @tf via MMIO.
291 *
292 * LOCKING:
293 * Inherited from caller.
294 */
295
296static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
297{
298 struct ata_ioports *ioaddr = &ap->ioaddr;
299
300 tf->command = ata_check_status(ap);
301 tf->feature = readb((void __iomem *)ioaddr->error_addr);
302 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
303 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
304 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
305 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
306 tf->device = readb((void __iomem *)ioaddr->device_addr);
307
308 if (tf->flags & ATA_TFLAG_LBA48) {
309 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
310 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
311 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
312 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
313 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
314 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
315 }
316}
317
318
319/**
320 * ata_tf_read - input device's ATA taskfile shadow registers
321 * @ap: Port from which input is read
322 * @tf: ATA taskfile register set for storing input
323 *
324 * Reads ATA taskfile registers for currently-selected device
325 * into @tf.
326 *
327 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
328 * is set, also reads the hob registers.
329 *
330 * May be used as the tf_read() entry in ata_port_operations.
331 *
332 * LOCKING:
333 * Inherited from caller.
334 */
335void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
336{
337 if (ap->flags & ATA_FLAG_MMIO)
338 ata_tf_read_mmio(ap, tf);
339 else
340 ata_tf_read_pio(ap, tf);
341}
342
343/**
344 * ata_check_status_pio - Read device status reg & clear interrupt
345 * @ap: port where the device is
346 *
347 * Reads ATA taskfile status register for currently-selected device
348 * and returns its value.  This also clears pending interrupts
349 * from this device.
350 *
351 * LOCKING:
352 * Inherited from caller.
353 */
354static u8 ata_check_status_pio(struct ata_port *ap)
355{
356 return inb(ap->ioaddr.status_addr);
357}
358
359/**
360 * ata_check_status_mmio - Read device status reg & clear interrupt
361 * @ap: port where the device is
362 *
363 * Reads ATA taskfile status register for currently-selected device
364 * via MMIO and returns its value.  This also clears pending interrupts
365 * from this device.
366 *
367 * LOCKING:
368 * Inherited from caller.
369 */
370static u8 ata_check_status_mmio(struct ata_port *ap)
371{
372 return readb((void __iomem *) ap->ioaddr.status_addr);
373}
374
375
376/**
377 * ata_check_status - Read device status reg & clear interrupt
378 * @ap: port where the device is
379 *
380 * Reads ATA taskfile status register for currently-selected device
381 * and returns its value.  This also clears pending interrupts
382 * from this device.
383 *
384 * May be used as the check_status() entry in ata_port_operations.
385 *
386 * LOCKING:
387 * Inherited from caller.
388 */
389u8 ata_check_status(struct ata_port *ap)
390{
391 if (ap->flags & ATA_FLAG_MMIO)
392 return ata_check_status_mmio(ap);
393 return ata_check_status_pio(ap);
394}
395
396
397/**
398 * ata_altstatus - Read device alternate status reg
399 * @ap: port where the device is
400 *
401 * Reads ATA taskfile alternate status register for
402 * currently-selected device and returns its value.
403 *
404 * Note: may NOT be used as the check_altstatus() entry in
405 * ata_port_operations.
406 *
407 * LOCKING:
408 * Inherited from caller.
409 */
410u8 ata_altstatus(struct ata_port *ap)
411{
412 if (ap->ops->check_altstatus)
413 return ap->ops->check_altstatus(ap);
414
415 if (ap->flags & ATA_FLAG_MMIO)
416 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
417 return inb(ap->ioaddr.altstatus_addr);
418}
419
420/**
421 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
422 * @qc: Info associated with this ATA transaction.
423 *
424 * LOCKING:
425 * spin_lock_irqsave(host lock)
426 */
427
428static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
429{
430 struct ata_port *ap = qc->ap;
431 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
432 u8 dmactl;
433 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
434
435 /* load PRD table addr. */
436 mb(); /* make sure PRD table writes are visible to controller */
437 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
438
439 /* specify data direction, triple-check start bit is clear */
440 dmactl = readb(mmio + ATA_DMA_CMD);
441 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
442 if (!rw)
443 dmactl |= ATA_DMA_WR;
444 writeb(dmactl, mmio + ATA_DMA_CMD);
445
446 /* issue r/w command */
447 ap->ops->exec_command(ap, &qc->tf);
448}
449
450/**
451 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
452 * @qc: Info associated with this ATA transaction.
453 *
454 * LOCKING:
455 * spin_lock_irqsave(host lock)
456 */
457
458static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
459{
460 struct ata_port *ap = qc->ap;
461 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
462 u8 dmactl;
463
464 /* start host DMA transaction */
465 dmactl = readb(mmio + ATA_DMA_CMD);
466 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
467
468 /* Strictly, one may wish to issue a readb() here, to
469 * flush the mmio write. However, control also passes
470 * to the hardware at this point, and it will interrupt
471 * us when we are to resume control. So, in effect,
472 * we don't care when the mmio write flushes.
473 * Further, a read of the DMA status register _immediately_
474 * following the write may not be what certain flaky hardware
475	 * is expecting, so I think it is best not to add a readb()
476	 * without first auditing all the MMIO ATA cards/mobos.
477 * Or maybe I'm just being paranoid.
478 */
479}
480
481/**
482 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
483 * @qc: Info associated with this ATA transaction.
484 *
485 * LOCKING:
486 * spin_lock_irqsave(host lock)
487 */
488
489static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
490{
491 struct ata_port *ap = qc->ap;
492 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
493 u8 dmactl;
494
495 /* load PRD table addr. */
496 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
497
498 /* specify data direction, triple-check start bit is clear */
499 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
500 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
501 if (!rw)
502 dmactl |= ATA_DMA_WR;
503 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
504
505 /* issue r/w command */
506 ap->ops->exec_command(ap, &qc->tf);
507}
508
509/**
510 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
511 * @qc: Info associated with this ATA transaction.
512 *
513 * LOCKING:
514 * spin_lock_irqsave(host lock)
515 */
516
517static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
518{
519 struct ata_port *ap = qc->ap;
520 u8 dmactl;
521
522 /* start host DMA transaction */
523 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
524 outb(dmactl | ATA_DMA_START,
525 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
526}
527
528
529/**
530 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
531 * @qc: Info associated with this ATA transaction.
532 *
533 * Writes the ATA_DMA_START flag to the DMA command register.
534 *
535 * May be used as the bmdma_start() entry in ata_port_operations.
536 *
537 * LOCKING:
538 * spin_lock_irqsave(host lock)
539 */
540void ata_bmdma_start(struct ata_queued_cmd *qc)
541{
542 if (qc->ap->flags & ATA_FLAG_MMIO)
543 ata_bmdma_start_mmio(qc);
544 else
545 ata_bmdma_start_pio(qc);
546}
547
548
549/**
550 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
551 * @qc: Info associated with this ATA transaction.
552 *
553 * Writes address of PRD table to device's PRD Table Address
554 * register, sets the DMA control register, and calls
555 * ops->exec_command() to start the transfer.
556 *
557 * May be used as the bmdma_setup() entry in ata_port_operations.
558 *
559 * LOCKING:
560 * spin_lock_irqsave(host lock)
561 */
562void ata_bmdma_setup(struct ata_queued_cmd *qc)
563{
564 if (qc->ap->flags & ATA_FLAG_MMIO)
565 ata_bmdma_setup_mmio(qc);
566 else
567 ata_bmdma_setup_pio(qc);
568}
569
570
571/**
572 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
573 * @ap: Port associated with this ATA transaction.
574 *
575 * Clear interrupt and error flags in DMA status register.
576 *
577 * May be used as the irq_clear() entry in ata_port_operations.
578 *
579 * LOCKING:
580 * spin_lock_irqsave(host lock)
581 */
582
583void ata_bmdma_irq_clear(struct ata_port *ap)
584{
585 if (!ap->ioaddr.bmdma_addr)
586 return;
587
588 if (ap->flags & ATA_FLAG_MMIO) {
589 void __iomem *mmio =
590 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
591 writeb(readb(mmio), mmio);
592 } else {
593 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
594 outb(inb(addr), addr);
595 }
596}
597
598
599/**
600 * ata_bmdma_status - Read PCI IDE BMDMA status
601 * @ap: Port associated with this ATA transaction.
602 *
603 * Read and return BMDMA status register.
604 *
605 * May be used as the bmdma_status() entry in ata_port_operations.
606 *
607 * LOCKING:
608 * spin_lock_irqsave(host lock)
609 */
610
611u8 ata_bmdma_status(struct ata_port *ap)
612{
613 u8 host_stat;
614 if (ap->flags & ATA_FLAG_MMIO) {
615 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
616 host_stat = readb(mmio + ATA_DMA_STATUS);
617 } else
618 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
619 return host_stat;
620}
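
/* Editor's note, not part of this patch: as the kernel-doc above says,
 * these bmdma helpers are meant to be plugged directly into a driver's
 * ata_port_operations, e.g. (fragment, hypothetical driver):
 *
 *	.bmdma_setup	= ata_bmdma_setup,
 *	.bmdma_start	= ata_bmdma_start,
 *	.bmdma_stop	= ata_bmdma_stop,
 *	.bmdma_status	= ata_bmdma_status,
 *	.irq_clear	= ata_bmdma_irq_clear,
 */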
621
622
623/**
624 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
625 * @qc: Command we are ending DMA for
626 *
627 * Clears the ATA_DMA_START flag in the dma control register
628 *
629 * May be used as the bmdma_stop() entry in ata_port_operations.
630 *
631 * LOCKING:
632 * spin_lock_irqsave(host lock)
633 */
634
635void ata_bmdma_stop(struct ata_queued_cmd *qc)
636{
637 struct ata_port *ap = qc->ap;
638 if (ap->flags & ATA_FLAG_MMIO) {
639 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
640
641 /* clear start/stop bit */
642 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
643 mmio + ATA_DMA_CMD);
644 } else {
645 /* clear start/stop bit */
646 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
647 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
648 }
649
650 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
651 ata_altstatus(ap); /* dummy read */
652}
653
654/**
655 * ata_bmdma_freeze - Freeze BMDMA controller port
656 * @ap: port to freeze
657 *
658 * Freeze BMDMA controller port.
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663void ata_bmdma_freeze(struct ata_port *ap)
664{
665 struct ata_ioports *ioaddr = &ap->ioaddr;
666
667 ap->ctl |= ATA_NIEN;
668 ap->last_ctl = ap->ctl;
669
670 if (ap->flags & ATA_FLAG_MMIO)
671 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
672 else
673 outb(ap->ctl, ioaddr->ctl_addr);
674}
675
676/**
677 * ata_bmdma_thaw - Thaw BMDMA controller port
678 * @ap: port to thaw
679 *
680 * Thaw BMDMA controller port.
681 *
682 * LOCKING:
683 * Inherited from caller.
684 */
685void ata_bmdma_thaw(struct ata_port *ap)
686{
687 /* clear & re-enable interrupts */
688 ata_chk_status(ap);
689 ap->ops->irq_clear(ap);
690 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
691 ata_irq_on(ap);
692}
693
694/**
695 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
696 * @ap: port to handle error for
697 * @prereset: prereset method (can be NULL)
698 * @softreset: softreset method (can be NULL)
699 * @hardreset: hardreset method (can be NULL)
700 * @postreset: postreset method (can be NULL)
701 *
702 * Handle error for ATA BMDMA controller. It can handle both
703 * PATA and SATA controllers. Many controllers should be able to
704 * use this EH as-is or with some added handling before and
705 * after.
706 *
707 * This function is intended to be used for constructing
708 * ->error_handler callback by low level drivers.
709 *
710 * LOCKING:
711 * Kernel thread context (may sleep)
712 */
713void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
714 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
715 ata_postreset_fn_t postreset)
716{
717 struct ata_eh_context *ehc = &ap->eh_context;
718 struct ata_queued_cmd *qc;
719 unsigned long flags;
720 int thaw = 0;
721
722 qc = __ata_qc_from_tag(ap, ap->active_tag);
723 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
724 qc = NULL;
725
726 /* reset PIO HSM and stop DMA engine */
727 spin_lock_irqsave(ap->lock, flags);
728
729 ap->hsm_task_state = HSM_ST_IDLE;
730
731 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
732 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
733 u8 host_stat;
734
735 host_stat = ata_bmdma_status(ap);
736
737 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
738
739 /* BMDMA controllers indicate host bus error by
740 * setting DMA_ERR bit and timing out. As it wasn't
741 * really a timeout event, adjust error mask and
742 * cancel frozen state.
743 */
744 if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
745 qc->err_mask = AC_ERR_HOST_BUS;
746 thaw = 1;
747 }
748
749 ap->ops->bmdma_stop(qc);
750 }
751
752 ata_altstatus(ap);
753 ata_chk_status(ap);
754 ap->ops->irq_clear(ap);
755
756 spin_unlock_irqrestore(ap->lock, flags);
757
758 if (thaw)
759 ata_eh_thaw_port(ap);
760
761 /* PIO and DMA engines have been stopped, perform recovery */
762 ata_do_eh(ap, prereset, softreset, hardreset, postreset);
763}
764
765/**
766 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
767 * @ap: port to handle error for
768 *
769 * Stock error handler for BMDMA controller.
770 *
771 * LOCKING:
772 * Kernel thread context (may sleep)
773 */
774void ata_bmdma_error_handler(struct ata_port *ap)
775{
776 ata_reset_fn_t hardreset;
777
778 hardreset = NULL;
779 if (sata_scr_valid(ap))
780 hardreset = sata_std_hardreset;
781
782 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
783 ata_std_postreset);
784}
785
786/**
787 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
788 * BMDMA controller
789 * @qc: internal command to clean up
790 *
791 * LOCKING:
792 * Kernel thread context (may sleep)
793 */
794void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
795{
796 ata_bmdma_stop(qc);
797}
798
799#ifdef CONFIG_PCI
800/**
801 * ata_pci_init_native_mode - Initialize native-mode driver
802 * @pdev: pci device to be initialized
803 * @port: array[2] of pointers to port info structures.
804 * @ports: bitmap of ports present
805 *
806 * Utility function which allocates and initializes an
807 * ata_probe_ent structure for a standard dual-port
808 * PIO-based IDE controller.  The returned ata_probe_ent
809 * structure can be passed to ata_device_add(), after which it
810 * should be freed with kfree().
811 *
812 * The caller need only pass the address of the primary port; the
813 * secondary will be deduced automatically.  If the device has
814 * non-standard secondary port mappings, this function can be called
815 * twice, once for each interface.
816 */
817
818struct ata_probe_ent *
819ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
820{
821 struct ata_probe_ent *probe_ent =
822 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
823 int p = 0;
824 unsigned long bmdma;
825
826 if (!probe_ent)
827 return NULL;
828
829 probe_ent->irq = pdev->irq;
830 probe_ent->irq_flags = IRQF_SHARED;
831 probe_ent->private_data = port[0]->private_data;
832
833 if (ports & ATA_PORT_PRIMARY) {
834 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
835 probe_ent->port[p].altstatus_addr =
836 probe_ent->port[p].ctl_addr =
837 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
838 bmdma = pci_resource_start(pdev, 4);
839 if (bmdma) {
840 if (inb(bmdma + 2) & 0x80)
841 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
842 probe_ent->port[p].bmdma_addr = bmdma;
843 }
844 ata_std_ports(&probe_ent->port[p]);
845 p++;
846 }
847
848 if (ports & ATA_PORT_SECONDARY) {
849 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
850 probe_ent->port[p].altstatus_addr =
851 probe_ent->port[p].ctl_addr =
852 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
853 bmdma = pci_resource_start(pdev, 4);
854 if (bmdma) {
855 bmdma += 8;
856			if (inb(bmdma + 2) & 0x80)
857 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
858 probe_ent->port[p].bmdma_addr = bmdma;
859 }
860 ata_std_ports(&probe_ent->port[p]);
861 p++;
862 }
863
864 probe_ent->n_ports = p;
865 return probe_ent;
866}
867
868
869static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
870 struct ata_port_info **port, int port_mask)
871{
872 struct ata_probe_ent *probe_ent;
873 unsigned long bmdma = pci_resource_start(pdev, 4);
874
875 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
876 if (!probe_ent)
877 return NULL;
878
879 probe_ent->n_ports = 2;
880 probe_ent->private_data = port[0]->private_data;
881
882 if (port_mask & ATA_PORT_PRIMARY) {
883 probe_ent->irq = 14;
884 probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD;
885 probe_ent->port[0].altstatus_addr =
886 probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL;
887 if (bmdma) {
888 probe_ent->port[0].bmdma_addr = bmdma;
889 if (inb(bmdma + 2) & 0x80)
890 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
891 }
892 ata_std_ports(&probe_ent->port[0]);
893 } else
894 probe_ent->dummy_port_mask |= ATA_PORT_PRIMARY;
895
896 if (port_mask & ATA_PORT_SECONDARY) {
897 if (probe_ent->irq)
898 probe_ent->irq2 = 15;
899 else
900 probe_ent->irq = 15;
901 probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD;
902 probe_ent->port[1].altstatus_addr =
903 probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL;
904 if (bmdma) {
905 probe_ent->port[1].bmdma_addr = bmdma + 8;
906 if (inb(bmdma + 10) & 0x80)
907 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
908 }
909 ata_std_ports(&probe_ent->port[1]);
910 } else
911 probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY;
912
913 return probe_ent;
914}
915
916
917/**
918 * ata_pci_init_one - Initialize/register PCI IDE host controller
919 * @pdev: Controller to be initialized
920 * @port_info: Information from low-level host driver
921 * @n_ports: Number of ports attached to host controller
922 *
923 * This is a helper function which can be called from a driver's
924 * xxx_init_one() probe function if the hardware uses traditional
925 * IDE taskfile registers.
926 *
927 * This function calls pci_enable_device(), reserves its register
928 * regions, sets the dma mask, enables bus master mode, and calls
929 * ata_device_add()
930 *
931 * ASSUMPTION:
932 * Nobody makes a single channel controller that appears solely as
933 * the secondary legacy port on PCI.
934 *
935 * LOCKING:
936 * Inherited from PCI layer (may sleep).
937 *
938 * RETURNS:
939 * Zero on success, negative errno-based value on error.
940 */
941
942int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
943 unsigned int n_ports)
944{
945 struct ata_probe_ent *probe_ent = NULL;
946 struct ata_port_info *port[2];
947 u8 tmp8, mask;
948 unsigned int legacy_mode = 0;
949 int disable_dev_on_err = 1;
950 int rc;
951
952 DPRINTK("ENTER\n");
953
954 port[0] = port_info[0];
955 if (n_ports > 1)
956 port[1] = port_info[1];
957 else
958 port[1] = port[0];
959
960 if ((port[0]->flags & ATA_FLAG_NO_LEGACY) == 0
961 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
962 /* TODO: What if one channel is in native mode ... */
963 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
964 mask = (1 << 2) | (1 << 0);
965 if ((tmp8 & mask) != mask)
966 legacy_mode = (1 << 3);
967 }
968
969 /* FIXME... */
970 if ((!legacy_mode) && (n_ports > 2)) {
971 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
972 n_ports = 2;
973 /* For now */
974 }
975
976 /* FIXME: Really for ATA it isn't safe because the device may be
977 multi-purpose and we want to leave it alone if it was already
978	   enabled.  Secondly, for shared use, as Arjan says, we want refcounting.
979
980	   Checking dev->is_enabled is insufficient, as it is not set at
981	   boot for the primary video device, which is BIOS-enabled.
982 */
983
984 rc = pci_enable_device(pdev);
985 if (rc)
986 return rc;
987
988 rc = pci_request_regions(pdev, DRV_NAME);
989 if (rc) {
990 disable_dev_on_err = 0;
991 goto err_out;
992 }
993
994 if (legacy_mode) {
995 if (!request_region(ATA_PRIMARY_CMD, 8, "libata")) {
996 struct resource *conflict, res;
997 res.start = ATA_PRIMARY_CMD;
998 res.end = ATA_PRIMARY_CMD + 8 - 1;
999 conflict = ____request_resource(&ioport_resource, &res);
1000 if (!strcmp(conflict->name, "libata"))
1001 legacy_mode |= ATA_PORT_PRIMARY;
1002 else {
1003 disable_dev_on_err = 0;
1004 printk(KERN_WARNING "ata: 0x%0X IDE port busy\n", ATA_PRIMARY_CMD);
1005 }
1006 } else
1007 legacy_mode |= ATA_PORT_PRIMARY;
1008
1009 if (!request_region(ATA_SECONDARY_CMD, 8, "libata")) {
1010 struct resource *conflict, res;
1011 res.start = ATA_SECONDARY_CMD;
1012 res.end = ATA_SECONDARY_CMD + 8 - 1;
1013 conflict = ____request_resource(&ioport_resource, &res);
1014 if (!strcmp(conflict->name, "libata"))
1015 legacy_mode |= ATA_PORT_SECONDARY;
1016 else {
1017 disable_dev_on_err = 0;
1018 printk(KERN_WARNING "ata: 0x%X IDE port busy\n", ATA_SECONDARY_CMD);
1019 }
1020 } else
1021 legacy_mode |= ATA_PORT_SECONDARY;
1022 }
1023
1024 /* we have legacy mode, but all ports are unavailable */
1025 if (legacy_mode == (1 << 3)) {
1026 rc = -EBUSY;
1027 goto err_out_regions;
1028 }
1029
1030 /* FIXME: If we get no DMA mask we should fall back to PIO */
1031 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1032 if (rc)
1033 goto err_out_regions;
1034 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1035 if (rc)
1036 goto err_out_regions;
1037
1038 if (legacy_mode) {
1039 probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode);
1040 } else {
1041 if (n_ports == 2)
1042 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1043 else
1044 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
1045 }
1046 if (!probe_ent) {
1047 rc = -ENOMEM;
1048 goto err_out_regions;
1049 }
1050
1051 pci_set_master(pdev);
1052
1053 /* FIXME: check ata_device_add return */
1054 ata_device_add(probe_ent);
1055
1056 kfree(probe_ent);
1057
1058 return 0;
1059
1060err_out_regions:
1061 if (legacy_mode & ATA_PORT_PRIMARY)
1062 release_region(ATA_PRIMARY_CMD, 8);
1063 if (legacy_mode & ATA_PORT_SECONDARY)
1064 release_region(ATA_SECONDARY_CMD, 8);
1065 pci_release_regions(pdev);
1066err_out:
1067 if (disable_dev_on_err)
1068 pci_disable_device(pdev);
1069 return rc;
1070}
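
/* Editor's sketch, not part of this patch: the usual shape of a PCI
 * LLDD's ->probe() built on ata_pci_init_one(); foo_init_one() is
 * invented, and foo_sht / foo_port_ops are assumed defined elsewhere.
 */
static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static struct ata_port_info foo_port_info = {
		.sht		= &foo_sht,
		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.port_ops	= &foo_port_ops,
	};
	struct ata_port_info *ppi[2] = { &foo_port_info, NULL };

	return ata_pci_init_one(pdev, ppi, 1);
}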
1071
1072/**
1073 * ata_pci_clear_simplex - attempt to kick device out of simplex
1074 * @pdev: PCI device
1075 *
1076 * Some PCI ATA devices report simplex mode but in fact can be told to
1077 * enter non-simplex mode.  This implements the necessary logic to
1078 * perform the task on such devices. Calling it on other devices will
1079 * have -undefined- behaviour.
1080 */
1081
1082int ata_pci_clear_simplex(struct pci_dev *pdev)
1083{
1084 unsigned long bmdma = pci_resource_start(pdev, 4);
1085 u8 simplex;
1086
1087 if (bmdma == 0)
1088 return -ENOENT;
1089
1090 simplex = inb(bmdma + 0x02);
1091 outb(simplex & 0x60, bmdma + 0x02);
1092 simplex = inb(bmdma + 0x02);
1093 if (simplex & 0x80)
1094 return -EOPNOTSUPP;
1095 return 0;
1096}
1097
1098unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
1099{
1100 /* Filter out DMA modes if the device has been configured by
1101 the BIOS as PIO only */
1102
1103 if (ap->ioaddr.bmdma_addr == 0)
1104 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
1105 return xfer_mask;
1106}
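
/* Editor's sketch, not part of this patch: a hypothetical driver chains
 * its own restriction on top of ata_pci_default_filter() from its
 * ->mode_filter hook; the slave-device limitation is invented.
 */
static unsigned long foo_mode_filter(const struct ata_port *ap,
				     struct ata_device *adev,
				     unsigned long xfer_mask)
{
	if (adev->devno == 1)		/* hypothetical: no UDMA on slave */
		xfer_mask &= ~ATA_MASK_UDMA;
	return ata_pci_default_filter(ap, adev, xfer_mask);
}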
1107
1108#endif /* CONFIG_PCI */
1109
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
new file mode 100644
index 000000000000..a5ecb71390a9
--- /dev/null
+++ b/drivers/ata/libata.h
@@ -0,0 +1,122 @@
1/*
2 * libata.h - helper library for ATA
3 *
4 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 * Copyright 2003-2004 Jeff Garzik
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 */
27
28#ifndef __LIBATA_H__
29#define __LIBATA_H__
30
31#define DRV_NAME "libata"
32#define DRV_VERSION "2.00" /* must be exactly four chars */
33
34struct ata_scsi_args {
35 struct ata_device *dev;
36 u16 *id;
37 struct scsi_cmnd *cmd;
38 void (*done)(struct scsi_cmnd *);
39};
40
41/* libata-core.c */
42extern struct workqueue_struct *ata_aux_wq;
43extern int atapi_enabled;
44extern int atapi_dmadir;
45extern int libata_fua;
46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_dev_disable(struct ata_device *dev);
49extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen);
53extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
54extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
55 int post_reset, u16 *id);
56extern int ata_dev_configure(struct ata_device *dev, int print_info);
57extern int sata_down_spd_limit(struct ata_port *ap);
58extern int sata_set_spd_needed(struct ata_port *ap);
59extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
60extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
61extern void ata_qc_free(struct ata_queued_cmd *qc);
62extern void ata_qc_issue(struct ata_queued_cmd *qc);
63extern void __ata_qc_complete(struct ata_queued_cmd *qc);
64extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
65extern void ata_dev_select(struct ata_port *ap, unsigned int device,
66 unsigned int wait, unsigned int can_sleep);
67extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
68extern int ata_flush_cache(struct ata_device *dev);
69extern void ata_dev_init(struct ata_device *dev);
70extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
71extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
72extern void ata_port_init(struct ata_port *ap, struct ata_host *host,
73 const struct ata_probe_ent *ent, unsigned int port_no);
74extern struct ata_probe_ent *ata_probe_ent_alloc(struct device *dev,
75 const struct ata_port_info *port);
76
77
78/* libata-scsi.c */
79extern struct scsi_transport_template ata_scsi_transport_template;
80
81extern void ata_scsi_scan_host(struct ata_port *ap);
82extern int ata_scsi_offline_dev(struct ata_device *dev);
83extern void ata_scsi_hotplug(void *data);
84extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
85 unsigned int buflen);
86
87extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
88 unsigned int buflen);
89
90extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
91 unsigned int buflen);
92extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
93 unsigned int buflen);
94extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
95 unsigned int buflen);
96extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
97 unsigned int buflen);
98extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
99 unsigned int buflen);
100extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
101 unsigned int buflen);
102extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
103 unsigned int buflen);
104extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
105 void (*done)(struct scsi_cmnd *),
106 u8 asc, u8 ascq);
107extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
108 u8 sk, u8 asc, u8 ascq);
109extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
110 unsigned int (*actor) (struct ata_scsi_args *args,
111 u8 *rbuf, unsigned int buflen));
112extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
113extern void ata_scsi_dev_rescan(void *data);
114extern int ata_bus_probe(struct ata_port *ap);
115
116/* libata-eh.c */
117extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
118extern void ata_scsi_error(struct Scsi_Host *host);
119extern void ata_port_wait_eh(struct ata_port *ap);
120extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
121
122#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
new file mode 100644
index 000000000000..912211ada816
--- /dev/null
+++ b/drivers/ata/pdc_adma.c
@@ -0,0 +1,740 @@
1/*
2 * pdc_adma.c - Pacific Digital Corporation ADMA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Mark Lord
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 *
27 * Supports ATA disks in single-packet ADMA mode.
28 * Uses PIO for everything else.
29 *
30 * TODO: Use ADMA transfers for ATAPI devices, when possible.
31 * This requires careful attention to a number of quirks of the chip.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/device.h>
44#include <scsi/scsi_host.h>
45#include <asm/io.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "pdc_adma"
49#define DRV_VERSION "0.04"
50
51/* macro to calculate base address for ATA regs */
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
53
54/* macro to calculate base address for ADMA regs */
55#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
56
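/* Editor's note: for this two-port chip the macros above resolve to
 * ATA registers at base+0x00 (port 0) and base+0x40 (port 1), and ADMA
 * registers at base+0x80 (port 0) and base+0xa0 (port 1).
 */
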
57enum {
58 ADMA_PORTS = 2,
59 ADMA_CPB_BYTES = 40,
60 ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
61 ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES,
62
63 ADMA_DMA_BOUNDARY = 0xffffffff,
64
65 /* global register offsets */
66 ADMA_MODE_LOCK = 0x00c7,
67
68 /* per-channel register offsets */
69 ADMA_CONTROL = 0x0000, /* ADMA control */
70 ADMA_STATUS = 0x0002, /* ADMA status */
71 ADMA_CPB_COUNT = 0x0004, /* CPB count */
72 ADMA_CPB_CURRENT = 0x000c, /* current CPB address */
73 ADMA_CPB_NEXT = 0x000c, /* next CPB address */
74 ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */
75 ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */
76 ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */
77
78 /* ADMA_CONTROL register bits */
79 aNIEN = (1 << 8), /* irq mask: 1==masked */
80 aGO = (1 << 7), /* packet trigger ("Go!") */
81 aRSTADM = (1 << 5), /* ADMA logic reset */
82 aPIOMD4 = 0x0003, /* PIO mode 4 */
83
84 /* ADMA_STATUS register bits */
85 aPSD = (1 << 6),
86 aUIRQ = (1 << 4),
87 aPERR = (1 << 0),
88
89 /* CPB bits */
90 cDONE = (1 << 0),
91 cVLD = (1 << 0),
92 cDAT = (1 << 2),
93 cIEN = (1 << 3),
94
95 /* PRD bits */
96 pORD = (1 << 4),
97 pDIRO = (1 << 5),
98 pEND = (1 << 7),
99
100 /* ATA register flags */
101 rIGN = (1 << 5),
102 rEND = (1 << 7),
103
104 /* ATA register addresses */
105 ADMA_REGS_CONTROL = 0x0e,
106 ADMA_REGS_SECTOR_COUNT = 0x12,
107 ADMA_REGS_LBA_LOW = 0x13,
108 ADMA_REGS_LBA_MID = 0x14,
109 ADMA_REGS_LBA_HIGH = 0x15,
110 ADMA_REGS_DEVICE = 0x16,
111 ADMA_REGS_COMMAND = 0x17,
112
113 /* PCI device IDs */
114 board_1841_idx = 0, /* ADMA 2-port controller */
115};
116
117typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
118
119struct adma_port_priv {
120 u8 *pkt;
121 dma_addr_t pkt_dma;
122 adma_state_t state;
123};
124
125static int adma_ata_init_one (struct pci_dev *pdev,
126 const struct pci_device_id *ent);
127static irqreturn_t adma_intr (int irq, void *dev_instance,
128 struct pt_regs *regs);
129static int adma_port_start(struct ata_port *ap);
130static void adma_host_stop(struct ata_host *host);
131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap);
138static void adma_irq_clear(struct ata_port *ap);
139static void adma_eng_timeout(struct ata_port *ap);
140
141static struct scsi_host_template adma_ata_sht = {
142 .module = THIS_MODULE,
143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd,
146 .can_queue = ATA_DEF_QUEUE,
147 .this_id = ATA_SHT_THIS_ID,
148 .sg_tablesize = LIBATA_MAX_PRD,
149 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
150 .emulated = ATA_SHT_EMULATED,
151 .use_clustering = ENABLE_CLUSTERING,
152 .proc_name = DRV_NAME,
153 .dma_boundary = ADMA_DMA_BOUNDARY,
154 .slave_configure = ata_scsi_slave_config,
155 .slave_destroy = ata_scsi_slave_destroy,
156 .bios_param = ata_std_bios_param,
157};
158
159static const struct ata_port_operations adma_ata_ops = {
160 .port_disable = ata_port_disable,
161 .tf_load = ata_tf_load,
162 .tf_read = ata_tf_read,
163 .check_status = ata_check_status,
164 .check_atapi_dma = adma_check_atapi_dma,
165 .exec_command = ata_exec_command,
166 .dev_select = ata_std_dev_select,
167 .phy_reset = adma_phy_reset,
168 .qc_prep = adma_qc_prep,
169 .qc_issue = adma_qc_issue,
170 .eng_timeout = adma_eng_timeout,
171 .data_xfer = ata_mmio_data_xfer,
172 .irq_handler = adma_intr,
173 .irq_clear = adma_irq_clear,
174 .port_start = adma_port_start,
175 .port_stop = adma_port_stop,
176 .host_stop = adma_host_stop,
177 .bmdma_stop = adma_bmdma_stop,
178 .bmdma_status = adma_bmdma_status,
179};
180
181static struct ata_port_info adma_port_info[] = {
182 /* board_1841_idx */
183 {
184 .sht = &adma_ata_sht,
185 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
187 ATA_FLAG_PIO_POLLING,
188 .pio_mask = 0x10, /* pio4 */
189 .udma_mask = 0x1f, /* udma0-4 */
190 .port_ops = &adma_ata_ops,
191 },
192};
193
194static const struct pci_device_id adma_ata_pci_tbl[] = {
195 { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
196 board_1841_idx },
197
198 { } /* terminate list */
199};
200
201static struct pci_driver adma_ata_pci_driver = {
202 .name = DRV_NAME,
203 .id_table = adma_ata_pci_tbl,
204 .probe = adma_ata_init_one,
205 .remove = ata_pci_remove_one,
206};
207
208static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
209{
210 return 1; /* ATAPI DMA not yet supported */
211}
212
213static void adma_bmdma_stop(struct ata_queued_cmd *qc)
214{
215 /* nothing */
216}
217
218static u8 adma_bmdma_status(struct ata_port *ap)
219{
220 return 0;
221}
222
223static void adma_irq_clear(struct ata_port *ap)
224{
225 /* nothing */
226}
227
228static void adma_reset_engine(void __iomem *chan)
229{
230 /* reset ADMA to idle state */
231 writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
232 udelay(2);
233 writew(aPIOMD4, chan + ADMA_CONTROL);
234 udelay(2);
235}
236
237static void adma_reinit_engine(struct ata_port *ap)
238{
239 struct adma_port_priv *pp = ap->private_data;
240 void __iomem *mmio_base = ap->host->mmio_base;
241 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
242
243 /* mask/clear ATA interrupts */
244 writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
245 ata_check_status(ap);
246
247 /* reset the ADMA engine */
248 adma_reset_engine(chan);
249
250 /* set in-FIFO threshold to 0x100 */
251 writew(0x100, chan + ADMA_FIFO_IN);
252
253 /* set CPB pointer */
254 writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
255
256 /* set out-FIFO threshold to 0x100 */
257 writew(0x100, chan + ADMA_FIFO_OUT);
258
259 /* set CPB count */
260 writew(1, chan + ADMA_CPB_COUNT);
261
262 /* read/discard ADMA status */
263 readb(chan + ADMA_STATUS);
264}
265
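/*
 * Writing aPIOMD4 alone clears aGO (and aNIEN/aRSTADM), dropping the
 * channel out of packet mode so the plain ATA taskfile registers are
 * accessible again; the readb() below flushes the posted write.
 */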
266static inline void adma_enter_reg_mode(struct ata_port *ap)
267{
268 void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
269
270 writew(aPIOMD4, chan + ADMA_CONTROL);
271 readb(chan + ADMA_STATUS); /* flush */
272}
273
274static void adma_phy_reset(struct ata_port *ap)
275{
276 struct adma_port_priv *pp = ap->private_data;
277
278 pp->state = adma_state_idle;
279 adma_reinit_engine(ap);
280 ata_port_probe(ap);
281 ata_bus_reset(ap);
282}
283
284static void adma_eng_timeout(struct ata_port *ap)
285{
286 struct adma_port_priv *pp = ap->private_data;
287
288 if (pp->state != adma_state_idle) /* healthy paranoia */
289 pp->state = adma_state_mmio;
290 adma_reinit_engine(ap);
291 ata_eng_timeout(ap);
292}
293
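/*
 * adma_fill_sg() appends one 16-byte PRD per scatterlist chunk,
 * resuming at byte (2 + cLEN) * 8, just past the register block laid
 * down by adma_qc_prep(): a 32-bit bus address, a 32-bit length in
 * 8-byte units, a flags byte (pEND marks the final entry), the
 * device's DMA mode, a zeroed pPKLW byte, a reserved byte, and a
 * 32-bit pointer chaining to the next PRD (zero on the last one).
 */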
294static int adma_fill_sg(struct ata_queued_cmd *qc)
295{
296 struct scatterlist *sg;
297 struct ata_port *ap = qc->ap;
298 struct adma_port_priv *pp = ap->private_data;
299 u8 *buf = pp->pkt;
300 int i = (2 + buf[3]) * 8;
301 u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
302
303 ata_for_each_sg(sg, qc) {
304 u32 addr;
305 u32 len;
306
307 addr = (u32)sg_dma_address(sg);
308 *(__le32 *)(buf + i) = cpu_to_le32(addr);
309 i += 4;
310
311 len = sg_dma_len(sg) >> 3;
312 *(__le32 *)(buf + i) = cpu_to_le32(len);
313 i += 4;
314
315 if (ata_sg_is_last(sg, qc))
316 pFLAGS |= pEND;
317 buf[i++] = pFLAGS;
318 buf[i++] = qc->dev->dma_mode & 0xf;
319 buf[i++] = 0; /* pPKLW */
320 buf[i++] = 0; /* reserved */
321
322 *(__le32 *)(buf + i)
323 = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
324 i += 4;
325
326 VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
327 (unsigned long)addr, len);
328 }
329 return i;
330}
331
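/*
 * CPB layout built below: byte 0 response flags (the engine writes
 * cDONE back here), byte 1 reserved, byte 2 control flags
 * (cVLD|cDAT|cIEN), byte 3 cLEN, the size of the ATA register block in
 * 8-byte units excluding the two-unit header.  Bytes 4-7 hold cNCPB
 * (next-CPB bus address), bytes 8-11 cPRD (bus address of the PRD
 * table), and bytes 12-15 are reserved.  Then come (value, register)
 * byte pairs, the last pair being the command with rEND set.
 */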
332static void adma_qc_prep(struct ata_queued_cmd *qc)
333{
334 struct adma_port_priv *pp = qc->ap->private_data;
335 u8 *buf = pp->pkt;
336 u32 pkt_dma = (u32)pp->pkt_dma;
337 int i = 0;
338
339 VPRINTK("ENTER\n");
340
341 adma_enter_reg_mode(qc->ap);
342 if (qc->tf.protocol != ATA_PROT_DMA) {
343 ata_qc_prep(qc);
344 return;
345 }
346
347 buf[i++] = 0; /* Response flags */
348 buf[i++] = 0; /* reserved */
349 buf[i++] = cVLD | cDAT | cIEN;
350 i++; /* cLEN, gets filled in below */
351
352 *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */
353 i += 4; /* cNCPB */
354 i += 4; /* cPRD, gets filled in below */
355
356 buf[i++] = 0; /* reserved */
357 buf[i++] = 0; /* reserved */
358 buf[i++] = 0; /* reserved */
359 buf[i++] = 0; /* reserved */
360
361 /* ATA registers; must be a multiple of 4 */
362 buf[i++] = qc->tf.device;
363 buf[i++] = ADMA_REGS_DEVICE;
364 if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
365 buf[i++] = qc->tf.hob_nsect;
366 buf[i++] = ADMA_REGS_SECTOR_COUNT;
367 buf[i++] = qc->tf.hob_lbal;
368 buf[i++] = ADMA_REGS_LBA_LOW;
369 buf[i++] = qc->tf.hob_lbam;
370 buf[i++] = ADMA_REGS_LBA_MID;
371 buf[i++] = qc->tf.hob_lbah;
372 buf[i++] = ADMA_REGS_LBA_HIGH;
373 }
374 buf[i++] = qc->tf.nsect;
375 buf[i++] = ADMA_REGS_SECTOR_COUNT;
376 buf[i++] = qc->tf.lbal;
377 buf[i++] = ADMA_REGS_LBA_LOW;
378 buf[i++] = qc->tf.lbam;
379 buf[i++] = ADMA_REGS_LBA_MID;
380 buf[i++] = qc->tf.lbah;
381 buf[i++] = ADMA_REGS_LBA_HIGH;
382 buf[i++] = 0;
383 buf[i++] = ADMA_REGS_CONTROL;
384 buf[i++] = rIGN;
385 buf[i++] = 0;
386 buf[i++] = qc->tf.command;
387 buf[i++] = ADMA_REGS_COMMAND | rEND;
388
389 buf[3] = (i >> 3) - 2; /* cLEN */
390 *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */
391
392 i = adma_fill_sg(qc);
393 wmb(); /* flush PRDs and pkt to memory */
394#if 0
395 /* dump out CPB + PRDs for debug */
396 {
397 int j, len = 0;
398 static char obuf[2048];
399 for (j = 0; j < i; ++j) {
400 len += sprintf(obuf+len, "%02x ", buf[j]);
401 if ((j & 7) == 7) {
402 printk("%s\n", obuf);
403 len = 0;
404 }
405 }
406 if (len)
407 printk("%s\n", obuf);
408 }
409#endif
410}
411
412static inline void adma_packet_start(struct ata_queued_cmd *qc)
413{
414 struct ata_port *ap = qc->ap;
415 void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
416
417 VPRINTK("ENTER, ap %p\n", ap);
418
419 /* fire up the ADMA engine */
420 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
421}
422
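/*
 * Only ATA_PROT_DMA runs through the ADMA engine.  ATAPI DMA is
 * refused up front by adma_check_atapi_dma(), so hitting it here would
 * be a driver bug (hence the BUG()); every other protocol drops back
 * to register mode and the stock libata issue path.
 */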
423static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
424{
425 struct adma_port_priv *pp = qc->ap->private_data;
426
427 switch (qc->tf.protocol) {
428 case ATA_PROT_DMA:
429 pp->state = adma_state_pkt;
430 adma_packet_start(qc);
431 return 0;
432
433 case ATA_PROT_ATAPI_DMA:
434 BUG();
435 break;
436
437 default:
438 break;
439 }
440
441 pp->state = adma_state_mmio;
442 return ata_qc_issue_prot(qc);
443}
444
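/*
 * Interrupt handling is split by port state: adma_intr_pkt() services
 * packet-mode ports via their ADMA_STATUS registers, adma_intr_mmio()
 * services register-mode ports via the plain ATA status register, and
 * adma_intr() ORs the two results under the host lock.
 */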
445static inline unsigned int adma_intr_pkt(struct ata_host *host)
446{
447 unsigned int handled = 0, port_no;
448 u8 __iomem *mmio_base = host->mmio_base;
449
450 for (port_no = 0; port_no < host->n_ports; ++port_no) {
451 struct ata_port *ap = host->ports[port_no];
452 struct adma_port_priv *pp;
453 struct ata_queued_cmd *qc;
454 void __iomem *chan = ADMA_REGS(mmio_base, port_no);
455 u8 status = readb(chan + ADMA_STATUS);
456
457 if (status == 0)
458 continue;
459 handled = 1;
460 adma_enter_reg_mode(ap);
461 if (ap->flags & ATA_FLAG_DISABLED)
462 continue;
463 pp = ap->private_data;
464 if (!pp || pp->state != adma_state_pkt)
465 continue;
466 qc = ata_qc_from_tag(ap, ap->active_tag);
467 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
468 if ((status & (aPERR | aPSD | aUIRQ)))
469 qc->err_mask |= AC_ERR_OTHER;
470 else if (pp->pkt[0] != cDONE)
471 qc->err_mask |= AC_ERR_OTHER;
472
473 ata_qc_complete(qc);
474 }
475 }
476 return handled;
477}
478
479static inline unsigned int adma_intr_mmio(struct ata_host *host)
480{
481 unsigned int handled = 0, port_no;
482
483 for (port_no = 0; port_no < host->n_ports; ++port_no) {
484 struct ata_port *ap;
485 ap = host->ports[port_no];
486 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
487 struct ata_queued_cmd *qc;
488 struct adma_port_priv *pp = ap->private_data;
489 if (!pp || pp->state != adma_state_mmio)
490 continue;
491 qc = ata_qc_from_tag(ap, ap->active_tag);
492 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
493
494 /* check main status, clearing INTRQ */
495 u8 status = ata_check_status(ap);
496 if ((status & ATA_BUSY))
497 continue;
498 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
499 ap->id, qc->tf.protocol, status);
500
501 /* complete taskfile transaction */
502 pp->state = adma_state_idle;
503 qc->err_mask |= ac_err_mask(status);
504 ata_qc_complete(qc);
505 handled = 1;
506 }
507 }
508 }
509 return handled;
510}
511
512static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
513{
514 struct ata_host *host = dev_instance;
515 unsigned int handled = 0;
516
517 VPRINTK("ENTER\n");
518
519 spin_lock(&host->lock);
520 handled = adma_intr_pkt(host) | adma_intr_mmio(host);
521 spin_unlock(&host->lock);
522
523 VPRINTK("EXIT\n");
524
525 return IRQ_RETVAL(handled);
526}
527
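/*
 * Unlike legacy byte-adjacent taskfile ports, this chip spaces the ATA
 * shadow registers four bytes apart within each port's MMIO window:
 * data at +0x00 up through command/status at +0x1c, with
 * alt-status/device control off on its own at +0x38.
 */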
528static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
529{
530 port->cmd_addr =
531 port->data_addr = base + 0x000;
532 port->error_addr =
533 port->feature_addr = base + 0x004;
534 port->nsect_addr = base + 0x008;
535 port->lbal_addr = base + 0x00c;
536 port->lbam_addr = base + 0x010;
537 port->lbah_addr = base + 0x014;
538 port->device_addr = base + 0x018;
539 port->status_addr =
540 port->command_addr = base + 0x01c;
541 port->altstatus_addr =
542 port->ctl_addr = base + 0x038;
543}
544
545static int adma_port_start(struct ata_port *ap)
546{
547 struct device *dev = ap->host->dev;
548 struct adma_port_priv *pp;
549 int rc;
550
551 rc = ata_port_start(ap);
552 if (rc)
553 return rc;
554 adma_enter_reg_mode(ap);
555 rc = -ENOMEM;
556 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL);
557 if (!pp)
558 goto err_out;
559 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
560 GFP_KERNEL);
561 if (!pp->pkt)
562 goto err_out_kfree;
563 /* paranoia? */
564 if ((pp->pkt_dma & 7) != 0) {
565 printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
566 (u32)pp->pkt_dma);
567 dma_free_coherent(dev, ADMA_PKT_BYTES,
568 pp->pkt, pp->pkt_dma);
569 goto err_out_kfree;
570 }
571 memset(pp->pkt, 0, ADMA_PKT_BYTES);
572 ap->private_data = pp;
573 adma_reinit_engine(ap);
574 return 0;
575
576err_out_kfree:
577 kfree(pp);
578err_out:
579 ata_port_stop(ap);
580 return rc;
581}
582
583static void adma_port_stop(struct ata_port *ap)
584{
585 struct device *dev = ap->host->dev;
586 struct adma_port_priv *pp = ap->private_data;
587
588 adma_reset_engine(ADMA_REGS(ap->host->mmio_base, ap->port_no));
589 if (pp != NULL) {
590 ap->private_data = NULL;
591 if (pp->pkt != NULL)
592 dma_free_coherent(dev, ADMA_PKT_BYTES,
593 pp->pkt, pp->pkt_dma);
594 kfree(pp);
595 }
596 ata_port_stop(ap);
597}
598
599static void adma_host_stop(struct ata_host *host)
600{
601 unsigned int port_no;
602
603 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
604 adma_reset_engine(ADMA_REGS(host->mmio_base, port_no));
605
606 ata_pci_host_stop(host);
607}
608
609static void adma_host_init(unsigned int chip_id,
610 struct ata_probe_ent *probe_ent)
611{
612 unsigned int port_no;
613 void __iomem *mmio_base = probe_ent->mmio_base;
614
615 /* enable/lock aGO operation */
616 writeb(7, mmio_base + ADMA_MODE_LOCK);
617
618 /* reset the ADMA logic */
619 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
620 adma_reset_engine(ADMA_REGS(mmio_base, port_no));
621}
622
623static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
624{
625 int rc;
626
627 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
628 if (rc) {
629 dev_printk(KERN_ERR, &pdev->dev,
630 "32-bit DMA enable failed\n");
631 return rc;
632 }
633 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
634 if (rc) {
635 dev_printk(KERN_ERR, &pdev->dev,
636 "32-bit consistent DMA enable failed\n");
637 return rc;
638 }
639 return 0;
640}
641
642static int adma_ata_init_one(struct pci_dev *pdev,
643 const struct pci_device_id *ent)
644{
645 static int printed_version;
646 struct ata_probe_ent *probe_ent = NULL;
647 void __iomem *mmio_base;
648 unsigned int board_idx = (unsigned int) ent->driver_data;
649 int rc, port_no;
650
651 if (!printed_version++)
652 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
653
654 rc = pci_enable_device(pdev);
655 if (rc)
656 return rc;
657
658 rc = pci_request_regions(pdev, DRV_NAME);
659 if (rc)
660 goto err_out;
661
662 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
663 rc = -ENODEV;
664 goto err_out_regions;
665 }
666
667 mmio_base = pci_iomap(pdev, 4, 0);
668 if (mmio_base == NULL) {
669 rc = -ENOMEM;
670 goto err_out_regions;
671 }
672
673 rc = adma_set_dma_masks(pdev, mmio_base);
674 if (rc)
675 goto err_out_iounmap;
676
677 probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL);
678 if (probe_ent == NULL) {
679 rc = -ENOMEM;
680 goto err_out_iounmap;
681 }
682
683 probe_ent->dev = pci_dev_to_dev(pdev);
684 INIT_LIST_HEAD(&probe_ent->node);
685
686 probe_ent->sht = adma_port_info[board_idx].sht;
687 probe_ent->port_flags = adma_port_info[board_idx].flags;
688 probe_ent->pio_mask = adma_port_info[board_idx].pio_mask;
689 probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask;
690 probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;
691 probe_ent->port_ops = adma_port_info[board_idx].port_ops;
692
693 probe_ent->irq = pdev->irq;
694 probe_ent->irq_flags = IRQF_SHARED;
695 probe_ent->mmio_base = mmio_base;
696 probe_ent->n_ports = ADMA_PORTS;
697
698 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
699 adma_ata_setup_port(&probe_ent->port[port_no],
700 ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
701 }
702
703 pci_set_master(pdev);
704
705 /* initialize adapter */
706 adma_host_init(board_idx, probe_ent);
707
708 rc = ata_device_add(probe_ent);
709 kfree(probe_ent);
710 if (rc != ADMA_PORTS)
711 goto err_out_iounmap;
712 return 0;
713
714err_out_iounmap:
715 pci_iounmap(pdev, mmio_base);
716err_out_regions:
717 pci_release_regions(pdev);
718err_out:
719 pci_disable_device(pdev);
720 return rc;
721}
722
723static int __init adma_ata_init(void)
724{
725 return pci_register_driver(&adma_ata_pci_driver);
726}
727
728static void __exit adma_ata_exit(void)
729{
730 pci_unregister_driver(&adma_ata_pci_driver);
731}
732
733MODULE_AUTHOR("Mark Lord");
734MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
735MODULE_LICENSE("GPL");
736MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
737MODULE_VERSION(DRV_VERSION);
738
739module_init(adma_ata_init);
740module_exit(adma_ata_exit);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
new file mode 100644
index 000000000000..34f1939b44c9
--- /dev/null
+++ b/drivers/ata/sata_mv.c
@@ -0,0 +1,2466 @@
1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/blkdev.h>
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/sched.h>
32#include <linux/dma-mapping.h>
33#include <linux/device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_cmnd.h>
36#include <linux/libata.h>
37#include <asm/io.h>
38
39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.7"
41
42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */
44 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
45 MV_IO_BAR = 2, /* offset 0x18: IO space */
46 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
47
48 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
49 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
50
51 MV_PCI_REG_BASE = 0,
52 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
53 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
54 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
55 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
56 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
57 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
58
59 MV_SATAHC0_REG_BASE = 0x20000,
60 MV_FLASH_CTL = 0x1046c,
61 MV_GPIO_PORT_CTL = 0x104f0,
62 MV_RESET_CFG = 0x180d8,
63
64 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
65 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
66 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
67 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
68
69 MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
70
71 MV_MAX_Q_DEPTH = 32,
72 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
73
74 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
75 * CRPB needs alignment on a 256B boundary. Size == 256B
76 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
77 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
78 */
79 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
80 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
81 MV_MAX_SG_CT = 176,
82 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
83 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
84
85 MV_PORTS_PER_HC = 4,
86 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
87 MV_PORT_HC_SHIFT = 2,
88 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
89 MV_PORT_MASK = 3,
90
91 /* Host Flags */
92 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
96 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
98
99 CRQB_FLAG_READ = (1 << 0),
100 CRQB_TAG_SHIFT = 1,
101 CRQB_CMD_ADDR_SHIFT = 8,
102 CRQB_CMD_CS = (0x2 << 11),
103 CRQB_CMD_LAST = (1 << 15),
104
105 CRPB_FLAG_STATUS_SHIFT = 8,
106
107 EPRD_FLAG_END_OF_TBL = (1 << 31),
108
109 /* PCI interface registers */
110
111 PCI_COMMAND_OFS = 0xc00,
112
113 PCI_MAIN_CMD_STS_OFS = 0xd30,
114 STOP_PCI_MASTER = (1 << 2),
115 PCI_MASTER_EMPTY = (1 << 3),
116 GLOB_SFT_RST = (1 << 4),
117
118 MV_PCI_MODE = 0xd00,
119 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
120 MV_PCI_DISC_TIMER = 0xd04,
121 MV_PCI_MSI_TRIGGER = 0xc38,
122 MV_PCI_SERR_MASK = 0xc28,
123 MV_PCI_XBAR_TMOUT = 0x1d04,
124 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
125 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
126 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
127 MV_PCI_ERR_COMMAND = 0x1d50,
128
129 PCI_IRQ_CAUSE_OFS = 0x1d58,
130 PCI_IRQ_MASK_OFS = 0x1d5c,
131 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
132
133 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
134 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
135 PORT0_ERR = (1 << 0), /* shift by port # */
136 PORT0_DONE = (1 << 1), /* shift by port # */
137 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
138 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
139 PCI_ERR = (1 << 18),
140 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
141 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
142 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
143 GPIO_INT = (1 << 22),
144 SELF_INT = (1 << 23),
145 TWSI_INT = (1 << 24),
146 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
147 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
148 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
149 HC_MAIN_RSVD),
150
151 /* SATAHC registers */
152 HC_CFG_OFS = 0,
153
154 HC_IRQ_CAUSE_OFS = 0x14,
155 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
156 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
157 DEV_IRQ = (1 << 8), /* shift by port # */
158
159 /* Shadow block registers */
160 SHD_BLK_OFS = 0x100,
161 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
162
163 /* SATA registers */
164 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
165 SATA_ACTIVE_OFS = 0x350,
166 PHY_MODE3 = 0x310,
167 PHY_MODE4 = 0x314,
168 PHY_MODE2 = 0x330,
169 MV5_PHY_MODE = 0x74,
170 MV5_LT_MODE = 0x30,
171 MV5_PHY_CTL = 0x0C,
172 SATA_INTERFACE_CTL = 0x050,
173
174 MV_M2_PREAMP_MASK = 0x7e0,
175
176 /* Port registers */
177 EDMA_CFG_OFS = 0,
178 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
179 EDMA_CFG_NCQ = (1 << 5),
180 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
181 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
182 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
183
184 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
185 EDMA_ERR_IRQ_MASK_OFS = 0xc,
186 EDMA_ERR_D_PAR = (1 << 0),
187 EDMA_ERR_PRD_PAR = (1 << 1),
188 EDMA_ERR_DEV = (1 << 2),
189 EDMA_ERR_DEV_DCON = (1 << 3),
190 EDMA_ERR_DEV_CON = (1 << 4),
191 EDMA_ERR_SERR = (1 << 5),
192 EDMA_ERR_SELF_DIS = (1 << 7),
193 EDMA_ERR_BIST_ASYNC = (1 << 8),
194 EDMA_ERR_CRBQ_PAR = (1 << 9),
195 EDMA_ERR_CRPB_PAR = (1 << 10),
196 EDMA_ERR_INTRL_PAR = (1 << 11),
197 EDMA_ERR_IORDY = (1 << 12),
198 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
199 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
200 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
201 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
202 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
203 EDMA_ERR_TRANS_PROTO = (1 << 31),
204 EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
205 EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
206 EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
207 EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
208 EDMA_ERR_LNK_DATA_RX |
209 EDMA_ERR_LNK_DATA_TX |
210 EDMA_ERR_TRANS_PROTO),
211
212 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
213 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
214
215 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
216 EDMA_REQ_Q_PTR_SHIFT = 5,
217
218 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
219 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
220 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
221 EDMA_RSP_Q_PTR_SHIFT = 3,
222
223 EDMA_CMD_OFS = 0x28,
224 EDMA_EN = (1 << 0),
225 EDMA_DS = (1 << 1),
226 ATA_RST = (1 << 2),
227
228 EDMA_IORDY_TMOUT = 0x34,
229 EDMA_ARB_CFG = 0x38,
230
231 /* Host private flags (hp_flags) */
232 MV_HP_FLAG_MSI = (1 << 0),
233 MV_HP_ERRATA_50XXB0 = (1 << 1),
234 MV_HP_ERRATA_50XXB2 = (1 << 2),
235 MV_HP_ERRATA_60X1B2 = (1 << 3),
236 MV_HP_ERRATA_60X1C0 = (1 << 4),
237 MV_HP_ERRATA_XX42A0 = (1 << 5),
238 MV_HP_50XX = (1 << 6),
239 MV_HP_GEN_IIE = (1 << 7),
240
241 /* Port private flags (pp_flags) */
242 MV_PP_FLAG_EDMA_EN = (1 << 0),
243 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
244};
245
246#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
247#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
248#define IS_GEN_I(hpriv) IS_50XX(hpriv)
249#define IS_GEN_II(hpriv) IS_60XX(hpriv)
250#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
251
252enum {
253 /* Our DMA boundary is determined by an ePRD being unable to handle
254 * anything larger than 64KB
255 */
256 MV_DMA_BOUNDARY = 0xffffU,
257
258 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
259
260 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
261};
262
263enum chip_type {
264 chip_504x,
265 chip_508x,
266 chip_5080,
267 chip_604x,
268 chip_608x,
269 chip_6042,
270 chip_7042,
271};
272
273/* Command ReQuest Block: 32B */
274struct mv_crqb {
275 __le32 sg_addr;
276 __le32 sg_addr_hi;
277 __le16 ctrl_flags;
278 __le16 ata_cmd[11];
279};
280
281struct mv_crqb_iie {
282 __le32 addr;
283 __le32 addr_hi;
284 __le32 flags;
285 __le32 len;
286 __le32 ata_cmd[4];
287};
288
289/* Command ResPonse Block: 8B */
290struct mv_crpb {
291 __le16 id;
292 __le16 flags;
293 __le32 tmstmp;
294};
295
296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
297struct mv_sg {
298 __le32 addr;
299 __le32 flags_size;
300 __le32 addr_hi;
301 __le32 reserved;
302};
303
304struct mv_port_priv {
305 struct mv_crqb *crqb;
306 dma_addr_t crqb_dma;
307 struct mv_crpb *crpb;
308 dma_addr_t crpb_dma;
309 struct mv_sg *sg_tbl;
310 dma_addr_t sg_tbl_dma;
311 u32 pp_flags;
312};
313
314struct mv_port_signal {
315 u32 amps;
316 u32 pre;
317};
318
319struct mv_host_priv;
320struct mv_hw_ops {
321 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
322 unsigned int port);
323 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
324 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
325 void __iomem *mmio);
326 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
327 unsigned int n_hc);
328 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
329 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
330};
331
332struct mv_host_priv {
333 u32 hp_flags;
334 struct mv_port_signal signal[8];
335 const struct mv_hw_ops *ops;
336};
337
338static void mv_irq_clear(struct ata_port *ap);
339static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
340static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
341static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
342static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
343static void mv_phy_reset(struct ata_port *ap);
344static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
345static void mv_host_stop(struct ata_host *host);
346static int mv_port_start(struct ata_port *ap);
347static void mv_port_stop(struct ata_port *ap);
348static void mv_qc_prep(struct ata_queued_cmd *qc);
349static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
350static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
351static irqreturn_t mv_interrupt(int irq, void *dev_instance,
352 struct pt_regs *regs);
353static void mv_eng_timeout(struct ata_port *ap);
354static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
355
356static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
357 unsigned int port);
358static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
359static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
360 void __iomem *mmio);
361static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
362 unsigned int n_hc);
363static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
364static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
365
366static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
367 unsigned int port);
368static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
369static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
370 void __iomem *mmio);
371static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
372 unsigned int n_hc);
373static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
374static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
375static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
376 unsigned int port_no);
377static void mv_stop_and_reset(struct ata_port *ap);
378
379static struct scsi_host_template mv_sht = {
380 .module = THIS_MODULE,
381 .name = DRV_NAME,
382 .ioctl = ata_scsi_ioctl,
383 .queuecommand = ata_scsi_queuecmd,
384 .can_queue = MV_USE_Q_DEPTH,
385 .this_id = ATA_SHT_THIS_ID,
386 .sg_tablesize = MV_MAX_SG_CT / 2,
387 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
388 .emulated = ATA_SHT_EMULATED,
389 .use_clustering = ATA_SHT_USE_CLUSTERING,
390 .proc_name = DRV_NAME,
391 .dma_boundary = MV_DMA_BOUNDARY,
392 .slave_configure = ata_scsi_slave_config,
393 .slave_destroy = ata_scsi_slave_destroy,
394 .bios_param = ata_std_bios_param,
395};
396
397static const struct ata_port_operations mv5_ops = {
398 .port_disable = ata_port_disable,
399
400 .tf_load = ata_tf_load,
401 .tf_read = ata_tf_read,
402 .check_status = ata_check_status,
403 .exec_command = ata_exec_command,
404 .dev_select = ata_std_dev_select,
405
406 .phy_reset = mv_phy_reset,
407
408 .qc_prep = mv_qc_prep,
409 .qc_issue = mv_qc_issue,
410 .data_xfer = ata_mmio_data_xfer,
411
412 .eng_timeout = mv_eng_timeout,
413
414 .irq_handler = mv_interrupt,
415 .irq_clear = mv_irq_clear,
416
417 .scr_read = mv5_scr_read,
418 .scr_write = mv5_scr_write,
419
420 .port_start = mv_port_start,
421 .port_stop = mv_port_stop,
422 .host_stop = mv_host_stop,
423};
424
425static const struct ata_port_operations mv6_ops = {
426 .port_disable = ata_port_disable,
427
428 .tf_load = ata_tf_load,
429 .tf_read = ata_tf_read,
430 .check_status = ata_check_status,
431 .exec_command = ata_exec_command,
432 .dev_select = ata_std_dev_select,
433
434 .phy_reset = mv_phy_reset,
435
436 .qc_prep = mv_qc_prep,
437 .qc_issue = mv_qc_issue,
438 .data_xfer = ata_mmio_data_xfer,
439
440 .eng_timeout = mv_eng_timeout,
441
442 .irq_handler = mv_interrupt,
443 .irq_clear = mv_irq_clear,
444
445 .scr_read = mv_scr_read,
446 .scr_write = mv_scr_write,
447
448 .port_start = mv_port_start,
449 .port_stop = mv_port_stop,
450 .host_stop = mv_host_stop,
451};
452
453static const struct ata_port_operations mv_iie_ops = {
454 .port_disable = ata_port_disable,
455
456 .tf_load = ata_tf_load,
457 .tf_read = ata_tf_read,
458 .check_status = ata_check_status,
459 .exec_command = ata_exec_command,
460 .dev_select = ata_std_dev_select,
461
462 .phy_reset = mv_phy_reset,
463
464 .qc_prep = mv_qc_prep_iie,
465 .qc_issue = mv_qc_issue,
466
467 .eng_timeout = mv_eng_timeout,
468
469 .irq_handler = mv_interrupt,
470 .irq_clear = mv_irq_clear,
471
472 .scr_read = mv_scr_read,
473 .scr_write = mv_scr_write,
474
475 .port_start = mv_port_start,
476 .port_stop = mv_port_stop,
477 .host_stop = mv_host_stop,
478};
479
480static const struct ata_port_info mv_port_info[] = {
481 { /* chip_504x */
482 .sht = &mv_sht,
483 .flags = MV_COMMON_FLAGS,
484 .pio_mask = 0x1f, /* pio0-4 */
485 .udma_mask = 0x7f, /* udma0-6 */
486 .port_ops = &mv5_ops,
487 },
488 { /* chip_508x */
489 .sht = &mv_sht,
490 .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
491 .pio_mask = 0x1f, /* pio0-4 */
492 .udma_mask = 0x7f, /* udma0-6 */
493 .port_ops = &mv5_ops,
494 },
495 { /* chip_5080 */
496 .sht = &mv_sht,
497 .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
498 .pio_mask = 0x1f, /* pio0-4 */
499 .udma_mask = 0x7f, /* udma0-6 */
500 .port_ops = &mv5_ops,
501 },
502 { /* chip_604x */
503 .sht = &mv_sht,
504 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
505 .pio_mask = 0x1f, /* pio0-4 */
506 .udma_mask = 0x7f, /* udma0-6 */
507 .port_ops = &mv6_ops,
508 },
509 { /* chip_608x */
510 .sht = &mv_sht,
511 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
512 MV_FLAG_DUAL_HC),
513 .pio_mask = 0x1f, /* pio0-4 */
514 .udma_mask = 0x7f, /* udma0-6 */
515 .port_ops = &mv6_ops,
516 },
517 { /* chip_6042 */
518 .sht = &mv_sht,
519 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
520 .pio_mask = 0x1f, /* pio0-4 */
521 .udma_mask = 0x7f, /* udma0-6 */
522 .port_ops = &mv_iie_ops,
523 },
524 { /* chip_7042 */
525 .sht = &mv_sht,
526 .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
527 MV_FLAG_DUAL_HC),
528 .pio_mask = 0x1f, /* pio0-4 */
529 .udma_mask = 0x7f, /* udma0-6 */
530 .port_ops = &mv_iie_ops,
531 },
532};
533
534static const struct pci_device_id mv_pci_tbl[] = {
535 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},
539
540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
541 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
542 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
543 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
544 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
545
546 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
547 {} /* terminate list */
548};
549
550static struct pci_driver mv_pci_driver = {
551 .name = DRV_NAME,
552 .id_table = mv_pci_tbl,
553 .probe = mv_init_one,
554 .remove = ata_pci_remove_one,
555};
556
557static const struct mv_hw_ops mv5xxx_ops = {
558 .phy_errata = mv5_phy_errata,
559 .enable_leds = mv5_enable_leds,
560 .read_preamp = mv5_read_preamp,
561 .reset_hc = mv5_reset_hc,
562 .reset_flash = mv5_reset_flash,
563 .reset_bus = mv5_reset_bus,
564};
565
566static const struct mv_hw_ops mv6xxx_ops = {
567 .phy_errata = mv6_phy_errata,
568 .enable_leds = mv6_enable_leds,
569 .read_preamp = mv6_read_preamp,
570 .reset_hc = mv6_reset_hc,
571 .reset_flash = mv6_reset_flash,
572 .reset_bus = mv_reset_pci_bus,
573};
574
575/*
576 * module options
577 */
578static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
579
580
581/*
582 * Functions
583 */
584
585static inline void writelfl(unsigned long data, void __iomem *addr)
586{
587 writel(data, addr);
588 (void) readl(addr); /* flush to avoid PCI posted write */
589}
590
591static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
592{
593 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
594}
595
596static inline unsigned int mv_hc_from_port(unsigned int port)
597{
598 return port >> MV_PORT_HC_SHIFT;
599}
600
601static inline unsigned int mv_hardport_from_port(unsigned int port)
602{
603 return port & MV_PORT_MASK;
604}
605
606static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
607 unsigned int port)
608{
609 return mv_hc_base(base, mv_hc_from_port(port));
610}
611
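/*
 * A port's registers sit past its host controller's 8KB arbiter block,
 * one 8KB window per hard port.  For port 5, say: HC1 base + 0x2000 +
 * (1 * 0x2000), since 5 >> MV_PORT_HC_SHIFT == 1 and
 * 5 & MV_PORT_MASK == 1.
 */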
612static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
613{
614 return mv_hc_base_from_port(base, port) +
615 MV_SATAHC_ARBTR_REG_SZ +
616 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
617}
618
619static inline void __iomem *mv_ap_base(struct ata_port *ap)
620{
621 return mv_port_base(ap->host->mmio_base, ap->port_no);
622}
623
624static inline int mv_get_hc_count(unsigned long port_flags)
625{
626 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
627}
628
629static void mv_irq_clear(struct ata_port *ap)
630{
631}
632
633/**
634 * mv_start_dma - Enable eDMA engine
635 * @base: port base address
636 * @pp: port private data
637 *
638 * Verify the local cache of the eDMA state is accurate with a
639 * WARN_ON.
640 *
641 * LOCKING:
642 * Inherited from caller.
643 */
644static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
645{
646 if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
647 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
648 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
649 }
650 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
651}
652
653/**
654 * mv_stop_dma - Disable eDMA engine
655 * @ap: ATA channel to manipulate
656 *
657 * Verify the local cache of the eDMA state is accurate with a
658 * WARN_ON.
659 *
660 * LOCKING:
661 * Inherited from caller.
662 */
663static void mv_stop_dma(struct ata_port *ap)
664{
665 void __iomem *port_mmio = mv_ap_base(ap);
666 struct mv_port_priv *pp = ap->private_data;
667 u32 reg;
668 int i;
669
670 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
671 /* Disable EDMA if active. The disable bit auto clears.
672 */
673 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
674 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
675 } else {
676 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
677 }
678
679 /* now properly wait for the eDMA to stop */
680 for (i = 1000; i > 0; i--) {
681 reg = readl(port_mmio + EDMA_CMD_OFS);
682 if (!(EDMA_EN & reg)) {
683 break;
684 }
685 udelay(100);
686 }
687
688 if (EDMA_EN & reg) {
689 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
690 /* FIXME: Consider doing a reset here to recover */
691 }
692}
693
694#ifdef ATA_DEBUG
695static void mv_dump_mem(void __iomem *start, unsigned bytes)
696{
697 int b, w;
698 for (b = 0; b < bytes; ) {
699 DPRINTK("%p: ", start + b);
700 for (w = 0; b < bytes && w < 4; w++) {
701 printk("%08x ",readl(start + b));
702 b += sizeof(u32);
703 }
704 printk("\n");
705 }
706}
707#endif
708
709static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
710{
711#ifdef ATA_DEBUG
712 int b, w;
713 u32 dw;
714 for (b = 0; b < bytes; ) {
715 DPRINTK("%02x: ", b);
716 for (w = 0; b < bytes && w < 4; w++) {
717 (void) pci_read_config_dword(pdev,b,&dw);
718 printk("%08x ",dw);
719 b += sizeof(u32);
720 }
721 printk("\n");
722 }
723#endif
724}

725static void mv_dump_all_regs(void __iomem *mmio_base, int port,
726 struct pci_dev *pdev)
727{
728#ifdef ATA_DEBUG
729 void __iomem *hc_base = mv_hc_base(mmio_base,
730 port >> MV_PORT_HC_SHIFT);
731 void __iomem *port_base;
732 int start_port, num_ports, p, start_hc, num_hcs, hc;
733
734 if (0 > port) {
735 start_hc = start_port = 0;
736 num_ports = 8; /* should be benign for 4-port devices */
737 num_hcs = 2;
738 } else {
739 start_hc = port >> MV_PORT_HC_SHIFT;
740 start_port = port;
741 num_ports = num_hcs = 1;
742 }
743 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
744 num_ports > 1 ? num_ports - 1 : start_port);
745
746 if (NULL != pdev) {
747 DPRINTK("PCI config space regs:\n");
748 mv_dump_pci_cfg(pdev, 0x68);
749 }
750 DPRINTK("PCI regs:\n");
751 mv_dump_mem(mmio_base+0xc00, 0x3c);
752 mv_dump_mem(mmio_base+0xd00, 0x34);
753 mv_dump_mem(mmio_base+0xf00, 0x4);
754 mv_dump_mem(mmio_base+0x1d00, 0x6c);
755 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
756 hc_base = mv_hc_base(mmio_base, hc);
757 DPRINTK("HC regs (HC %i):\n", hc);
758 mv_dump_mem(hc_base, 0x1c);
759 }
760 for (p = start_port; p < start_port + num_ports; p++) {
761 port_base = mv_port_base(mmio_base, p);
762 DPRINTK("EDMA regs (port %i):\n",p);
763 mv_dump_mem(port_base, 0x54);
764 DPRINTK("SATA regs (port %i):\n",p);
765 mv_dump_mem(port_base+0x300, 0x60);
766 }
767#endif
768}
769
770static unsigned int mv_scr_offset(unsigned int sc_reg_in)
771{
772 unsigned int ofs;
773
774 switch (sc_reg_in) {
775 case SCR_STATUS:
776 case SCR_CONTROL:
777 case SCR_ERROR:
778 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
779 break;
780 case SCR_ACTIVE:
781 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
782 break;
783 default:
784 ofs = 0xffffffffU;
785 break;
786 }
787 return ofs;
788}
789
790static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
791{
792 unsigned int ofs = mv_scr_offset(sc_reg_in);
793
794 if (0xffffffffU != ofs) {
795 return readl(mv_ap_base(ap) + ofs);
796 } else {
797 return (u32) ofs;
798 }
799}
800
801static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
802{
803 unsigned int ofs = mv_scr_offset(sc_reg_in);
804
805 if (0xffffffffU != ofs) {
806 writelfl(val, mv_ap_base(ap) + ofs);
807 }
808}
809
810/**
811 * mv_host_stop - Host specific cleanup/stop routine.
812 * @host: host data structure
813 *
814 * Disable ints, cleanup host memory, call general purpose
815 * host_stop.
816 *
817 * LOCKING:
818 * Inherited from caller.
819 */
820static void mv_host_stop(struct ata_host *host)
821{
822 struct mv_host_priv *hpriv = host->private_data;
823 struct pci_dev *pdev = to_pci_dev(host->dev);
824
825 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
826 pci_disable_msi(pdev);
827 } else {
828 pci_intx(pdev, 0);
829 }
830 kfree(hpriv);
831 ata_host_stop(host);
832}
833
834static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
835{
836 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
837}
838
839static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
840{
841 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
842
843 /* set up non-NCQ EDMA configuration */
844 cfg &= ~0x1f; /* clear queue depth */
845 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
846 cfg &= ~(1 << 9); /* disable EDMA queuing */
847
848 if (IS_GEN_I(hpriv))
849 cfg |= (1 << 8); /* enab config burst size mask */
850
851 else if (IS_GEN_II(hpriv))
852 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
853
854 else if (IS_GEN_IIE(hpriv)) {
855 cfg |= (1 << 23); /* dis RX PM port mask */
856 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
857 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
858 cfg |= (1 << 18); /* enab early completion */
859 cfg |= (1 << 17); /* enab host q cache */
860 cfg |= (1 << 22); /* enab cutthrough */
861 }
862
863 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
864}
865
866/**
867 * mv_port_start - Port specific init/start routine.
868 * @ap: ATA channel to manipulate
869 *
870 * Allocate and point to DMA memory, init port private memory,
871 * zero indices.
872 *
873 * LOCKING:
874 * Inherited from caller.
875 */
876static int mv_port_start(struct ata_port *ap)
877{
878 struct device *dev = ap->host->dev;
879 struct mv_host_priv *hpriv = ap->host->private_data;
880 struct mv_port_priv *pp;
881 void __iomem *port_mmio = mv_ap_base(ap);
882 void *mem;
883 dma_addr_t mem_dma;
884 int rc = -ENOMEM;
885
886 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
887 if (!pp)
888 goto err_out;
889 memset(pp, 0, sizeof(*pp));
890
891 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
892 GFP_KERNEL);
893 if (!mem)
894 goto err_out_pp;
895 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
896
897 rc = ata_pad_alloc(ap, dev);
898 if (rc)
899 goto err_out_priv;
900
901 /* First item in chunk of DMA memory:
902 * 32-slot command request table (CRQB), 32 bytes each in size
903 */
904 pp->crqb = mem;
905 pp->crqb_dma = mem_dma;
906 mem += MV_CRQB_Q_SZ;
907 mem_dma += MV_CRQB_Q_SZ;
908
909 /* Second item:
910 * 32-slot command response table (CRPB), 8 bytes each in size
911 */
912 pp->crpb = mem;
913 pp->crpb_dma = mem_dma;
914 mem += MV_CRPB_Q_SZ;
915 mem_dma += MV_CRPB_Q_SZ;
916
917 /* Third item:
918 * Table of scatter-gather descriptors (ePRD), 16 bytes each
919 */
920 pp->sg_tbl = mem;
921 pp->sg_tbl_dma = mem_dma;
922
923 mv_edma_cfg(hpriv, port_mmio);
924
925 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
926 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
927 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
928
929 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
930 writelfl(pp->crqb_dma & 0xffffffff,
931 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
932 else
933 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
934
935 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
936
937 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
938 writelfl(pp->crpb_dma & 0xffffffff,
939 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
940 else
941 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
942
943 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
944 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
945
946 /* Don't turn on EDMA here...do it before DMA commands only. Else
947 * we'll be unable to send non-data, PIO, etc due to restricted access
948 * to shadow regs.
949 */
950 ap->private_data = pp;
951 return 0;
952
953err_out_priv:
954 mv_priv_free(pp, dev);
955err_out_pp:
956 kfree(pp);
957err_out:
958 return rc;
959}
960
961/**
962 * mv_port_stop - Port specific cleanup/stop routine.
963 * @ap: ATA channel to manipulate
964 *
965 * Stop DMA, cleanup port memory.
966 *
967 * LOCKING:
968 * This routine uses the host lock to protect the DMA stop.
969 */
970static void mv_port_stop(struct ata_port *ap)
971{
972 struct device *dev = ap->host->dev;
973 struct mv_port_priv *pp = ap->private_data;
974 unsigned long flags;
975
976 spin_lock_irqsave(&ap->host->lock, flags);
977 mv_stop_dma(ap);
978 spin_unlock_irqrestore(&ap->host->lock, flags);
979
980 ap->private_data = NULL;
981 ata_pad_free(ap, dev);
982 mv_priv_free(pp, dev);
983 kfree(pp);
984}
985
986/**
987 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
988 * @qc: queued command whose SG list to source from
989 *
990 * Populate the SG list and mark the last entry.
991 *
992 * LOCKING:
993 * Inherited from caller.
994 */
995static void mv_fill_sg(struct ata_queued_cmd *qc)
996{
997 struct mv_port_priv *pp = qc->ap->private_data;
998 unsigned int i = 0;
999 struct scatterlist *sg;
1000
1001 ata_for_each_sg(sg, qc) {
1002 dma_addr_t addr;
1003 u32 sg_len, len, offset;
1004
1005 addr = sg_dma_address(sg);
1006 sg_len = sg_dma_len(sg);
1007
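		/*
		 * The ePRD length field is only 16 bits wide, so chop each
		 * segment at 64KB boundaries; a stored length of zero (what
		 * len & 0xffff yields when len == 0x10000) presumably
		 * encodes the full 64KB.
		 */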
1008 while (sg_len) {
1009 offset = addr & MV_DMA_BOUNDARY;
1010 len = sg_len;
1011 if ((offset + sg_len) > 0x10000)
1012 len = 0x10000 - offset;
1013
1014 pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
1015 pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1016 pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
1017
1018 sg_len -= len;
1019 addr += len;
1020
1021 if (!sg_len && ata_sg_is_last(sg, qc))
1022 pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1023
1024 i++;
1025 }
1026 }
1027}
1028
1029static inline unsigned mv_inc_q_index(unsigned index)
1030{
1031 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1032}
1033
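/*
 * Each CRQB ata_cmd word packs one shadow-register write: bits 7:0
 * carry the data byte, the register address sits at
 * CRQB_CMD_ADDR_SHIFT, CRQB_CMD_CS supplies the chip-select field, and
 * CRQB_CMD_LAST flags the final word of the request.
 */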
1034static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1035{
1036 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1037 (last ? CRQB_CMD_LAST : 0);
1038 *cmdw = cpu_to_le16(tmp);
1039}
1040
1041/**
1042 * mv_qc_prep - Host specific command preparation.
1043 * @qc: queued command to prepare
1044 *
1045 * This routine simply redirects to the general purpose routine
1046 * if command is not DMA. Else, it handles prep of the CRQB
1047 * (command request block), does some sanity checking, and calls
1048 * the SG load routine.
1049 *
1050 * LOCKING:
1051 * Inherited from caller.
1052 */
1053static void mv_qc_prep(struct ata_queued_cmd *qc)
1054{
1055 struct ata_port *ap = qc->ap;
1056 struct mv_port_priv *pp = ap->private_data;
1057 __le16 *cw;
1058 struct ata_taskfile *tf;
1059 u16 flags = 0;
1060 unsigned in_index;
1061
1062 if (ATA_PROT_DMA != qc->tf.protocol)
1063 return;
1064
1065 /* Fill in command request block
1066 */
1067 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1068 flags |= CRQB_FLAG_READ;
1069 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1070 flags |= qc->tag << CRQB_TAG_SHIFT;
1071
1072 /* get current queue index from hardware */
1073 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1074 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1075
1076 pp->crqb[in_index].sg_addr =
1077 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1078 pp->crqb[in_index].sg_addr_hi =
1079 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1080 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1081
1082 cw = &pp->crqb[in_index].ata_cmd[0];
1083 tf = &qc->tf;
1084
1085 /* Sadly, the CRQB cannot accommodate all registers--there are
1086 * only 11 bytes...so we must pick and choose required
1087 * registers based on the command. So, we drop feature and
1088 * hob_feature for [RW] DMA commands, but they are needed for
1089 * NCQ. NCQ will drop hob_nsect.
1090 */
1091 switch (tf->command) {
1092 case ATA_CMD_READ:
1093 case ATA_CMD_READ_EXT:
1094 case ATA_CMD_WRITE:
1095 case ATA_CMD_WRITE_EXT:
1096 case ATA_CMD_WRITE_FUA_EXT:
1097 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1098 break;
1099#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1100 case ATA_CMD_FPDMA_READ:
1101 case ATA_CMD_FPDMA_WRITE:
1102 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1103 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1104 break;
1105#endif /* FIXME: remove this line when NCQ added */
1106 default:
1107 /* The only other commands EDMA supports in non-queued and
1108 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1109 * of which are defined/used by Linux. If we get here, this
1110 * driver needs work.
1111 *
1112 * FIXME: modify libata to give qc_prep a return value and
1113 * return error here.
1114 */
1115 BUG_ON(tf->command);
1116 break;
1117 }
1118 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1119 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1120 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1121 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1122 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1123 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1124 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1125 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1126 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1127
1128 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1129 return;
1130 mv_fill_sg(qc);
1131}
1132
1133/**
1134 * mv_qc_prep_iie - Host specific command preparation.
1135 * @qc: queued command to prepare
1136 *
1137 * This routine simply redirects to the general purpose routine
1138 * if command is not DMA. Else, it handles prep of the CRQB
1139 * (command request block), does some sanity checking, and calls
1140 * the SG load routine.
1141 *
1142 * LOCKING:
1143 * Inherited from caller.
1144 */
1145static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1146{
1147 struct ata_port *ap = qc->ap;
1148 struct mv_port_priv *pp = ap->private_data;
1149 struct mv_crqb_iie *crqb;
1150 struct ata_taskfile *tf;
1151 unsigned in_index;
1152 u32 flags = 0;
1153
1154 if (ATA_PROT_DMA != qc->tf.protocol)
1155 return;
1156
1157 /* Fill in Gen IIE command request block
1158 */
1159 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1160 flags |= CRQB_FLAG_READ;
1161
1162 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1163 flags |= qc->tag << CRQB_TAG_SHIFT;
1164
1165 /* get current queue index from hardware */
1166 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1167 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1168
1169 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1170 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1171 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1172 crqb->flags = cpu_to_le32(flags);
1173
1174 tf = &qc->tf;
1175 crqb->ata_cmd[0] = cpu_to_le32(
1176 (tf->command << 16) |
1177 (tf->feature << 24)
1178 );
1179 crqb->ata_cmd[1] = cpu_to_le32(
1180 (tf->lbal << 0) |
1181 (tf->lbam << 8) |
1182 (tf->lbah << 16) |
1183 (tf->device << 24)
1184 );
1185 crqb->ata_cmd[2] = cpu_to_le32(
1186 (tf->hob_lbal << 0) |
1187 (tf->hob_lbam << 8) |
1188 (tf->hob_lbah << 16) |
1189 (tf->hob_feature << 24)
1190 );
1191 crqb->ata_cmd[3] = cpu_to_le32(
1192 (tf->nsect << 0) |
1193 (tf->hob_nsect << 8)
1194 );
1195
1196 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1197 return;
1198 mv_fill_sg(qc);
1199}
1200
1201/**
1202 * mv_qc_issue - Initiate a command to the host
1203 * @qc: queued command to start
1204 *
1205 * This routine simply redirects to the general purpose routine
1206 * if command is not DMA. Else, it sanity checks our local
1207 * caches of the request producer/consumer indices then enables
1208 * DMA and bumps the request producer index.
1209 *
1210 * LOCKING:
1211 * Inherited from caller.
1212 */
1213static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1214{
1215 void __iomem *port_mmio = mv_ap_base(qc->ap);
1216 struct mv_port_priv *pp = qc->ap->private_data;
1217 unsigned in_index;
1218 u32 in_ptr;
1219
1220 if (ATA_PROT_DMA != qc->tf.protocol) {
1221 /* We're about to send a non-EDMA capable command to the
1222 * port. Turn off EDMA so there won't be problems accessing
1223 * shadow block, etc registers.
1224 */
1225 mv_stop_dma(qc->ap);
1226 return ata_qc_issue_prot(qc);
1227 }
1228
1229 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1230 in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1231
1232 /* until we do queuing, the queue should be empty at this point */
1233 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1234 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1235
1236 in_index = mv_inc_q_index(in_index); /* now incr producer index */
1237
1238 mv_start_dma(port_mmio, pp);
1239
1240 /* and write the request in pointer to kick the EDMA to life */
1241 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1242 in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1243 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1244
1245 return 0;
1246}
1247
1248/**
1249 * mv_get_crpb_status - get status from most recently completed cmd
1250 * @ap: ATA channel to manipulate
1251 *
1252 * This routine is for use when the port is in DMA mode, when it
1253 * will be using the CRPB (command response block) method of
1254 * returning command completion information. We check indices
1255 * are good, grab status, and bump the response consumer index to
1256 * prove that we're up to date.
1257 *
1258 * LOCKING:
1259 * Inherited from caller.
1260 */
1261static u8 mv_get_crpb_status(struct ata_port *ap)
1262{
1263 void __iomem *port_mmio = mv_ap_base(ap);
1264 struct mv_port_priv *pp = ap->private_data;
1265 unsigned out_index;
1266 u32 out_ptr;
1267 u8 ata_status;
1268
1269 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1270 out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1271
1272 ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1273 >> CRPB_FLAG_STATUS_SHIFT;
1274
1275 /* increment our consumer index... */
1276 out_index = mv_inc_q_index(out_index);
1277
1278 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1279 WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1280 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1281
1282 /* write out our inc'd consumer index so EDMA knows we're caught up */
1283 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
1284 out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
1285 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1286
1287 /* Return ATA status register for completed CRPB */
1288 return ata_status;
1289}
1290
1291/**
1292 * mv_err_intr - Handle error interrupts on the port
1293 * @ap: ATA channel to manipulate
1294 * @reset_allowed: bool: 0 == don't trigger from reset here
1295 *
1296 * In most cases, just clear the interrupt and move on. However,
1297 * some cases require an eDMA reset, which is done right before
1298 * the COMRESET in mv_phy_reset(). The SERR case requires a
1299 * clear of pending errors in the SATA SERROR register. Finally,
1300 * if the port disabled DMA, update our cached copy to match.
1301 *
1302 * LOCKING:
1303 * Inherited from caller.
1304 */
1305static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1306{
1307 void __iomem *port_mmio = mv_ap_base(ap);
1308 u32 edma_err_cause, serr = 0;
1309
1310 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1311
1312 if (EDMA_ERR_SERR & edma_err_cause) {
1313 sata_scr_read(ap, SCR_ERROR, &serr);
1314 sata_scr_write_flush(ap, SCR_ERROR, serr);
1315 }
1316 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1317 struct mv_port_priv *pp = ap->private_data;
1318 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1319 }
1320 DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1321 "SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
1322
1323 /* Clear EDMA now that SERR cleanup done */
1324 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1325
1326 /* check for fatal here and recover if needed */
1327 if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
1328 mv_stop_and_reset(ap);
1329}
1330
1331/**
1332 * mv_host_intr - Handle all interrupts on the given host controller
1333 * @host: host specific structure
1334 * @relevant: port error bits relevant to this host controller
1335 * @hc: which host controller we're to look at
1336 *
1337 * Read then write clear the HC interrupt status then walk each
1338 * port connected to the HC and see if it needs servicing. Port
1339 * success ints are reported in the HC interrupt status reg, the
1340 * port error ints are reported in the higher level main
1341 * interrupt status register and thus are passed in via the
1342 * 'relevant' argument.
1343 *
1344 * LOCKING:
1345 * Inherited from caller.
1346 */
1347static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1348{
1349 void __iomem *mmio = host->mmio_base;
1350 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1351 struct ata_queued_cmd *qc;
1352 u32 hc_irq_cause;
1353 int shift, port, port0, hard_port, handled;
1354 unsigned int err_mask;
1355
1356 if (hc == 0) {
1357 port0 = 0;
1358 } else {
1359 port0 = MV_PORTS_PER_HC;
1360 }
1361
1362 /* we'll need the HC success int register in most cases */
1363 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1364 if (hc_irq_cause) {
1365 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1366 }
1367
1368 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1369 hc,relevant,hc_irq_cause);
1370
1371 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1372 u8 ata_status = 0;
1373 struct ata_port *ap = host->ports[port];
1374 struct mv_port_priv *pp = ap->private_data;
1375
1376 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1377		handled = 0;	/* whoever sets handled must also set ata_status */
1378
1379 /* Note that DEV_IRQ might happen spuriously during EDMA,
1380 * and should be ignored in such cases.
1381 * The cause of this is still under investigation.
1382 */
1383 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1384 /* EDMA: check for response queue interrupt */
1385 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
1386 ata_status = mv_get_crpb_status(ap);
1387 handled = 1;
1388 }
1389 } else {
1390 /* PIO: check for device (drive) interrupt */
1391 if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1392 ata_status = readb((void __iomem *)
1393 ap->ioaddr.status_addr);
1394 handled = 1;
1395 /* ignore spurious intr if drive still BUSY */
1396 if (ata_status & ATA_BUSY) {
1397 ata_status = 0;
1398 handled = 0;
1399 }
1400 }
1401 }
1402
1403 if (ap && (ap->flags & ATA_FLAG_DISABLED))
1404 continue;
1405
1406 err_mask = ac_err_mask(ata_status);
1407
1408 shift = port << 1; /* (port * 2) */
1409 if (port >= MV_PORTS_PER_HC) {
1410 shift++; /* skip bit 8 in the HC Main IRQ reg */
1411 }
1412 if ((PORT0_ERR << shift) & relevant) {
1413 mv_err_intr(ap, 1);
1414 err_mask |= AC_ERR_OTHER;
1415 handled = 1;
1416 }
1417
1418 if (handled) {
1419 qc = ata_qc_from_tag(ap, ap->active_tag);
1420 if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
1421 VPRINTK("port %u IRQ found for qc, "
1422					"ata_status 0x%x\n", port, ata_status);
1423 /* mark qc status appropriately */
1424 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1425 qc->err_mask |= err_mask;
1426 ata_qc_complete(qc);
1427 }
1428 }
1429 }
1430 }
1431 VPRINTK("EXIT\n");
1432}
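
The shift arithmetic near the end of mv_host_intr() maps a port number to its two error bits in the main cause register; bit 8 is skipped, so the second HC's ports start at bit 9. A standalone model of that mapping (PORTS_PER_HC mirrors MV_PORTS_PER_HC, which is 4 on these chips):

enum { PORTS_PER_HC = 4 };

static int port_err_shift(int port)
{
	int shift = port << 1;			/* two bits per port */

	if (port >= PORTS_PER_HC)
		shift++;			/* skip bit 8 */
	return shift;
}
/* ports 0..3 -> shifts 0,2,4,6; ports 4..7 -> shifts 9,11,13,15 */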
1433
1434/**
1435 * mv_interrupt - Main interrupt event handler
1436 * @irq: unused
1437 * @dev_instance: private data; in this case the host structure
1438 * @regs: unused
1439 *
1440 * Read the read-only register to determine if any host
1441 * controllers have pending interrupts.  If so, call the lower-level
1442 * routine to handle them.  Also check for PCI errors, which are
1443 * reported only here.
1444 *
1445 * LOCKING:
1446 * This routine holds the host lock while processing pending
1447 * interrupts.
1448 */
1449static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1450 struct pt_regs *regs)
1451{
1452 struct ata_host *host = dev_instance;
1453 unsigned int hc, handled = 0, n_hcs;
1454 void __iomem *mmio = host->mmio_base;
1455 struct mv_host_priv *hpriv;
1456 u32 irq_stat;
1457
1458 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1459
1460 /* check the cases where we either have nothing pending or have read
1461 * a bogus register value which can indicate HW removal or PCI fault
1462 */
1463 if (!irq_stat || (0xffffffffU == irq_stat)) {
1464 return IRQ_NONE;
1465 }
1466
1467 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1468 spin_lock(&host->lock);
1469
1470 for (hc = 0; hc < n_hcs; hc++) {
1471 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1472 if (relevant) {
1473 mv_host_intr(host, relevant, hc);
1474 handled++;
1475 }
1476 }
1477
1478 hpriv = host->private_data;
1479 if (IS_60XX(hpriv)) {
1480 /* deal with the interrupt coalescing bits */
1481 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1482 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1483 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1484 writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1485 }
1486 }
1487
1488 if (PCI_ERR & irq_stat) {
1489 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1490 readl(mmio + PCI_IRQ_CAUSE_OFS));
1491
1492 DPRINTK("All regs @ PCI error\n");
1493 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1494
1495 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1496 handled++;
1497 }
1498 spin_unlock(&host->lock);
1499
1500 return IRQ_RETVAL(handled);
1501}
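
mv_interrupt() carves the main cause word into one slice per host controller before handing each slice to mv_host_intr(). A sketch of the slice extraction, with stand-in values for HC0_IRQ_PEND and HC_SHIFT:

#define HC0_PEND	0x1ffU		/* stand-in for HC0_IRQ_PEND */
#define HC_SHIFT_BITS	9		/* stand-in for HC_SHIFT */

static unsigned int hc_relevant(unsigned int irq_stat, unsigned int hc)
{
	return irq_stat & (HC0_PEND << (hc * HC_SHIFT_BITS));
}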
1502
1503static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1504{
1505 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1506 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1507
1508 return hc_mmio + ofs;
1509}
1510
1511static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1512{
1513 unsigned int ofs;
1514
1515 switch (sc_reg_in) {
1516 case SCR_STATUS:
1517 case SCR_ERROR:
1518 case SCR_CONTROL:
1519 ofs = sc_reg_in * sizeof(u32);
1520 break;
1521 default:
1522 ofs = 0xffffffffU;
1523 break;
1524 }
1525 return ofs;
1526}
1527
1528static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1529{
1530 void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
1531 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1532
1533 if (ofs != 0xffffffffU)
1534 return readl(mmio + ofs);
1535 else
1536 return (u32) ofs;
1537}
1538
1539static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1540{
1541 void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
1542 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1543
1544 if (ofs != 0xffffffffU)
1545 writelfl(val, mmio + ofs);
1546}
1547
1548static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1549{
1550 u8 rev_id;
1551 int early_5080;
1552
1553 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1554
1555 early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1556
1557 if (!early_5080) {
1558 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1559 tmp |= (1 << 0);
1560 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1561 }
1562
1563 mv_reset_pci_bus(pdev, mmio);
1564}
1565
1566static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1567{
1568 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1569}
1570
1571static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1572 void __iomem *mmio)
1573{
1574 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1575 u32 tmp;
1576
1577 tmp = readl(phy_mmio + MV5_PHY_MODE);
1578
1579 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1580 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1581}
1582
1583static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1584{
1585 u32 tmp;
1586
1587 writel(0, mmio + MV_GPIO_PORT_CTL);
1588
1589 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1590
1591 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1592 tmp |= ~(1 << 0);
1593 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1594}
1595
1596static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1597 unsigned int port)
1598{
1599 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1600 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1601 u32 tmp;
1602 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1603
1604 if (fix_apm_sq) {
1605 tmp = readl(phy_mmio + MV5_LT_MODE);
1606 tmp |= (1 << 19);
1607 writel(tmp, phy_mmio + MV5_LT_MODE);
1608
1609 tmp = readl(phy_mmio + MV5_PHY_CTL);
1610 tmp &= ~0x3;
1611 tmp |= 0x1;
1612 writel(tmp, phy_mmio + MV5_PHY_CTL);
1613 }
1614
1615 tmp = readl(phy_mmio + MV5_PHY_MODE);
1616 tmp &= ~mask;
1617 tmp |= hpriv->signal[port].pre;
1618 tmp |= hpriv->signal[port].amps;
1619 writel(tmp, phy_mmio + MV5_PHY_MODE);
1620}
1621
1622
1623#undef ZERO
1624#define ZERO(reg) writel(0, port_mmio + (reg))
1625static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1626 unsigned int port)
1627{
1628 void __iomem *port_mmio = mv_port_base(mmio, port);
1629
1630 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1631
1632 mv_channel_reset(hpriv, mmio, port);
1633
1634 ZERO(0x028); /* command */
1635 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1636 ZERO(0x004); /* timer */
1637 ZERO(0x008); /* irq err cause */
1638 ZERO(0x00c); /* irq err mask */
1639 ZERO(0x010); /* rq bah */
1640 ZERO(0x014); /* rq inp */
1641 ZERO(0x018); /* rq outp */
1642 ZERO(0x01c); /* respq bah */
1643 ZERO(0x024); /* respq outp */
1644 ZERO(0x020); /* respq inp */
1645 ZERO(0x02c); /* test control */
1646 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1647}
1648#undef ZERO
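
The ZERO() macro bracketed by #define/#undef above is a scoping idiom: the short name binds to whatever base pointer is in scope for one function, then is released so it can be rebound over a different base later in the file. The same idiom in a self-contained userspace form (fake_port and reg_write32 are stand-ins for the mapped window and writel):

#include <stdint.h>
#include <string.h>

static uint8_t fake_port[0x100];	/* stand-in for the mapped window */

static void reg_write32(void *base, unsigned int off, uint32_t v)
{
	memcpy((uint8_t *)base + off, &v, sizeof(v));
}

#define ZERO(reg) reg_write32(port_mmio, (reg), 0)
static void clear_port_regs(void *port_mmio)
{
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
}
#undef ZERO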
1649
1650#define ZERO(reg) writel(0, hc_mmio + (reg))
1651static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1652 unsigned int hc)
1653{
1654 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1655 u32 tmp;
1656
1657 ZERO(0x00c);
1658 ZERO(0x010);
1659 ZERO(0x014);
1660 ZERO(0x018);
1661
1662 tmp = readl(hc_mmio + 0x20);
1663 tmp &= 0x1c1c1c1c;
1664 tmp |= 0x03030303;
1665 writel(tmp, hc_mmio + 0x20);
1666}
1667#undef ZERO
1668
1669static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1670 unsigned int n_hc)
1671{
1672 unsigned int hc, port;
1673
1674 for (hc = 0; hc < n_hc; hc++) {
1675 for (port = 0; port < MV_PORTS_PER_HC; port++)
1676 mv5_reset_hc_port(hpriv, mmio,
1677 (hc * MV_PORTS_PER_HC) + port);
1678
1679 mv5_reset_one_hc(hpriv, mmio, hc);
1680 }
1681
1682 return 0;
1683}
1684
1685#undef ZERO
1686#define ZERO(reg) writel(0, mmio + (reg))
1687static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1688{
1689 u32 tmp;
1690
1691 tmp = readl(mmio + MV_PCI_MODE);
1692 tmp &= 0xff00ffff;
1693 writel(tmp, mmio + MV_PCI_MODE);
1694
1695 ZERO(MV_PCI_DISC_TIMER);
1696 ZERO(MV_PCI_MSI_TRIGGER);
1697 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1698 ZERO(HC_MAIN_IRQ_MASK_OFS);
1699 ZERO(MV_PCI_SERR_MASK);
1700 ZERO(PCI_IRQ_CAUSE_OFS);
1701 ZERO(PCI_IRQ_MASK_OFS);
1702 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1703 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1704 ZERO(MV_PCI_ERR_ATTRIBUTE);
1705 ZERO(MV_PCI_ERR_COMMAND);
1706}
1707#undef ZERO
1708
1709static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1710{
1711 u32 tmp;
1712
1713 mv5_reset_flash(hpriv, mmio);
1714
1715 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1716 tmp &= 0x3;
1717 tmp |= (1 << 5) | (1 << 6);
1718 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1719}
1720
1721/**
1722 * mv6_reset_hc - Perform the 6xxx global soft reset
1723 * @mmio: base address of the HBA
1724 *
1725 * This routine only applies to 6xxx parts.
1726 *
1727 * LOCKING:
1728 * Inherited from caller.
1729 */
1730static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1731 unsigned int n_hc)
1732{
1733 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1734 int i, rc = 0;
1735 u32 t;
1736
1737 /* Following procedure defined in PCI "main command and status
1738 * register" table.
1739 */
1740 t = readl(reg);
1741 writel(t | STOP_PCI_MASTER, reg);
1742
1743 for (i = 0; i < 1000; i++) {
1744 udelay(1);
1745 t = readl(reg);
1746 if (PCI_MASTER_EMPTY & t) {
1747 break;
1748 }
1749 }
1750 if (!(PCI_MASTER_EMPTY & t)) {
1751 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1752 rc = 1;
1753 goto done;
1754 }
1755
1756 /* set reset */
1757 i = 5;
1758 do {
1759 writel(t | GLOB_SFT_RST, reg);
1760 t = readl(reg);
1761 udelay(1);
1762 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
1763
1764 if (!(GLOB_SFT_RST & t)) {
1765 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1766 rc = 1;
1767 goto done;
1768 }
1769
1770 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
1771 i = 5;
1772 do {
1773 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1774 t = readl(reg);
1775 udelay(1);
1776 } while ((GLOB_SFT_RST & t) && (i-- > 0));
1777
1778 if (GLOB_SFT_RST & t) {
1779 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1780 rc = 1;
1781 }
1782done:
1783 return rc;
1784}
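
Both polling loops in mv6_reset_hc() follow the same bounded retry shape: write the bit, re-read, delay, and give up after a fixed number of tries. A userspace reduction of that shape (fake_cmd_sts stands in for the command/status register; the delay is omitted):

#include <stdint.h>

static uint32_t fake_cmd_sts;

static uint32_t read_reg(void)		{ return fake_cmd_sts; }
static void write_reg(uint32_t v)	{ fake_cmd_sts = v; }

static int wait_bit_set(uint32_t bit, int tries)
{
	uint32_t t;

	do {
		write_reg(read_reg() | bit);
		t = read_reg();
	} while (!(t & bit) && (tries-- > 0));

	return (t & bit) ? 0 : -1;	/* 0 on success, -1 on timeout */
}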
1785
1786static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
1787 void __iomem *mmio)
1788{
1789 void __iomem *port_mmio;
1790 u32 tmp;
1791
1792 tmp = readl(mmio + MV_RESET_CFG);
1793 if ((tmp & (1 << 0)) == 0) {
1794 hpriv->signal[idx].amps = 0x7 << 8;
1795 hpriv->signal[idx].pre = 0x1 << 5;
1796 return;
1797 }
1798
1799 port_mmio = mv_port_base(mmio, idx);
1800 tmp = readl(port_mmio + PHY_MODE2);
1801
1802 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
1803 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
1804}
1805
1806static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1807{
1808 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
1809}
1810
1811static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1812 unsigned int port)
1813{
1814 void __iomem *port_mmio = mv_port_base(mmio, port);
1815
1816 u32 hp_flags = hpriv->hp_flags;
1817 int fix_phy_mode2 =
1818 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1819 int fix_phy_mode4 =
1820 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1821 u32 m2, tmp;
1822
1823 if (fix_phy_mode2) {
1824 m2 = readl(port_mmio + PHY_MODE2);
1825 m2 &= ~(1 << 16);
1826 m2 |= (1 << 31);
1827 writel(m2, port_mmio + PHY_MODE2);
1828
1829 udelay(200);
1830
1831 m2 = readl(port_mmio + PHY_MODE2);
1832 m2 &= ~((1 << 16) | (1 << 31));
1833 writel(m2, port_mmio + PHY_MODE2);
1834
1835 udelay(200);
1836 }
1837
1838 /* who knows what this magic does */
1839 tmp = readl(port_mmio + PHY_MODE3);
1840 tmp &= ~0x7F800000;
1841 tmp |= 0x2A800000;
1842 writel(tmp, port_mmio + PHY_MODE3);
1843
1844 if (fix_phy_mode4) {
1845 u32 m4;
1846
1847 m4 = readl(port_mmio + PHY_MODE4);
1848
1849 if (hp_flags & MV_HP_ERRATA_60X1B2)
1850 tmp = readl(port_mmio + 0x310);
1851
1852 m4 = (m4 & ~(1 << 1)) | (1 << 0);
1853
1854 writel(m4, port_mmio + PHY_MODE4);
1855
1856 if (hp_flags & MV_HP_ERRATA_60X1B2)
1857 writel(tmp, port_mmio + 0x310);
1858 }
1859
1860 /* Revert values of pre-emphasis and signal amps to the saved ones */
1861 m2 = readl(port_mmio + PHY_MODE2);
1862
1863 m2 &= ~MV_M2_PREAMP_MASK;
1864 m2 |= hpriv->signal[port].amps;
1865 m2 |= hpriv->signal[port].pre;
1866 m2 &= ~(1 << 16);
1867
1868 /* according to mvSata 3.6.1, some IIE values are fixed */
1869 if (IS_GEN_IIE(hpriv)) {
1870 m2 &= ~0xC30FF01F;
1871 m2 |= 0x0000900F;
1872 }
1873
1874 writel(m2, port_mmio + PHY_MODE2);
1875}
1876
1877static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1878 unsigned int port_no)
1879{
1880 void __iomem *port_mmio = mv_port_base(mmio, port_no);
1881
1882 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
1883
1884 if (IS_60XX(hpriv)) {
1885 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1886 ifctl |= (1 << 7); /* enable gen2i speed */
1887 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1888 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1889 }
1890
1891 udelay(25); /* allow reset propagation */
1892
1893 /* Spec never mentions clearing the bit. Marvell's driver does
1894 * clear the bit, however.
1895 */
1896 writelfl(0, port_mmio + EDMA_CMD_OFS);
1897
1898 hpriv->ops->phy_errata(hpriv, mmio, port_no);
1899
1900 if (IS_50XX(hpriv))
1901 mdelay(1);
1902}
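
The SATA_INTERFACE_CTL update for 60xx parts, repeated later in mv_init_host(), reduces to one pure computation: set bit 7 for gen2i, keep the low 12 bits, and overlay the chip-spec constant in the upper bits. Isolated as a helper:

#include <stdint.h>

static uint32_t gen2i_ifctl(uint32_t ifctl)
{
	ifctl |= (1 << 7);			/* enable gen2i speed */
	return (ifctl & 0xfff) | 0x9b1000;	/* from chip spec */
}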
1903
1904static void mv_stop_and_reset(struct ata_port *ap)
1905{
1906 struct mv_host_priv *hpriv = ap->host->private_data;
1907 void __iomem *mmio = ap->host->mmio_base;
1908
1909 mv_stop_dma(ap);
1910
1911 mv_channel_reset(hpriv, mmio, ap->port_no);
1912
1913 __mv_phy_reset(ap, 0);
1914}
1915
1916static inline void __msleep(unsigned int msec, int can_sleep)
1917{
1918 if (can_sleep)
1919 msleep(msec);
1920 else
1921 mdelay(msec);
1922}
1923
1924/**
1925 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
1926 * @ap: ATA channel to manipulate
1927 *
1928 * Part of this is taken from __sata_phy_reset and modified to
1929 * not sleep since this routine gets called from interrupt level.
1930 *
1931 * LOCKING:
1932 * Inherited from caller.  This is coded to be safe to call at
1933 * interrupt level, i.e. it does not sleep.
1934 */
1935static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1936{
1937 struct mv_port_priv *pp = ap->private_data;
1938 struct mv_host_priv *hpriv = ap->host->private_data;
1939 void __iomem *port_mmio = mv_ap_base(ap);
1940 struct ata_taskfile tf;
1941 struct ata_device *dev = &ap->device[0];
1942 unsigned long timeout;
1943 int retry = 5;
1944 u32 sstatus;
1945
1946 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
1947
1948 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1949 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1950 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1951
1952 /* Issue COMRESET via SControl */
1953comreset_retry:
1954 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1955 __msleep(1, can_sleep);
1956
1957 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1958 __msleep(20, can_sleep);
1959
1960 timeout = jiffies + msecs_to_jiffies(200);
1961 do {
1962 sata_scr_read(ap, SCR_STATUS, &sstatus);
1963		/* test DET only; keep full SStatus for the errata check below */
1964		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
1965 break;
1966
1967 __msleep(1, can_sleep);
1968 } while (time_before(jiffies, timeout));
1969
1970 /* work around errata */
1971 if (IS_60XX(hpriv) &&
1972 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
1973 (retry-- > 0))
1974 goto comreset_retry;
1975
1976 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
1977 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1978 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1979
1980 if (ata_port_online(ap)) {
1981 ata_port_probe(ap);
1982 } else {
1983 sata_scr_read(ap, SCR_STATUS, &sstatus);
1984 ata_port_printk(ap, KERN_INFO,
1985 "no device found (phy stat %08x)\n", sstatus);
1986 ata_port_disable(ap);
1987 return;
1988 }
1989 ap->cbl = ATA_CBL_SATA;
1990
1991 /* even after SStatus reflects that device is ready,
1992 * it seems to take a while for link to be fully
1993 * established (and thus Status no longer 0x80/0x7F),
1994 * so we poll a bit for that, here.
1995 */
1996 retry = 20;
1997 while (1) {
1998 u8 drv_stat = ata_check_status(ap);
1999 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2000 break;
2001 __msleep(500, can_sleep);
2002 if (retry-- <= 0)
2003 break;
2004 }
2005
2006 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
2007 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
2008 tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
2009 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
2010
2011 dev->class = ata_dev_classify(&tf);
2012 if (!ata_dev_enabled(dev)) {
2013 VPRINTK("Port disabled post-sig: No device present.\n");
2014 ata_port_disable(ap);
2015 }
2016
2017 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2018
2019 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2020
2021 VPRINTK("EXIT\n");
2022}
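
The COMRESET poll above stops once the DET field of SStatus settles. A sketch of the per-sample test it applies (DET occupies the low bits of SStatus; the driver compares only the low two):

#include <stdint.h>

static int det_settled(uint32_t sstatus)
{
	uint32_t det = sstatus & 0x3;

	/* 3: device present, phy communication established;
	 * 0: no device detected -- either way, stop polling. */
	return det == 3 || det == 0;
}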
2023
2024static void mv_phy_reset(struct ata_port *ap)
2025{
2026 __mv_phy_reset(ap, 1);
2027}
2028
2029/**
2030 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2031 * @ap: ATA channel to manipulate
2032 *
2033 * Intent is to clear all pending error conditions, reset the
2034 * chip/bus, fail the command, and move on.
2035 *
2036 * LOCKING:
2037 * This routine holds the host lock while failing the command.
2038 */
2039static void mv_eng_timeout(struct ata_port *ap)
2040{
2041 struct ata_queued_cmd *qc;
2042 unsigned long flags;
2043
2044 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2045 DPRINTK("All regs @ start of eng_timeout\n");
2046 mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
2047 to_pci_dev(ap->host->dev));
2048
2049 qc = ata_qc_from_tag(ap, ap->active_tag);
2050 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2051 ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
2052
2053 spin_lock_irqsave(&ap->host->lock, flags);
2054 mv_err_intr(ap, 0);
2055 mv_stop_and_reset(ap);
2056 spin_unlock_irqrestore(&ap->host->lock, flags);
2057
2058 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2059 if (qc->flags & ATA_QCFLAG_ACTIVE) {
2060 qc->err_mask |= AC_ERR_TIMEOUT;
2061 ata_eh_qc_complete(qc);
2062 }
2063}
2064
2065/**
2066 * mv_port_init - Perform some early initialization on a single port.
2067 * @port: libata data structure storing shadow register addresses
2068 * @port_mmio: base address of the port
2069 *
2070 * Initialize shadow register mmio addresses, clear outstanding
2071 * interrupts on the port, and unmask interrupts for the future
2072 * start of the port.
2073 *
2074 * LOCKING:
2075 * Inherited from caller.
2076 */
2077static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2078{
2079 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
2080 unsigned serr_ofs;
2081
2082 /* PIO related setup
2083 */
2084 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2085 port->error_addr =
2086 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2087 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2088 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2089 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2090 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2091 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2092 port->status_addr =
2093 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2094 /* special case: control/altstatus doesn't have ATA_REG_ address */
2095 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2096
2097 /* unused: */
2098 port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
2099
2100 /* Clear any currently outstanding port interrupt conditions */
2101 serr_ofs = mv_scr_offset(SCR_ERROR);
2102 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2103 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2104
2105 /* unmask all EDMA error interrupts */
2106 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2107
2108 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2109 readl(port_mmio + EDMA_CFG_OFS),
2110 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2111 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2112}
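
mv_port_init() lays the shadow taskfile registers out at 32-bit strides from the shadow block base, so register N sits at shd_base + 4*N. A standalone model of that address arithmetic (the enum mirrors libata's ATA_REG_* numbering):

#include <stdint.h>

enum { REG_DATA, REG_ERR, REG_NSECT, REG_LBAL,
       REG_LBAM, REG_LBAH, REG_DEVICE, REG_STATUS };

static uintptr_t shadow_reg_addr(uintptr_t shd_base, int reg)
{
	return shd_base + sizeof(uint32_t) * reg;
}
/* shadow_reg_addr(base, REG_STATUS) == base + 0x1c */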
2113
2114static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
2115 unsigned int board_idx)
2116{
2117 u8 rev_id;
2118 u32 hp_flags = hpriv->hp_flags;
2119
2120 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2121
2122 switch(board_idx) {
2123 case chip_5080:
2124 hpriv->ops = &mv5xxx_ops;
2125 hp_flags |= MV_HP_50XX;
2126
2127 switch (rev_id) {
2128 case 0x1:
2129 hp_flags |= MV_HP_ERRATA_50XXB0;
2130 break;
2131 case 0x3:
2132 hp_flags |= MV_HP_ERRATA_50XXB2;
2133 break;
2134 default:
2135 dev_printk(KERN_WARNING, &pdev->dev,
2136 "Applying 50XXB2 workarounds to unknown rev\n");
2137 hp_flags |= MV_HP_ERRATA_50XXB2;
2138 break;
2139 }
2140 break;
2141
2142 case chip_504x:
2143 case chip_508x:
2144 hpriv->ops = &mv5xxx_ops;
2145 hp_flags |= MV_HP_50XX;
2146
2147 switch (rev_id) {
2148 case 0x0:
2149 hp_flags |= MV_HP_ERRATA_50XXB0;
2150 break;
2151 case 0x3:
2152 hp_flags |= MV_HP_ERRATA_50XXB2;
2153 break;
2154 default:
2155 dev_printk(KERN_WARNING, &pdev->dev,
2156 "Applying B2 workarounds to unknown rev\n");
2157 hp_flags |= MV_HP_ERRATA_50XXB2;
2158 break;
2159 }
2160 break;
2161
2162 case chip_604x:
2163 case chip_608x:
2164 hpriv->ops = &mv6xxx_ops;
2165
2166 switch (rev_id) {
2167 case 0x7:
2168 hp_flags |= MV_HP_ERRATA_60X1B2;
2169 break;
2170 case 0x9:
2171 hp_flags |= MV_HP_ERRATA_60X1C0;
2172 break;
2173 default:
2174 dev_printk(KERN_WARNING, &pdev->dev,
2175 "Applying B2 workarounds to unknown rev\n");
2176 hp_flags |= MV_HP_ERRATA_60X1B2;
2177 break;
2178 }
2179 break;
2180
2181 case chip_7042:
2182 case chip_6042:
2183 hpriv->ops = &mv6xxx_ops;
2184
2185 hp_flags |= MV_HP_GEN_IIE;
2186
2187 switch (rev_id) {
2188 case 0x0:
2189 hp_flags |= MV_HP_ERRATA_XX42A0;
2190 break;
2191 case 0x1:
2192 hp_flags |= MV_HP_ERRATA_60X1C0;
2193 break;
2194 default:
2195 dev_printk(KERN_WARNING, &pdev->dev,
2196 "Applying 60X1C0 workarounds to unknown rev\n");
2197 hp_flags |= MV_HP_ERRATA_60X1C0;
2198 break;
2199 }
2200 break;
2201
2202 default:
2203 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2204 return 1;
2205 }
2206
2207 hpriv->hp_flags = hp_flags;
2208
2209 return 0;
2210}
2211
2212/**
2213 * mv_init_host - Perform some early initialization of the host.
2214 * @pdev: host PCI device
2215 * @probe_ent: early data struct representing the host
2216 *
2217 * If possible, do an early global reset of the host. Then do
2218 * our port init and clear/unmask all/relevant host interrupts.
2219 *
2220 * LOCKING:
2221 * Inherited from caller.
2222 */
2223static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2224 unsigned int board_idx)
2225{
2226 int rc = 0, n_hc, port, hc;
2227 void __iomem *mmio = probe_ent->mmio_base;
2228 struct mv_host_priv *hpriv = probe_ent->private_data;
2229
2230 /* global interrupt mask */
2231 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2232
2233 rc = mv_chip_id(pdev, hpriv, board_idx);
2234 if (rc)
2235 goto done;
2236
2237 n_hc = mv_get_hc_count(probe_ent->port_flags);
2238 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
2239
2240 for (port = 0; port < probe_ent->n_ports; port++)
2241 hpriv->ops->read_preamp(hpriv, port, mmio);
2242
2243 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2244 if (rc)
2245 goto done;
2246
2247 hpriv->ops->reset_flash(hpriv, mmio);
2248 hpriv->ops->reset_bus(pdev, mmio);
2249 hpriv->ops->enable_leds(hpriv, mmio);
2250
2251 for (port = 0; port < probe_ent->n_ports; port++) {
2252 if (IS_60XX(hpriv)) {
2253 void __iomem *port_mmio = mv_port_base(mmio, port);
2254
2255 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2256 ifctl |= (1 << 7); /* enable gen2i speed */
2257 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2258 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2259 }
2260
2261 hpriv->ops->phy_errata(hpriv, mmio, port);
2262 }
2263
2264 for (port = 0; port < probe_ent->n_ports; port++) {
2265 void __iomem *port_mmio = mv_port_base(mmio, port);
2266 mv_port_init(&probe_ent->port[port], port_mmio);
2267 }
2268
2269 for (hc = 0; hc < n_hc; hc++) {
2270 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2271
2272 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2273 "(before clear)=0x%08x\n", hc,
2274 readl(hc_mmio + HC_CFG_OFS),
2275 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2276
2277 /* Clear any currently outstanding hc interrupt conditions */
2278 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2279 }
2280
2281 /* Clear any currently outstanding host interrupt conditions */
2282 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2283
2284 /* and unmask interrupt generation for host regs */
2285 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2286 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2287
2288 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2289 "PCI int cause/mask=0x%08x/0x%08x\n",
2290 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2291 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2292 readl(mmio + PCI_IRQ_CAUSE_OFS),
2293 readl(mmio + PCI_IRQ_MASK_OFS));
2294
2295done:
2296 return rc;
2297}
2298
2299/**
2300 * mv_print_info - Dump key info to kernel log for perusal.
2301 * @probe_ent: early data struct representing the host
2302 *
2303 * FIXME: complete this.
2304 *
2305 * LOCKING:
2306 * Inherited from caller.
2307 */
2308static void mv_print_info(struct ata_probe_ent *probe_ent)
2309{
2310 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
2311 struct mv_host_priv *hpriv = probe_ent->private_data;
2312 u8 rev_id, scc;
2313 const char *scc_s;
2314
2315 /* Use this to determine the HW stepping of the chip so we know
2316	 * what errata to work around
2317 */
2318 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2319
2320 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2321 if (scc == 0)
2322 scc_s = "SCSI";
2323 else if (scc == 0x01)
2324 scc_s = "RAID";
2325 else
2326 scc_s = "unknown";
2327
2328 dev_printk(KERN_INFO, &pdev->dev,
2329 "%u slots %u ports %s mode IRQ via %s\n",
2330 (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
2331 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2332}
2333
2334/**
2335 * mv_init_one - handle a positive probe of a Marvell host
2336 * @pdev: PCI device found
2337 * @ent: PCI device ID entry for the matched host
2338 *
2339 * LOCKING:
2340 * Inherited from caller.
2341 */
2342static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2343{
2344 static int printed_version = 0;
2345 struct ata_probe_ent *probe_ent = NULL;
2346 struct mv_host_priv *hpriv;
2347 unsigned int board_idx = (unsigned int)ent->driver_data;
2348 void __iomem *mmio_base;
2349 int pci_dev_busy = 0, rc;
2350
2351 if (!printed_version++)
2352 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2353
2354 rc = pci_enable_device(pdev);
2355 if (rc) {
2356 return rc;
2357 }
2358 pci_set_master(pdev);
2359
2360 rc = pci_request_regions(pdev, DRV_NAME);
2361 if (rc) {
2362 pci_dev_busy = 1;
2363 goto err_out;
2364 }
2365
2366 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
2367 if (probe_ent == NULL) {
2368 rc = -ENOMEM;
2369 goto err_out_regions;
2370 }
2371
2372 memset(probe_ent, 0, sizeof(*probe_ent));
2373 probe_ent->dev = pci_dev_to_dev(pdev);
2374 INIT_LIST_HEAD(&probe_ent->node);
2375
2376 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
2377 if (mmio_base == NULL) {
2378 rc = -ENOMEM;
2379 goto err_out_free_ent;
2380 }
2381
2382 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
2383 if (!hpriv) {
2384 rc = -ENOMEM;
2385 goto err_out_iounmap;
2386 }
2387 memset(hpriv, 0, sizeof(*hpriv));
2388
2389 probe_ent->sht = mv_port_info[board_idx].sht;
2390 probe_ent->port_flags = mv_port_info[board_idx].flags;
2391 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
2392 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
2393 probe_ent->port_ops = mv_port_info[board_idx].port_ops;
2394
2395 probe_ent->irq = pdev->irq;
2396 probe_ent->irq_flags = IRQF_SHARED;
2397 probe_ent->mmio_base = mmio_base;
2398 probe_ent->private_data = hpriv;
2399
2400 /* initialize adapter */
2401 rc = mv_init_host(pdev, probe_ent, board_idx);
2402 if (rc) {
2403 goto err_out_hpriv;
2404 }
2405
2406 /* Enable interrupts */
2407 if (msi && pci_enable_msi(pdev) == 0) {
2408 hpriv->hp_flags |= MV_HP_FLAG_MSI;
2409 } else {
2410 pci_intx(pdev, 1);
2411 }
2412
2413 mv_dump_pci_cfg(pdev, 0x68);
2414 mv_print_info(probe_ent);
2415
2416 if (ata_device_add(probe_ent) == 0) {
2417 rc = -ENODEV; /* No devices discovered */
2418 goto err_out_dev_add;
2419 }
2420
2421 kfree(probe_ent);
2422 return 0;
2423
2424err_out_dev_add:
2425 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
2426 pci_disable_msi(pdev);
2427 } else {
2428 pci_intx(pdev, 0);
2429 }
2430err_out_hpriv:
2431 kfree(hpriv);
2432err_out_iounmap:
2433 pci_iounmap(pdev, mmio_base);
2434err_out_free_ent:
2435 kfree(probe_ent);
2436err_out_regions:
2437 pci_release_regions(pdev);
2438err_out:
2439 if (!pci_dev_busy) {
2440 pci_disable_device(pdev);
2441 }
2442
2443 return rc;
2444}
2445
2446static int __init mv_init(void)
2447{
2448 return pci_register_driver(&mv_pci_driver);
2449}
2450
2451static void __exit mv_exit(void)
2452{
2453 pci_unregister_driver(&mv_pci_driver);
2454}
2455
2456MODULE_AUTHOR("Brett Russ");
2457MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2458MODULE_LICENSE("GPL");
2459MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2460MODULE_VERSION(DRV_VERSION);
2461
2462module_param(msi, int, 0444);
2463MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2464
2465module_init(mv_init);
2466module_exit(mv_exit);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
new file mode 100644
index 000000000000..27c22feebf30
--- /dev/null
+++ b/drivers/ata/sata_nv.c
@@ -0,0 +1,595 @@
1/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion to other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/init.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/interrupt.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <linux/libata.h>
44
45#define DRV_NAME "sata_nv"
46#define DRV_VERSION "2.0"
47
48enum {
49 NV_PORTS = 2,
50 NV_PIO_MASK = 0x1f,
51 NV_MWDMA_MASK = 0x07,
52 NV_UDMA_MASK = 0x7f,
53 NV_PORT0_SCR_REG_OFFSET = 0x00,
54 NV_PORT1_SCR_REG_OFFSET = 0x40,
55
56 /* INT_STATUS/ENABLE */
57 NV_INT_STATUS = 0x10,
58 NV_INT_ENABLE = 0x11,
59 NV_INT_STATUS_CK804 = 0x440,
60 NV_INT_ENABLE_CK804 = 0x441,
61
62 /* INT_STATUS/ENABLE bits */
63 NV_INT_DEV = 0x01,
64 NV_INT_PM = 0x02,
65 NV_INT_ADDED = 0x04,
66 NV_INT_REMOVED = 0x08,
67
68 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
69
70 NV_INT_ALL = 0x0f,
71 NV_INT_MASK = NV_INT_DEV |
72 NV_INT_ADDED | NV_INT_REMOVED,
73
74 /* INT_CONFIG */
75 NV_INT_CONFIG = 0x12,
76	NV_INT_CONFIG_METHD	= 0x01, /* 0 = INT, 1 = SMI */
77
78	/* For PCI config register 20 */
79 NV_MCP_SATA_CFG_20 = 0x50,
80 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
81};
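
Each port owns a 4-bit field in the nForce status/enable bytes (NV_INT_PORT_SHIFT == 4), so a port's bits are the port-0 bits shifted up one nibble per port. A minimal model of that packing, reusing the bit values from the enum above:

#include <stdint.h>

enum { INT_DEV = 0x01, INT_ADDED = 0x04, INT_REMOVED = 0x08,
       INT_ALL = 0x0f, INT_PORT_SHIFT = 4 };

static uint8_t port_int_bits(unsigned int port_no, uint8_t bits)
{
	return (uint8_t)(bits << (port_no * INT_PORT_SHIFT));
}
/* port_int_bits(1, INT_DEV) == 0x10; port_int_bits(1, INT_ALL) == 0xf0 */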
82
83static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
84static void nv_ck804_host_stop(struct ata_host *host);
85static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
86 struct pt_regs *regs);
87static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
88 struct pt_regs *regs);
89static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
90 struct pt_regs *regs);
91static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
92static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
93
94static void nv_nf2_freeze(struct ata_port *ap);
95static void nv_nf2_thaw(struct ata_port *ap);
96static void nv_ck804_freeze(struct ata_port *ap);
97static void nv_ck804_thaw(struct ata_port *ap);
98static void nv_error_handler(struct ata_port *ap);
99
100enum nv_host_type
101{
102 GENERIC,
103 NFORCE2,
104 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
105 CK804
106};
107
108static const struct pci_device_id nv_pci_tbl[] = {
109 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
111 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
113 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
115 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
117 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
119 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
121 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
123 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
125 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
127 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
129 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
131 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
133 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
135 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
137 { PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
138 { PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
139 { PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
140 { PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
141 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
142 PCI_ANY_ID, PCI_ANY_ID,
143 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
144 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
145 PCI_ANY_ID, PCI_ANY_ID,
146 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
147 { 0, } /* terminate list */
148};
149
150static struct pci_driver nv_pci_driver = {
151 .name = DRV_NAME,
152 .id_table = nv_pci_tbl,
153 .probe = nv_init_one,
154 .remove = ata_pci_remove_one,
155};
156
157static struct scsi_host_template nv_sht = {
158 .module = THIS_MODULE,
159 .name = DRV_NAME,
160 .ioctl = ata_scsi_ioctl,
161 .queuecommand = ata_scsi_queuecmd,
162 .can_queue = ATA_DEF_QUEUE,
163 .this_id = ATA_SHT_THIS_ID,
164 .sg_tablesize = LIBATA_MAX_PRD,
165 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
166 .emulated = ATA_SHT_EMULATED,
167 .use_clustering = ATA_SHT_USE_CLUSTERING,
168 .proc_name = DRV_NAME,
169 .dma_boundary = ATA_DMA_BOUNDARY,
170 .slave_configure = ata_scsi_slave_config,
171 .slave_destroy = ata_scsi_slave_destroy,
172 .bios_param = ata_std_bios_param,
173};
174
175static const struct ata_port_operations nv_generic_ops = {
176 .port_disable = ata_port_disable,
177 .tf_load = ata_tf_load,
178 .tf_read = ata_tf_read,
179 .exec_command = ata_exec_command,
180 .check_status = ata_check_status,
181 .dev_select = ata_std_dev_select,
182 .bmdma_setup = ata_bmdma_setup,
183 .bmdma_start = ata_bmdma_start,
184 .bmdma_stop = ata_bmdma_stop,
185 .bmdma_status = ata_bmdma_status,
186 .qc_prep = ata_qc_prep,
187 .qc_issue = ata_qc_issue_prot,
188 .freeze = ata_bmdma_freeze,
189 .thaw = ata_bmdma_thaw,
190 .error_handler = nv_error_handler,
191 .post_internal_cmd = ata_bmdma_post_internal_cmd,
192 .data_xfer = ata_pio_data_xfer,
193 .irq_handler = nv_generic_interrupt,
194 .irq_clear = ata_bmdma_irq_clear,
195 .scr_read = nv_scr_read,
196 .scr_write = nv_scr_write,
197 .port_start = ata_port_start,
198 .port_stop = ata_port_stop,
199 .host_stop = ata_pci_host_stop,
200};
201
202static const struct ata_port_operations nv_nf2_ops = {
203 .port_disable = ata_port_disable,
204 .tf_load = ata_tf_load,
205 .tf_read = ata_tf_read,
206 .exec_command = ata_exec_command,
207 .check_status = ata_check_status,
208 .dev_select = ata_std_dev_select,
209 .bmdma_setup = ata_bmdma_setup,
210 .bmdma_start = ata_bmdma_start,
211 .bmdma_stop = ata_bmdma_stop,
212 .bmdma_status = ata_bmdma_status,
213 .qc_prep = ata_qc_prep,
214 .qc_issue = ata_qc_issue_prot,
215 .freeze = nv_nf2_freeze,
216 .thaw = nv_nf2_thaw,
217 .error_handler = nv_error_handler,
218 .post_internal_cmd = ata_bmdma_post_internal_cmd,
219 .data_xfer = ata_pio_data_xfer,
220 .irq_handler = nv_nf2_interrupt,
221 .irq_clear = ata_bmdma_irq_clear,
222 .scr_read = nv_scr_read,
223 .scr_write = nv_scr_write,
224 .port_start = ata_port_start,
225 .port_stop = ata_port_stop,
226 .host_stop = ata_pci_host_stop,
227};
228
229static const struct ata_port_operations nv_ck804_ops = {
230 .port_disable = ata_port_disable,
231 .tf_load = ata_tf_load,
232 .tf_read = ata_tf_read,
233 .exec_command = ata_exec_command,
234 .check_status = ata_check_status,
235 .dev_select = ata_std_dev_select,
236 .bmdma_setup = ata_bmdma_setup,
237 .bmdma_start = ata_bmdma_start,
238 .bmdma_stop = ata_bmdma_stop,
239 .bmdma_status = ata_bmdma_status,
240 .qc_prep = ata_qc_prep,
241 .qc_issue = ata_qc_issue_prot,
242 .freeze = nv_ck804_freeze,
243 .thaw = nv_ck804_thaw,
244 .error_handler = nv_error_handler,
245 .post_internal_cmd = ata_bmdma_post_internal_cmd,
246 .data_xfer = ata_pio_data_xfer,
247 .irq_handler = nv_ck804_interrupt,
248 .irq_clear = ata_bmdma_irq_clear,
249 .scr_read = nv_scr_read,
250 .scr_write = nv_scr_write,
251 .port_start = ata_port_start,
252 .port_stop = ata_port_stop,
253 .host_stop = nv_ck804_host_stop,
254};
255
256static struct ata_port_info nv_port_info[] = {
257 /* generic */
258 {
259 .sht = &nv_sht,
260 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
261 .pio_mask = NV_PIO_MASK,
262 .mwdma_mask = NV_MWDMA_MASK,
263 .udma_mask = NV_UDMA_MASK,
264 .port_ops = &nv_generic_ops,
265 },
266 /* nforce2/3 */
267 {
268 .sht = &nv_sht,
269 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
270 .pio_mask = NV_PIO_MASK,
271 .mwdma_mask = NV_MWDMA_MASK,
272 .udma_mask = NV_UDMA_MASK,
273 .port_ops = &nv_nf2_ops,
274 },
275 /* ck804 */
276 {
277 .sht = &nv_sht,
278 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
279 .pio_mask = NV_PIO_MASK,
280 .mwdma_mask = NV_MWDMA_MASK,
281 .udma_mask = NV_UDMA_MASK,
282 .port_ops = &nv_ck804_ops,
283 },
284};
285
286MODULE_AUTHOR("NVIDIA");
287MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
288MODULE_LICENSE("GPL");
289MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
290MODULE_VERSION(DRV_VERSION);
291
292static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
293 struct pt_regs *regs)
294{
295 struct ata_host *host = dev_instance;
296 unsigned int i;
297 unsigned int handled = 0;
298 unsigned long flags;
299
300 spin_lock_irqsave(&host->lock, flags);
301
302 for (i = 0; i < host->n_ports; i++) {
303 struct ata_port *ap;
304
305 ap = host->ports[i];
306 if (ap &&
307 !(ap->flags & ATA_FLAG_DISABLED)) {
308 struct ata_queued_cmd *qc;
309
310 qc = ata_qc_from_tag(ap, ap->active_tag);
311 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
312 handled += ata_host_intr(ap, qc);
313 else
314				/* No request pending?  Clear interrupt
315				 * status anyway, in case one is pending. */
316 ap->ops->check_status(ap);
317 }
318
319 }
320
321 spin_unlock_irqrestore(&host->lock, flags);
322
323 return IRQ_RETVAL(handled);
324}
325
326static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
327{
328 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
329 int handled;
330
331 /* freeze if hotplugged */
332 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
333 ata_port_freeze(ap);
334 return 1;
335 }
336
337 /* bail out if not our interrupt */
338 if (!(irq_stat & NV_INT_DEV))
339 return 0;
340
341 /* DEV interrupt w/ no active qc? */
342 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
343 ata_check_status(ap);
344 return 1;
345 }
346
347 /* handle interrupt */
348 handled = ata_host_intr(ap, qc);
349 if (unlikely(!handled)) {
350 /* spurious, clear it */
351 ata_check_status(ap);
352 }
353
354 return 1;
355}
356
357static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
358{
359 int i, handled = 0;
360
361 for (i = 0; i < host->n_ports; i++) {
362 struct ata_port *ap = host->ports[i];
363
364 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
365 handled += nv_host_intr(ap, irq_stat);
366
367 irq_stat >>= NV_INT_PORT_SHIFT;
368 }
369
370 return IRQ_RETVAL(handled);
371}
372
373static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
374 struct pt_regs *regs)
375{
376 struct ata_host *host = dev_instance;
377 u8 irq_stat;
378 irqreturn_t ret;
379
380 spin_lock(&host->lock);
381 irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
382 ret = nv_do_interrupt(host, irq_stat);
383 spin_unlock(&host->lock);
384
385 return ret;
386}
387
388static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
389 struct pt_regs *regs)
390{
391 struct ata_host *host = dev_instance;
392 u8 irq_stat;
393 irqreturn_t ret;
394
395 spin_lock(&host->lock);
396 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
397 ret = nv_do_interrupt(host, irq_stat);
398 spin_unlock(&host->lock);
399
400 return ret;
401}
402
403static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
404{
405 if (sc_reg > SCR_CONTROL)
406 return 0xffffffffU;
407
408 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
409}
410
411static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
412{
413 if (sc_reg > SCR_CONTROL)
414 return;
415
416 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
417}
418
419static void nv_nf2_freeze(struct ata_port *ap)
420{
421 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
422 int shift = ap->port_no * NV_INT_PORT_SHIFT;
423 u8 mask;
424
425 mask = inb(scr_addr + NV_INT_ENABLE);
426 mask &= ~(NV_INT_ALL << shift);
427 outb(mask, scr_addr + NV_INT_ENABLE);
428}
429
430static void nv_nf2_thaw(struct ata_port *ap)
431{
432 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
433 int shift = ap->port_no * NV_INT_PORT_SHIFT;
434 u8 mask;
435
436 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
437
438 mask = inb(scr_addr + NV_INT_ENABLE);
439 mask |= (NV_INT_MASK << shift);
440 outb(mask, scr_addr + NV_INT_ENABLE);
441}
442
443static void nv_ck804_freeze(struct ata_port *ap)
444{
445 void __iomem *mmio_base = ap->host->mmio_base;
446 int shift = ap->port_no * NV_INT_PORT_SHIFT;
447 u8 mask;
448
449 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
450 mask &= ~(NV_INT_ALL << shift);
451 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
452}
453
454static void nv_ck804_thaw(struct ata_port *ap)
455{
456 void __iomem *mmio_base = ap->host->mmio_base;
457 int shift = ap->port_no * NV_INT_PORT_SHIFT;
458 u8 mask;
459
460 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
461
462 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
463 mask |= (NV_INT_MASK << shift);
464 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
465}
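
Both freeze/thaw pairs above are the same read-modify-write on the enable register: freeze clears the port's whole nibble, thaw re-enables the NV_INT_MASK subset after write-clearing stale status. A userspace reduction (fake_enable stands in for the enable register; the status write-clear is noted but omitted):

#include <stdint.h>

enum { INT_ALL = 0x0f, INT_PORT_SHIFT = 4,
       INT_MASK = 0x0d };	/* DEV | ADDED | REMOVED */

static uint8_t fake_enable;

static void port_freeze(unsigned int port_no)
{
	fake_enable &= (uint8_t)~(INT_ALL << (port_no * INT_PORT_SHIFT));
}

static void port_thaw(unsigned int port_no)
{
	/* the driver write-clears the port's status nibble first */
	fake_enable |= (uint8_t)(INT_MASK << (port_no * INT_PORT_SHIFT));
}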
466
467static int nv_hardreset(struct ata_port *ap, unsigned int *class)
468{
469 unsigned int dummy;
470
471 /* SATA hardreset fails to retrieve proper device signature on
472 * some controllers. Don't classify on hardreset. For more
473 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
474 */
475 return sata_std_hardreset(ap, &dummy);
476}
477
478static void nv_error_handler(struct ata_port *ap)
479{
480 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
481 nv_hardreset, ata_std_postreset);
482}
483
484static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
485{
486 static int printed_version = 0;
487 struct ata_port_info *ppi;
488 struct ata_probe_ent *probe_ent;
489 int pci_dev_busy = 0;
490 int rc;
491 u32 bar;
492 unsigned long base;
493
494	/* Make sure this is a SATA controller by counting the number of
495	 * BARs (NVIDIA SATA controllers will always have six).  Otherwise,
496	 * it's an IDE controller and we ignore it. */
497 for (bar=0; bar<6; bar++)
498 if (pci_resource_start(pdev, bar) == 0)
499 return -ENODEV;
500
501 if (!printed_version++)
502 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
503
504 rc = pci_enable_device(pdev);
505 if (rc)
506 goto err_out;
507
508 rc = pci_request_regions(pdev, DRV_NAME);
509 if (rc) {
510 pci_dev_busy = 1;
511 goto err_out_disable;
512 }
513
514 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
515 if (rc)
516 goto err_out_regions;
517 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
518 if (rc)
519 goto err_out_regions;
520
521 rc = -ENOMEM;
522
523 ppi = &nv_port_info[ent->driver_data];
524 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
525 if (!probe_ent)
526 goto err_out_regions;
527
528 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
529 if (!probe_ent->mmio_base) {
530 rc = -EIO;
531 goto err_out_free_ent;
532 }
533
534 base = (unsigned long)probe_ent->mmio_base;
535
536 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
537 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
538
539 /* enable SATA space for CK804 */
540 if (ent->driver_data == CK804) {
541 u8 regval;
542
543 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
544 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
545 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
546 }
547
548 pci_set_master(pdev);
549
550 rc = ata_device_add(probe_ent);
551 if (rc != NV_PORTS)
552 goto err_out_iounmap;
553
554 kfree(probe_ent);
555
556 return 0;
557
558err_out_iounmap:
559 pci_iounmap(pdev, probe_ent->mmio_base);
560err_out_free_ent:
561 kfree(probe_ent);
562err_out_regions:
563 pci_release_regions(pdev);
564err_out_disable:
565 if (!pci_dev_busy)
566 pci_disable_device(pdev);
567err_out:
568 return rc;
569}
570
571static void nv_ck804_host_stop(struct ata_host *host)
572{
573 struct pci_dev *pdev = to_pci_dev(host->dev);
574 u8 regval;
575
576 /* disable SATA space for CK804 */
577 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
578 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
579 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
580
581 ata_pci_host_stop(host);
582}
583
584static int __init nv_init(void)
585{
586 return pci_register_driver(&nv_pci_driver);
587}
588
589static void __exit nv_exit(void)
590{
591 pci_unregister_driver(&nv_pci_driver);
592}
593
594module_init(nv_init);
595module_exit(nv_exit);
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
new file mode 100644
index 000000000000..d627812ea73d
--- /dev/null
+++ b/drivers/ata/sata_promise.c
@@ -0,0 +1,844 @@
1/*
2 * sata_promise.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware information only available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.04"
50
51
52enum {
53 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
54 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
55 PDC_TBG_MODE = 0x41, /* TBG mode */
56 PDC_FLASH_CTL = 0x44, /* Flash control register */
57 PDC_PCI_CTL = 0x48, /* PCI control and status register */
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
63
64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
65 (1<<8) | (1<<9) | (1<<10),
66
67 board_2037x = 0, /* FastTrak S150 TX2plus */
68 board_20319 = 1, /* FastTrak S150 TX4 */
69 board_20619 = 2, /* FastTrak TX4000 */
70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
73
74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
75
76 PDC_RESET = (1 << 11), /* HDMA reset */
77
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING,
81};
82
83
84struct pdc_port_priv {
85 u8 *pkt;
86 dma_addr_t pkt_dma;
87};
88
89struct pdc_host_priv {
90 int hotplug_offset;
91};
92
93static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
94static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
95static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
96static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
97static void pdc_eng_timeout(struct ata_port *ap);
98static int pdc_port_start(struct ata_port *ap);
99static void pdc_port_stop(struct ata_port *ap);
100static void pdc_pata_phy_reset(struct ata_port *ap);
101static void pdc_sata_phy_reset(struct ata_port *ap);
102static void pdc_qc_prep(struct ata_queued_cmd *qc);
103static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
104static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
105static void pdc_irq_clear(struct ata_port *ap);
106static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
107static void pdc_host_stop(struct ata_host *host);
108
109
110static struct scsi_host_template pdc_ata_sht = {
111 .module = THIS_MODULE,
112 .name = DRV_NAME,
113 .ioctl = ata_scsi_ioctl,
114 .queuecommand = ata_scsi_queuecmd,
115 .can_queue = ATA_DEF_QUEUE,
116 .this_id = ATA_SHT_THIS_ID,
117 .sg_tablesize = LIBATA_MAX_PRD,
118 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
119 .emulated = ATA_SHT_EMULATED,
120 .use_clustering = ATA_SHT_USE_CLUSTERING,
121 .proc_name = DRV_NAME,
122 .dma_boundary = ATA_DMA_BOUNDARY,
123 .slave_configure = ata_scsi_slave_config,
124 .slave_destroy = ata_scsi_slave_destroy,
125 .bios_param = ata_std_bios_param,
126};
127
128static const struct ata_port_operations pdc_sata_ops = {
129 .port_disable = ata_port_disable,
130 .tf_load = pdc_tf_load_mmio,
131 .tf_read = ata_tf_read,
132 .check_status = ata_check_status,
133 .exec_command = pdc_exec_command_mmio,
134 .dev_select = ata_std_dev_select,
135
136 .phy_reset = pdc_sata_phy_reset,
137
138 .qc_prep = pdc_qc_prep,
139 .qc_issue = pdc_qc_issue_prot,
140 .eng_timeout = pdc_eng_timeout,
141 .data_xfer = ata_mmio_data_xfer,
142 .irq_handler = pdc_interrupt,
143 .irq_clear = pdc_irq_clear,
144
145 .scr_read = pdc_sata_scr_read,
146 .scr_write = pdc_sata_scr_write,
147 .port_start = pdc_port_start,
148 .port_stop = pdc_port_stop,
149 .host_stop = pdc_host_stop,
150};
151
152static const struct ata_port_operations pdc_pata_ops = {
153 .port_disable = ata_port_disable,
154 .tf_load = pdc_tf_load_mmio,
155 .tf_read = ata_tf_read,
156 .check_status = ata_check_status,
157 .exec_command = pdc_exec_command_mmio,
158 .dev_select = ata_std_dev_select,
159
160 .phy_reset = pdc_pata_phy_reset,
161
162 .qc_prep = pdc_qc_prep,
163 .qc_issue = pdc_qc_issue_prot,
164 .data_xfer = ata_mmio_data_xfer,
165 .eng_timeout = pdc_eng_timeout,
166 .irq_handler = pdc_interrupt,
167 .irq_clear = pdc_irq_clear,
168
169 .port_start = pdc_port_start,
170 .port_stop = pdc_port_stop,
171 .host_stop = pdc_host_stop,
172};
173
174static const struct ata_port_info pdc_port_info[] = {
175 /* board_2037x */
176 {
177 .sht = &pdc_ata_sht,
178 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
179 .pio_mask = 0x1f, /* pio0-4 */
180 .mwdma_mask = 0x07, /* mwdma0-2 */
181 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
182 .port_ops = &pdc_sata_ops,
183 },
184
185 /* board_20319 */
186 {
187 .sht = &pdc_ata_sht,
188 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
189 .pio_mask = 0x1f, /* pio0-4 */
190 .mwdma_mask = 0x07, /* mwdma0-2 */
191 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
192 .port_ops = &pdc_sata_ops,
193 },
194
195 /* board_20619 */
196 {
197 .sht = &pdc_ata_sht,
198 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
199 .pio_mask = 0x1f, /* pio0-4 */
200 .mwdma_mask = 0x07, /* mwdma0-2 */
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_pata_ops,
203 },
204
205 /* board_20771 */
206 {
207 .sht = &pdc_ata_sht,
208 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
209 .pio_mask = 0x1f, /* pio0-4 */
210 .mwdma_mask = 0x07, /* mwdma0-2 */
211 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
212 .port_ops = &pdc_sata_ops,
213 },
214
215 /* board_2057x */
216 {
217 .sht = &pdc_ata_sht,
218 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
219 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
222 .port_ops = &pdc_sata_ops,
223 },
224
225 /* board_40518 */
226 {
227 .sht = &pdc_ata_sht,
228 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
229 .pio_mask = 0x1f, /* pio0-4 */
230 .mwdma_mask = 0x07, /* mwdma0-2 */
231 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
232 .port_ops = &pdc_sata_ops,
233 },
234};
235
236static const struct pci_device_id pdc_ata_pci_tbl[] = {
237 { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_2037x },
239 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
240 board_2037x },
241 { PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
242 board_2037x },
243 { PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
244 board_2037x },
245 { PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
246 board_2037x },
247 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
248 board_2037x },
249 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
250 board_2057x },
251 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
252 board_2057x },
253 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
254 board_2037x },
255
256 { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
257 board_20319 },
258 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
259 board_20319 },
260 { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
261 board_20319 },
262 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
263 board_20319 },
264 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
265 board_20319 },
266 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
267 board_40518 },
268
269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
270 board_20619 },
271
272/* TODO: remove all associated board_20771 code, as it completely
273 * duplicates board_2037x code, unless a reason for the separation
274 * can be divined.
275 */
276#if 0
277 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
278 board_20771 },
279#endif
280
281 { } /* terminate list */
282};
283
284
285static struct pci_driver pdc_ata_pci_driver = {
286 .name = DRV_NAME,
287 .id_table = pdc_ata_pci_tbl,
288 .probe = pdc_ata_init_one,
289 .remove = ata_pci_remove_one,
290};
291
292
293static int pdc_port_start(struct ata_port *ap)
294{
295 struct device *dev = ap->host->dev;
296 struct pdc_port_priv *pp;
297 int rc;
298
299 rc = ata_port_start(ap);
300 if (rc)
301 return rc;
302
303 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
304 if (!pp) {
305 rc = -ENOMEM;
306 goto err_out;
307 }
308
309 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
310 if (!pp->pkt) {
311 rc = -ENOMEM;
312 goto err_out_kfree;
313 }
314
315 ap->private_data = pp;
316
317 return 0;
318
319err_out_kfree:
320 kfree(pp);
321err_out:
322 ata_port_stop(ap);
323 return rc;
324}
325
326
327static void pdc_port_stop(struct ata_port *ap)
328{
329 struct device *dev = ap->host->dev;
330 struct pdc_port_priv *pp = ap->private_data;
331
332 ap->private_data = NULL;
333 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
334 kfree(pp);
335 ata_port_stop(ap);
336}
337
338
339static void pdc_host_stop(struct ata_host *host)
340{
341 struct pdc_host_priv *hp = host->private_data;
342
343 ata_pci_host_stop(host);
344
345 kfree(hp);
346}
347
348
349static void pdc_reset_port(struct ata_port *ap)
350{
351 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
352 unsigned int i;
353 u32 tmp;
354
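	/* poll for PDC_RESET to latch, asserting it each iteration (~1 ms max) */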
355 for (i = 11; i > 0; i--) {
356 tmp = readl(mmio);
357 if (tmp & PDC_RESET)
358 break;
359
360 udelay(100);
361
362 tmp |= PDC_RESET;
363 writel(tmp, mmio);
364 }
365
366 tmp &= ~PDC_RESET;
367 writel(tmp, mmio);
368 readl(mmio); /* flush */
369}
370
371static void pdc_sata_phy_reset(struct ata_port *ap)
372{
373 pdc_reset_port(ap);
374 sata_phy_reset(ap);
375}
376
377static void pdc_pata_cbl_detect(struct ata_port *ap)
378{
379 u8 tmp;
380	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
381
382 tmp = readb(mmio);
383
384 if (tmp & 0x01) {
385 ap->cbl = ATA_CBL_PATA40;
386 ap->udma_mask &= ATA_UDMA_MASK_40C;
387 } else
388 ap->cbl = ATA_CBL_PATA80;
389}
390
391static void pdc_pata_phy_reset(struct ata_port *ap)
392{
393 pdc_pata_cbl_detect(ap);
394 pdc_reset_port(ap);
395 ata_port_probe(ap);
396 ata_bus_reset(ap);
397}
398
399static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
400{
401 if (sc_reg > SCR_CONTROL)
402 return 0xffffffffU;
403 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
404}
405
406
407static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
408 u32 val)
409{
410 if (sc_reg > SCR_CONTROL)
411 return;
412 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
413}
414
415static void pdc_qc_prep(struct ata_queued_cmd *qc)
416{
417 struct pdc_port_priv *pp = qc->ap->private_data;
418 unsigned int i;
419
420 VPRINTK("ENTER\n");
421
422 switch (qc->tf.protocol) {
423 case ATA_PROT_DMA:
424 ata_qc_prep(qc);
425 /* fall through */
426
427 case ATA_PROT_NODATA:
428 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
429 qc->dev->devno, pp->pkt);
430
431 if (qc->tf.flags & ATA_TFLAG_LBA48)
432 i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
433 else
434 i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
435
436 pdc_pkt_footer(&qc->tf, pp->pkt, i);
437 break;
438
439 default:
440 break;
441 }
442}
443
444static void pdc_eng_timeout(struct ata_port *ap)
445{
446 struct ata_host *host = ap->host;
447 u8 drv_stat;
448 struct ata_queued_cmd *qc;
449 unsigned long flags;
450
451 DPRINTK("ENTER\n");
452
453 spin_lock_irqsave(&host->lock, flags);
454
455 qc = ata_qc_from_tag(ap, ap->active_tag);
456
457 switch (qc->tf.protocol) {
458 case ATA_PROT_DMA:
459 case ATA_PROT_NODATA:
460 ata_port_printk(ap, KERN_ERR, "command timeout\n");
461 drv_stat = ata_wait_idle(ap);
462 qc->err_mask |= __ac_err_mask(drv_stat);
463 break;
464
465 default:
466 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
467
468 ata_port_printk(ap, KERN_ERR,
469 "unknown timeout, cmd 0x%x stat 0x%x\n",
470 qc->tf.command, drv_stat);
471
472 qc->err_mask |= ac_err_mask(drv_stat);
473 break;
474 }
475
476 spin_unlock_irqrestore(&host->lock, flags);
477 ata_eh_qc_complete(qc);
478 DPRINTK("EXIT\n");
479}
480
481static inline unsigned int pdc_host_intr(struct ata_port *ap,
482 struct ata_queued_cmd *qc)
483{
484 unsigned int handled = 0;
485 u32 tmp;
486 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
487
488 tmp = readl(mmio);
489 if (tmp & PDC_ERR_MASK) {
490 qc->err_mask |= AC_ERR_DEV;
491 pdc_reset_port(ap);
492 }
493
494 switch (qc->tf.protocol) {
495 case ATA_PROT_DMA:
496 case ATA_PROT_NODATA:
497 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
498 ata_qc_complete(qc);
499 handled = 1;
500 break;
501
502 default:
503 ap->stats.idle_irq++;
504 break;
505 }
506
507 return handled;
508}
509
510static void pdc_irq_clear(struct ata_port *ap)
511{
512 struct ata_host *host = ap->host;
513 void __iomem *mmio = host->mmio_base;
514
515 readl(mmio + PDC_INT_SEQMASK);
516}
517
518static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
519{
520 struct ata_host *host = dev_instance;
521 struct ata_port *ap;
522 u32 mask = 0;
523 unsigned int i, tmp;
524 unsigned int handled = 0;
525 void __iomem *mmio_base;
526
527 VPRINTK("ENTER\n");
528
529 if (!host || !host->mmio_base) {
530 VPRINTK("QUICK EXIT\n");
531 return IRQ_NONE;
532 }
533
534 mmio_base = host->mmio_base;
535
536 /* reading should also clear interrupts */
537 mask = readl(mmio_base + PDC_INT_SEQMASK);
538
539 if (mask == 0xffffffff) {
540 VPRINTK("QUICK EXIT 2\n");
541 return IRQ_NONE;
542 }
543
544 spin_lock(&host->lock);
545
546 mask &= 0xffff; /* only 16 tags possible */
547 if (!mask) {
548 VPRINTK("QUICK EXIT 3\n");
549 goto done_irq;
550 }
551
552 writel(mask, mmio_base + PDC_INT_SEQMASK);
553
554 for (i = 0; i < host->n_ports; i++) {
555 VPRINTK("port %u\n", i);
556 ap = host->ports[i];
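		/* SEQ IDs start at 1 (port_no + 1, see pdc_packet_start), hence i + 1 */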
557 tmp = mask & (1 << (i + 1));
558 if (tmp && ap &&
559 !(ap->flags & ATA_FLAG_DISABLED)) {
560 struct ata_queued_cmd *qc;
561
562 qc = ata_qc_from_tag(ap, ap->active_tag);
563 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
564 handled += pdc_host_intr(ap, qc);
565 }
566 }
567
568 VPRINTK("EXIT\n");
569
570done_irq:
571 spin_unlock(&host->lock);
572 return IRQ_RETVAL(handled);
573}
574
575static inline void pdc_packet_start(struct ata_queued_cmd *qc)
576{
577 struct ata_port *ap = qc->ap;
578 struct pdc_port_priv *pp = ap->private_data;
579 unsigned int port_no = ap->port_no;
580 u8 seq = (u8) (port_no + 1);
581
582 VPRINTK("ENTER, ap %p\n", ap);
583
584 writel(0x00000001, ap->host->mmio_base + (seq * 4));
585 readl(ap->host->mmio_base + (seq * 4)); /* flush */
586
587 pp->pkt[2] = seq;
588 wmb(); /* flush PRD, pkt writes */
589 writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
590 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
591}
592
593static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
594{
595 switch (qc->tf.protocol) {
596 case ATA_PROT_DMA:
597 case ATA_PROT_NODATA:
598 pdc_packet_start(qc);
599 return 0;
600
601 case ATA_PROT_ATAPI_DMA:
602 BUG();
603 break;
604
605 default:
606 break;
607 }
608
609 return ata_qc_issue_prot(qc);
610}
611
612static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
613{
614	WARN_ON(tf->protocol == ATA_PROT_DMA ||
615		tf->protocol == ATA_PROT_NODATA);
616 ata_tf_load(ap, tf);
617}
618
619
620static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
621{
622	WARN_ON(tf->protocol == ATA_PROT_DMA ||
623		tf->protocol == ATA_PROT_NODATA);
624 ata_exec_command(ap, tf);
625}
626
627
628static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
629{
630 port->cmd_addr = base;
631 port->data_addr = base;
632 port->feature_addr =
633 port->error_addr = base + 0x4;
634 port->nsect_addr = base + 0x8;
635 port->lbal_addr = base + 0xc;
636 port->lbam_addr = base + 0x10;
637 port->lbah_addr = base + 0x14;
638 port->device_addr = base + 0x18;
639 port->command_addr =
640 port->status_addr = base + 0x1c;
641 port->altstatus_addr =
642 port->ctl_addr = base + 0x38;
643}
644
645
646static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
647{
648 void __iomem *mmio = pe->mmio_base;
649 struct pdc_host_priv *hp = pe->private_data;
650 int hotplug_offset = hp->hotplug_offset;
651 u32 tmp;
652
653 /*
654 * Except for the hotplug stuff, this is voodoo from the
655 * Promise driver. Label this entire section
656 * "TODO: figure out why we do this"
657 */
658
659 /* change FIFO_SHD to 8 dwords, enable BMR_BURST */
660 tmp = readl(mmio + PDC_FLASH_CTL);
661 tmp |= 0x12000; /* bit 16 (fifo 8 dw) and 13 (bmr burst?) */
662 writel(tmp, mmio + PDC_FLASH_CTL);
663
664 /* clear plug/unplug flags for all ports */
665 tmp = readl(mmio + hotplug_offset);
666 writel(tmp | 0xff, mmio + hotplug_offset);
667
668 /* mask plug/unplug ints */
669 tmp = readl(mmio + hotplug_offset);
670 writel(tmp | 0xff0000, mmio + hotplug_offset);
671
672	/* reduce TBG clock to 133 MHz. */
673	tmp = readl(mmio + PDC_TBG_MODE);
674	tmp &= ~0x30000; /* clear bits 17:16 */
675	tmp |= 0x10000; /* set bits 17:16 = 01b */
676 writel(tmp, mmio + PDC_TBG_MODE);
677
678 readl(mmio + PDC_TBG_MODE); /* flush */
679 msleep(10);
680
681 /* adjust slew rate control register. */
682 tmp = readl(mmio + PDC_SLEW_CTL);
683	tmp &= 0xFFFFF03F; /* clear bits 11:6 */
684	tmp |= 0x00000900; /* set bits 11:9 = 100b, bits 8:6 = 100b */
685 writel(tmp, mmio + PDC_SLEW_CTL);
686}
687
688static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
689{
690 static int printed_version;
691 struct ata_probe_ent *probe_ent = NULL;
692 struct pdc_host_priv *hp;
693 unsigned long base;
694 void __iomem *mmio_base;
695 unsigned int board_idx = (unsigned int) ent->driver_data;
696 int pci_dev_busy = 0;
697 int rc;
698
699 if (!printed_version++)
700 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
701
702 rc = pci_enable_device(pdev);
703 if (rc)
704 return rc;
705
706 rc = pci_request_regions(pdev, DRV_NAME);
707 if (rc) {
708 pci_dev_busy = 1;
709 goto err_out;
710 }
711
712 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
713 if (rc)
714 goto err_out_regions;
715 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
716 if (rc)
717 goto err_out_regions;
718
719 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
720 if (probe_ent == NULL) {
721 rc = -ENOMEM;
722 goto err_out_regions;
723 }
724
725 probe_ent->dev = pci_dev_to_dev(pdev);
726 INIT_LIST_HEAD(&probe_ent->node);
727
728 mmio_base = pci_iomap(pdev, 3, 0);
729 if (mmio_base == NULL) {
730 rc = -ENOMEM;
731 goto err_out_free_ent;
732 }
733 base = (unsigned long) mmio_base;
734
735 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
736 if (hp == NULL) {
737 rc = -ENOMEM;
738 goto err_out_free_ent;
739 }
740
741 /* Set default hotplug offset */
742 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
743 probe_ent->private_data = hp;
744
745 probe_ent->sht = pdc_port_info[board_idx].sht;
746 probe_ent->port_flags = pdc_port_info[board_idx].flags;
747 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
748 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
749 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
750 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
751
752 probe_ent->irq = pdev->irq;
753 probe_ent->irq_flags = IRQF_SHARED;
754 probe_ent->mmio_base = mmio_base;
755
756 pdc_ata_setup_port(&probe_ent->port[0], base + 0x200);
757 pdc_ata_setup_port(&probe_ent->port[1], base + 0x280);
758
759 probe_ent->port[0].scr_addr = base + 0x400;
760 probe_ent->port[1].scr_addr = base + 0x500;
761
762 /* notice 4-port boards */
763 switch (board_idx) {
764 case board_40518:
765 /* Override hotplug offset for SATAII150 */
766 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
767 /* Fall through */
768 case board_20319:
769 probe_ent->n_ports = 4;
770
771 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
772 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
773
774 probe_ent->port[2].scr_addr = base + 0x600;
775 probe_ent->port[3].scr_addr = base + 0x700;
776 break;
777 case board_2057x:
778 /* Override hotplug offset for SATAII150 */
779 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
780 /* Fall through */
781 case board_2037x:
782 probe_ent->n_ports = 2;
783 break;
784 case board_20771:
785 probe_ent->n_ports = 2;
786 break;
787 case board_20619:
788 probe_ent->n_ports = 4;
789
790 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
791 pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
792
793 probe_ent->port[2].scr_addr = base + 0x600;
794 probe_ent->port[3].scr_addr = base + 0x700;
795 break;
796 default:
797 BUG();
798 break;
799 }
800
801 pci_set_master(pdev);
802
803 /* initialize adapter */
804 pdc_host_init(board_idx, probe_ent);
805
806	/* FIXME: does anything other than hp need freeing here? */
807 if (!ata_device_add(probe_ent))
808 kfree(hp);
809
810 kfree(probe_ent);
811
812 return 0;
813
814err_out_free_ent:
815 kfree(probe_ent);
816err_out_regions:
817 pci_release_regions(pdev);
818err_out:
819 if (!pci_dev_busy)
820 pci_disable_device(pdev);
821 return rc;
822}
823
824
825static int __init pdc_ata_init(void)
826{
827 return pci_register_driver(&pdc_ata_pci_driver);
828}
829
830
831static void __exit pdc_ata_exit(void)
832{
833 pci_unregister_driver(&pdc_ata_pci_driver);
834}
835
836
837MODULE_AUTHOR("Jeff Garzik");
838MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
839MODULE_LICENSE("GPL");
840MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
841MODULE_VERSION(DRV_VERSION);
842
843module_init(pdc_ata_init);
844module_exit(pdc_ata_exit);
diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h
new file mode 100644
index 000000000000..6ee5e190262d
--- /dev/null
+++ b/drivers/ata/sata_promise.h
@@ -0,0 +1,157 @@
1/*
2 * sata_promise.h - Promise SATA common definitions and inline funcs
3 *
4 * Copyright 2003-2004 Red Hat, Inc.
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 *
22 * libata documentation is available via 'make {ps|pdf}docs',
23 * as Documentation/DocBook/libata.*
24 *
25 */
26
27#ifndef __SATA_PROMISE_H__
28#define __SATA_PROMISE_H__
29
30#include <linux/ata.h>
31
32enum pdc_packet_bits {
33 PDC_PKT_READ = (1 << 2),
34 PDC_PKT_NODATA = (1 << 3),
35
36 PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5),
37 PDC_PKT_CLEAR_BSY = (1 << 4),
38 PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4),
39 PDC_LAST_REG = (1 << 3),
40
41 PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1),
42};
43
44static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
45 dma_addr_t sg_table,
46 unsigned int devno, u8 *buf)
47{
48 u8 dev_reg;
49	__le32 *buf32 = (__le32 *) buf;
50
51 /* set control bits (byte 0), zero delay seq id (byte 3),
52 * and seq id (byte 2)
53 */
54 switch (tf->protocol) {
55 case ATA_PROT_DMA:
56 if (!(tf->flags & ATA_TFLAG_WRITE))
57 buf32[0] = cpu_to_le32(PDC_PKT_READ);
58 else
59 buf32[0] = 0;
60 break;
61
62 case ATA_PROT_NODATA:
63 buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
64 break;
65
66 default:
67 BUG();
68 break;
69 }
70
71 buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
72 buf32[2] = 0; /* no next-packet */
73
74 if (devno == 0)
75 dev_reg = ATA_DEVICE_OBS;
76 else
77 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
78
79 /* select device */
80 buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
81 buf[13] = dev_reg;
82
83 /* device control register */
84 buf[14] = (1 << 5) | PDC_REG_DEVCTL;
85 buf[15] = tf->ctl;
86
87 return 16; /* offset of next byte */
88}
89
90static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
91 unsigned int i)
92{
93 if (tf->flags & ATA_TFLAG_DEVICE) {
94 buf[i++] = (1 << 5) | ATA_REG_DEVICE;
95 buf[i++] = tf->device;
96 }
97
98 /* and finally the command itself; also includes end-of-pkt marker */
99 buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
100 buf[i++] = tf->command;
101
102 return i;
103}
104
105static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i)
106{
107 /* the "(1 << 5)" should be read "(count << 5)" */
108
109 /* ATA command block registers */
110 buf[i++] = (1 << 5) | ATA_REG_FEATURE;
111 buf[i++] = tf->feature;
112
113 buf[i++] = (1 << 5) | ATA_REG_NSECT;
114 buf[i++] = tf->nsect;
115
116 buf[i++] = (1 << 5) | ATA_REG_LBAL;
117 buf[i++] = tf->lbal;
118
119 buf[i++] = (1 << 5) | ATA_REG_LBAM;
120 buf[i++] = tf->lbam;
121
122 buf[i++] = (1 << 5) | ATA_REG_LBAH;
123 buf[i++] = tf->lbah;
124
125 return i;
126}
127
128static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i)
129{
130 /* the "(2 << 5)" should be read "(count << 5)" */
131
132 /* ATA command block registers */
133 buf[i++] = (2 << 5) | ATA_REG_FEATURE;
134 buf[i++] = tf->hob_feature;
135 buf[i++] = tf->feature;
136
137 buf[i++] = (2 << 5) | ATA_REG_NSECT;
138 buf[i++] = tf->hob_nsect;
139 buf[i++] = tf->nsect;
140
141 buf[i++] = (2 << 5) | ATA_REG_LBAL;
142 buf[i++] = tf->hob_lbal;
143 buf[i++] = tf->lbal;
144
145 buf[i++] = (2 << 5) | ATA_REG_LBAM;
146 buf[i++] = tf->hob_lbam;
147 buf[i++] = tf->lbam;
148
149 buf[i++] = (2 << 5) | ATA_REG_LBAH;
150 buf[i++] = tf->hob_lbah;
151 buf[i++] = tf->lbah;
152
153 return i;
154}
155
156
157#endif /* __SATA_PROMISE_H__ */
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
new file mode 100644
index 000000000000..fa29dfe2a7b5
--- /dev/null
+++ b/drivers/ata/sata_qstor.c
@@ -0,0 +1,730 @@
1/*
2 * sata_qstor.c - Pacific Digital Corporation QStor SATA
3 *
4 * Maintained by: Mark Lord <mlord@pobox.com>
5 *
6 * Copyright 2005 Pacific Digital Corporation.
7 * (OSL/GPL code release authorized by Jalil Fadavi).
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 *
25 * libata documentation is available via 'make {ps|pdf}docs',
26 * as Documentation/DocBook/libata.*
27 *
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
38#include <linux/device.h>
39#include <scsi/scsi_host.h>
40#include <asm/io.h>
41#include <linux/libata.h>
42
43#define DRV_NAME "sata_qstor"
44#define DRV_VERSION "0.06"
45
46enum {
47 QS_PORTS = 4,
48 QS_MAX_PRD = LIBATA_MAX_PRD,
49 QS_CPB_ORDER = 6,
50 QS_CPB_BYTES = (1 << QS_CPB_ORDER),
51 QS_PRD_BYTES = QS_MAX_PRD * 16,
52 QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
53
54 /* global register offsets */
55 QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
56 QS_HID_HPHY = 0x0004, /* host physical interface info */
57 QS_HCT_CTRL = 0x00e4, /* global interrupt mask offset */
58 QS_HST_SFF = 0x0100, /* host status fifo offset */
59 QS_HVS_SERD3 = 0x0393, /* PHY enable offset */
60
61 /* global control bits */
62 QS_HPHY_64BIT = (1 << 1), /* 64-bit bus detected */
63 QS_CNFG3_GSRST = 0x01, /* global chip reset */
64	QS_SERD3_PHY_ENA	= 0xf0, /* PHY detection ENAble */
65
66 /* per-channel register offsets */
67 QS_CCF_CPBA = 0x0710, /* chan CPB base address */
68 QS_CCF_CSEP = 0x0718, /* chan CPB separation factor */
69 QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */
70 QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */
71 QS_CFC_DUFT = 0x0808, /* dev upstream fifo threshold */
72 QS_CFC_DDFT = 0x080c, /* dev downstream fifo threshold */
73 QS_CCT_CTR0 = 0x0900, /* chan control-0 offset */
74 QS_CCT_CTR1 = 0x0901, /* chan control-1 offset */
75 QS_CCT_CFF = 0x0a00, /* chan command fifo offset */
76
77 /* channel control bits */
78 QS_CTR0_REG = (1 << 1), /* register mode (vs. pkt mode) */
79 QS_CTR0_CLER = (1 << 2), /* clear channel errors */
80 QS_CTR1_RDEV = (1 << 1), /* sata phy/comms reset */
81 QS_CTR1_RCHN = (1 << 4), /* reset channel logic */
82 QS_CCF_RUN_PKT = 0x107, /* RUN a new dma PKT */
83
84 /* pkt sub-field headers */
85 QS_HCB_HDR = 0x01, /* Host Control Block header */
86 QS_DCB_HDR = 0x02, /* Device Control Block header */
87
88 /* pkt HCB flag bits */
89 QS_HF_DIRO = (1 << 0), /* data DIRection Out */
90 QS_HF_DAT = (1 << 3), /* DATa pkt */
91 QS_HF_IEN = (1 << 4), /* Interrupt ENable */
92 QS_HF_VLD = (1 << 5), /* VaLiD pkt */
93
94 /* pkt DCB flag bits */
95 QS_DF_PORD = (1 << 2), /* Pio OR Dma */
96 QS_DF_ELBA = (1 << 3), /* Extended LBA (lba48) */
97
98	/* board IDs */
99 board_2068_idx = 0, /* QStor 4-port SATA/RAID */
100};
101
102enum {
103 QS_DMA_BOUNDARY = ~0UL
104};
105
106typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;
107
108struct qs_port_priv {
109 u8 *pkt;
110 dma_addr_t pkt_dma;
111 qs_state_t state;
112};
113
114static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg);
115static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
116static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
117static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
118static int qs_port_start(struct ata_port *ap);
119static void qs_host_stop(struct ata_host *host);
120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap);
127static void qs_irq_clear(struct ata_port *ap);
128static void qs_eng_timeout(struct ata_port *ap);
129
130static struct scsi_host_template qs_ata_sht = {
131 .module = THIS_MODULE,
132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd,
135 .can_queue = ATA_DEF_QUEUE,
136 .this_id = ATA_SHT_THIS_ID,
137 .sg_tablesize = QS_MAX_PRD,
138 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
139 .emulated = ATA_SHT_EMULATED,
140 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
141 .use_clustering = ENABLE_CLUSTERING,
142 .proc_name = DRV_NAME,
143 .dma_boundary = QS_DMA_BOUNDARY,
144 .slave_configure = ata_scsi_slave_config,
145 .slave_destroy = ata_scsi_slave_destroy,
146 .bios_param = ata_std_bios_param,
147};
148
149static const struct ata_port_operations qs_ata_ops = {
150 .port_disable = ata_port_disable,
151 .tf_load = ata_tf_load,
152 .tf_read = ata_tf_read,
153 .check_status = ata_check_status,
154 .check_atapi_dma = qs_check_atapi_dma,
155 .exec_command = ata_exec_command,
156 .dev_select = ata_std_dev_select,
157 .phy_reset = qs_phy_reset,
158 .qc_prep = qs_qc_prep,
159 .qc_issue = qs_qc_issue,
160 .data_xfer = ata_mmio_data_xfer,
161 .eng_timeout = qs_eng_timeout,
162 .irq_handler = qs_intr,
163 .irq_clear = qs_irq_clear,
164 .scr_read = qs_scr_read,
165 .scr_write = qs_scr_write,
166 .port_start = qs_port_start,
167 .port_stop = qs_port_stop,
168 .host_stop = qs_host_stop,
169 .bmdma_stop = qs_bmdma_stop,
170 .bmdma_status = qs_bmdma_status,
171};
172
173static const struct ata_port_info qs_port_info[] = {
174 /* board_2068_idx */
175 {
176 .sht = &qs_ata_sht,
177 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
178 ATA_FLAG_SATA_RESET |
179 //FIXME ATA_FLAG_SRST |
180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
181 .pio_mask = 0x10, /* pio4 */
182 .udma_mask = 0x7f, /* udma0-6 */
183 .port_ops = &qs_ata_ops,
184 },
185};
186
187static const struct pci_device_id qs_ata_pci_tbl[] = {
188 { PCI_VENDOR_ID_PDC, 0x2068, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
189 board_2068_idx },
190
191 { } /* terminate list */
192};
193
194static struct pci_driver qs_ata_pci_driver = {
195 .name = DRV_NAME,
196 .id_table = qs_ata_pci_tbl,
197 .probe = qs_ata_init_one,
198 .remove = ata_pci_remove_one,
199};
200
201static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
202{
203 return 1; /* ATAPI DMA not supported */
204}
205
206static void qs_bmdma_stop(struct ata_queued_cmd *qc)
207{
208 /* nothing */
209}
210
211static u8 qs_bmdma_status(struct ata_port *ap)
212{
213 return 0;
214}
215
216static void qs_irq_clear(struct ata_port *ap)
217{
218 /* nothing */
219}
220
221static inline void qs_enter_reg_mode(struct ata_port *ap)
222{
223 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
224
225 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
226 readb(chan + QS_CCT_CTR0); /* flush */
227}
228
229static inline void qs_reset_channel_logic(struct ata_port *ap)
230{
231 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
232
233 writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
234 readb(chan + QS_CCT_CTR0); /* flush */
235 qs_enter_reg_mode(ap);
236}
237
238static void qs_phy_reset(struct ata_port *ap)
239{
240 struct qs_port_priv *pp = ap->private_data;
241
242 pp->state = qs_state_idle;
243 qs_reset_channel_logic(ap);
244 sata_phy_reset(ap);
245}
246
247static void qs_eng_timeout(struct ata_port *ap)
248{
249 struct qs_port_priv *pp = ap->private_data;
250
251 if (pp->state != qs_state_idle) /* healthy paranoia */
252 pp->state = qs_state_mmio;
253 qs_reset_channel_logic(ap);
254 ata_eng_timeout(ap);
255}
256
257static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
258{
259 if (sc_reg > SCR_CONTROL)
260 return ~0U;
261 return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
262}
263
264static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
265{
266 if (sc_reg > SCR_CONTROL)
267 return;
268 writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
269}
270
271static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
272{
273 struct scatterlist *sg;
274 struct ata_port *ap = qc->ap;
275 struct qs_port_priv *pp = ap->private_data;
276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278
279 WARN_ON(qc->__sg == NULL);
280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281
282 nelem = 0;
283 ata_for_each_sg(sg, qc) {
284 u64 addr;
285 u32 len;
286
287 addr = sg_dma_address(sg);
288 *(__le64 *)prd = cpu_to_le64(addr);
289 prd += sizeof(u64);
290
291 len = sg_dma_len(sg);
292 *(__le32 *)prd = cpu_to_le32(len);
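		/* skip 4 pad bytes so each PRD entry occupies 16 bytes */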
293 prd += sizeof(u64);
294
295 VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
296 (unsigned long long)addr, len);
297 nelem++;
298 }
299
300 return nelem;
301}
302
303static void qs_qc_prep(struct ata_queued_cmd *qc)
304{
305 struct qs_port_priv *pp = qc->ap->private_data;
306 u8 dflags = QS_DF_PORD, *buf = pp->pkt;
307 u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
308 u64 addr;
309 unsigned int nelem;
310
311 VPRINTK("ENTER\n");
312
313 qs_enter_reg_mode(qc->ap);
314 if (qc->tf.protocol != ATA_PROT_DMA) {
315 ata_qc_prep(qc);
316 return;
317 }
318
319 nelem = qs_fill_sg(qc);
320
321 if ((qc->tf.flags & ATA_TFLAG_WRITE))
322 hflags |= QS_HF_DIRO;
323 if ((qc->tf.flags & ATA_TFLAG_LBA48))
324 dflags |= QS_DF_ELBA;
325
326 /* host control block (HCB) */
327 buf[ 0] = QS_HCB_HDR;
328 buf[ 1] = hflags;
329 *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nsect * ATA_SECT_SIZE);
330 *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
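	/* the PRD table sits right after the CPB in the same DMA buffer */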
331 addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
332 *(__le64 *)(&buf[16]) = cpu_to_le64(addr);
333
334 /* device control block (DCB) */
335 buf[24] = QS_DCB_HDR;
336 buf[28] = dflags;
337
338 /* frame information structure (FIS) */
339 ata_tf_to_fis(&qc->tf, &buf[32], 0);
340}
341
342static inline void qs_packet_start(struct ata_queued_cmd *qc)
343{
344 struct ata_port *ap = qc->ap;
345 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
346
347 VPRINTK("ENTER, ap %p\n", ap);
348
349 writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
350 wmb(); /* flush PRDs and pkt to memory */
351 writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
352 readl(chan + QS_CCT_CFF); /* flush */
353}
354
355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{
357 struct qs_port_priv *pp = qc->ap->private_data;
358
359 switch (qc->tf.protocol) {
360 case ATA_PROT_DMA:
361
362 pp->state = qs_state_pkt;
363 qs_packet_start(qc);
364 return 0;
365
366 case ATA_PROT_ATAPI_DMA:
367 BUG();
368 break;
369
370 default:
371 break;
372 }
373
374 pp->state = qs_state_mmio;
375 return ata_qc_issue_prot(qc);
376}
377
378static inline unsigned int qs_intr_pkt(struct ata_host *host)
379{
380 unsigned int handled = 0;
381 u8 sFFE;
382 u8 __iomem *mmio_base = host->mmio_base;
383
384 do {
385 u32 sff0 = readl(mmio_base + QS_HST_SFF);
386 u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
387 u8 sEVLD = (sff1 >> 30) & 0x01; /* valid flag */
388 sFFE = sff1 >> 31; /* empty flag */
389
390 if (sEVLD) {
391 u8 sDST = sff0 >> 16; /* dev status */
392 u8 sHST = sff1 & 0x3f; /* host status */
393 unsigned int port_no = (sff1 >> 8) & 0x03;
394 struct ata_port *ap = host->ports[port_no];
395
396 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
397 sff1, sff0, port_no, sHST, sDST);
398 handled = 1;
399 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
400 struct ata_queued_cmd *qc;
401 struct qs_port_priv *pp = ap->private_data;
402 if (!pp || pp->state != qs_state_pkt)
403 continue;
404 qc = ata_qc_from_tag(ap, ap->active_tag);
405 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
406 switch (sHST) {
407 case 0: /* successful CPB */
408 case 3: /* device error */
409 pp->state = qs_state_idle;
410 qs_enter_reg_mode(qc->ap);
411 qc->err_mask |= ac_err_mask(sDST);
412 ata_qc_complete(qc);
413 break;
414 default:
415 break;
416 }
417 }
418 }
419 }
420 } while (!sFFE);
421 return handled;
422}
423
424static inline unsigned int qs_intr_mmio(struct ata_host *host)
425{
426 unsigned int handled = 0, port_no;
427
428 for (port_no = 0; port_no < host->n_ports; ++port_no) {
429 struct ata_port *ap;
430 ap = host->ports[port_no];
431 if (ap &&
432 !(ap->flags & ATA_FLAG_DISABLED)) {
433 struct ata_queued_cmd *qc;
434 struct qs_port_priv *pp = ap->private_data;
435 if (!pp || pp->state != qs_state_mmio)
436 continue;
437 qc = ata_qc_from_tag(ap, ap->active_tag);
438 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
439
440 /* check main status, clearing INTRQ */
441 u8 status = ata_check_status(ap);
442 if ((status & ATA_BUSY))
443 continue;
444 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
445 ap->id, qc->tf.protocol, status);
446
447 /* complete taskfile transaction */
448 pp->state = qs_state_idle;
449 qc->err_mask |= ac_err_mask(status);
450 ata_qc_complete(qc);
451 handled = 1;
452 }
453 }
454 }
455 return handled;
456}
457
458static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
459{
460 struct ata_host *host = dev_instance;
461 unsigned int handled = 0;
462
463 VPRINTK("ENTER\n");
464
465 spin_lock(&host->lock);
466 handled = qs_intr_pkt(host) | qs_intr_mmio(host);
467 spin_unlock(&host->lock);
468
469 VPRINTK("EXIT\n");
470
471 return IRQ_RETVAL(handled);
472}
473
474static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
475{
476 port->cmd_addr =
477 port->data_addr = base + 0x400;
478 port->error_addr =
479 port->feature_addr = base + 0x408; /* hob_feature = 0x409 */
480 port->nsect_addr = base + 0x410; /* hob_nsect = 0x411 */
481 port->lbal_addr = base + 0x418; /* hob_lbal = 0x419 */
482 port->lbam_addr = base + 0x420; /* hob_lbam = 0x421 */
483 port->lbah_addr = base + 0x428; /* hob_lbah = 0x429 */
484 port->device_addr = base + 0x430;
485 port->status_addr =
486 port->command_addr = base + 0x438;
487 port->altstatus_addr =
488 port->ctl_addr = base + 0x440;
489 port->scr_addr = base + 0xc00;
490}
491
492static int qs_port_start(struct ata_port *ap)
493{
494 struct device *dev = ap->host->dev;
495 struct qs_port_priv *pp;
496 void __iomem *mmio_base = ap->host->mmio_base;
497 void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
498 u64 addr;
499 int rc;
500
501 rc = ata_port_start(ap);
502 if (rc)
503 return rc;
504 qs_enter_reg_mode(ap);
505 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
506 if (!pp) {
507 rc = -ENOMEM;
508 goto err_out;
509 }
510 pp->pkt = dma_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
511 GFP_KERNEL);
512 if (!pp->pkt) {
513 rc = -ENOMEM;
514 goto err_out_kfree;
515 }
516 memset(pp->pkt, 0, QS_PKT_BYTES);
517 ap->private_data = pp;
518
519 addr = (u64)pp->pkt_dma;
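	/* program the channel's CPB base address, low dword then high dword */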
520 writel((u32) addr, chan + QS_CCF_CPBA);
521 writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
522 return 0;
523
524err_out_kfree:
525 kfree(pp);
526err_out:
527 ata_port_stop(ap);
528 return rc;
529}
530
531static void qs_port_stop(struct ata_port *ap)
532{
533 struct device *dev = ap->host->dev;
534 struct qs_port_priv *pp = ap->private_data;
535
536 if (pp != NULL) {
537 ap->private_data = NULL;
538 if (pp->pkt != NULL)
539 dma_free_coherent(dev, QS_PKT_BYTES, pp->pkt,
540 pp->pkt_dma);
541 kfree(pp);
542 }
543 ata_port_stop(ap);
544}
545
546static void qs_host_stop(struct ata_host *host)
547{
548 void __iomem *mmio_base = host->mmio_base;
549 struct pci_dev *pdev = to_pci_dev(host->dev);
550
551 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
552 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
553
554 pci_iounmap(pdev, mmio_base);
555}
556
557static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
558{
559 void __iomem *mmio_base = pe->mmio_base;
560 unsigned int port_no;
561
562 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
563 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
564
565 /* reset each channel in turn */
566 for (port_no = 0; port_no < pe->n_ports; ++port_no) {
567 u8 __iomem *chan = mmio_base + (port_no * 0x4000);
568 writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
569 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
570 readb(chan + QS_CCT_CTR0); /* flush */
571 }
572 writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */
573
574 for (port_no = 0; port_no < pe->n_ports; ++port_no) {
575 u8 __iomem *chan = mmio_base + (port_no * 0x4000);
576 /* set FIFO depths to same settings as Windows driver */
577 writew(32, chan + QS_CFC_HUFT);
578 writew(32, chan + QS_CFC_HDFT);
579 writew(10, chan + QS_CFC_DUFT);
580 writew( 8, chan + QS_CFC_DDFT);
581 /* set CPB size in bytes, as a power of two */
582 writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
583 }
584 writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
585}
586
587/*
588 * The QStor understands 64-bit buses, and uses 64-bit fields
589 * for DMA pointers regardless of bus width. We just have to
590 * make sure our DMA masks are set appropriately for whatever
591 * bridge lies between us and the QStor, and then the DMA mapping
592 * code will ensure we only ever "see" appropriate buffer addresses.
593 * If we're 32-bit limited somewhere, then our 64-bit fields will
594 * just end up with zeros in the upper 32-bits, without any special
595 * logic required outside of this routine (below).
596 */
597static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
598{
599 u32 bus_info = readl(mmio_base + QS_HID_HPHY);
600 int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
601
602 if (have_64bit_bus &&
603 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
604 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
605 if (rc) {
606 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
607 if (rc) {
608 dev_printk(KERN_ERR, &pdev->dev,
609 "64-bit DMA enable failed\n");
610 return rc;
611 }
612 }
613 } else {
614 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
615 if (rc) {
616 dev_printk(KERN_ERR, &pdev->dev,
617 "32-bit DMA enable failed\n");
618 return rc;
619 }
620 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
621 if (rc) {
622 dev_printk(KERN_ERR, &pdev->dev,
623 "32-bit consistent DMA enable failed\n");
624 return rc;
625 }
626 }
627 return 0;
628}
629
630static int qs_ata_init_one(struct pci_dev *pdev,
631 const struct pci_device_id *ent)
632{
633 static int printed_version;
634 struct ata_probe_ent *probe_ent = NULL;
635 void __iomem *mmio_base;
636 unsigned int board_idx = (unsigned int) ent->driver_data;
637 int rc, port_no;
638
639 if (!printed_version++)
640 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
641
642 rc = pci_enable_device(pdev);
643 if (rc)
644 return rc;
645
646 rc = pci_request_regions(pdev, DRV_NAME);
647 if (rc)
648 goto err_out;
649
650 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
651 rc = -ENODEV;
652 goto err_out_regions;
653 }
654
655 mmio_base = pci_iomap(pdev, 4, 0);
656 if (mmio_base == NULL) {
657 rc = -ENOMEM;
658 goto err_out_regions;
659 }
660
661 rc = qs_set_dma_masks(pdev, mmio_base);
662 if (rc)
663 goto err_out_iounmap;
664
665 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
666 if (probe_ent == NULL) {
667 rc = -ENOMEM;
668 goto err_out_iounmap;
669 }
670
671 memset(probe_ent, 0, sizeof(*probe_ent));
672 probe_ent->dev = pci_dev_to_dev(pdev);
673 INIT_LIST_HEAD(&probe_ent->node);
674
675 probe_ent->sht = qs_port_info[board_idx].sht;
676 probe_ent->port_flags = qs_port_info[board_idx].flags;
677 probe_ent->pio_mask = qs_port_info[board_idx].pio_mask;
678 probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask;
679 probe_ent->udma_mask = qs_port_info[board_idx].udma_mask;
680 probe_ent->port_ops = qs_port_info[board_idx].port_ops;
681
682 probe_ent->irq = pdev->irq;
683 probe_ent->irq_flags = IRQF_SHARED;
684 probe_ent->mmio_base = mmio_base;
685 probe_ent->n_ports = QS_PORTS;
686
687 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
688 unsigned long chan = (unsigned long)mmio_base +
689 (port_no * 0x4000);
690 qs_ata_setup_port(&probe_ent->port[port_no], chan);
691 }
692
693 pci_set_master(pdev);
694
695 /* initialize adapter */
696 qs_host_init(board_idx, probe_ent);
697
698 rc = ata_device_add(probe_ent);
699 kfree(probe_ent);
700 if (rc != QS_PORTS)
701 goto err_out_iounmap;
702 return 0;
703
704err_out_iounmap:
705 pci_iounmap(pdev, mmio_base);
706err_out_regions:
707 pci_release_regions(pdev);
708err_out:
709 pci_disable_device(pdev);
710 return rc;
711}
712
713static int __init qs_ata_init(void)
714{
715 return pci_register_driver(&qs_ata_pci_driver);
716}
717
718static void __exit qs_ata_exit(void)
719{
720 pci_unregister_driver(&qs_ata_pci_driver);
721}
722
723MODULE_AUTHOR("Mark Lord");
724MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
725MODULE_LICENSE("GPL");
726MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
727MODULE_VERSION(DRV_VERSION);
728
729module_init(qs_ata_init);
730module_exit(qs_ata_exit);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
new file mode 100644
index 000000000000..c63dbabc0cd9
--- /dev/null
+++ b/drivers/ata/sata_sil.c
@@ -0,0 +1,728 @@
1/*
2 * sata_sil.c - Silicon Image SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2005 Red Hat, Inc.
9 * Copyright 2003 Benjamin Herrenschmidt
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Documentation for SiI 3112:
31 * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
32 *
33 * Other errata and documentation available under NDA.
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47
48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "2.0"
50
51enum {
52 /*
53 * host flags
54 */
55 SIL_FLAG_NO_SATA_IRQ = (1 << 28),
56 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
57 SIL_FLAG_MOD15WRITE = (1 << 30),
58
59 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
60 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
61
62 /*
63 * Controller IDs
64 */
65 sil_3112 = 0,
66 sil_3112_no_sata_irq = 1,
67 sil_3512 = 2,
68 sil_3114 = 3,
69
70 /*
71 * Register offsets
72 */
73 SIL_SYSCFG = 0x48,
74
75 /*
76 * Register bits
77 */
78 /* SYSCFG */
79 SIL_MASK_IDE0_INT = (1 << 22),
80 SIL_MASK_IDE1_INT = (1 << 23),
81 SIL_MASK_IDE2_INT = (1 << 24),
82 SIL_MASK_IDE3_INT = (1 << 25),
83 SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
84 SIL_MASK_4PORT = SIL_MASK_2PORT |
85 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
86
87 /* BMDMA/BMDMA2 */
88 SIL_INTR_STEERING = (1 << 1),
89
90 SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
91 SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
92 SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
93 SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
94 SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
95 SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
96 SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
97 SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
98 SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
99 SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
100
101 /* SIEN */
102 SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
103
104 /*
105 * Others
106 */
107 SIL_QUIRK_MOD15WRITE = (1 << 0),
108 SIL_QUIRK_UDMA5MAX = (1 << 1),
109};
110
111static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
112#ifdef CONFIG_PM
113static int sil_pci_device_resume(struct pci_dev *pdev);
114#endif
115static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
116static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
117static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
118static void sil_post_set_mode (struct ata_port *ap);
119static irqreturn_t sil_interrupt(int irq, void *dev_instance,
120 struct pt_regs *regs);
121static void sil_freeze(struct ata_port *ap);
122static void sil_thaw(struct ata_port *ap);
123
124
125static const struct pci_device_id sil_pci_tbl[] = {
126 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
127 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
128 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
129 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
130 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
131 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
132 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
133 { } /* terminate list */
134};
135
136
137/* TODO firmware versions should be added - eric */
138static const struct sil_drivelist {
139	const char *product;
140	unsigned int quirk;
141} sil_blacklist[] = {
142 { "ST320012AS", SIL_QUIRK_MOD15WRITE },
143 { "ST330013AS", SIL_QUIRK_MOD15WRITE },
144 { "ST340017AS", SIL_QUIRK_MOD15WRITE },
145 { "ST360015AS", SIL_QUIRK_MOD15WRITE },
146 { "ST380023AS", SIL_QUIRK_MOD15WRITE },
147 { "ST3120023AS", SIL_QUIRK_MOD15WRITE },
148 { "ST340014ASL", SIL_QUIRK_MOD15WRITE },
149 { "ST360014ASL", SIL_QUIRK_MOD15WRITE },
150 { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
151 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
152 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
153 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
154 { }
155};
156
157static struct pci_driver sil_pci_driver = {
158 .name = DRV_NAME,
159 .id_table = sil_pci_tbl,
160 .probe = sil_init_one,
161 .remove = ata_pci_remove_one,
162#ifdef CONFIG_PM
163 .suspend = ata_pci_device_suspend,
164 .resume = sil_pci_device_resume,
165#endif
166};
167
168static struct scsi_host_template sil_sht = {
169 .module = THIS_MODULE,
170 .name = DRV_NAME,
171 .ioctl = ata_scsi_ioctl,
172 .queuecommand = ata_scsi_queuecmd,
173 .can_queue = ATA_DEF_QUEUE,
174 .this_id = ATA_SHT_THIS_ID,
175 .sg_tablesize = LIBATA_MAX_PRD,
176 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
177 .emulated = ATA_SHT_EMULATED,
178 .use_clustering = ATA_SHT_USE_CLUSTERING,
179 .proc_name = DRV_NAME,
180 .dma_boundary = ATA_DMA_BOUNDARY,
181 .slave_configure = ata_scsi_slave_config,
182 .slave_destroy = ata_scsi_slave_destroy,
183 .bios_param = ata_std_bios_param,
184 .suspend = ata_scsi_device_suspend,
185 .resume = ata_scsi_device_resume,
186};
187
188static const struct ata_port_operations sil_ops = {
189 .port_disable = ata_port_disable,
190 .dev_config = sil_dev_config,
191 .tf_load = ata_tf_load,
192 .tf_read = ata_tf_read,
193 .check_status = ata_check_status,
194 .exec_command = ata_exec_command,
195 .dev_select = ata_std_dev_select,
196 .post_set_mode = sil_post_set_mode,
197 .bmdma_setup = ata_bmdma_setup,
198 .bmdma_start = ata_bmdma_start,
199 .bmdma_stop = ata_bmdma_stop,
200 .bmdma_status = ata_bmdma_status,
201 .qc_prep = ata_qc_prep,
202 .qc_issue = ata_qc_issue_prot,
203 .data_xfer = ata_mmio_data_xfer,
204 .freeze = sil_freeze,
205 .thaw = sil_thaw,
206 .error_handler = ata_bmdma_error_handler,
207 .post_internal_cmd = ata_bmdma_post_internal_cmd,
208 .irq_handler = sil_interrupt,
209 .irq_clear = ata_bmdma_irq_clear,
210 .scr_read = sil_scr_read,
211 .scr_write = sil_scr_write,
212 .port_start = ata_port_start,
213 .port_stop = ata_port_stop,
214 .host_stop = ata_pci_host_stop,
215};
216
217static const struct ata_port_info sil_port_info[] = {
218 /* sil_3112 */
219 {
220 .sht = &sil_sht,
221 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
222 .pio_mask = 0x1f, /* pio0-4 */
223 .mwdma_mask = 0x07, /* mwdma0-2 */
224 .udma_mask = 0x3f, /* udma0-5 */
225 .port_ops = &sil_ops,
226 },
227 /* sil_3112_no_sata_irq */
228 {
229 .sht = &sil_sht,
230 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
231 SIL_FLAG_NO_SATA_IRQ,
232 .pio_mask = 0x1f, /* pio0-4 */
233 .mwdma_mask = 0x07, /* mwdma0-2 */
234 .udma_mask = 0x3f, /* udma0-5 */
235 .port_ops = &sil_ops,
236 },
237 /* sil_3512 */
238 {
239 .sht = &sil_sht,
240 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
241 .pio_mask = 0x1f, /* pio0-4 */
242 .mwdma_mask = 0x07, /* mwdma0-2 */
243 .udma_mask = 0x3f, /* udma0-5 */
244 .port_ops = &sil_ops,
245 },
246 /* sil_3114 */
247 {
248 .sht = &sil_sht,
249 .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
250 .pio_mask = 0x1f, /* pio0-4 */
251 .mwdma_mask = 0x07, /* mwdma0-2 */
252 .udma_mask = 0x3f, /* udma0-5 */
253 .port_ops = &sil_ops,
254 },
255};
256
257/* per-port register offsets */
258/* TODO: we can probably calculate rather than use a table */
259static const struct {
260 unsigned long tf; /* ATA taskfile register block */
261 unsigned long ctl; /* ATA control/altstatus register block */
262 unsigned long bmdma; /* DMA register block */
263 unsigned long bmdma2; /* DMA register block #2 */
264 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
265 unsigned long scr; /* SATA control register block */
266 unsigned long sien; /* SATA Interrupt Enable register */
267	unsigned long xfer_mode; /* data transfer mode register */
268 unsigned long sfis_cfg; /* SATA FIS reception config register */
269} sil_port[] = {
270 /* port 0 ... */
271 { 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
272 { 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
273 { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
274 { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
275 /* ... port 3 */
276};
277
278MODULE_AUTHOR("Jeff Garzik");
279MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
280MODULE_LICENSE("GPL");
281MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
282MODULE_VERSION(DRV_VERSION);
283
284static int slow_down;
285module_param(slow_down, int, 0444);
286MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
287
288
289static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
290{
291 u8 cache_line = 0;
292 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
293 return cache_line;
294}
295
296static void sil_post_set_mode (struct ata_port *ap)
297{
298 struct ata_host *host = ap->host;
299 struct ata_device *dev;
300 void __iomem *addr = host->mmio_base + sil_port[ap->port_no].xfer_mode;
301 u32 tmp, dev_mode[2];
302 unsigned int i;
303
304 for (i = 0; i < 2; i++) {
305 dev = &ap->device[i];
306 if (!ata_dev_enabled(dev))
307 dev_mode[i] = 0; /* PIO0/1/2 */
308 else if (dev->flags & ATA_DFLAG_PIO)
309 dev_mode[i] = 1; /* PIO3/4 */
310 else
311 dev_mode[i] = 3; /* UDMA */
312 /* value 2 indicates MDMA */
313 }
314
315 tmp = readl(addr);
316 tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
317 tmp |= dev_mode[0];
318 tmp |= (dev_mode[1] << 4);
319 writel(tmp, addr);
320 readl(addr); /* flush */
321}
322
323static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
324{
325 unsigned long offset = ap->ioaddr.scr_addr;
326
327 switch (sc_reg) {
328 case SCR_STATUS:
329 return offset + 4;
330 case SCR_ERROR:
331 return offset + 8;
332 case SCR_CONTROL:
333 return offset;
334 default:
335 /* do nothing */
336 break;
337 }
338
339 return 0;
340}
341
342static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
343{
344 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
345 if (mmio)
346 return readl(mmio);
347 return 0xffffffffU;
348}
349
350static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
351{
352	void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
353 if (mmio)
354 writel(val, mmio);
355}
356
357static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
358{
359 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
360 u8 status;
361
362 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
363 u32 serror;
364
365 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
366 * controllers continue to assert IRQ as long as
367 * SError bits are pending. Clear SError immediately.
368 */
369 serror = sil_scr_read(ap, SCR_ERROR);
370 sil_scr_write(ap, SCR_ERROR, serror);
371
372 /* Trigger hotplug and accumulate SError only if the
373 * port isn't already frozen. Otherwise, PHY events
374 * during hardreset makes controllers with broken SIEN
375 * repeat probing needlessly.
376 */
377 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
378 ata_ehi_hotplugged(&ap->eh_info);
379 ap->eh_info.serror |= serror;
380 }
381
382 goto freeze;
383 }
384
385 if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
386 goto freeze;
387
388 /* Check whether we are expecting interrupt in this state */
389 switch (ap->hsm_task_state) {
390 case HSM_ST_FIRST:
391 /* Some pre-ATAPI-4 devices assert INTRQ
392 * at this state when ready to receive CDB.
393 */
394
395		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
396		 * The flag is set only for ATAPI devices, so there is
397		 * no need to check is_atapi_taskfile(&qc->tf) again.
398		 */
399 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
400 goto err_hsm;
401 break;
402 case HSM_ST_LAST:
403 if (qc->tf.protocol == ATA_PROT_DMA ||
404 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
405 /* clear DMA-Start bit */
406 ap->ops->bmdma_stop(qc);
407
408 if (bmdma2 & SIL_DMA_ERROR) {
409 qc->err_mask |= AC_ERR_HOST_BUS;
410 ap->hsm_task_state = HSM_ST_ERR;
411 }
412 }
413 break;
414 case HSM_ST:
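		/* PIO data transfer in progress; INTRQ is expected in this state */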
415 break;
416 default:
417 goto err_hsm;
418 }
419
420 /* check main status, clearing INTRQ */
421 status = ata_chk_status(ap);
422 if (unlikely(status & ATA_BUSY))
423 goto err_hsm;
424
425 /* ack bmdma irq events */
426 ata_bmdma_irq_clear(ap);
427
428 /* kick HSM in the ass */
429 ata_hsm_move(ap, qc, status, 0);
430
431 return;
432
433 err_hsm:
434 qc->err_mask |= AC_ERR_HSM;
435 freeze:
436 ata_port_freeze(ap);
437}
438
439static irqreturn_t sil_interrupt(int irq, void *dev_instance,
440 struct pt_regs *regs)
441{
442 struct ata_host *host = dev_instance;
443 void __iomem *mmio_base = host->mmio_base;
444 int handled = 0;
445 int i;
446
447 spin_lock(&host->lock);
448
449 for (i = 0; i < host->n_ports; i++) {
450 struct ata_port *ap = host->ports[i];
451 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
452
453 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
454 continue;
455
456 /* turn off SATA_IRQ if not supported */
457 if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
458 bmdma2 &= ~SIL_DMA_SATA_IRQ;
459
460 if (bmdma2 == 0xffffffff ||
461 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
462 continue;
463
464 sil_host_intr(ap, bmdma2);
465 handled = 1;
466 }
467
468 spin_unlock(&host->lock);
469
470 return IRQ_RETVAL(handled);
471}
472
473static void sil_freeze(struct ata_port *ap)
474{
475 void __iomem *mmio_base = ap->host->mmio_base;
476 u32 tmp;
477
478 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
479 writel(0, mmio_base + sil_port[ap->port_no].sien);
480
481 /* plug IRQ */
482 tmp = readl(mmio_base + SIL_SYSCFG);
483 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
484 writel(tmp, mmio_base + SIL_SYSCFG);
485 readl(mmio_base + SIL_SYSCFG); /* flush */
486}
487
488static void sil_thaw(struct ata_port *ap)
489{
490 void __iomem *mmio_base = ap->host->mmio_base;
491 u32 tmp;
492
493 /* clear IRQ */
494 ata_chk_status(ap);
495 ata_bmdma_irq_clear(ap);
496
497 /* turn on SATA IRQ if supported */
498 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
499 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
500
501 /* turn on IRQ */
502 tmp = readl(mmio_base + SIL_SYSCFG);
503 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
504 writel(tmp, mmio_base + SIL_SYSCFG);
505}
506
507/**
508 * sil_dev_config - Apply device/host-specific errata fixups
509 * @ap: Port containing device to be examined
510 * @dev: Device to be examined
511 *
512 * After the IDENTIFY [PACKET] DEVICE step is complete, and a
513 * device is known to be present, this function is called.
514 * We apply two errata fixups specific to Silicon Image
515 * controllers: a Seagate fixup and a Maxtor fixup.
516 *
517 * For certain Seagate devices, we must limit the maximum sectors
518 * to under 8K.
519 *
520 * For certain Maxtor devices, we must not program the drive
521 * beyond udma5.
522 *
523 * Both fixups are unfairly pessimistic. As soon as I get more
524 * information on these errata, I will create a more exhaustive
525 * list, and apply the fixups to only the specific
526 * devices/hosts/firmwares that need it.
527 *
528 * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted.
529 * The Maxtor quirk is in the blacklist, but I'm keeping the original
530 * pessimistic fix for the following reasons...
531 * - There seems to be less info on it; only one device was gleaned off the
532 * Windows driver, so maybe only one is affected. More info would be greatly
533 * appreciated.
534 * - But then again, UDMA5 is hardly anything to complain about.
535 */
536static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
537{
538 unsigned int n, quirks = 0;
539 unsigned char model_num[41];
540
541 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
542
543 for (n = 0; sil_blacklist[n].product; n++)
544 if (!strcmp(sil_blacklist[n].product, model_num)) {
545 quirks = sil_blacklist[n].quirk;
546 break;
547 }
548
549 /* limit requests to 15 sectors */
550 if (slow_down ||
551 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
552 (quirks & SIL_QUIRK_MOD15WRITE))) {
553 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
554 "(mod15write workaround)\n");
555 dev->max_sectors = 15;
556 return;
557 }
558
559 /* limit to udma5 */
560 if (quirks & SIL_QUIRK_UDMA5MAX) {
561 ata_dev_printk(dev, KERN_INFO,
562 "applying Maxtor errata fix %s\n", model_num);
563 dev->udma_mask &= ATA_UDMA5;
564 return;
565 }
566}
567
568static void sil_init_controller(struct pci_dev *pdev,
569 int n_ports, unsigned long port_flags,
570 void __iomem *mmio_base)
571{
572 u8 cls;
573 u32 tmp;
574 int i;
575
576 /* Initialize FIFO PCI bus arbitration */
577 cls = sil_get_device_cache_line(pdev);
578 if (cls) {
579 cls >>= 3;
580 cls++; /* cls = (line_size/8)+1 */
581 for (i = 0; i < n_ports; i++)
582 writew(cls << 8 | cls,
583 mmio_base + sil_port[i].fifo_cfg);
584 } else
585 dev_printk(KERN_WARNING, &pdev->dev,
586 "cache line size not set. Driver may not function\n");
587
588 /* Apply R_ERR on DMA activate FIS errata workaround */
589 if (port_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
590 int cnt;
591
592 for (i = 0, cnt = 0; i < n_ports; i++) {
593 tmp = readl(mmio_base + sil_port[i].sfis_cfg);
594 if ((tmp & 0x3) != 0x01)
595 continue;
596 if (!cnt)
597 dev_printk(KERN_INFO, &pdev->dev,
598 "Applying R_ERR on DMA activate "
599 "FIS errata fix\n");
600 writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
601 cnt++;
602 }
603 }
604
605 if (n_ports == 4) {
606 /* flip the magic "make 4 ports work" bit */
607 tmp = readl(mmio_base + sil_port[2].bmdma);
608 if ((tmp & SIL_INTR_STEERING) == 0)
609 writel(tmp | SIL_INTR_STEERING,
610 mmio_base + sil_port[2].bmdma);
611 }
612}
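
The FIFO arbitration setup above packs the same (line_size/8)+1 count into both bytes of fifo_cfg; note that the PCI cache-line-size register counts 32-bit dwords, so a 64-byte line reads back as 16. A minimal user-space sketch of that arithmetic (the helper name is hypothetical, not part of the driver):

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for the fifo_cfg computation in
 * sil_init_controller(). */
static uint16_t sil_fifo_cfg_value(uint8_t cache_line_dwords)
{
	uint8_t cls = (uint8_t)((cache_line_dwords >> 3) + 1);

	/* the same count goes into the high and the low byte */
	return (uint16_t)(cls << 8 | cls);
}

int main(void)
{
	assert(sil_fifo_cfg_value(16) == 0x0303);	/* 64-byte cache line */
	assert(sil_fifo_cfg_value(8) == 0x0202);	/* 32-byte cache line */
	return 0;
}
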
613
614static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
615{
616 static int printed_version;
617 struct ata_probe_ent *probe_ent = NULL;
618 unsigned long base;
619 void __iomem *mmio_base;
620 int rc;
621 unsigned int i;
622 int pci_dev_busy = 0;
623
624 if (!printed_version++)
625 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
626
627 rc = pci_enable_device(pdev);
628 if (rc)
629 return rc;
630
631 rc = pci_request_regions(pdev, DRV_NAME);
632 if (rc) {
633 pci_dev_busy = 1;
634 goto err_out;
635 }
636
637 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
638 if (rc)
639 goto err_out_regions;
640 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
641 if (rc)
642 goto err_out_regions;
643
644 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
645 if (probe_ent == NULL) {
646 rc = -ENOMEM;
647 goto err_out_regions;
648 }
649
650 INIT_LIST_HEAD(&probe_ent->node);
651 probe_ent->dev = pci_dev_to_dev(pdev);
652 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
653 probe_ent->sht = sil_port_info[ent->driver_data].sht;
654 probe_ent->n_ports = (ent->driver_data == sil_3114) ? 4 : 2;
655 probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
656 probe_ent->mwdma_mask = sil_port_info[ent->driver_data].mwdma_mask;
657 probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
658 probe_ent->irq = pdev->irq;
659 probe_ent->irq_flags = IRQF_SHARED;
660 probe_ent->port_flags = sil_port_info[ent->driver_data].flags;
661
662 mmio_base = pci_iomap(pdev, 5, 0);
663 if (mmio_base == NULL) {
664 rc = -ENOMEM;
665 goto err_out_free_ent;
666 }
667
668 probe_ent->mmio_base = mmio_base;
669
670 base = (unsigned long) mmio_base;
671
672 for (i = 0; i < probe_ent->n_ports; i++) {
673 probe_ent->port[i].cmd_addr = base + sil_port[i].tf;
674 probe_ent->port[i].altstatus_addr =
675 probe_ent->port[i].ctl_addr = base + sil_port[i].ctl;
676 probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma;
677 probe_ent->port[i].scr_addr = base + sil_port[i].scr;
678 ata_std_ports(&probe_ent->port[i]);
679 }
680
681 sil_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
682 mmio_base);
683
684 pci_set_master(pdev);
685
686 /* FIXME: check ata_device_add return value */
687 ata_device_add(probe_ent);
688 kfree(probe_ent);
689
690 return 0;
691
692err_out_free_ent:
693 kfree(probe_ent);
694err_out_regions:
695 pci_release_regions(pdev);
696err_out:
697 if (!pci_dev_busy)
698 pci_disable_device(pdev);
699 return rc;
700}
701
702#ifdef CONFIG_PM
703static int sil_pci_device_resume(struct pci_dev *pdev)
704{
705 struct ata_host *host = dev_get_drvdata(&pdev->dev);
706
707 ata_pci_device_do_resume(pdev);
708 sil_init_controller(pdev, host->n_ports, host->ports[0]->flags,
709 host->mmio_base);
710 ata_host_resume(host);
711
712 return 0;
713}
714#endif
715
716static int __init sil_init(void)
717{
718 return pci_register_driver(&sil_pci_driver);
719}
720
721static void __exit sil_exit(void)
722{
723 pci_unregister_driver(&sil_pci_driver);
724}
725
726
727module_init(sil_init);
728module_exit(sil_exit);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
new file mode 100644
index 000000000000..39cb07baebae
--- /dev/null
+++ b/drivers/ata/sata_sil24.c
@@ -0,0 +1,1227 @@
1/*
2 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
3 *
4 * Copyright 2005 Tejun Heo
5 *
6 * Based on preview driver from Silicon Image.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h>
30#include <linux/libata.h>
31#include <asm/io.h>
32
33#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.3"
35
36/*
37 * Port request block (PRB) 32 bytes
38 */
39struct sil24_prb {
40 __le16 ctrl;
41 __le16 prot;
42 __le32 rx_cnt;
43 u8 fis[6 * 4];
44};
45
46/*
47 * Scatter gather entry (SGE) 16 bytes
48 */
49struct sil24_sge {
50 __le64 addr;
51 __le32 cnt;
52 __le32 flags;
53};
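
The comments above pin the PRB at 32 bytes and the SGE at 16, and the LRAM slot layout depends on those exact sizes. A hedged compile-time check (C11 _Static_assert, with stand-in typedefs for the kernel's little-endian types):

#include <stdint.h>

typedef uint16_t __le16;	/* stand-ins for the kernel types */
typedef uint32_t __le32;
typedef uint64_t __le64;
typedef uint8_t u8;

struct sil24_prb { __le16 ctrl; __le16 prot; __le32 rx_cnt; u8 fis[6 * 4]; };
struct sil24_sge { __le64 addr; __le32 cnt; __le32 flags; };

/* 2 + 2 + 4 + 24 = 32 and 8 + 4 + 4 = 16; natural alignment adds no padding */
_Static_assert(sizeof(struct sil24_prb) == 32, "PRB must be 32 bytes");
_Static_assert(sizeof(struct sil24_sge) == 16, "SGE must be 16 bytes");
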
54
55/*
56 * Port multiplier
57 */
58struct sil24_port_multiplier {
59 __le32 diag;
60 __le32 sactive;
61};
62
63enum {
64 /*
65 * Global controller registers (128 bytes @ BAR0)
66 */
67 /* 32 bit regs */
68 HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
69 HOST_CTRL = 0x40,
70 HOST_IRQ_STAT = 0x44,
71 HOST_PHY_CFG = 0x48,
72 HOST_BIST_CTRL = 0x50,
73 HOST_BIST_PTRN = 0x54,
74 HOST_BIST_STAT = 0x58,
75 HOST_MEM_BIST_STAT = 0x5c,
76 HOST_FLASH_CMD = 0x70,
77 /* 8 bit regs */
78 HOST_FLASH_DATA = 0x74,
79 HOST_TRANSITION_DETECT = 0x75,
80 HOST_GPIO_CTRL = 0x76,
81 HOST_I2C_ADDR = 0x78, /* 32 bit */
82 HOST_I2C_DATA = 0x7c,
83 HOST_I2C_XFER_CNT = 0x7e,
84 HOST_I2C_CTRL = 0x7f,
85
86 /* HOST_SLOT_STAT bits */
87 HOST_SSTAT_ATTN = (1 << 31),
88
89 /* HOST_CTRL bits */
90 HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
91 HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
95 HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */
96
97 /*
98 * Port registers
99 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
100 */
101 PORT_REGS_SIZE = 0x2000,
102
103 PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */
104 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
105
106 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
107 /* 32 bit regs */
108 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
109 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
110 PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
111 PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
112 PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
113 PORT_ACTIVATE_UPPER_ADDR= 0x101c,
114 PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
115 PORT_CMD_ERR = 0x1024, /* command error number */
116 PORT_FIS_CFG = 0x1028,
117 PORT_FIFO_THRES = 0x102c,
118 /* 16 bit regs */
119 PORT_DECODE_ERR_CNT = 0x1040,
120 PORT_DECODE_ERR_THRESH = 0x1042,
121 PORT_CRC_ERR_CNT = 0x1044,
122 PORT_CRC_ERR_THRESH = 0x1046,
123 PORT_HSHK_ERR_CNT = 0x1048,
124 PORT_HSHK_ERR_THRESH = 0x104a,
125 /* 32 bit regs */
126 PORT_PHY_CFG = 0x1050,
127 PORT_SLOT_STAT = 0x1800,
128 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
129 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
130 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
131 PORT_SCONTROL = 0x1f00,
132 PORT_SSTATUS = 0x1f04,
133 PORT_SERROR = 0x1f08,
134 PORT_SACTIVE = 0x1f0c,
135
136 /* PORT_CTRL_STAT bits */
137 PORT_CS_PORT_RST = (1 << 0), /* port reset */
138 PORT_CS_DEV_RST = (1 << 1), /* device reset */
139 PORT_CS_INIT = (1 << 2), /* port initialize */
140 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
141 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
142 PORT_CS_RESUME = (1 << 6), /* port resume */
143 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
144 PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */
145 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
146
147 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
148 /* bits[11:0] are masked */
149 PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
150 PORT_IRQ_ERROR = (1 << 1), /* command execution error */
151 PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
152 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
153 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
154 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
155 PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
156 PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
157 PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
158 PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
159 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
160 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
161
162 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
163 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
164 PORT_IRQ_UNK_FIS,
165
166 /* bits[27:16] are unmasked (raw) */
167 PORT_IRQ_RAW_SHIFT = 16,
168 PORT_IRQ_MASKED_MASK = 0x7ff,
169 PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
170
171 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
172 PORT_IRQ_STEER_SHIFT = 30,
173 PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
174
175 /* PORT_CMD_ERR constants */
176 PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
177 PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
178 PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
179 PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
180 PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
181 PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
182 PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
183 PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
184 PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
185 PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
186 PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
187 PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
188 PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
189 PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
190 PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
191 PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
192 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
193 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
194 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
195 PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
196 PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
197 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
198
199 /* bits of PRB control field */
200 PRB_CTRL_PROTOCOL = (1 << 0), /* override def. ATA protocol */
201 PRB_CTRL_PACKET_READ = (1 << 4), /* PACKET cmd read */
202 PRB_CTRL_PACKET_WRITE = (1 << 5), /* PACKET cmd write */
203 PRB_CTRL_NIEN = (1 << 6), /* Mask completion irq */
204 PRB_CTRL_SRST = (1 << 7), /* Soft reset request (ign BSY?) */
205
206 /* PRB protocol field */
207 PRB_PROT_PACKET = (1 << 0),
208 PRB_PROT_TCQ = (1 << 1),
209 PRB_PROT_NCQ = (1 << 2),
210 PRB_PROT_READ = (1 << 3),
211 PRB_PROT_WRITE = (1 << 4),
212 PRB_PROT_TRANSPARENT = (1 << 5),
213
214 /*
215 * Other constants
216 */
217 SGE_TRM = (1 << 31), /* Last SGE in chain */
218 SGE_LNK = (1 << 30), /* linked list
219 Points to SGT, not SGE */
220 SGE_DRD = (1 << 29), /* discard data read (/dev/null)
221 data address ignored */
222
223 SIL24_MAX_CMDS = 31,
224
225 /* board id */
226 BID_SIL3124 = 0,
227 BID_SIL3132 = 1,
228 BID_SIL3131 = 2,
229
230 /* host flags */
231 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
232 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
233 ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY,
234 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
235
236 IRQ_STAT_4PORTS = 0xf,
237};
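
The PORT_IRQ_STAT split is worth a worked example: the low bits report the masked (enabled) interrupt causes while bits[27:16] mirror the raw causes regardless of the enable register, which is why sil24_softreset() below shifts by PORT_IRQ_RAW_SHIFT before testing COMPLETE/ERROR. A small decode sketch:

#include <assert.h>
#include <stdint.h>

#define PORT_IRQ_RAW_SHIFT	16
#define PORT_IRQ_MASKED_MASK	0x7ffu
#define PORT_IRQ_COMPLETE	(1u << 0)

int main(void)
{
	/* raw COMPLETE pending (bit 16) while the masked copy is clear,
	 * i.e. the cause fired but its enable bit is off */
	uint32_t irq_stat = PORT_IRQ_COMPLETE << PORT_IRQ_RAW_SHIFT;

	uint32_t masked = irq_stat & PORT_IRQ_MASKED_MASK;
	uint32_t raw = (irq_stat >> PORT_IRQ_RAW_SHIFT) & PORT_IRQ_MASKED_MASK;

	assert(masked == 0);
	assert(raw & PORT_IRQ_COMPLETE);
	return 0;
}
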
238
239struct sil24_ata_block {
240 struct sil24_prb prb;
241 struct sil24_sge sge[LIBATA_MAX_PRD];
242};
243
244struct sil24_atapi_block {
245 struct sil24_prb prb;
246 u8 cdb[16];
247 struct sil24_sge sge[LIBATA_MAX_PRD - 1];
248};
249
250union sil24_cmd_block {
251 struct sil24_ata_block ata;
252 struct sil24_atapi_block atapi;
253};
254
255static struct sil24_cerr_info {
256 unsigned int err_mask, action;
257 const char *desc;
258} sil24_cerr_db[] = {
259 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
260 "device error" },
261 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
262 "device error via D2H FIS" },
263 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
264 "device error via SDB FIS" },
265 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
266 "error in data FIS" },
267 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
268 "failed to transmit command FIS" },
269 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
270 "protocol mismatch" },
271 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
272 "data direction mismatch" },
273 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
274 "ran out of SGEs while writing" },
275 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
276 "ran out of SGEs while reading" },
277 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
278 "invalid data direction for ATAPI CDB" },
279 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
280 "SGT not on qword boundary" },
281 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
282 "PCI target abort while fetching SGT" },
283 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
284 "PCI master abort while fetching SGT" },
285 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
286 "PCI parity error while fetching SGT" },
287 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
288 "PRB not on qword boundary" },
289 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
290 "PCI target abort while fetching PRB" },
291 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
292 "PCI master abort while fetching PRB" },
293 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
294 "PCI parity error while fetching PRB" },
295 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
296 "undefined error while transferring data" },
297 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
298 "PCI target abort while transferring data" },
299 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
300 "PCI master abort while transferring data" },
301 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
302 "PCI parity error while transferring data" },
303 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
304 "FIS received while sending service FIS" },
305};
306
307/*
308 * ap->private_data
309 *
310 * The preview driver always returned 0 for status. We instead keep
311 * a cached taskfile, updated from the received FIS, and emulate status from it.
312 */
313struct sil24_port_priv {
314 union sil24_cmd_block *cmd_block; /* 32 cmd blocks */
315 dma_addr_t cmd_block_dma; /* DMA base addr for them */
316 struct ata_taskfile tf; /* Cached taskfile registers */
317};
318
319/* ap->host->private_data */
320struct sil24_host_priv {
321 void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
322 void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
323};
324
325static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev);
326static u8 sil24_check_status(struct ata_port *ap);
327static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
328static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
329static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
330static void sil24_qc_prep(struct ata_queued_cmd *qc);
331static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
332static void sil24_irq_clear(struct ata_port *ap);
333static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
334static void sil24_freeze(struct ata_port *ap);
335static void sil24_thaw(struct ata_port *ap);
336static void sil24_error_handler(struct ata_port *ap);
337static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
338static int sil24_port_start(struct ata_port *ap);
339static void sil24_port_stop(struct ata_port *ap);
340static void sil24_host_stop(struct ata_host *host);
341static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
342#ifdef CONFIG_PM
343static int sil24_pci_device_resume(struct pci_dev *pdev);
344#endif
345
346static const struct pci_device_id sil24_pci_tbl[] = {
347 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
348 { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
349 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
350 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
351 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
352 { } /* terminate list */
353};
354
355static struct pci_driver sil24_pci_driver = {
356 .name = DRV_NAME,
357 .id_table = sil24_pci_tbl,
358 .probe = sil24_init_one,
359 .remove = ata_pci_remove_one, /* safe? */
360#ifdef CONFIG_PM
361 .suspend = ata_pci_device_suspend,
362 .resume = sil24_pci_device_resume,
363#endif
364};
365
366static struct scsi_host_template sil24_sht = {
367 .module = THIS_MODULE,
368 .name = DRV_NAME,
369 .ioctl = ata_scsi_ioctl,
370 .queuecommand = ata_scsi_queuecmd,
371 .change_queue_depth = ata_scsi_change_queue_depth,
372 .can_queue = SIL24_MAX_CMDS,
373 .this_id = ATA_SHT_THIS_ID,
374 .sg_tablesize = LIBATA_MAX_PRD,
375 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
376 .emulated = ATA_SHT_EMULATED,
377 .use_clustering = ATA_SHT_USE_CLUSTERING,
378 .proc_name = DRV_NAME,
379 .dma_boundary = ATA_DMA_BOUNDARY,
380 .slave_configure = ata_scsi_slave_config,
381 .slave_destroy = ata_scsi_slave_destroy,
382 .bios_param = ata_std_bios_param,
383 .suspend = ata_scsi_device_suspend,
384 .resume = ata_scsi_device_resume,
385};
386
387static const struct ata_port_operations sil24_ops = {
388 .port_disable = ata_port_disable,
389
390 .dev_config = sil24_dev_config,
391
392 .check_status = sil24_check_status,
393 .check_altstatus = sil24_check_status,
394 .dev_select = ata_noop_dev_select,
395
396 .tf_read = sil24_tf_read,
397
398 .qc_prep = sil24_qc_prep,
399 .qc_issue = sil24_qc_issue,
400
401 .irq_handler = sil24_interrupt,
402 .irq_clear = sil24_irq_clear,
403
404 .scr_read = sil24_scr_read,
405 .scr_write = sil24_scr_write,
406
407 .freeze = sil24_freeze,
408 .thaw = sil24_thaw,
409 .error_handler = sil24_error_handler,
410 .post_internal_cmd = sil24_post_internal_cmd,
411
412 .port_start = sil24_port_start,
413 .port_stop = sil24_port_stop,
414 .host_stop = sil24_host_stop,
415};
416
417/*
418 * Use bits 30-31 of port_flags to encode available port numbers.
419 * Current maximum is 4.
420 */
421#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
422#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
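
A quick check that the two macros round-trip for every supported port count:

#include <assert.h>

#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)

int main(void)
{
	unsigned int n;

	/* encode then decode every legal port count (1-4) */
	for (n = 1; n <= 4; n++)
		assert(SIL24_FLAG2NPORTS(SIL24_NPORTS2FLAG(n)) == n);
	return 0;
}
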
423
424static struct ata_port_info sil24_port_info[] = {
425 /* sil_3124 */
426 {
427 .sht = &sil24_sht,
428 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
429 SIL24_FLAG_PCIX_IRQ_WOC,
430 .pio_mask = 0x1f, /* pio0-4 */
431 .mwdma_mask = 0x07, /* mwdma0-2 */
432 .udma_mask = 0x3f, /* udma0-5 */
433 .port_ops = &sil24_ops,
434 },
435 /* sil_3132 */
436 {
437 .sht = &sil24_sht,
438 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
439 .pio_mask = 0x1f, /* pio0-4 */
440 .mwdma_mask = 0x07, /* mwdma0-2 */
441 .udma_mask = 0x3f, /* udma0-5 */
442 .port_ops = &sil24_ops,
443 },
444 /* sil_3131/sil_3531 */
445 {
446 .sht = &sil24_sht,
447 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
448 .pio_mask = 0x1f, /* pio0-4 */
449 .mwdma_mask = 0x07, /* mwdma0-2 */
450 .udma_mask = 0x3f, /* udma0-5 */
451 .port_ops = &sil24_ops,
452 },
453};
454
455static int sil24_tag(int tag)
456{
457 if (unlikely(ata_tag_internal(tag)))
458 return 0;
459 return tag;
460}
461
462static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
463{
464 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
465
466 if (dev->cdb_len == 16)
467 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
468 else
469 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
470}
471
472static inline void sil24_update_tf(struct ata_port *ap)
473{
474 struct sil24_port_priv *pp = ap->private_data;
475 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
476 struct sil24_prb __iomem *prb = port;
477 u8 fis[6 * 4];
478
479 memcpy_fromio(fis, prb->fis, 6 * 4);
480 ata_tf_from_fis(fis, &pp->tf);
481}
482
483static u8 sil24_check_status(struct ata_port *ap)
484{
485 struct sil24_port_priv *pp = ap->private_data;
486 return pp->tf.command;
487}
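
The emulation works because the controller deposits the received D2H register FIS in LRAM, and byte 2 of that FIS carries the ATA Status register (byte 3 the Error register), which is all sil24_check_status() needs. A hypothetical mini-decoder under that assumption (mini_tf and mini_tf_from_fis are illustrative names, not the libata helpers):

#include <assert.h>
#include <stdint.h>

struct mini_tf {
	uint8_t command;	/* holds Status on read-back */
	uint8_t feature;	/* holds Error on read-back */
};

static void mini_tf_from_fis(const uint8_t fis[6 * 4], struct mini_tf *tf)
{
	tf->command = fis[2];	/* D2H FIS byte 2: ATA Status */
	tf->feature = fis[3];	/* D2H FIS byte 3: ATA Error */
}

int main(void)
{
	uint8_t fis[6 * 4] = { 0x34 /* D2H type */, 0, 0x50 /* DRDY|DSC */, 0 };
	struct mini_tf tf;

	mini_tf_from_fis(fis, &tf);
	assert(tf.command == 0x50);
	return 0;
}
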
488
489static int sil24_scr_map[] = {
490 [SCR_CONTROL] = 0,
491 [SCR_STATUS] = 1,
492 [SCR_ERROR] = 2,
493 [SCR_ACTIVE] = 3,
494};
495
496static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
497{
498 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
499 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
500 void __iomem *addr;
501 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
502 return readl(addr);
503 }
504 return 0xffffffffU;
505}
506
507static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
508{
509 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
510 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
511 void __iomem *addr;
512 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
513 writel(val, addr);
514 }
515}
516
517static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
518{
519 struct sil24_port_priv *pp = ap->private_data;
520 *tf = pp->tf;
521}
522
523static int sil24_init_port(struct ata_port *ap)
524{
525 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
526 u32 tmp;
527
528 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
529 ata_wait_register(port + PORT_CTRL_STAT,
530 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
531 tmp = ata_wait_register(port + PORT_CTRL_STAT,
532 PORT_CS_RDY, 0, 10, 100);
533
534 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
535 return -EIO;
536 return 0;
537}
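
sil24_init_port() leans on ata_wait_register(), which polls while (value & mask) == val and hands back the last value read so the caller can inspect the final bits. A user-space sketch of those semantics, with a function pointer standing in for readl() and the inter-poll sleep elided:

#include <stdint.h>

typedef uint32_t (*reg_read_fn)(void);

/* poll while (value & mask) == val, give up after timeout_msec,
 * and return the last value read */
static uint32_t wait_register(reg_read_fn read_reg, uint32_t mask,
			      uint32_t val, unsigned long interval_msec,
			      unsigned long timeout_msec)
{
	unsigned long waited = 0;
	uint32_t tmp = read_reg();

	while ((tmp & mask) == val && waited < timeout_msec) {
		/* a real implementation sleeps interval_msec here */
		waited += interval_msec;
		tmp = read_reg();
	}
	return tmp;
}

/* toy register that reports readiness (bit 31, as PORT_CS_RDY) at once */
static uint32_t ready_reg(void) { return 1u << 31; }

int main(void)
{
	/* wait until bit 31 leaves the all-clear state */
	uint32_t tmp = wait_register(ready_reg, 1u << 31, 0, 10, 100);

	return (tmp & (1u << 31)) ? 0 : 1;
}
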
538
539static int sil24_softreset(struct ata_port *ap, unsigned int *class)
540{
541 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
542 struct sil24_port_priv *pp = ap->private_data;
543 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
544 dma_addr_t paddr = pp->cmd_block_dma;
545 u32 mask, irq_stat;
546 const char *reason;
547
548 DPRINTK("ENTER\n");
549
550 if (ata_port_offline(ap)) {
551 DPRINTK("PHY reports no device\n");
552 *class = ATA_DEV_NONE;
553 goto out;
554 }
555
556 /* put the port into known state */
557 if (sil24_init_port(ap)) {
558 reason = "port not ready";
559 goto err;
560 }
561
562 /* do SRST */
563 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
564 prb->fis[1] = 0; /* no PM yet */
565
566 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
567 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
568
569 mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
570 irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
571 100, ATA_TMOUT_BOOT / HZ * 1000);
572
573 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
574 irq_stat >>= PORT_IRQ_RAW_SHIFT;
575
576 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
577 if (irq_stat & PORT_IRQ_ERROR)
578 reason = "SRST command error";
579 else
580 reason = "timeout";
581 goto err;
582 }
583
584 sil24_update_tf(ap);
585 *class = ata_dev_classify(&pp->tf);
586
587 if (*class == ATA_DEV_UNKNOWN)
588 *class = ATA_DEV_NONE;
589
590 out:
591 DPRINTK("EXIT, class=%u\n", *class);
592 return 0;
593
594 err:
595 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
596 return -EIO;
597}
598
599static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
600{
601 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
602 const char *reason;
603 int tout_msec, rc;
604 u32 tmp;
605
606 /* sil24 does the right thing(tm) without any protection */
607 sata_set_spd(ap);
608
609 tout_msec = 100;
610 if (ata_port_online(ap))
611 tout_msec = 5000;
612
613 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
614 tmp = ata_wait_register(port + PORT_CTRL_STAT,
615 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
616
617 /* SStatus oscillates between zero and valid status after
618 * DEV_RST, debounce it.
619 */
620 rc = sata_phy_debounce(ap, sata_deb_timing_long);
621 if (rc) {
622 reason = "PHY debouncing failed";
623 goto err;
624 }
625
626 if (tmp & PORT_CS_DEV_RST) {
627 if (ata_port_offline(ap))
628 return 0;
629 reason = "link not ready";
630 goto err;
631 }
632
633 /* Sil24 doesn't store signature FIS after hardreset, so we
634 * can't wait for BSY to clear. Some devices take a long time
635 * to get ready and those devices will choke if we don't wait
636 * for BSY clearance here. Tell libata to perform follow-up
637 * softreset.
638 */
639 return -EAGAIN;
640
641 err:
642 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
643 return -EIO;
644}
645
646static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
647 struct sil24_sge *sge)
648{
649 struct scatterlist *sg;
650 unsigned int idx = 0;
651
652 ata_for_each_sg(sg, qc) {
653 sge->addr = cpu_to_le64(sg_dma_address(sg));
654 sge->cnt = cpu_to_le32(sg_dma_len(sg));
655 if (ata_sg_is_last(sg, qc))
656 sge->flags = cpu_to_le32(SGE_TRM);
657 else
658 sge->flags = 0;
659
660 sge++;
661 idx++;
662 }
663}
664
665static void sil24_qc_prep(struct ata_queued_cmd *qc)
666{
667 struct ata_port *ap = qc->ap;
668 struct sil24_port_priv *pp = ap->private_data;
669 union sil24_cmd_block *cb;
670 struct sil24_prb *prb;
671 struct sil24_sge *sge;
672 u16 ctrl = 0;
673
674 cb = &pp->cmd_block[sil24_tag(qc->tag)];
675
676 switch (qc->tf.protocol) {
677 case ATA_PROT_PIO:
678 case ATA_PROT_DMA:
679 case ATA_PROT_NCQ:
680 case ATA_PROT_NODATA:
681 prb = &cb->ata.prb;
682 sge = cb->ata.sge;
683 break;
684
685 case ATA_PROT_ATAPI:
686 case ATA_PROT_ATAPI_DMA:
687 case ATA_PROT_ATAPI_NODATA:
688 prb = &cb->atapi.prb;
689 sge = cb->atapi.sge;
690 memset(cb->atapi.cdb, 0, 32);
691 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
692
693 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
694 if (qc->tf.flags & ATA_TFLAG_WRITE)
695 ctrl = PRB_CTRL_PACKET_WRITE;
696 else
697 ctrl = PRB_CTRL_PACKET_READ;
698 }
699 break;
700
701 default:
702 prb = NULL; /* shut up, gcc */
703 sge = NULL;
704 BUG();
705 }
706
707 prb->ctrl = cpu_to_le16(ctrl);
708 ata_tf_to_fis(&qc->tf, prb->fis, 0);
709
710 if (qc->flags & ATA_QCFLAG_DMAMAP)
711 sil24_fill_sg(qc, sge);
712}
713
714static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
715{
716 struct ata_port *ap = qc->ap;
717 struct sil24_port_priv *pp = ap->private_data;
718 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
719 unsigned int tag = sil24_tag(qc->tag);
720 dma_addr_t paddr;
721 void __iomem *activate;
722
723 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
724 activate = port + PORT_CMD_ACTIVATE + tag * 8;
725
726 writel((u32)paddr, activate);
727 writel((u64)paddr >> 32, activate + 4);
728
729 return 0;
730}
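
Issuing a command is just two MMIO writes because each tag owns one 64-bit activation register at PORT_CMD_ACTIVATE + tag * 8 and one command block at cmd_block_dma + tag * sizeof(block). A worked example of the address math; the 2080-byte block size assumes LIBATA_MAX_PRD == 128 (32-byte PRB plus 128 16-byte SGEs) and is illustrative only:

#include <assert.h>
#include <stdint.h>

#define PORT_CMD_ACTIVATE	0x1c00

int main(void)
{
	unsigned int tag = 5;
	uint64_t cmd_block_dma = 0x10000000ull;	/* arbitrary DMA base */
	uint64_t cmd_block_size = 2080;		/* assumed, see above */

	uint64_t paddr = cmd_block_dma + tag * cmd_block_size;
	uint32_t activate_off = PORT_CMD_ACTIVATE + tag * 8;

	assert(activate_off == 0x1c28);		/* slot 5's register pair */
	assert(paddr == 0x10000000ull + 10400);	/* slot 5's command block */
	return 0;
}
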
731
732static void sil24_irq_clear(struct ata_port *ap)
733{
734 /* unused */
735}
736
737static void sil24_freeze(struct ata_port *ap)
738{
739 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
740
741 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
742 * PORT_IRQ_ENABLE instead.
743 */
744 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
745}
746
747static void sil24_thaw(struct ata_port *ap)
748{
749 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
750 u32 tmp;
751
752 /* clear IRQ */
753 tmp = readl(port + PORT_IRQ_STAT);
754 writel(tmp, port + PORT_IRQ_STAT);
755
756 /* turn IRQ back on */
757 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
758}
759
760static void sil24_error_intr(struct ata_port *ap)
761{
762 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
763 struct ata_eh_info *ehi = &ap->eh_info;
764 int freeze = 0;
765 u32 irq_stat;
766
767 /* on error, we need to clear IRQ explicitly */
768 irq_stat = readl(port + PORT_IRQ_STAT);
769 writel(irq_stat, port + PORT_IRQ_STAT);
770
771 /* first, analyze and record host port events */
772 ata_ehi_clear_desc(ehi);
773
774 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
775
776 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
777 ata_ehi_hotplugged(ehi);
778 ata_ehi_push_desc(ehi, ", %s",
779 irq_stat & PORT_IRQ_PHYRDY_CHG ?
780 "PHY RDY changed" : "device exchanged");
781 freeze = 1;
782 }
783
784 if (irq_stat & PORT_IRQ_UNK_FIS) {
785 ehi->err_mask |= AC_ERR_HSM;
786 ehi->action |= ATA_EH_SOFTRESET;
787 ata_ehi_push_desc(ehi, ", unknown FIS");
788 freeze = 1;
789 }
790
791 /* deal with command error */
792 if (irq_stat & PORT_IRQ_ERROR) {
793 struct sil24_cerr_info *ci = NULL;
794 unsigned int err_mask = 0, action = 0;
795 struct ata_queued_cmd *qc;
796 u32 cerr;
797
798 /* analyze CMD_ERR */
799 cerr = readl(port + PORT_CMD_ERR);
800 if (cerr < ARRAY_SIZE(sil24_cerr_db))
801 ci = &sil24_cerr_db[cerr];
802
803 if (ci && ci->desc) {
804 err_mask |= ci->err_mask;
805 action |= ci->action;
806 ata_ehi_push_desc(ehi, ", %s", ci->desc);
807 } else {
808 err_mask |= AC_ERR_OTHER;
809 action |= ATA_EH_SOFTRESET;
810 ata_ehi_push_desc(ehi, ", unknown command error %d",
811 cerr);
812 }
813
814 /* record error info */
815 qc = ata_qc_from_tag(ap, ap->active_tag);
816 if (qc) {
817 sil24_update_tf(ap);
818 qc->err_mask |= err_mask;
819 } else
820 ehi->err_mask |= err_mask;
821
822 ehi->action |= action;
823 }
824
825 /* freeze or abort */
826 if (freeze)
827 ata_port_freeze(ap);
828 else
829 ata_port_abort(ap);
830}
831
832static void sil24_finish_qc(struct ata_queued_cmd *qc)
833{
834 if (qc->flags & ATA_QCFLAG_RESULT_TF)
835 sil24_update_tf(qc->ap);
836}
837
838static inline void sil24_host_intr(struct ata_port *ap)
839{
840 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
841 u32 slot_stat, qc_active;
842 int rc;
843
844 slot_stat = readl(port + PORT_SLOT_STAT);
845
846 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
847 sil24_error_intr(ap);
848 return;
849 }
850
851 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
852 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
853
854 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
855 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
856 if (rc > 0)
857 return;
858 if (rc < 0) {
859 struct ata_eh_info *ehi = &ap->eh_info;
860 ehi->err_mask |= AC_ERR_HSM;
861 ehi->action |= ATA_EH_SOFTRESET;
862 ata_port_freeze(ap);
863 return;
864 }
865
866 if (ata_ratelimit())
867 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
868 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
869 slot_stat, ap->active_tag, ap->sactive);
870}
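
The completion path hands ata_qc_complete_multiple() the hardware's still-active slot mask; any tag the driver believes is outstanding but the hardware no longer reports must have finished. A toy illustration of that set difference:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t driver_active = 0x2d;	/* tags 0, 2, 3 and 5 issued */
	uint32_t slot_stat = 0x21;	/* hardware: tags 0 and 5 still busy */

	uint32_t done_mask = driver_active & ~slot_stat;

	assert(done_mask == 0x0c);	/* tags 2 and 3 completed */
	return 0;
}
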
871
872static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
873{
874 struct ata_host *host = dev_instance;
875 struct sil24_host_priv *hpriv = host->private_data;
876 unsigned handled = 0;
877 u32 status;
878 int i;
879
880 status = readl(hpriv->host_base + HOST_IRQ_STAT);
881
882 if (status == 0xffffffff) {
883 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
884 "PCI fault or device removal?\n");
885 goto out;
886 }
887
888 if (!(status & IRQ_STAT_4PORTS))
889 goto out;
890
891 spin_lock(&host->lock);
892
893 for (i = 0; i < host->n_ports; i++)
894 if (status & (1 << i)) {
895 struct ata_port *ap = host->ports[i];
896 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
897 sil24_host_intr(host->ports[i]);
898 handled++;
899 } else
900 printk(KERN_ERR DRV_NAME
901 ": interrupt from disabled port %d\n", i);
902 }
903
904 spin_unlock(&host->lock);
905 out:
906 return IRQ_RETVAL(handled);
907}
908
909static void sil24_error_handler(struct ata_port *ap)
910{
911 struct ata_eh_context *ehc = &ap->eh_context;
912
913 if (sil24_init_port(ap)) {
914 ata_eh_freeze_port(ap);
915 ehc->i.action |= ATA_EH_HARDRESET;
916 }
917
918 /* perform recovery */
919 ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
920 ata_std_postreset);
921}
922
923static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
924{
925 struct ata_port *ap = qc->ap;
926
927 if (qc->flags & ATA_QCFLAG_FAILED)
928 qc->err_mask |= AC_ERR_OTHER;
929
930 /* make DMA engine forget about the failed command */
931 if (qc->err_mask)
932 sil24_init_port(ap);
933}
934
935static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
936{
937 const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
938
939 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
940}
941
942static int sil24_port_start(struct ata_port *ap)
943{
944 struct device *dev = ap->host->dev;
945 struct sil24_port_priv *pp;
946 union sil24_cmd_block *cb;
947 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
948 dma_addr_t cb_dma;
949 int rc = -ENOMEM;
950
951 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
952 if (!pp)
953 goto err_out;
954
955 pp->tf.command = ATA_DRDY;
956
957 cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
958 if (!cb)
959 goto err_out_pp;
960 memset(cb, 0, cb_size);
961
962 rc = ata_pad_alloc(ap, dev);
963 if (rc)
964 goto err_out_pad;
965
966 pp->cmd_block = cb;
967 pp->cmd_block_dma = cb_dma;
968
969 ap->private_data = pp;
970
971 return 0;
972
973err_out_pad:
974 sil24_cblk_free(pp, dev);
975err_out_pp:
976 kfree(pp);
977err_out:
978 return rc;
979}
980
981static void sil24_port_stop(struct ata_port *ap)
982{
983 struct device *dev = ap->host->dev;
984 struct sil24_port_priv *pp = ap->private_data;
985
986 sil24_cblk_free(pp, dev);
987 ata_pad_free(ap, dev);
988 kfree(pp);
989}
990
991static void sil24_host_stop(struct ata_host *host)
992{
993 struct sil24_host_priv *hpriv = host->private_data;
994 struct pci_dev *pdev = to_pci_dev(host->dev);
995
996 pci_iounmap(pdev, hpriv->host_base);
997 pci_iounmap(pdev, hpriv->port_base);
998 kfree(hpriv);
999}
1000
1001static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
1002 unsigned long port_flags,
1003 void __iomem *host_base,
1004 void __iomem *port_base)
1005{
1006 u32 tmp;
1007 int i;
1008
1009 /* GPIO off */
1010 writel(0, host_base + HOST_FLASH_CMD);
1011
1012 /* clear global reset & mask interrupts during initialization */
1013 writel(0, host_base + HOST_CTRL);
1014
1015 /* init ports */
1016 for (i = 0; i < n_ports; i++) {
1017 void __iomem *port = port_base + i * PORT_REGS_SIZE;
1018
1019 /* Initial PHY setting */
1020 writel(0x20c, port + PORT_PHY_CFG);
1021
1022 /* Clear port RST */
1023 tmp = readl(port + PORT_CTRL_STAT);
1024 if (tmp & PORT_CS_PORT_RST) {
1025 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
1026 tmp = ata_wait_register(port + PORT_CTRL_STAT,
1027 PORT_CS_PORT_RST,
1028 PORT_CS_PORT_RST, 10, 100);
1029 if (tmp & PORT_CS_PORT_RST)
1030 dev_printk(KERN_ERR, &pdev->dev,
1031 "failed to clear port RST\n");
1032 }
1033
1034 /* Configure IRQ WoC */
1035 if (port_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1036 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1037 else
1038 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1039
1040 /* Zero error counters. */
1041 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
1042 writel(0x8000, port + PORT_CRC_ERR_THRESH);
1043 writel(0x8000, port + PORT_HSHK_ERR_THRESH);
1044 writel(0x0000, port + PORT_DECODE_ERR_CNT);
1045 writel(0x0000, port + PORT_CRC_ERR_CNT);
1046 writel(0x0000, port + PORT_HSHK_ERR_CNT);
1047
1048 /* Always use 64bit activation */
1049 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1050
1051 /* Clear port multiplier enable and resume bits */
1052 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
1053 }
1054
1055 /* Turn on interrupts */
1056 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
1057}
1058
1059static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1060{
1061 static int printed_version = 0;
1062 unsigned int board_id = (unsigned int)ent->driver_data;
1063 struct ata_port_info *pinfo = &sil24_port_info[board_id];
1064 struct ata_probe_ent *probe_ent = NULL;
1065 struct sil24_host_priv *hpriv = NULL;
1066 void __iomem *host_base = NULL;
1067 void __iomem *port_base = NULL;
1068 int i, rc;
1069 u32 tmp;
1070
1071 if (!printed_version++)
1072 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1073
1074 rc = pci_enable_device(pdev);
1075 if (rc)
1076 return rc;
1077
1078 rc = pci_request_regions(pdev, DRV_NAME);
1079 if (rc)
1080 goto out_disable;
1081
1082 rc = -ENOMEM;
1083 /* map mmio registers */
1084 host_base = pci_iomap(pdev, 0, 0);
1085 if (!host_base)
1086 goto out_free;
1087 port_base = pci_iomap(pdev, 2, 0);
1088 if (!port_base)
1089 goto out_free;
1090
1091 /* allocate & init probe_ent and hpriv */
1092 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
1093 if (!probe_ent)
1094 goto out_free;
1095
1096 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
1097 if (!hpriv)
1098 goto out_free;
1099
1100 probe_ent->dev = pci_dev_to_dev(pdev);
1101 INIT_LIST_HEAD(&probe_ent->node);
1102
1103 probe_ent->sht = pinfo->sht;
1104 probe_ent->port_flags = pinfo->flags;
1105 probe_ent->pio_mask = pinfo->pio_mask;
1106 probe_ent->mwdma_mask = pinfo->mwdma_mask;
1107 probe_ent->udma_mask = pinfo->udma_mask;
1108 probe_ent->port_ops = pinfo->port_ops;
1109 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->flags);
1110
1111 probe_ent->irq = pdev->irq;
1112 probe_ent->irq_flags = IRQF_SHARED;
1113 probe_ent->private_data = hpriv;
1114
1115 hpriv->host_base = host_base;
1116 hpriv->port_base = port_base;
1117
1118 /*
1119 * Configure the device
1120 */
1121 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1122 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1123 if (rc) {
1124 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1125 if (rc) {
1126 dev_printk(KERN_ERR, &pdev->dev,
1127 "64-bit DMA enable failed\n");
1128 goto out_free;
1129 }
1130 }
1131 } else {
1132 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1133 if (rc) {
1134 dev_printk(KERN_ERR, &pdev->dev,
1135 "32-bit DMA enable failed\n");
1136 goto out_free;
1137 }
1138 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1139 if (rc) {
1140 dev_printk(KERN_ERR, &pdev->dev,
1141 "32-bit consistent DMA enable failed\n");
1142 goto out_free;
1143 }
1144 }
1145
1146 /* Apply workaround for completion IRQ loss on PCI-X errata */
1147 if (probe_ent->port_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1148 tmp = readl(host_base + HOST_CTRL);
1149 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
1150 dev_printk(KERN_INFO, &pdev->dev,
1151 "Applying completion IRQ loss on PCI-X "
1152 "errata fix\n");
1153 else
1154 probe_ent->port_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1155 }
1156
1157 for (i = 0; i < probe_ent->n_ports; i++) {
1158 unsigned long portu =
1159 (unsigned long)port_base + i * PORT_REGS_SIZE;
1160
1161 probe_ent->port[i].cmd_addr = portu;
1162 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
1163
1164 ata_std_ports(&probe_ent->port[i]);
1165 }
1166
1167 sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
1168 host_base, port_base);
1169
1170 pci_set_master(pdev);
1171
1172 /* FIXME: check ata_device_add return value */
1173 ata_device_add(probe_ent);
1174
1175 kfree(probe_ent);
1176 return 0;
1177
1178 out_free:
1179 if (host_base)
1180 pci_iounmap(pdev, host_base);
1181 if (port_base)
1182 pci_iounmap(pdev, port_base);
1183 kfree(probe_ent);
1184 kfree(hpriv);
1185 pci_release_regions(pdev);
1186 out_disable:
1187 pci_disable_device(pdev);
1188 return rc;
1189}
1190
1191#ifdef CONFIG_PM
1192static int sil24_pci_device_resume(struct pci_dev *pdev)
1193{
1194 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1195 struct sil24_host_priv *hpriv = host->private_data;
1196
1197 ata_pci_device_do_resume(pdev);
1198
1199 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
1200 writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
1201
1202 sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags,
1203 hpriv->host_base, hpriv->port_base);
1204
1205 ata_host_resume(host);
1206
1207 return 0;
1208}
1209#endif
1210
1211static int __init sil24_init(void)
1212{
1213 return pci_register_driver(&sil24_pci_driver);
1214}
1215
1216static void __exit sil24_exit(void)
1217{
1218 pci_unregister_driver(&sil24_pci_driver);
1219}
1220
1221MODULE_AUTHOR("Tejun Heo");
1222MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
1223MODULE_LICENSE("GPL");
1224MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
1225
1226module_init(sil24_init);
1227module_exit(sil24_exit);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
new file mode 100644
index 000000000000..9b17375d8056
--- /dev/null
+++ b/drivers/ata/sata_sis.c
@@ -0,0 +1,347 @@
1/*
2 * sata_sis.c - Silicon Integrated Systems SATA
3 *
4 * Maintained by: Uwe Koziolek
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 Uwe Koziolek
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/device.h>
41#include <scsi/scsi_host.h>
42#include <linux/libata.h>
43
44#define DRV_NAME "sata_sis"
45#define DRV_VERSION "0.6"
46
47enum {
48 sis_180 = 0,
49 SIS_SCR_PCI_BAR = 5,
50
51 /* PCI configuration registers */
52 SIS_GENCTL = 0x54, /* IDE General Control register */
53 SIS_SCR_BASE = 0xc0, /* sata0 phy SCR registers */
54 SIS180_SATA1_OFS = 0x10, /* offset from sata0->sata1 phy regs */
55 SIS182_SATA1_OFS = 0x20, /* offset from sata0->sata1 phy regs */
56 SIS_PMR = 0x90, /* port mapping register */
57 SIS_PMR_COMBINED = 0x30,
58
59 /* random bits */
60 SIS_FLAG_CFGSCR = (1 << 30), /* host flag: SCRs via PCI cfg */
61
62 GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */
63};
64
65static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
66static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg);
67static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
68
69static const struct pci_device_id sis_pci_tbl[] = {
70 { PCI_VENDOR_ID_SI, 0x180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
71 { PCI_VENDOR_ID_SI, 0x181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
72 { PCI_VENDOR_ID_SI, 0x182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
73 { } /* terminate list */
74};
75
76
77static struct pci_driver sis_pci_driver = {
78 .name = DRV_NAME,
79 .id_table = sis_pci_tbl,
80 .probe = sis_init_one,
81 .remove = ata_pci_remove_one,
82};
83
84static struct scsi_host_template sis_sht = {
85 .module = THIS_MODULE,
86 .name = DRV_NAME,
87 .ioctl = ata_scsi_ioctl,
88 .queuecommand = ata_scsi_queuecmd,
89 .can_queue = ATA_DEF_QUEUE,
90 .this_id = ATA_SHT_THIS_ID,
91 .sg_tablesize = ATA_MAX_PRD,
92 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
93 .emulated = ATA_SHT_EMULATED,
94 .use_clustering = ATA_SHT_USE_CLUSTERING,
95 .proc_name = DRV_NAME,
96 .dma_boundary = ATA_DMA_BOUNDARY,
97 .slave_configure = ata_scsi_slave_config,
98 .slave_destroy = ata_scsi_slave_destroy,
99 .bios_param = ata_std_bios_param,
100};
101
102static const struct ata_port_operations sis_ops = {
103 .port_disable = ata_port_disable,
104 .tf_load = ata_tf_load,
105 .tf_read = ata_tf_read,
106 .check_status = ata_check_status,
107 .exec_command = ata_exec_command,
108 .dev_select = ata_std_dev_select,
109 .bmdma_setup = ata_bmdma_setup,
110 .bmdma_start = ata_bmdma_start,
111 .bmdma_stop = ata_bmdma_stop,
112 .bmdma_status = ata_bmdma_status,
113 .qc_prep = ata_qc_prep,
114 .qc_issue = ata_qc_issue_prot,
115 .data_xfer = ata_pio_data_xfer,
116 .freeze = ata_bmdma_freeze,
117 .thaw = ata_bmdma_thaw,
118 .error_handler = ata_bmdma_error_handler,
119 .post_internal_cmd = ata_bmdma_post_internal_cmd,
120 .irq_handler = ata_interrupt,
121 .irq_clear = ata_bmdma_irq_clear,
122 .scr_read = sis_scr_read,
123 .scr_write = sis_scr_write,
124 .port_start = ata_port_start,
125 .port_stop = ata_port_stop,
126 .host_stop = ata_host_stop,
127};
128
129static struct ata_port_info sis_port_info = {
130 .sht = &sis_sht,
131 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
132 .pio_mask = 0x1f,
133 .mwdma_mask = 0x7,
134 .udma_mask = 0x7f,
135 .port_ops = &sis_ops,
136};
137
138
139MODULE_AUTHOR("Uwe Koziolek");
140MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
141MODULE_LICENSE("GPL");
142MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
143MODULE_VERSION(DRV_VERSION);
144
145static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg, int device)
146{
147 unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
148
149 if (port_no) {
150 if (device == 0x182)
151 addr += SIS182_SATA1_OFS;
152 else
153 addr += SIS180_SATA1_OFS;
154 }
155
156 return addr;
157}
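
Worked examples of the address math above, using libata's SCR numbering (0 = SStatus, 1 = SError, 2 = SControl); the helper is a user-space copy for illustration:

#include <assert.h>

#define SIS_SCR_BASE		0xc0
#define SIS180_SATA1_OFS	0x10
#define SIS182_SATA1_OFS	0x20

static unsigned int scr_cfg_addr(unsigned int port_no,
				 unsigned int sc_reg, int device)
{
	unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);

	if (port_no)
		addr += (device == 0x182) ? SIS182_SATA1_OFS
					  : SIS180_SATA1_OFS;
	return addr;
}

int main(void)
{
	assert(scr_cfg_addr(0, 0, 0x180) == 0xc0);	/* port 0, SStatus */
	assert(scr_cfg_addr(1, 0, 0x180) == 0xd0);	/* port 1 on 180/181 */
	assert(scr_cfg_addr(1, 1, 0x182) == 0xe4);	/* port 1 SError on 182 */
	return 0;
}
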
158
159static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
160{
161 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
162 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
163 u32 val, val2 = 0;
164 u8 pmr;
165
166 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
167 return 0xffffffff;
168
169 pci_read_config_byte(pdev, SIS_PMR, &pmr);
170
171 pci_read_config_dword(pdev, cfg_addr, &val);
172
173 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
174 pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
175
176 return val|val2;
177}
178
179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
180{
181 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
182 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device);
183 u8 pmr;
184
185 if (scr == SCR_ERROR) /* doesn't exist in PCI cfg space */
186 return;
187
188 pci_read_config_byte(pdev, SIS_PMR, &pmr);
189
190 pci_write_config_dword(pdev, cfg_addr, val);
191
192 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
193 pci_write_config_dword(pdev, cfg_addr+0x10, val);
194}
195
196static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
197{
198 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
199 u32 val, val2 = 0;
200 u8 pmr;
201
202 if (sc_reg > SCR_CONTROL)
203 return 0xffffffffU;
204
205 if (ap->flags & SIS_FLAG_CFGSCR)
206 return sis_scr_cfg_read(ap, sc_reg);
207
208 pci_read_config_byte(pdev, SIS_PMR, &pmr);
209
210 val = inl(ap->ioaddr.scr_addr + (sc_reg * 4));
211
212 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
213 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
214
215 return val | val2;
216}
217
218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
219{
220 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
221 u8 pmr;
222
223 if (sc_reg > SCR_CONTROL)
224 return;
225
226 pci_read_config_byte(pdev, SIS_PMR, &pmr);
227
228 if (ap->flags & SIS_FLAG_CFGSCR)
229 sis_scr_cfg_write(ap, sc_reg, val);
230 else {
231 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
232 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
233 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
234 }
235}
236
237static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
238{
239 static int printed_version;
240 struct ata_probe_ent *probe_ent = NULL;
241 int rc;
242 u32 genctl;
243 struct ata_port_info *ppi;
244 int pci_dev_busy = 0;
245 u8 pmr;
246 u8 port2_start;
247
248 if (!printed_version++)
249 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
250
251 rc = pci_enable_device(pdev);
252 if (rc)
253 return rc;
254
255 rc = pci_request_regions(pdev, DRV_NAME);
256 if (rc) {
257 pci_dev_busy = 1;
258 goto err_out;
259 }
260
261 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
262 if (rc)
263 goto err_out_regions;
264 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
265 if (rc)
266 goto err_out_regions;
267
268 ppi = &sis_port_info;
269 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
270 if (!probe_ent) {
271 rc = -ENOMEM;
272 goto err_out_regions;
273 }
274
275 /* check and see if the SCRs are in IO space or PCI cfg space */
276 pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
277 if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
278 probe_ent->port_flags |= SIS_FLAG_CFGSCR;
279
280 /* if hardware thinks SCRs are in IO space, but there are
281 * no IO resources assigned, change to PCI cfg space.
282 */
283 if ((!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) &&
284 ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
285 (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
286 genctl &= ~GENCTL_IOMAPPED_SCR;
287 pci_write_config_dword(pdev, SIS_GENCTL, genctl);
288 probe_ent->port_flags |= SIS_FLAG_CFGSCR;
289 }
290
291 pci_read_config_byte(pdev, SIS_PMR, &pmr);
292 if (ent->device != 0x182) {
293 if ((pmr & SIS_PMR_COMBINED) == 0) {
294 dev_printk(KERN_INFO, &pdev->dev,
295 "Detected SiS 180/181 chipset in SATA mode\n");
296 port2_start = 64;
297 }
298 else {
299 dev_printk(KERN_INFO, &pdev->dev,
300 "Detected SiS 180/181 chipset in combined mode\n");
301 port2_start = 0;
302 }
303 }
304 else {
305 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n");
306 port2_start = 0x20;
307 }
308
309 if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) {
310 probe_ent->port[0].scr_addr =
311 pci_resource_start(pdev, SIS_SCR_PCI_BAR);
312 probe_ent->port[1].scr_addr =
313 pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start;
314 }
315
316 pci_set_master(pdev);
317 pci_intx(pdev, 1);
318
319 /* FIXME: check ata_device_add return value */
320 ata_device_add(probe_ent);
321 kfree(probe_ent);
322
323 return 0;
324
325err_out_regions:
326 pci_release_regions(pdev);
327
328err_out:
329 if (!pci_dev_busy)
330 pci_disable_device(pdev);
331 return rc;
332
333}
334
335static int __init sis_init(void)
336{
337 return pci_register_driver(&sis_pci_driver);
338}
339
340static void __exit sis_exit(void)
341{
342 pci_unregister_driver(&sis_pci_driver);
343}
344
345module_init(sis_init);
346module_exit(sis_exit);
347
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
new file mode 100644
index 000000000000..2a7e3495cf16
--- /dev/null
+++ b/drivers/ata/sata_svw.c
@@ -0,0 +1,508 @@
1/*
2 * sata_svw.c - ServerWorks / Apple K2 SATA
3 *
4 * Maintained by: Benjamin Herrenschmidt <benh@kernel.crashing.org> and
5 * Jeff Garzik <jgarzik@pobox.com>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 *
11 * Bits from Jeff Garzik, Copyright RedHat, Inc.
12 *
13 * This driver probably works with non-Apple versions of the
14 * Broadcom chipset...
15 *
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2, or (at your option)
20 * any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; see the file COPYING. If not, write to
29 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
30 *
31 *
32 * libata documentation is available via 'make {ps|pdf}docs',
33 * as Documentation/DocBook/libata.*
34 *
35 * Hardware documentation available under NDA.
36 *
37 */
38
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
46#include <linux/device.h>
47#include <scsi/scsi_host.h>
48#include <linux/libata.h>
49
50#ifdef CONFIG_PPC_OF
51#include <asm/prom.h>
52#include <asm/pci-bridge.h>
53#endif /* CONFIG_PPC_OF */
54
55#define DRV_NAME "sata_svw"
56#define DRV_VERSION "2.0"
57
58enum {
59 /* Taskfile registers offsets */
60 K2_SATA_TF_CMD_OFFSET = 0x00,
61 K2_SATA_TF_DATA_OFFSET = 0x00,
62 K2_SATA_TF_ERROR_OFFSET = 0x04,
63 K2_SATA_TF_NSECT_OFFSET = 0x08,
64 K2_SATA_TF_LBAL_OFFSET = 0x0c,
65 K2_SATA_TF_LBAM_OFFSET = 0x10,
66 K2_SATA_TF_LBAH_OFFSET = 0x14,
67 K2_SATA_TF_DEVICE_OFFSET = 0x18,
68 K2_SATA_TF_CMDSTAT_OFFSET = 0x1c,
69 K2_SATA_TF_CTL_OFFSET = 0x20,
70
71 /* DMA base */
72 K2_SATA_DMA_CMD_OFFSET = 0x30,
73
74 /* SCRs base */
75 K2_SATA_SCR_STATUS_OFFSET = 0x40,
76 K2_SATA_SCR_ERROR_OFFSET = 0x44,
77 K2_SATA_SCR_CONTROL_OFFSET = 0x48,
78
79 /* Others */
80 K2_SATA_SICR1_OFFSET = 0x80,
81 K2_SATA_SICR2_OFFSET = 0x84,
82 K2_SATA_SIM_OFFSET = 0x88,
83
84 /* Port stride */
85 K2_SATA_PORT_OFFSET = 0x100,
86};
87
88static u8 k2_stat_check_status(struct ata_port *ap);
89
90
91static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
92{
93 if (sc_reg > SCR_CONTROL)
94 return 0xffffffffU;
95 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
96}
97
98
99static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
100 u32 val)
101{
102 if (sc_reg > SCR_CONTROL)
103 return;
104 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
105}
106
107
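/* Note on access width: the K2 spaces its taskfile registers at 32-bit
 * intervals (see the offsets above), and a single 16-bit store can post
 * a register together with its LBA48 "hob" (high order byte) sibling --
 * hence the writew()/readw() calls in the taskfile helpers below where
 * a conventional SFF driver would use writeb()/readb().
 */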
108static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
109{
110 struct ata_ioports *ioaddr = &ap->ioaddr;
111 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
112
113 if (tf->ctl != ap->last_ctl) {
114 writeb(tf->ctl, ioaddr->ctl_addr);
115 ap->last_ctl = tf->ctl;
116 ata_wait_idle(ap);
117 }
118 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
119 writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
120 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
121 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
122 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
123 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
124 } else if (is_addr) {
125 writew(tf->feature, ioaddr->feature_addr);
126 writew(tf->nsect, ioaddr->nsect_addr);
127 writew(tf->lbal, ioaddr->lbal_addr);
128 writew(tf->lbam, ioaddr->lbam_addr);
129 writew(tf->lbah, ioaddr->lbah_addr);
130 }
131
132 if (tf->flags & ATA_TFLAG_DEVICE)
133 writeb(tf->device, ioaddr->device_addr);
134
135 ata_wait_idle(ap);
136}
137
138
139static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
140{
141 struct ata_ioports *ioaddr = &ap->ioaddr;
142 u16 nsect, lbal, lbam, lbah, feature;
143
144 tf->command = k2_stat_check_status(ap);
145 tf->device = readw(ioaddr->device_addr);
146 feature = readw(ioaddr->error_addr);
147 nsect = readw(ioaddr->nsect_addr);
148 lbal = readw(ioaddr->lbal_addr);
149 lbam = readw(ioaddr->lbam_addr);
150 lbah = readw(ioaddr->lbah_addr);
151
152 tf->feature = feature;
153 tf->nsect = nsect;
154 tf->lbal = lbal;
155 tf->lbam = lbam;
156 tf->lbah = lbah;
157
158 if (tf->flags & ATA_TFLAG_LBA48) {
159 tf->hob_feature = feature >> 8;
160 tf->hob_nsect = nsect >> 8;
161 tf->hob_lbal = lbal >> 8;
162 tf->hob_lbam = lbam >> 8;
163 tf->hob_lbah = lbah >> 8;
164 }
165}
166
167/**
168 * k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
169 * @qc: Info associated with this ATA transaction.
170 *
171 * LOCKING:
172 * spin_lock_irqsave(host lock)
173 */
174
175static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
176{
177 struct ata_port *ap = qc->ap;
178 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
179 u8 dmactl;
180	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
181 /* load PRD table addr. */
182 mb(); /* make sure PRD table writes are visible to controller */
183 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
184
185 /* specify data direction, triple-check start bit is clear */
186 dmactl = readb(mmio + ATA_DMA_CMD);
187 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
188 if (!rw)
189 dmactl |= ATA_DMA_WR;
190 writeb(dmactl, mmio + ATA_DMA_CMD);
191
192	/* issue r/w command if this is not an ATA DMA command */
193 if (qc->tf.protocol != ATA_PROT_DMA)
194 ap->ops->exec_command(ap, &qc->tf);
195}
196
197/**
198 * k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
199 * @qc: Info associated with this ATA transaction.
200 *
201 * LOCKING:
202 * spin_lock_irqsave(host lock)
203 */
204
205static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
206{
207 struct ata_port *ap = qc->ap;
208	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
209 u8 dmactl;
210
211 /* start host DMA transaction */
212 dmactl = readb(mmio + ATA_DMA_CMD);
213 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
214	/* There is a race condition in certain SATA controllers that can
215	   be seen when the r/w command is given to the controller before the
216	   host DMA is started. On a read command, the controller would initiate
217	   the command to the drive even before it sees the DMA start. With very
218	   fast drives connected to the controller, or when the data request
219	   hits in the drive cache, the drive may return part or all of the
220	   requested data to the controller before the DMA start is issued.
221	   In that case the controller becomes confused as to what to do with
222	   the data. In the worst case, when all the data is returned to the
223	   controller, the controller can hang. In other cases it may return
224	   partial data, resulting in data corruption. This problem has been seen
225	   on PPC systems and can also appear on any system with very fast disks
226	   where the SATA controller sits behind a number of bridges, so there is
227	   significant latency between the r/w command and the start command. */
228	/* now that DMA has started, issue the r/w command if this is an ATA DMA command */
229 if (qc->tf.protocol == ATA_PROT_DMA)
230 ap->ops->exec_command(ap, &qc->tf);
231}
232
233
234static u8 k2_stat_check_status(struct ata_port *ap)
235{
236	return readl((void __iomem *) ap->ioaddr.status_addr);
237}
238
239#ifdef CONFIG_PPC_OF
240/*
241 * k2_sata_proc_info
242 * inout : decides on the direction of the dataflow and the meaning of the
243 * variables
244 * buffer: If inout==FALSE data is being written to it else read from it
245 * *start: If inout==FALSE start of the valid data in the buffer
246 * offset: If inout==FALSE offset from the beginning of the imaginary file
247 * from which we start writing into the buffer
248 * length: If inout==FALSE max number of bytes to be written into the buffer
249 * else number of bytes in the buffer
250 */
251static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
252 off_t offset, int count, int inout)
253{
254 struct ata_port *ap;
255 struct device_node *np;
256 int len, index;
257
258 /* Find the ata_port */
259 ap = ata_shost_to_port(shost);
260 if (ap == NULL)
261 return 0;
262
263 /* Find the OF node for the PCI device proper */
264 np = pci_device_to_OF_node(to_pci_dev(ap->host->dev));
265 if (np == NULL)
266 return 0;
267
268 /* Match it to a port node */
269 index = (ap == ap->host->ports[0]) ? 0 : 1;
270 for (np = np->child; np != NULL; np = np->sibling) {
271 u32 *reg = (u32 *)get_property(np, "reg", NULL);
272 if (!reg)
273 continue;
274 if (index == *reg)
275 break;
276 }
277 if (np == NULL)
278 return 0;
279
280 len = sprintf(page, "devspec: %s\n", np->full_name);
281
282 return len;
283}
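/* The devspec line lets userland tie this SCSI host back to its Open
 * Firmware device-tree node, e.g. (hypothetical PowerMac path):
 *
 *	# cat /proc/scsi/sata_svw/0
 *	devspec: /ht@0,f2000000/pci@7/k2-sata-root/k2-sata@0
 */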
284#endif /* CONFIG_PPC_OF */
285
286
287static struct scsi_host_template k2_sata_sht = {
288 .module = THIS_MODULE,
289 .name = DRV_NAME,
290 .ioctl = ata_scsi_ioctl,
291 .queuecommand = ata_scsi_queuecmd,
292 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD,
295 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
296 .emulated = ATA_SHT_EMULATED,
297 .use_clustering = ATA_SHT_USE_CLUSTERING,
298 .proc_name = DRV_NAME,
299 .dma_boundary = ATA_DMA_BOUNDARY,
300 .slave_configure = ata_scsi_slave_config,
301 .slave_destroy = ata_scsi_slave_destroy,
302#ifdef CONFIG_PPC_OF
303 .proc_info = k2_sata_proc_info,
304#endif
305 .bios_param = ata_std_bios_param,
306};
307
308
309static const struct ata_port_operations k2_sata_ops = {
310 .port_disable = ata_port_disable,
311 .tf_load = k2_sata_tf_load,
312 .tf_read = k2_sata_tf_read,
313 .check_status = k2_stat_check_status,
314 .exec_command = ata_exec_command,
315 .dev_select = ata_std_dev_select,
316 .bmdma_setup = k2_bmdma_setup_mmio,
317 .bmdma_start = k2_bmdma_start_mmio,
318 .bmdma_stop = ata_bmdma_stop,
319 .bmdma_status = ata_bmdma_status,
320 .qc_prep = ata_qc_prep,
321 .qc_issue = ata_qc_issue_prot,
322 .data_xfer = ata_mmio_data_xfer,
323 .freeze = ata_bmdma_freeze,
324 .thaw = ata_bmdma_thaw,
325 .error_handler = ata_bmdma_error_handler,
326 .post_internal_cmd = ata_bmdma_post_internal_cmd,
327 .irq_handler = ata_interrupt,
328 .irq_clear = ata_bmdma_irq_clear,
329 .scr_read = k2_sata_scr_read,
330 .scr_write = k2_sata_scr_write,
331 .port_start = ata_port_start,
332 .port_stop = ata_port_stop,
333 .host_stop = ata_pci_host_stop,
334};
335
336static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
337{
338 port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET;
339 port->data_addr = base + K2_SATA_TF_DATA_OFFSET;
340 port->feature_addr =
341 port->error_addr = base + K2_SATA_TF_ERROR_OFFSET;
342 port->nsect_addr = base + K2_SATA_TF_NSECT_OFFSET;
343 port->lbal_addr = base + K2_SATA_TF_LBAL_OFFSET;
344 port->lbam_addr = base + K2_SATA_TF_LBAM_OFFSET;
345 port->lbah_addr = base + K2_SATA_TF_LBAH_OFFSET;
346 port->device_addr = base + K2_SATA_TF_DEVICE_OFFSET;
347 port->command_addr =
348 port->status_addr = base + K2_SATA_TF_CMDSTAT_OFFSET;
349 port->altstatus_addr =
350 port->ctl_addr = base + K2_SATA_TF_CTL_OFFSET;
351 port->bmdma_addr = base + K2_SATA_DMA_CMD_OFFSET;
352 port->scr_addr = base + K2_SATA_SCR_STATUS_OFFSET;
353}
354
355
356static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
357{
358 static int printed_version;
359 struct ata_probe_ent *probe_ent = NULL;
360 unsigned long base;
361 void __iomem *mmio_base;
362 int pci_dev_busy = 0;
363 int rc;
364 int i;
365
366 if (!printed_version++)
367 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
368
369 /*
370 * If this driver happens to only be useful on Apple's K2, then
371 * we should check that here as it has a normal Serverworks ID
372 */
373 rc = pci_enable_device(pdev);
374 if (rc)
375 return rc;
376 /*
377 * Check if we have resources mapped at all (second function may
378 * have been disabled by firmware)
379 */
380 if (pci_resource_len(pdev, 5) == 0)
381 return -ENODEV;
382
383 /* Request PCI regions */
384 rc = pci_request_regions(pdev, DRV_NAME);
385 if (rc) {
386 pci_dev_busy = 1;
387 goto err_out;
388 }
389
390 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
391 if (rc)
392 goto err_out_regions;
393 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
394 if (rc)
395 goto err_out_regions;
396
397 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
398 if (probe_ent == NULL) {
399 rc = -ENOMEM;
400 goto err_out_regions;
401 }
402
403 memset(probe_ent, 0, sizeof(*probe_ent));
404 probe_ent->dev = pci_dev_to_dev(pdev);
405 INIT_LIST_HEAD(&probe_ent->node);
406
407 mmio_base = pci_iomap(pdev, 5, 0);
408 if (mmio_base == NULL) {
409 rc = -ENOMEM;
410 goto err_out_free_ent;
411 }
412 base = (unsigned long) mmio_base;
413
414	/* Clear a magic bit in SCR1 according to Darwin; this helps
415	 * some funky Seagate drives (though so far, the firmware had
416	 * already taken care of it on the machines I had access to)
417	 */
418 writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
419 mmio_base + K2_SATA_SICR1_OFFSET);
420
421 /* Clear SATA error & interrupts we don't use */
422 writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
423 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
424
425 probe_ent->sht = &k2_sata_sht;
426 probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
427 ATA_FLAG_MMIO;
428 probe_ent->port_ops = &k2_sata_ops;
429 probe_ent->n_ports = 4;
430 probe_ent->irq = pdev->irq;
431 probe_ent->irq_flags = IRQF_SHARED;
432 probe_ent->mmio_base = mmio_base;
433
434	/* We don't care much about the PIO/UDMA masks, but the core won't
435	 * like us if we don't fill them in
436	 */
437 probe_ent->pio_mask = 0x1f;
438 probe_ent->mwdma_mask = 0x7;
439 probe_ent->udma_mask = 0x7f;
440
441	/* Different controllers have different numbers of ports - currently 4 or 8 */
442	/* All ports are on the same PCI function; the multi-function variant is
443	 * no longer available and should not be seen in any system. */
444 for (i = 0; i < ent->driver_data; i++)
445 k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET);
446
447 pci_set_master(pdev);
448
449 /* FIXME: check ata_device_add return value */
450 ata_device_add(probe_ent);
451 kfree(probe_ent);
452
453 return 0;
454
455err_out_free_ent:
456 kfree(probe_ent);
457err_out_regions:
458 pci_release_regions(pdev);
459err_out:
460 if (!pci_dev_busy)
461 pci_disable_device(pdev);
462 return rc;
463}
464
465/* 0x240 is the device ID for the Apple K2 device
466 * 0x241 is the device ID for the ServerWorks Frodo4
467 * 0x242 is the device ID for the ServerWorks Frodo8
468 * 0x24a is the device ID for the BCM5785 (aka HT1000) HT southbridge
469 *       integrated SATA controller
470 */
471static const struct pci_device_id k2_sata_pci_tbl[] = {
472 { 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
473 { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
474 { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
475 { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
476 { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
477 { }
478};
479
480
481static struct pci_driver k2_sata_pci_driver = {
482 .name = DRV_NAME,
483 .id_table = k2_sata_pci_tbl,
484 .probe = k2_sata_init_one,
485 .remove = ata_pci_remove_one,
486};
487
488
489static int __init k2_sata_init(void)
490{
491 return pci_register_driver(&k2_sata_pci_driver);
492}
493
494
495static void __exit k2_sata_exit(void)
496{
497 pci_unregister_driver(&k2_sata_pci_driver);
498}
499
500
501MODULE_AUTHOR("Benjamin Herrenschmidt");
502MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
503MODULE_LICENSE("GPL");
504MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
505MODULE_VERSION(DRV_VERSION);
506
507module_init(k2_sata_init);
508module_exit(k2_sata_exit);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
new file mode 100644
index 000000000000..091867e10ea3
--- /dev/null
+++ b/drivers/ata/sata_sx4.c
@@ -0,0 +1,1502 @@
1/*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/blkdev.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h"
47
48#define DRV_NAME "sata_sx4"
49#define DRV_VERSION "0.9"
50
51
52enum {
53 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
54
55 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
56 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
57 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
58 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
59
60 PDC_20621_SEQCTL = 0x400,
61 PDC_20621_SEQMASK = 0x480,
62 PDC_20621_GENERAL_CTL = 0x484,
63 PDC_20621_PAGE_SIZE = (32 * 1024),
64
65 /* chosen, not constant, values; we design our own DIMM mem map */
66 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
67 PDC_20621_DIMM_BASE = 0x00200000,
68 PDC_20621_DIMM_DATA = (64 * 1024),
69 PDC_DIMM_DATA_STEP = (256 * 1024),
70 PDC_DIMM_WINDOW_STEP = (8 * 1024),
71 PDC_DIMM_HOST_PRD = (6 * 1024),
72 PDC_DIMM_HOST_PKT = (128 * 0),
73 PDC_DIMM_HPKT_PRD = (128 * 1),
74 PDC_DIMM_ATA_PKT = (128 * 2),
75 PDC_DIMM_APKT_PRD = (128 * 3),
76 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
77 PDC_PAGE_WINDOW = 0x40,
78 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
79 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
80 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
81
82 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
83
84 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
85 (1<<23),
86
87 board_20621 = 0, /* FastTrak S150 SX4 */
88
89 PDC_RESET = (1 << 11), /* HDMA reset */
90
91 PDC_MAX_HDMA = 32,
92 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
93
94 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
95 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
96 PDC_MAX_DIMM_MODULE = 0x02,
97 PDC_I2C_CONTROL_OFFSET = 0x48,
98 PDC_I2C_ADDR_DATA_OFFSET = 0x4C,
99 PDC_DIMM0_CONTROL_OFFSET = 0x80,
100 PDC_DIMM1_CONTROL_OFFSET = 0x84,
101 PDC_SDRAM_CONTROL_OFFSET = 0x88,
102 PDC_I2C_WRITE = 0x00000000,
103 PDC_I2C_READ = 0x00000040,
104 PDC_I2C_START = 0x00000080,
105 PDC_I2C_MASK_INT = 0x00000020,
106 PDC_I2C_COMPLETE = 0x00010000,
107 PDC_I2C_NO_ACK = 0x00100000,
108 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
109 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
110 PDC_DIMM_SPD_ROW_NUM = 3,
111 PDC_DIMM_SPD_COLUMN_NUM = 4,
112 PDC_DIMM_SPD_MODULE_ROW = 5,
113 PDC_DIMM_SPD_TYPE = 11,
114	PDC_DIMM_SPD_REFRESH_RATE = 12,
115 PDC_DIMM_SPD_BANK_NUM = 17,
116 PDC_DIMM_SPD_CAS_LATENCY = 18,
117 PDC_DIMM_SPD_ATTRIBUTE = 21,
118 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
119 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
120 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
121 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
122 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
123 PDC_CTL_STATUS = 0x08,
124 PDC_DIMM_WINDOW_CTLR = 0x0C,
125 PDC_TIME_CONTROL = 0x3C,
126 PDC_TIME_PERIOD = 0x40,
127 PDC_TIME_COUNTER = 0x44,
128 PDC_GENERAL_CTLR = 0x484,
129 PCI_PLL_INIT = 0x8A531824,
130 PCI_X_TCOUNT = 0xEE1E5CFF
131};
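/* Per-port layout of the on-DIMM control area, as implied by the
 * constants above.  Each port owns one 8K window (PDC_DIMM_WINDOW_STEP)
 * holding its packets and S/G tables, with bulk data staged separately:
 *
 *	+0x0000	Host DMA packet		(PDC_DIMM_HOST_PKT)
 *	+0x0080	Host DMA S/G table	(PDC_DIMM_HPKT_PRD)
 *	+0x0100	ATA packet		(PDC_DIMM_ATA_PKT)
 *	+0x0180	ATA S/G table		(PDC_DIMM_APKT_PRD)
 *	+0x0200	end of header		(PDC_DIMM_HEADER_SZ)
 *	+0x1800	host PRD table		(PDC_DIMM_HOST_PRD)
 *
 * Data buffers live at PDC_20621_DIMM_DATA + port * PDC_DIMM_DATA_STEP.
 */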
132
133
134struct pdc_port_priv {
135 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
136 u8 *pkt;
137 dma_addr_t pkt_dma;
138};
139
140struct pdc_host_priv {
141 void __iomem *dimm_mmio;
142
143 unsigned int doing_hdma;
144 unsigned int hdma_prod;
145 unsigned int hdma_cons;
146 struct {
147 struct ata_queued_cmd *qc;
148 unsigned int seq;
149 unsigned long pkt_ofs;
150 } hdma[32];
151};
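/* hdma[] together with hdma_prod/hdma_cons forms a simple 32-entry ring
 * of pending host-DMA requests; PDC_HDMA_Q_MASK wraps the indices.
 * doing_hdma flags whether the single HDMA engine is currently busy.
 */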
152
153
154static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
155static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
156static void pdc_eng_timeout(struct ata_port *ap);
157static void pdc_20621_phy_reset (struct ata_port *ap);
158static int pdc_port_start(struct ata_port *ap);
159static void pdc_port_stop(struct ata_port *ap);
160static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
161static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
162static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
163static void pdc20621_host_stop(struct ata_host *host);
164static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
165static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
166static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
167 u32 device, u32 subaddr, u32 *pdata);
168static int pdc20621_prog_dimm0(struct ata_probe_ent *pe);
169static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe);
170#ifdef ATA_VERBOSE_DEBUG
171static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
172 void *psource, u32 offset, u32 size);
173#endif
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap);
177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178
179
180static struct scsi_host_template pdc_sata_sht = {
181 .module = THIS_MODULE,
182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd,
185 .can_queue = ATA_DEF_QUEUE,
186 .this_id = ATA_SHT_THIS_ID,
187 .sg_tablesize = LIBATA_MAX_PRD,
188 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
189 .emulated = ATA_SHT_EMULATED,
190 .use_clustering = ATA_SHT_USE_CLUSTERING,
191 .proc_name = DRV_NAME,
192 .dma_boundary = ATA_DMA_BOUNDARY,
193 .slave_configure = ata_scsi_slave_config,
194 .slave_destroy = ata_scsi_slave_destroy,
195 .bios_param = ata_std_bios_param,
196};
197
198static const struct ata_port_operations pdc_20621_ops = {
199 .port_disable = ata_port_disable,
200 .tf_load = pdc_tf_load_mmio,
201 .tf_read = ata_tf_read,
202 .check_status = ata_check_status,
203 .exec_command = pdc_exec_command_mmio,
204 .dev_select = ata_std_dev_select,
205 .phy_reset = pdc_20621_phy_reset,
206 .qc_prep = pdc20621_qc_prep,
207 .qc_issue = pdc20621_qc_issue_prot,
208 .data_xfer = ata_mmio_data_xfer,
209 .eng_timeout = pdc_eng_timeout,
210 .irq_handler = pdc20621_interrupt,
211 .irq_clear = pdc20621_irq_clear,
212 .port_start = pdc_port_start,
213 .port_stop = pdc_port_stop,
214 .host_stop = pdc20621_host_stop,
215};
216
217static const struct ata_port_info pdc_port_info[] = {
218 /* board_20621 */
219 {
220 .sht = &pdc_sata_sht,
221 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
222 ATA_FLAG_SRST | ATA_FLAG_MMIO |
223 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
224 .pio_mask = 0x1f, /* pio0-4 */
225 .mwdma_mask = 0x07, /* mwdma0-2 */
226 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
227 .port_ops = &pdc_20621_ops,
228 },
229
230};
231
232static const struct pci_device_id pdc_sata_pci_tbl[] = {
233 { PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
234 board_20621 },
235 { } /* terminate list */
236};
237
238
239static struct pci_driver pdc_sata_pci_driver = {
240 .name = DRV_NAME,
241 .id_table = pdc_sata_pci_tbl,
242 .probe = pdc_sata_init_one,
243 .remove = ata_pci_remove_one,
244};
245
246
247static void pdc20621_host_stop(struct ata_host *host)
248{
249 struct pci_dev *pdev = to_pci_dev(host->dev);
250 struct pdc_host_priv *hpriv = host->private_data;
251 void __iomem *dimm_mmio = hpriv->dimm_mmio;
252
253 pci_iounmap(pdev, dimm_mmio);
254 kfree(hpriv);
255
256 pci_iounmap(pdev, host->mmio_base);
257}
258
259static int pdc_port_start(struct ata_port *ap)
260{
261 struct device *dev = ap->host->dev;
262 struct pdc_port_priv *pp;
263 int rc;
264
265 rc = ata_port_start(ap);
266 if (rc)
267 return rc;
268
269 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
270 if (!pp) {
271 rc = -ENOMEM;
272 goto err_out;
273 }
274 memset(pp, 0, sizeof(*pp));
275
276 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
277 if (!pp->pkt) {
278 rc = -ENOMEM;
279 goto err_out_kfree;
280 }
281
282 ap->private_data = pp;
283
284 return 0;
285
286err_out_kfree:
287 kfree(pp);
288err_out:
289 ata_port_stop(ap);
290 return rc;
291}
292
293
294static void pdc_port_stop(struct ata_port *ap)
295{
296 struct device *dev = ap->host->dev;
297 struct pdc_port_priv *pp = ap->private_data;
298
299 ap->private_data = NULL;
300 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
301 kfree(pp);
302 ata_port_stop(ap);
303}
304
305
306static void pdc_20621_phy_reset (struct ata_port *ap)
307{
308 VPRINTK("ENTER\n");
309 ap->cbl = ATA_CBL_SATA;
310 ata_port_probe(ap);
311 ata_bus_reset(ap);
312}
313
314static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
315 unsigned int portno,
316 unsigned int total_len)
317{
318 u32 addr;
319 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
320 u32 *buf32 = (u32 *) buf;
321
322 /* output ATA packet S/G table */
323 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
324 (PDC_DIMM_DATA_STEP * portno);
325 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
326 buf32[dw] = cpu_to_le32(addr);
327 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
328
329 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
330 PDC_20621_DIMM_BASE +
331 (PDC_DIMM_WINDOW_STEP * portno) +
332 PDC_DIMM_APKT_PRD,
333 buf32[dw], buf32[dw + 1]);
334}
335
336static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
337 unsigned int portno,
338 unsigned int total_len)
339{
340 u32 addr;
341 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
342 u32 *buf32 = (u32 *) buf;
343
344 /* output Host DMA packet S/G table */
345 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
346 (PDC_DIMM_DATA_STEP * portno);
347
348 buf32[dw] = cpu_to_le32(addr);
349 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
350
351 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
352 PDC_20621_DIMM_BASE +
353 (PDC_DIMM_WINDOW_STEP * portno) +
354 PDC_DIMM_HPKT_PRD,
355 buf32[dw], buf32[dw + 1]);
356}
357
358static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
359 unsigned int devno, u8 *buf,
360 unsigned int portno)
361{
362 unsigned int i, dw;
363 u32 *buf32 = (u32 *) buf;
364 u8 dev_reg;
365
366 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
367 (PDC_DIMM_WINDOW_STEP * portno) +
368 PDC_DIMM_APKT_PRD;
369 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
370
371 i = PDC_DIMM_ATA_PKT;
372
373 /*
374 * Set up ATA packet
375 */
376 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
377 buf[i++] = PDC_PKT_READ;
378 else if (tf->protocol == ATA_PROT_NODATA)
379 buf[i++] = PDC_PKT_NODATA;
380 else
381 buf[i++] = 0;
382 buf[i++] = 0; /* reserved */
383 buf[i++] = portno + 1; /* seq. id */
384 buf[i++] = 0xff; /* delay seq. id */
385
386 /* dimm dma S/G, and next-pkt */
387 dw = i >> 2;
388 if (tf->protocol == ATA_PROT_NODATA)
389 buf32[dw] = 0;
390 else
391 buf32[dw] = cpu_to_le32(dimm_sg);
392 buf32[dw + 1] = 0;
393 i += 8;
394
395 if (devno == 0)
396 dev_reg = ATA_DEVICE_OBS;
397 else
398 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
399
400 /* select device */
401 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
402 buf[i++] = dev_reg;
403
404 /* device control register */
405 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
406 buf[i++] = tf->ctl;
407
408 return i;
409}
410
411static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
412 unsigned int portno)
413{
414 unsigned int dw;
415 u32 tmp, *buf32 = (u32 *) buf;
416
417 unsigned int host_sg = PDC_20621_DIMM_BASE +
418 (PDC_DIMM_WINDOW_STEP * portno) +
419 PDC_DIMM_HOST_PRD;
420 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
421 (PDC_DIMM_WINDOW_STEP * portno) +
422 PDC_DIMM_HPKT_PRD;
423 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
424 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
425
426 dw = PDC_DIMM_HOST_PKT >> 2;
427
428 /*
429 * Set up Host DMA packet
430 */
431 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
432 tmp = PDC_PKT_READ;
433 else
434 tmp = 0;
435 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
436 tmp |= (0xff << 24); /* delay seq. id */
437 buf32[dw + 0] = cpu_to_le32(tmp);
438 buf32[dw + 1] = cpu_to_le32(host_sg);
439 buf32[dw + 2] = cpu_to_le32(dimm_sg);
440 buf32[dw + 3] = 0;
441
442 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
443 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
444 PDC_DIMM_HOST_PKT,
445 buf32[dw + 0],
446 buf32[dw + 1],
447 buf32[dw + 2],
448 buf32[dw + 3]);
449}
450
451static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
452{
453 struct scatterlist *sg;
454 struct ata_port *ap = qc->ap;
455 struct pdc_port_priv *pp = ap->private_data;
456 void __iomem *mmio = ap->host->mmio_base;
457 struct pdc_host_priv *hpriv = ap->host->private_data;
458 void __iomem *dimm_mmio = hpriv->dimm_mmio;
459 unsigned int portno = ap->port_no;
460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462
463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464
465 VPRINTK("ata%u: ENTER\n", ap->id);
466
467 /* hard-code chip #0 */
468 mmio += PDC_CHIP0_OFS;
469
470 /*
471 * Build S/G table
472 */
473 idx = 0;
474 ata_for_each_sg(sg, qc) {
475 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
476 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
477 total_len += sg_dma_len(sg);
478 }
479 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
480 sgt_len = idx * 4;
481
482 /*
483 * Build ATA, host DMA packets
484 */
485 pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
486 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
487
488 pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
489 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
490
491 if (qc->tf.flags & ATA_TFLAG_LBA48)
492 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
493 else
494 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
495
496 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
497
498 /* copy three S/G tables and two packets to DIMM MMIO window */
499 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
500 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
501 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
502 PDC_DIMM_HOST_PRD,
503 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
504
505 /* force host FIFO dump */
506 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
507
508 readl(dimm_mmio); /* MMIO PCI posting flush */
509
510 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
511}
512
513static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
514{
515 struct ata_port *ap = qc->ap;
516 struct pdc_port_priv *pp = ap->private_data;
517 void __iomem *mmio = ap->host->mmio_base;
518 struct pdc_host_priv *hpriv = ap->host->private_data;
519 void __iomem *dimm_mmio = hpriv->dimm_mmio;
520 unsigned int portno = ap->port_no;
521 unsigned int i;
522
523 VPRINTK("ata%u: ENTER\n", ap->id);
524
525 /* hard-code chip #0 */
526 mmio += PDC_CHIP0_OFS;
527
528 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
529
530 if (qc->tf.flags & ATA_TFLAG_LBA48)
531 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
532 else
533 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
534
535 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
536
537	/* copy the ATA packet (no S/G tables for a non-data command) to the DIMM MMIO window */
538 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
539 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
540
541 /* force host FIFO dump */
542 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
543
544 readl(dimm_mmio); /* MMIO PCI posting flush */
545
546 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
547}
548
549static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
550{
551 switch (qc->tf.protocol) {
552 case ATA_PROT_DMA:
553 pdc20621_dma_prep(qc);
554 break;
555 case ATA_PROT_NODATA:
556 pdc20621_nodata_prep(qc);
557 break;
558 default:
559 break;
560 }
561}
562
563static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
564 unsigned int seq,
565 u32 pkt_ofs)
566{
567 struct ata_port *ap = qc->ap;
568 struct ata_host *host = ap->host;
569 void __iomem *mmio = host->mmio_base;
570
571 /* hard-code chip #0 */
572 mmio += PDC_CHIP0_OFS;
573
574 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
575 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
576
577 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
578 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
579}
580
581static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
582 unsigned int seq,
583 u32 pkt_ofs)
584{
585 struct ata_port *ap = qc->ap;
586 struct pdc_host_priv *pp = ap->host->private_data;
587 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
588
589 if (!pp->doing_hdma) {
590 __pdc20621_push_hdma(qc, seq, pkt_ofs);
591 pp->doing_hdma = 1;
592 return;
593 }
594
595 pp->hdma[idx].qc = qc;
596 pp->hdma[idx].seq = seq;
597 pp->hdma[idx].pkt_ofs = pkt_ofs;
598 pp->hdma_prod++;
599}
600
601static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
602{
603 struct ata_port *ap = qc->ap;
604 struct pdc_host_priv *pp = ap->host->private_data;
605 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
606
607 /* if nothing on queue, we're done */
608 if (pp->hdma_prod == pp->hdma_cons) {
609 pp->doing_hdma = 0;
610 return;
611 }
612
613 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
614 pp->hdma[idx].pkt_ofs);
615 pp->hdma_cons++;
616}
617
618#ifdef ATA_VERBOSE_DEBUG
619static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
620{
621 struct ata_port *ap = qc->ap;
622 unsigned int port_no = ap->port_no;
623 struct pdc_host_priv *hpriv = ap->host->private_data;
624	void __iomem *dimm_mmio = hpriv->dimm_mmio;
625
626 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
627 dimm_mmio += PDC_DIMM_HOST_PKT;
628
629 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
630 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
631 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
632 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
633}
634#else
635static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
636#endif /* ATA_VERBOSE_DEBUG */
637
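/* Command flow summary: every transfer is staged through the board's
 * DIMM.  A write does (1) host DMA from system RAM into the DIMM, then
 * (2) the ATA command out of the DIMM; a read does (1) the ATA command
 * into the DIMM, then (2) host DMA from the DIMM back to system RAM.
 * Step one is kicked off here; step two happens in pdc20621_host_intr().
 */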
638static void pdc20621_packet_start(struct ata_queued_cmd *qc)
639{
640 struct ata_port *ap = qc->ap;
641 struct ata_host *host = ap->host;
642 unsigned int port_no = ap->port_no;
643 void __iomem *mmio = host->mmio_base;
644 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
645 u8 seq = (u8) (port_no + 1);
646 unsigned int port_ofs;
647
648 /* hard-code chip #0 */
649 mmio += PDC_CHIP0_OFS;
650
651 VPRINTK("ata%u: ENTER\n", ap->id);
652
653 wmb(); /* flush PRD, pkt writes */
654
655 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
656
657 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
658 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
659 seq += 4;
660
661 pdc20621_dump_hdma(qc);
662 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
663 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
664 port_ofs + PDC_DIMM_HOST_PKT,
665 port_ofs + PDC_DIMM_HOST_PKT,
666 seq);
667 } else {
668 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
669 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
670
671 writel(port_ofs + PDC_DIMM_ATA_PKT,
672 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
674 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
675 port_ofs + PDC_DIMM_ATA_PKT,
676 port_ofs + PDC_DIMM_ATA_PKT,
677 seq);
678 }
679}
680
681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{
683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA:
685 case ATA_PROT_NODATA:
686 pdc20621_packet_start(qc);
687 return 0;
688
689 case ATA_PROT_ATAPI_DMA:
690 BUG();
691 break;
692
693 default:
694 break;
695 }
696
697 return ata_qc_issue_prot(qc);
698}
699
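/* Sequence-number convention: seq 1-4 tag the ATA engine for ports 0-3
 * and seq 5-8 tag the HDMA engine for the same ports, so "i > 4" in the
 * SEQMASK scan of the interrupt handler below means the completion
 * belongs to the host-DMA half of a transaction.
 */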
700static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
701 struct ata_queued_cmd *qc,
702 unsigned int doing_hdma,
703 void __iomem *mmio)
704{
705 unsigned int port_no = ap->port_no;
706 unsigned int port_ofs =
707 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
708 u8 status;
709 unsigned int handled = 0;
710
711 VPRINTK("ENTER\n");
712
713 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
714 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
715
716 /* step two - DMA from DIMM to host */
717 if (doing_hdma) {
718 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id,
719 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
720 /* get drive status; clear intr; complete txn */
721 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
722 ata_qc_complete(qc);
723 pdc20621_pop_hdma(qc);
724 }
725
726 /* step one - exec ATA command */
727 else {
728 u8 seq = (u8) (port_no + 1 + 4);
729 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id,
730 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
731
732 /* submit hdma pkt */
733 pdc20621_dump_hdma(qc);
734 pdc20621_push_hdma(qc, seq,
735 port_ofs + PDC_DIMM_HOST_PKT);
736 }
737 handled = 1;
738
739 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
740
741 /* step one - DMA from host to DIMM */
742 if (doing_hdma) {
743 u8 seq = (u8) (port_no + 1);
744 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id,
745 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
746
747 /* submit ata pkt */
748 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
749 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
750 writel(port_ofs + PDC_DIMM_ATA_PKT,
751 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
752 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
753 }
754
755 /* step two - execute ATA command */
756 else {
757 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id,
758 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
759 /* get drive status; clear intr; complete txn */
760 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
761 ata_qc_complete(qc);
762 pdc20621_pop_hdma(qc);
763 }
764 handled = 1;
765
766 /* command completion, but no data xfer */
767 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
768
769 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
770 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
771 qc->err_mask |= ac_err_mask(status);
772 ata_qc_complete(qc);
773 handled = 1;
774
775 } else {
776 ap->stats.idle_irq++;
777 }
778
779 return handled;
780}
781
782static void pdc20621_irq_clear(struct ata_port *ap)
783{
784 struct ata_host *host = ap->host;
785 void __iomem *mmio = host->mmio_base;
786
787 mmio += PDC_CHIP0_OFS;
788
789 readl(mmio + PDC_20621_SEQMASK);
790}
791
792static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
793{
794 struct ata_host *host = dev_instance;
795 struct ata_port *ap;
796 u32 mask = 0;
797 unsigned int i, tmp, port_no;
798 unsigned int handled = 0;
799 void __iomem *mmio_base;
800
801 VPRINTK("ENTER\n");
802
803 if (!host || !host->mmio_base) {
804 VPRINTK("QUICK EXIT\n");
805 return IRQ_NONE;
806 }
807
808 mmio_base = host->mmio_base;
809
810 /* reading should also clear interrupts */
811 mmio_base += PDC_CHIP0_OFS;
812 mask = readl(mmio_base + PDC_20621_SEQMASK);
813 VPRINTK("mask == 0x%x\n", mask);
814
815 if (mask == 0xffffffff) {
816 VPRINTK("QUICK EXIT 2\n");
817 return IRQ_NONE;
818 }
819 mask &= 0xffff; /* only 16 tags possible */
820 if (!mask) {
821 VPRINTK("QUICK EXIT 3\n");
822 return IRQ_NONE;
823 }
824
825 spin_lock(&host->lock);
826
827 for (i = 1; i < 9; i++) {
828 port_no = i - 1;
829 if (port_no > 3)
830 port_no -= 4;
831 if (port_no >= host->n_ports)
832 ap = NULL;
833 else
834 ap = host->ports[port_no];
835 tmp = mask & (1 << i);
836 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
837 if (tmp && ap &&
838 !(ap->flags & ATA_FLAG_DISABLED)) {
839 struct ata_queued_cmd *qc;
840
841 qc = ata_qc_from_tag(ap, ap->active_tag);
842 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
843 handled += pdc20621_host_intr(ap, qc, (i > 4),
844 mmio_base);
845 }
846 }
847
848 spin_unlock(&host->lock);
849
850 VPRINTK("mask == 0x%x\n", mask);
851
852 VPRINTK("EXIT\n");
853
854 return IRQ_RETVAL(handled);
855}
856
857static void pdc_eng_timeout(struct ata_port *ap)
858{
859 u8 drv_stat;
860 struct ata_host *host = ap->host;
861 struct ata_queued_cmd *qc;
862 unsigned long flags;
863
864 DPRINTK("ENTER\n");
865
866 spin_lock_irqsave(&host->lock, flags);
867
868 qc = ata_qc_from_tag(ap, ap->active_tag);
869
870 switch (qc->tf.protocol) {
871 case ATA_PROT_DMA:
872 case ATA_PROT_NODATA:
873 ata_port_printk(ap, KERN_ERR, "command timeout\n");
874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
875 break;
876
877 default:
878 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
879
880 ata_port_printk(ap, KERN_ERR,
881 "unknown timeout, cmd 0x%x stat 0x%x\n",
882 qc->tf.command, drv_stat);
883
884 qc->err_mask |= ac_err_mask(drv_stat);
885 break;
886 }
887
888 spin_unlock_irqrestore(&host->lock, flags);
889 ata_eh_qc_complete(qc);
890 DPRINTK("EXIT\n");
891}
892
893static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
894{
895 WARN_ON (tf->protocol == ATA_PROT_DMA ||
896 tf->protocol == ATA_PROT_NODATA);
897 ata_tf_load(ap, tf);
898}
899
900
901static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
902{
903 WARN_ON (tf->protocol == ATA_PROT_DMA ||
904 tf->protocol == ATA_PROT_NODATA);
905 ata_exec_command(ap, tf);
906}
907
908
909static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
910{
911 port->cmd_addr = base;
912 port->data_addr = base;
913 port->feature_addr =
914 port->error_addr = base + 0x4;
915 port->nsect_addr = base + 0x8;
916 port->lbal_addr = base + 0xc;
917 port->lbam_addr = base + 0x10;
918 port->lbah_addr = base + 0x14;
919 port->device_addr = base + 0x18;
920 port->command_addr =
921 port->status_addr = base + 0x1c;
922 port->altstatus_addr =
923 port->ctl_addr = base + 0x38;
924}
925
926
927#ifdef ATA_VERBOSE_DEBUG
928static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
929 u32 offset, u32 size)
930{
931 u32 window_size;
932 u16 idx;
933 u8 page_mask;
934 long dist;
935 void __iomem *mmio = pe->mmio_base;
936 struct pdc_host_priv *hpriv = pe->private_data;
937 void __iomem *dimm_mmio = hpriv->dimm_mmio;
938
939 /* hard-code chip #0 */
940 mmio += PDC_CHIP0_OFS;
941
942 page_mask = 0x00;
943	window_size = 0x2000 * 4; /* 32K byte window */
944 idx = (u16) (offset / window_size);
945
946 writel(0x01, mmio + PDC_GENERAL_CTLR);
947 readl(mmio + PDC_GENERAL_CTLR);
948 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
949 readl(mmio + PDC_DIMM_WINDOW_CTLR);
950
951 offset -= (idx * window_size);
952 idx++;
953	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
954	      (long) (window_size - offset);
955 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
956 dist);
957
958 psource += dist;
959 size -= dist;
960	while ((long) size >= (long) window_size) {
961 writel(0x01, mmio + PDC_GENERAL_CTLR);
962 readl(mmio + PDC_GENERAL_CTLR);
963 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
964 readl(mmio + PDC_DIMM_WINDOW_CTLR);
965 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
966 window_size / 4);
967 psource += window_size;
968 size -= window_size;
969	idx++;
970 }
971
972 if (size) {
973 writel(0x01, mmio + PDC_GENERAL_CTLR);
974 readl(mmio + PDC_GENERAL_CTLR);
975 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
976 readl(mmio + PDC_DIMM_WINDOW_CTLR);
977 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
978 size / 4);
979 }
980}
981#endif
982
983
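/* The DIMM is not linearly addressable from the host: BAR 4 exposes a
 * single 32K window that is re-pointed through PDC_DIMM_WINDOW_CTLR.
 * The copy helpers below therefore split transfers into window-sized
 * chunks, sliding the window between chunks.
 */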
984static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
985 u32 offset, u32 size)
986{
987 u32 window_size;
988 u16 idx;
989 u8 page_mask;
990 long dist;
991 void __iomem *mmio = pe->mmio_base;
992 struct pdc_host_priv *hpriv = pe->private_data;
993 void __iomem *dimm_mmio = hpriv->dimm_mmio;
994
995 /* hard-code chip #0 */
996 mmio += PDC_CHIP0_OFS;
997
998 page_mask = 0x00;
999	window_size = 0x2000 * 4; /* 32K byte window */
1000 idx = (u16) (offset / window_size);
1001
1002 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1003 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1004 offset -= (idx * window_size);
1005 idx++;
1006 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1007 (long) (window_size - offset);
1008 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1009 writel(0x01, mmio + PDC_GENERAL_CTLR);
1010 readl(mmio + PDC_GENERAL_CTLR);
1011
1012 psource += dist;
1013 size -= dist;
1014	while ((long) size >= (long) window_size) {
1015 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1016 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1017 memcpy_toio(dimm_mmio, psource, window_size / 4);
1018 writel(0x01, mmio + PDC_GENERAL_CTLR);
1019 readl(mmio + PDC_GENERAL_CTLR);
1020 psource += window_size;
1021 size -= window_size;
1022	idx++;
1023 }
1024
1025 if (size) {
1026 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1027 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1028 memcpy_toio(dimm_mmio, psource, size / 4);
1029 writel(0x01, mmio + PDC_GENERAL_CTLR);
1030 readl(mmio + PDC_GENERAL_CTLR);
1031 }
1032}
1033
1034
1035static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1036 u32 subaddr, u32 *pdata)
1037{
1038 void __iomem *mmio = pe->mmio_base;
1039 u32 i2creg = 0;
1040 u32 status;
1041	u32 count = 0;
1042
1043 /* hard-code chip #0 */
1044 mmio += PDC_CHIP0_OFS;
1045
1046 i2creg |= device << 24;
1047 i2creg |= subaddr << 16;
1048
1049 /* Set the device and subaddress */
1050 writel(i2creg, mmio + PDC_I2C_ADDR_DATA_OFFSET);
1051 readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
1052
1053 /* Write Control to perform read operation, mask int */
1054 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1055 mmio + PDC_I2C_CONTROL_OFFSET);
1056
1057	for (count = 0; count <= 1000; count++) {
1058 status = readl(mmio + PDC_I2C_CONTROL_OFFSET);
1059 if (status & PDC_I2C_COMPLETE) {
1060 status = readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
1061 break;
1062 } else if (count == 1000)
1063 return 0;
1064 }
1065
1066 *pdata = (status >> 8) & 0x000000ff;
1067 return 1;
1068}
1069
1070
1071static int pdc20621_detect_dimm(struct ata_probe_ent *pe)
1072{
1073	u32 data = 0;
1074 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1075 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1076 if (data == 100)
1077 return 100;
1078 } else
1079 return 0;
1080
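	/* Per the JEDEC SPD layout, byte 9 is the SDRAM minimum cycle time
	 * at the highest CAS latency: upper nibble = ns, lower nibble =
	 * tenths of ns.  So a value <= 0x75 means <= 7.5 ns, i.e. a module
	 * that can run at 133 MHz.
	 */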
1081 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1082	if (data <= 0x75)
1083 return 133;
1084 } else
1085 return 0;
1086
1087 return 0;
1088}
1089
1090
1091static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1092{
1093 u32 spd0[50];
1094 u32 data = 0;
1095 int size, i;
1096 u8 bdimmsize;
1097 void __iomem *mmio = pe->mmio_base;
1098 static const struct {
1099 unsigned int reg;
1100 unsigned int ofs;
1101 } pdc_i2c_read_data [] = {
1102 { PDC_DIMM_SPD_TYPE, 11 },
1103	{ PDC_DIMM_SPD_REFRESH_RATE, 12 },
1104 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1105 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1106 { PDC_DIMM_SPD_ROW_NUM, 3 },
1107 { PDC_DIMM_SPD_BANK_NUM, 17 },
1108 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1109 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1110 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1111 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1112 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1113 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1114 };
1115
1116 /* hard-code chip #0 */
1117 mmio += PDC_CHIP0_OFS;
1118
1119	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1120 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1121 pdc_i2c_read_data[i].reg,
1122 &spd0[pdc_i2c_read_data[i].ofs]);
1123
1124 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1125 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1126		((((spd0[27] + 9) / 10) - 1) << 8);
1127 data |= (((((spd0[29] > spd0[28])
1128 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1129 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1130
1131 if (spd0[18] & 0x08)
1132 data |= ((0x03) << 14);
1133 else if (spd0[18] & 0x04)
1134 data |= ((0x02) << 14);
1135 else if (spd0[18] & 0x01)
1136 data |= ((0x01) << 14);
1137 else
1138 data |= (0 << 14);
1139
1140	/*
1141	   Calculate the DIMM size from bdimmsize (a power-of-two exponent)
1142	   and merge it into the module's start/end address field.
1143	*/
1144
1145 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1146 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1147 data |= (((size / 16) - 1) << 16);
1148 data |= (0 << 23);
1149 data |= 8;
1150 writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET);
1151 readl(mmio + PDC_DIMM0_CONTROL_OFFSET);
1152 return size;
1153}
1154
1155
1156static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1157{
1158 u32 data, spd0;
1159 int error, i;
1160 void __iomem *mmio = pe->mmio_base;
1161
1162 /* hard-code chip #0 */
1163 mmio += PDC_CHIP0_OFS;
1164
1165	/*
1166	  Set to default: DIMM Module Global Control Register (0x022259F1)
1167	  - DIMM Arbitration Disable (bit 20)
1168	  - DIMM Data/Control Output Driving Selection (bits 12-15)
1169	  - Refresh Enable (bit 17)
1170	*/
1171
1172 data = 0x022259F1;
1173 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1174 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1175
1176	/* Turn on ECC if the DIMM reports itself as an ECC module */
1177 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1178 PDC_DIMM_SPD_TYPE, &spd0);
1179 if (spd0 == 0x02) {
1180 data |= (0x01 << 16);
1181 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1182 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1183 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1184 }
1185
1186 /* DIMM Initialization Select/Enable (bit 18/19) */
1187 data &= (~(1<<18));
1188 data |= (1<<19);
1189 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1190
1191 error = 1;
1192 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1193 data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1194 if (!(data & (1<<19))) {
1195 error = 0;
1196 break;
1197 }
1198 msleep(i*100);
1199 }
1200 return error;
1201}
1202
1203
1204static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1205{
1206 int speed, size, length;
1207 u32 addr,spd0,pci_status;
1208 u32 tmp=0;
1209 u32 time_period=0;
1210 u32 tcount=0;
1211 u32 ticks=0;
1212 u32 clock=0;
1213 u32 fparam=0;
1214 void __iomem *mmio = pe->mmio_base;
1215
1216 /* hard-code chip #0 */
1217 mmio += PDC_CHIP0_OFS;
1218
1219 /* Initialize PLL based upon PCI Bus Frequency */
1220
1221 /* Initialize Time Period Register */
1222 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1223 time_period = readl(mmio + PDC_TIME_PERIOD);
1224 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1225
1226 /* Enable timer */
1227 writel(0x00001a0, mmio + PDC_TIME_CONTROL);
1228 readl(mmio + PDC_TIME_CONTROL);
1229
1230 /* Wait 3 seconds */
1231 msleep(3000);
1232
1233	/*
1234	   When the timer is enabled, the counter is decremented on every
1235	   internal clock cycle.
1236	*/
1237
1238 tcount = readl(mmio + PDC_TIME_COUNTER);
1239 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1240
1241	/*
1242	   If the SX4 is on a PCI-X bus, then after 3 seconds the timer
1243	   counter register should be >= (0xffffffff - 3*10^8).
1244	*/
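	/* Worked example (numbers are illustrative, not from any datasheet):
	 * a 33 MHz internal clock decrements the counter about 1.0e8 times
	 * in 3 s, so ticks ~= 1.0e8, clock = ticks / 300000 = 330 (i.e. ten
	 * times the clock in MHz), then 330 * 33 = 10890, and
	 * fparam = 1400000 / 10890 - 2 = 126, which lands in bits 22:16 of
	 * the PLL control word written below.
	 */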
1245	if (tcount >= PCI_X_TCOUNT) {
1246 ticks = (time_period - tcount);
1247 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1248
1249 clock = (ticks / 300000);
1250 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1251
1252 clock = (clock * 33);
1253 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1254
1255 /* PLL F Param (bit 22:16) */
1256 fparam = (1400000 / clock) - 2;
1257 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1258
1259 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1260 pci_status = (0x8a001824 | (fparam << 16));
1261 } else
1262 pci_status = PCI_PLL_INIT;
1263
1264 /* Initialize PLL. */
1265 VPRINTK("pci_status: 0x%x\n", pci_status);
1266 writel(pci_status, mmio + PDC_CTL_STATUS);
1267 readl(mmio + PDC_CTL_STATUS);
1268
1269 /*
1270 Read SPD of DIMM by I2C interface,
1271 and program the DIMM Module Controller.
1272 */
1273 if (!(speed = pdc20621_detect_dimm(pe))) {
1274 printk(KERN_ERR "Detect Local DIMM Fail\n");
1275 return 1; /* DIMM error */
1276 }
1277 VPRINTK("Local DIMM Speed = %d\n", speed);
1278
1279 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1280 size = pdc20621_prog_dimm0(pe);
1281 VPRINTK("Local DIMM Size = %dMB\n",size);
1282
1283 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1284 if (pdc20621_prog_dimm_global(pe)) {
1285 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1286 return 1;
1287 }
1288
1289#ifdef ATA_VERBOSE_DEBUG
1290 {
1291		u8 test_pattern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1292		'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ',
1293		'1','.','1','0',
1294		'9','8','0','3','1','6','1','2',0,0};
1295		u8 test_pattern2[40] = {0};
1296
1297		pdc20621_put_to_dimm(pe, (void *) test_pattern2, 0x10040, 40);
1298		pdc20621_put_to_dimm(pe, (void *) test_pattern2, 0x40, 40);
1299
1300		pdc20621_put_to_dimm(pe, (void *) test_pattern1, 0x10040, 40);
1301		pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x40, 40);
1302		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1303		       test_pattern2[1], &(test_pattern2[2]));
1304		pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x10040,
1305		       40);
1306		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1307		       test_pattern2[1], &(test_pattern2[2]));
1308
1309		pdc20621_put_to_dimm(pe, (void *) test_pattern1, 0x40, 40);
1310		pdc20621_get_from_dimm(pe, (void *) test_pattern2, 0x40, 40);
1311		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1312		       test_pattern2[1], &(test_pattern2[2]));
1313 }
1314#endif
1315
1316	/* ECC initialization. */
1317
1318 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1319 PDC_DIMM_SPD_TYPE, &spd0);
1320 if (spd0 == 0x02) {
1321 VPRINTK("Start ECC initialization\n");
1322 addr = 0;
1323 length = size * 1024 * 1024;
1324 while (addr < length) {
1325 pdc20621_put_to_dimm(pe, (void *) &tmp, addr,
1326 sizeof(u32));
1327 addr += sizeof(u32);
1328 }
1329 VPRINTK("Finish ECC initialization\n");
1330 }
1331 return 0;
1332}
1333
1334
1335static void pdc_20621_init(struct ata_probe_ent *pe)
1336{
1337 u32 tmp;
1338 void __iomem *mmio = pe->mmio_base;
1339
1340 /* hard-code chip #0 */
1341 mmio += PDC_CHIP0_OFS;
1342
1343 /*
1344 * Select page 0x40 for our 32k DIMM window
1345 */
1346 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1347 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1348 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1349
1350 /*
1351 * Reset Host DMA
1352 */
1353 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1354 tmp |= PDC_RESET;
1355 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1356 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1357
1358 udelay(10);
1359
1360 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1361 tmp &= ~PDC_RESET;
1362 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1363 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1364}
1365
1366static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1367{
1368 static int printed_version;
1369 struct ata_probe_ent *probe_ent = NULL;
1370 unsigned long base;
1371 void __iomem *mmio_base;
1372 void __iomem *dimm_mmio = NULL;
1373 struct pdc_host_priv *hpriv = NULL;
1374 unsigned int board_idx = (unsigned int) ent->driver_data;
1375 int pci_dev_busy = 0;
1376 int rc;
1377
1378 if (!printed_version++)
1379 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1380
1381 rc = pci_enable_device(pdev);
1382 if (rc)
1383 return rc;
1384
1385 rc = pci_request_regions(pdev, DRV_NAME);
1386 if (rc) {
1387 pci_dev_busy = 1;
1388 goto err_out;
1389 }
1390
1391 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1392 if (rc)
1393 goto err_out_regions;
1394 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1395 if (rc)
1396 goto err_out_regions;
1397
1398 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1399 if (probe_ent == NULL) {
1400 rc = -ENOMEM;
1401 goto err_out_regions;
1402 }
1403
1404 memset(probe_ent, 0, sizeof(*probe_ent));
1405 probe_ent->dev = pci_dev_to_dev(pdev);
1406 INIT_LIST_HEAD(&probe_ent->node);
1407
1408 mmio_base = pci_iomap(pdev, 3, 0);
1409 if (mmio_base == NULL) {
1410 rc = -ENOMEM;
1411 goto err_out_free_ent;
1412 }
1413 base = (unsigned long) mmio_base;
1414
1415 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1416 if (!hpriv) {
1417 rc = -ENOMEM;
1418 goto err_out_iounmap;
1419 }
1420 memset(hpriv, 0, sizeof(*hpriv));
1421
1422 dimm_mmio = pci_iomap(pdev, 4, 0);
1423 if (!dimm_mmio) {
1424 kfree(hpriv);
1425 rc = -ENOMEM;
1426 goto err_out_iounmap;
1427 }
1428
1429 hpriv->dimm_mmio = dimm_mmio;
1430
1431 probe_ent->sht = pdc_port_info[board_idx].sht;
1432 probe_ent->port_flags = pdc_port_info[board_idx].flags;
1433 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
1434 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
1435 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
1436 probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
1437
1438 probe_ent->irq = pdev->irq;
1439 probe_ent->irq_flags = IRQF_SHARED;
1440 probe_ent->mmio_base = mmio_base;
1441
1442 probe_ent->private_data = hpriv;
1443 base += PDC_CHIP0_OFS;
1444
1445 probe_ent->n_ports = 4;
1446 pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);
1447 pdc_sata_setup_port(&probe_ent->port[1], base + 0x280);
1448 pdc_sata_setup_port(&probe_ent->port[2], base + 0x300);
1449 pdc_sata_setup_port(&probe_ent->port[3], base + 0x380);
1450
1451 pci_set_master(pdev);
1452
1453 /* initialize adapter */
1454 /* initialize local dimm */
1455 if (pdc20621_dimm_init(probe_ent)) {
1456 rc = -ENOMEM;
1457 goto err_out_iounmap_dimm;
1458 }
1459 pdc_20621_init(probe_ent);
1460
1461 /* FIXME: check ata_device_add return value */
1462 ata_device_add(probe_ent);
1463 kfree(probe_ent);
1464
1465 return 0;
1466
1467err_out_iounmap_dimm: /* only get to this label if 20621 */
1468 kfree(hpriv);
1469 pci_iounmap(pdev, dimm_mmio);
1470err_out_iounmap:
1471 pci_iounmap(pdev, mmio_base);
1472err_out_free_ent:
1473 kfree(probe_ent);
1474err_out_regions:
1475 pci_release_regions(pdev);
1476err_out:
1477 if (!pci_dev_busy)
1478 pci_disable_device(pdev);
1479 return rc;
1480}
1481
1482
1483static int __init pdc_sata_init(void)
1484{
1485 return pci_register_driver(&pdc_sata_pci_driver);
1486}
1487
1488
1489static void __exit pdc_sata_exit(void)
1490{
1491 pci_unregister_driver(&pdc_sata_pci_driver);
1492}
1493
1494
1495MODULE_AUTHOR("Jeff Garzik");
1496MODULE_DESCRIPTION("Promise SATA low-level driver");
1497MODULE_LICENSE("GPL");
1498MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1499MODULE_VERSION(DRV_VERSION);
1500
1501module_init(pdc_sata_init);
1502module_exit(pdc_sata_exit);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
new file mode 100644
index 000000000000..8fc6e800011a
--- /dev/null
+++ b/drivers/ata/sata_uli.c
@@ -0,0 +1,300 @@
1/*
2 * sata_uli.c - ULi Electronics SATA
3 *
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; see the file COPYING. If not, write to
17 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 *
20 * libata documentation is available via 'make {ps|pdf}docs',
21 * as Documentation/DocBook/libata.*
22 *
23 * Hardware documentation available under NDA.
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/device.h>
35#include <scsi/scsi_host.h>
36#include <linux/libata.h>
37
38#define DRV_NAME "sata_uli"
39#define DRV_VERSION "1.0"
40
41enum {
42 uli_5289 = 0,
43 uli_5287 = 1,
44 uli_5281 = 2,
45
46 uli_max_ports = 4,
47
48 /* PCI configuration registers */
49 ULI5287_BASE = 0x90, /* sata0 phy SCR registers */
50 ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */
51 ULI5281_BASE = 0x60, /* sata0 phy SCR registers */
52 ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */
53};
54
55struct uli_priv {
56 unsigned int scr_cfg_addr[uli_max_ports];
57};
58
59static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
60static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
61static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
62
63static const struct pci_device_id uli_pci_tbl[] = {
64 { PCI_VENDOR_ID_AL, 0x5289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5289 },
65 { PCI_VENDOR_ID_AL, 0x5287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5287 },
66 { PCI_VENDOR_ID_AL, 0x5281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5281 },
67 { } /* terminate list */
68};
69
70
71static struct pci_driver uli_pci_driver = {
72 .name = DRV_NAME,
73 .id_table = uli_pci_tbl,
74 .probe = uli_init_one,
75 .remove = ata_pci_remove_one,
76};
77
78static struct scsi_host_template uli_sht = {
79 .module = THIS_MODULE,
80 .name = DRV_NAME,
81 .ioctl = ata_scsi_ioctl,
82 .queuecommand = ata_scsi_queuecmd,
83 .can_queue = ATA_DEF_QUEUE,
84 .this_id = ATA_SHT_THIS_ID,
85 .sg_tablesize = LIBATA_MAX_PRD,
86 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
87 .emulated = ATA_SHT_EMULATED,
88 .use_clustering = ATA_SHT_USE_CLUSTERING,
89 .proc_name = DRV_NAME,
90 .dma_boundary = ATA_DMA_BOUNDARY,
91 .slave_configure = ata_scsi_slave_config,
92 .slave_destroy = ata_scsi_slave_destroy,
93 .bios_param = ata_std_bios_param,
94};
95
96static const struct ata_port_operations uli_ops = {
97 .port_disable = ata_port_disable,
98
99 .tf_load = ata_tf_load,
100 .tf_read = ata_tf_read,
101 .check_status = ata_check_status,
102 .exec_command = ata_exec_command,
103 .dev_select = ata_std_dev_select,
104
105 .bmdma_setup = ata_bmdma_setup,
106 .bmdma_start = ata_bmdma_start,
107 .bmdma_stop = ata_bmdma_stop,
108 .bmdma_status = ata_bmdma_status,
109 .qc_prep = ata_qc_prep,
110 .qc_issue = ata_qc_issue_prot,
111 .data_xfer = ata_pio_data_xfer,
112
113 .freeze = ata_bmdma_freeze,
114 .thaw = ata_bmdma_thaw,
115 .error_handler = ata_bmdma_error_handler,
116 .post_internal_cmd = ata_bmdma_post_internal_cmd,
117
118 .irq_handler = ata_interrupt,
119 .irq_clear = ata_bmdma_irq_clear,
120
121 .scr_read = uli_scr_read,
122 .scr_write = uli_scr_write,
123
124 .port_start = ata_port_start,
125 .port_stop = ata_port_stop,
126 .host_stop = ata_host_stop,
127};
128
129static struct ata_port_info uli_port_info = {
130 .sht = &uli_sht,
131 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
132 .pio_mask = 0x1f, /* pio0-4 */
133 .udma_mask = 0x7f, /* udma0-6 */
134 .port_ops = &uli_ops,
135};
136
137
138MODULE_AUTHOR("Peer Chen");
139MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
140MODULE_LICENSE("GPL");
141MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
142MODULE_VERSION(DRV_VERSION);
143
144static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
145{
146 struct uli_priv *hpriv = ap->host->private_data;
147 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
148}
149
150static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
151{
152 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
153 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
154 u32 val;
155
156 pci_read_config_dword(pdev, cfg_addr, &val);
157 return val;
158}
159
160static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
161{
162 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
163 unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
164
165 pci_write_config_dword(pdev, cfg_addr, val);
166}
167
168static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg)
169{
170 if (sc_reg > SCR_CONTROL)
171 return 0xffffffffU;
172
173 return uli_scr_cfg_read(ap, sc_reg);
174}
175
176static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
177{
178 if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
179 return;
180
181 uli_scr_cfg_write(ap, sc_reg, val);
182}
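The SCR registers on these ULi parts live in PCI configuration space rather
than in a BAR, so the accessors above wrap pci_read/write_config_dword()
around simple base-plus-stride arithmetic. A small runnable model of that
arithmetic, assuming the 5281 layout from the enum (the 5287 assigns ports
2 and 3 separately, as the probe routine below shows):

    #include <stdio.h>

    #define ULI5281_BASE 0x60       /* sata0 phy SCR registers */
    #define ULI5281_OFFS 0x60       /* offset from sata0 -> sata1 phy regs */

    /* SCR indices as libata defines them. */
    enum { SCR_STATUS, SCR_ERROR, SCR_CONTROL };

    /* Mirrors get_scr_cfg_addr(): per-port base plus 4 bytes per register. */
    static unsigned int scr_cfg_addr(unsigned int port, unsigned int sc_reg)
    {
            return ULI5281_BASE + port * ULI5281_OFFS + 4 * sc_reg;
    }

    int main(void)
    {
            /* Port 1 SControl: 0x60 + 0x60 + 8 = 0xc8. */
            printf("%#x\n", scr_cfg_addr(1, SCR_CONTROL));
            return 0;
    }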
183
184static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
185{
186 static int printed_version;
187 struct ata_probe_ent *probe_ent;
188 struct ata_port_info *ppi;
189 int rc;
190 unsigned int board_idx = (unsigned int) ent->driver_data;
191 int pci_dev_busy = 0;
192 struct uli_priv *hpriv;
193
194 if (!printed_version++)
195 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
196
197 rc = pci_enable_device(pdev);
198 if (rc)
199 return rc;
200
201 rc = pci_request_regions(pdev, DRV_NAME);
202 if (rc) {
203 pci_dev_busy = 1;
204 goto err_out;
205 }
206
207 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
208 if (rc)
209 goto err_out_regions;
210 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
211 if (rc)
212 goto err_out_regions;
213
214 ppi = &uli_port_info;
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) {
217 rc = -ENOMEM;
218 goto err_out_regions;
219 }
220
221 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
222 if (!hpriv) {
223 rc = -ENOMEM;
224 goto err_out_probe_ent;
225 }
226
227 probe_ent->private_data = hpriv;
228
229 switch (board_idx) {
230 case uli_5287:
231 hpriv->scr_cfg_addr[0] = ULI5287_BASE;
232 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
233 probe_ent->n_ports = 4;
234
235 probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
236 probe_ent->port[2].altstatus_addr =
237 probe_ent->port[2].ctl_addr =
238 (pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
239 probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
240 hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
241
242 probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
243 probe_ent->port[3].altstatus_addr =
244 probe_ent->port[3].ctl_addr =
245 (pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
246 probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
247 hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
248
249 ata_std_ports(&probe_ent->port[2]);
250 ata_std_ports(&probe_ent->port[3]);
251 break;
252
253 case uli_5289:
254 hpriv->scr_cfg_addr[0] = ULI5287_BASE;
255 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
256 break;
257
258 case uli_5281:
259 hpriv->scr_cfg_addr[0] = ULI5281_BASE;
260 hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
261 break;
262
263 default:
264 BUG();
265 break;
266 }
267
268 pci_set_master(pdev);
269 pci_intx(pdev, 1);
270
271 /* FIXME: check ata_device_add return value */
272 ata_device_add(probe_ent);
273 kfree(probe_ent);
274
275 return 0;
276
277err_out_probe_ent:
278 kfree(probe_ent);
279err_out_regions:
280 pci_release_regions(pdev);
281err_out:
282 if (!pci_dev_busy)
283 pci_disable_device(pdev);
284 return rc;
285
286}
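The 5287 case in the switch above is the only irregular one: ports 0/1 use
the usual 0/1 stride in config space, while ports 2/3 jump to 4*OFFS and
5*OFFS. A runnable sketch of just that mapping, using the constants from the
enum at the top of the file:

    #include <stdio.h>

    #define ULI5287_BASE 0x90
    #define ULI5287_OFFS 0x10

    /* Stride multipliers taken from the probe code: 0, 1, then 4 and 5. */
    static const unsigned int mult[4] = { 0, 1, 4, 5 };

    int main(void)
    {
            unsigned int port;

            for (port = 0; port < 4; port++)
                    printf("port %u: SCR cfg base %#x\n", port,
                           ULI5287_BASE + ULI5287_OFFS * mult[port]);
            return 0;
    }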
287
288static int __init uli_init(void)
289{
290 return pci_register_driver(&uli_pci_driver);
291}
292
293static void __exit uli_exit(void)
294{
295 pci_unregister_driver(&uli_pci_driver);
296}
297
298
299module_init(uli_init);
300module_exit(uli_exit);
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
new file mode 100644
index 000000000000..7f087aef99de
--- /dev/null
+++ b/drivers/ata/sata_via.c
@@ -0,0 +1,502 @@
1/*
2 * sata_via.c - VIA Serial ATA controllers
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 *		    on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available under NDA.
31 *
32 *
33 * To-do list:
34 * - VT6421 PATA support
35 *
36 */
37
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/pci.h>
41#include <linux/init.h>
42#include <linux/blkdev.h>
43#include <linux/delay.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47#include <asm/io.h>
48
49#define DRV_NAME "sata_via"
50#define DRV_VERSION "2.0"
51
52enum board_ids_enum {
53 vt6420,
54 vt6421,
55};
56
57enum {
58 SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
59 SATA_INT_GATE = 0x41, /* SATA interrupt gating */
60 SATA_NATIVE_MODE = 0x42, /* Native mode enable */
61 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */
62
63 PORT0 = (1 << 1),
64 PORT1 = (1 << 0),
65 ALL_PORTS = PORT0 | PORT1,
66 N_PORTS = 2,
67
68 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
69
70 SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
71 SATA_2DEV = (1 << 5), /* SATA is master/slave */
72};
73
74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
75static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
77static void vt6420_error_handler(struct ata_port *ap);
78
79static const struct pci_device_id svia_pci_tbl[] = {
80 { 0x1106, 0x0591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
81 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
82 { 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 },
83
84 { } /* terminate list */
85};
86
87static struct pci_driver svia_pci_driver = {
88 .name = DRV_NAME,
89 .id_table = svia_pci_tbl,
90 .probe = svia_init_one,
91 .remove = ata_pci_remove_one,
92};
93
94static struct scsi_host_template svia_sht = {
95 .module = THIS_MODULE,
96 .name = DRV_NAME,
97 .ioctl = ata_scsi_ioctl,
98 .queuecommand = ata_scsi_queuecmd,
99 .can_queue = ATA_DEF_QUEUE,
100 .this_id = ATA_SHT_THIS_ID,
101 .sg_tablesize = LIBATA_MAX_PRD,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING,
105 .proc_name = DRV_NAME,
106 .dma_boundary = ATA_DMA_BOUNDARY,
107 .slave_configure = ata_scsi_slave_config,
108 .slave_destroy = ata_scsi_slave_destroy,
109 .bios_param = ata_std_bios_param,
110};
111
112static const struct ata_port_operations vt6420_sata_ops = {
113 .port_disable = ata_port_disable,
114
115 .tf_load = ata_tf_load,
116 .tf_read = ata_tf_read,
117 .check_status = ata_check_status,
118 .exec_command = ata_exec_command,
119 .dev_select = ata_std_dev_select,
120
121 .bmdma_setup = ata_bmdma_setup,
122 .bmdma_start = ata_bmdma_start,
123 .bmdma_stop = ata_bmdma_stop,
124 .bmdma_status = ata_bmdma_status,
125
126 .qc_prep = ata_qc_prep,
127 .qc_issue = ata_qc_issue_prot,
128 .data_xfer = ata_pio_data_xfer,
129
130 .freeze = ata_bmdma_freeze,
131 .thaw = ata_bmdma_thaw,
132 .error_handler = vt6420_error_handler,
133 .post_internal_cmd = ata_bmdma_post_internal_cmd,
134
135 .irq_handler = ata_interrupt,
136 .irq_clear = ata_bmdma_irq_clear,
137
138 .port_start = ata_port_start,
139 .port_stop = ata_port_stop,
140 .host_stop = ata_host_stop,
141};
142
143static const struct ata_port_operations vt6421_sata_ops = {
144 .port_disable = ata_port_disable,
145
146 .tf_load = ata_tf_load,
147 .tf_read = ata_tf_read,
148 .check_status = ata_check_status,
149 .exec_command = ata_exec_command,
150 .dev_select = ata_std_dev_select,
151
152 .bmdma_setup = ata_bmdma_setup,
153 .bmdma_start = ata_bmdma_start,
154 .bmdma_stop = ata_bmdma_stop,
155 .bmdma_status = ata_bmdma_status,
156
157 .qc_prep = ata_qc_prep,
158 .qc_issue = ata_qc_issue_prot,
159 .data_xfer = ata_pio_data_xfer,
160
161 .freeze = ata_bmdma_freeze,
162 .thaw = ata_bmdma_thaw,
163 .error_handler = ata_bmdma_error_handler,
164 .post_internal_cmd = ata_bmdma_post_internal_cmd,
165
166 .irq_handler = ata_interrupt,
167 .irq_clear = ata_bmdma_irq_clear,
168
169 .scr_read = svia_scr_read,
170 .scr_write = svia_scr_write,
171
172 .port_start = ata_port_start,
173 .port_stop = ata_port_stop,
174 .host_stop = ata_host_stop,
175};
176
177static struct ata_port_info vt6420_port_info = {
178 .sht = &svia_sht,
179 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
180 .pio_mask = 0x1f,
181 .mwdma_mask = 0x07,
182 .udma_mask = 0x7f,
183 .port_ops = &vt6420_sata_ops,
184};
185
186MODULE_AUTHOR("Jeff Garzik");
187MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
188MODULE_LICENSE("GPL");
189MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
190MODULE_VERSION(DRV_VERSION);
191
192static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg)
193{
194 if (sc_reg > SCR_CONTROL)
195 return 0xffffffffU;
196 return inl(ap->ioaddr.scr_addr + (4 * sc_reg));
197}
198
199static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
200{
201 if (sc_reg > SCR_CONTROL)
202 return;
203 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
204}
205
206/**
207 * vt6420_prereset - prereset for vt6420
208 * @ap: target ATA port
209 *
210 * SCR registers on vt6420 are unreliable and may hang the
211 * whole machine if they are accessed with the wrong timing.
212 * To avoid such a catastrophe, vt6420 doesn't provide generic
213 * SCR access operations; it uses SStatus and SControl only
214 * during boot probing, in a controlled way.
215 *
216 * As the old (pre-EH-update) probing code is proven to work, we
217 * strictly follow its access pattern.
218 *
219 * LOCKING:
220 * Kernel thread context (may sleep)
221 *
222 * RETURNS:
223 * 0 on success, -errno otherwise.
224 */
225static int vt6420_prereset(struct ata_port *ap)
226{
227 struct ata_eh_context *ehc = &ap->eh_context;
228 unsigned long timeout = jiffies + (HZ * 5);
229 u32 sstatus, scontrol;
230 int online;
231
232 /* don't do any SCR stuff if we're not loading */
233	if (!(ap->pflags & ATA_PFLAG_LOADING))
234 goto skip_scr;
235
236 /* Resume phy. This is the old resume sequence from
237 * __sata_phy_reset().
238 */
239 svia_scr_write(ap, SCR_CONTROL, 0x300);
240 svia_scr_read(ap, SCR_CONTROL); /* flush */
241
242 /* wait for phy to become ready, if necessary */
243 do {
244 msleep(200);
245 if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1)
246 break;
247 } while (time_before(jiffies, timeout));
248
249 /* open code sata_print_link_status() */
250 sstatus = svia_scr_read(ap, SCR_STATUS);
251 scontrol = svia_scr_read(ap, SCR_CONTROL);
252
253 online = (sstatus & 0xf) == 0x3;
254
255 ata_port_printk(ap, KERN_INFO,
256 "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
257 online ? "up" : "down", sstatus, scontrol);
258
259 /* SStatus is read one more time */
260 svia_scr_read(ap, SCR_STATUS);
261
262 if (!online) {
263 /* tell EH to bail */
264 ehc->i.action &= ~ATA_EH_RESET_MASK;
265 return 0;
266 }
267
268 skip_scr:
269 /* wait for !BSY */
270 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
271
272 return 0;
273}
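Everything in the prereset routine keys off the DET field, the low nibble of
SStatus. A minimal sketch of that decoding, with the DET values as the SATA
spec defines them (1 means a device is detected but phy communication is not
yet established, which is why the wait loop keeps polling; 3 means
communication is up, which is what "online" tests):

    #include <stdio.h>

    /* DET field (SStatus bits 3:0): 0 = no device, 1 = device present but
     * no phy communication yet, 3 = device present and phy communication
     * established, 4 = phy offline.
     */
    static int link_online(unsigned int sstatus)
    {
            return (sstatus & 0xf) == 0x3;
    }

    int main(void)
    {
            printf("%d %d\n", link_online(0x113), link_online(0x001));
            return 0;
    }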
274
275static void vt6420_error_handler(struct ata_port *ap)
276{
277 return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset,
278 NULL, ata_std_postreset);
279}
280
281static const unsigned int svia_bar_sizes[] = {
282 8, 4, 8, 4, 16, 256
283};
284
285static const unsigned int vt6421_bar_sizes[] = {
286 16, 16, 16, 16, 32, 128
287};
288
289static unsigned long svia_scr_addr(unsigned long addr, unsigned int port)
290{
291 return addr + (port * 128);
292}
293
294static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port)
295{
296 return addr + (port * 64);
297}
298
299static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
300 struct pci_dev *pdev,
301 unsigned int port)
302{
303 unsigned long reg_addr = pci_resource_start(pdev, port);
304 unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8);
305 unsigned long scr_addr;
306
307 probe_ent->port[port].cmd_addr = reg_addr;
308 probe_ent->port[port].altstatus_addr =
309 probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS;
310 probe_ent->port[port].bmdma_addr = bmdma_addr;
311
312 scr_addr = vt6421_scr_addr(pci_resource_start(pdev, 5), port);
313 probe_ent->port[port].scr_addr = scr_addr;
314
315 ata_std_ports(&probe_ent->port[port]);
316}
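vt6421_init_addrs() gives each port its own taskfile BAR, a slice of the
shared bmdma BAR at port*8, and a 64-byte SCR window in BAR 5. A runnable
model of the address math, using made-up BAR bases in place of
pci_resource_start() and assuming ATA_PCI_CTL_OFS has its usual libata value
of 2:

    #include <stdio.h>

    /* Hypothetical BAR bases standing in for pci_resource_start(pdev, i). */
    static const unsigned long bar[6] = {
            0xd000, 0xd100, 0xd200, 0xd300, 0xd400, 0xd500
    };

    #define ATA_PCI_CTL_OFS 2       /* assumed value of the libata constant */

    int main(void)
    {
            unsigned int port;

            for (port = 0; port < 2; port++)
                    printf("port %u: cmd %#lx ctl %#lx bmdma %#lx scr %#lx\n",
                           port,
                           bar[port],                   /* taskfile: own BAR */
                           (bar[port] + 8) | ATA_PCI_CTL_OFS,
                           bar[4] + port * 8,           /* shared bmdma BAR */
                           bar[5] + port * 64);         /* 64 bytes of SCRs */
            return 0;
    }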
317
318static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
319{
320 struct ata_probe_ent *probe_ent;
321 struct ata_port_info *ppi = &vt6420_port_info;
322
323 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
324 if (!probe_ent)
325 return NULL;
326
327 probe_ent->port[0].scr_addr =
328 svia_scr_addr(pci_resource_start(pdev, 5), 0);
329 probe_ent->port[1].scr_addr =
330 svia_scr_addr(pci_resource_start(pdev, 5), 1);
331
332 return probe_ent;
333}
334
335static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
336{
337 struct ata_probe_ent *probe_ent;
338 unsigned int i;
339
340 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
341 if (!probe_ent)
342 return NULL;
343
344 memset(probe_ent, 0, sizeof(*probe_ent));
345 probe_ent->dev = pci_dev_to_dev(pdev);
346 INIT_LIST_HEAD(&probe_ent->node);
347
348 probe_ent->sht = &svia_sht;
349 probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
350 probe_ent->port_ops = &vt6421_sata_ops;
351 probe_ent->n_ports = N_PORTS;
352 probe_ent->irq = pdev->irq;
353 probe_ent->irq_flags = IRQF_SHARED;
354 probe_ent->pio_mask = 0x1f;
355 probe_ent->mwdma_mask = 0x07;
356 probe_ent->udma_mask = 0x7f;
357
358 for (i = 0; i < N_PORTS; i++)
359 vt6421_init_addrs(probe_ent, pdev, i);
360
361 return probe_ent;
362}
363
364static void svia_configure(struct pci_dev *pdev)
365{
366 u8 tmp8;
367
368 pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
369 dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
370 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
371
372 /* make sure SATA channels are enabled */
373 pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
374 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
375 dev_printk(KERN_DEBUG, &pdev->dev,
376 "enabling SATA channels (0x%x)\n",
377 (int) tmp8);
378 tmp8 |= ALL_PORTS;
379 pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
380 }
381
382	/* make sure interrupts for each channel are sent to us */
383 pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
384 if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
385 dev_printk(KERN_DEBUG, &pdev->dev,
386 "enabling SATA channel interrupts (0x%x)\n",
387 (int) tmp8);
388 tmp8 |= ALL_PORTS;
389 pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
390 }
391
392 /* make sure native mode is enabled */
393 pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
394 if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
395 dev_printk(KERN_DEBUG, &pdev->dev,
396 "enabling SATA channel native mode (0x%x)\n",
397 (int) tmp8);
398 tmp8 |= NATIVE_MODE_ALL;
399 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
400 }
401}
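svia_configure() applies the same read-modify-write shape to all three config
registers: read the byte, set the wanted bits only if some are missing, and
write the byte back, leaving every other bit untouched. A minimal model of
that helper shape:

    #include <stdio.h>

    #define PORT0           (1 << 1)
    #define PORT1           (1 << 0)
    #define ALL_PORTS       (PORT0 | PORT1)

    /* Models the pattern used for SATA_CHAN_ENAB, SATA_INT_GATE and
     * SATA_NATIVE_MODE: only OR in the bits we want, never clear others.
     */
    static unsigned char enable_bits(unsigned char reg, unsigned char want)
    {
            if ((reg & want) != want)
                    reg |= want;
            return reg;
    }

    int main(void)
    {
            printf("%#x\n", enable_bits(0x01, ALL_PORTS));  /* -> 0x3 */
            return 0;
    }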
402
403static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
404{
405 static int printed_version;
406 unsigned int i;
407 int rc;
408 struct ata_probe_ent *probe_ent;
409 int board_id = (int) ent->driver_data;
410 const int *bar_sizes;
411 int pci_dev_busy = 0;
412 u8 tmp8;
413
414 if (!printed_version++)
415 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
416
417 rc = pci_enable_device(pdev);
418 if (rc)
419 return rc;
420
421 rc = pci_request_regions(pdev, DRV_NAME);
422 if (rc) {
423 pci_dev_busy = 1;
424 goto err_out;
425 }
426
427 if (board_id == vt6420) {
428 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
429 if (tmp8 & SATA_2DEV) {
430 dev_printk(KERN_ERR, &pdev->dev,
431 "SATA master/slave not supported (0x%x)\n",
432 (int) tmp8);
433 rc = -EIO;
434 goto err_out_regions;
435 }
436
437 bar_sizes = &svia_bar_sizes[0];
438 } else {
439 bar_sizes = &vt6421_bar_sizes[0];
440 }
441
442 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
443 if ((pci_resource_start(pdev, i) == 0) ||
444 (pci_resource_len(pdev, i) < bar_sizes[i])) {
445 dev_printk(KERN_ERR, &pdev->dev,
446 "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
447 i,
448 (unsigned long long)pci_resource_start(pdev, i),
449 (unsigned long long)pci_resource_len(pdev, i));
450 rc = -ENODEV;
451 goto err_out_regions;
452 }
453
454 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
455 if (rc)
456 goto err_out_regions;
457 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
458 if (rc)
459 goto err_out_regions;
460
461 if (board_id == vt6420)
462 probe_ent = vt6420_init_probe_ent(pdev);
463 else
464 probe_ent = vt6421_init_probe_ent(pdev);
465
466 if (!probe_ent) {
467 dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
468 rc = -ENOMEM;
469 goto err_out_regions;
470 }
471
472 svia_configure(pdev);
473
474 pci_set_master(pdev);
475
476 /* FIXME: check ata_device_add return value */
477 ata_device_add(probe_ent);
478 kfree(probe_ent);
479
480 return 0;
481
482err_out_regions:
483 pci_release_regions(pdev);
484err_out:
485 if (!pci_dev_busy)
486 pci_disable_device(pdev);
487 return rc;
488}
489
490static int __init svia_init(void)
491{
492 return pci_register_driver(&svia_pci_driver);
493}
494
495static void __exit svia_exit(void)
496{
497 pci_unregister_driver(&svia_pci_driver);
498}
499
500module_init(svia_init);
501module_exit(svia_exit);
502
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
new file mode 100644
index 000000000000..d0d92f33de54
--- /dev/null
+++ b/drivers/ata/sata_vsc.c
@@ -0,0 +1,482 @@
1/*
2 * sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
3 *
4 * Maintained by: Jeremy Higdon @ SGI
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004 SGI
9 *
10 * Bits from Jeff Garzik, Copyright RedHat, Inc.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; see the file COPYING. If not, write to
25 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 *
28 * libata documentation is available via 'make {ps|pdf}docs',
29 * as Documentation/DocBook/libata.*
30 *
31 * Vitesse hardware documentation presumably available under NDA.
32 * Intel 31244 (same hardware interface) documentation presumably
33 * available from http://developer.intel.com/
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/dma-mapping.h>
45#include <linux/device.h>
46#include <scsi/scsi_host.h>
47#include <linux/libata.h>
48
49#define DRV_NAME "sata_vsc"
50#define DRV_VERSION "2.0"
51
52enum {
53 /* Interrupt register offsets (from chip base address) */
54 VSC_SATA_INT_STAT_OFFSET = 0x00,
55 VSC_SATA_INT_MASK_OFFSET = 0x04,
56
57 /* Taskfile registers offsets */
58 VSC_SATA_TF_CMD_OFFSET = 0x00,
59 VSC_SATA_TF_DATA_OFFSET = 0x00,
60 VSC_SATA_TF_ERROR_OFFSET = 0x04,
61 VSC_SATA_TF_FEATURE_OFFSET = 0x06,
62 VSC_SATA_TF_NSECT_OFFSET = 0x08,
63 VSC_SATA_TF_LBAL_OFFSET = 0x0c,
64 VSC_SATA_TF_LBAM_OFFSET = 0x10,
65 VSC_SATA_TF_LBAH_OFFSET = 0x14,
66 VSC_SATA_TF_DEVICE_OFFSET = 0x18,
67 VSC_SATA_TF_STATUS_OFFSET = 0x1c,
68 VSC_SATA_TF_COMMAND_OFFSET = 0x1d,
69 VSC_SATA_TF_ALTSTATUS_OFFSET = 0x28,
70 VSC_SATA_TF_CTL_OFFSET = 0x29,
71
72 /* DMA base */
73 VSC_SATA_UP_DESCRIPTOR_OFFSET = 0x64,
74 VSC_SATA_UP_DATA_BUFFER_OFFSET = 0x6C,
75 VSC_SATA_DMA_CMD_OFFSET = 0x70,
76
77 /* SCRs base */
78 VSC_SATA_SCR_STATUS_OFFSET = 0x100,
79 VSC_SATA_SCR_ERROR_OFFSET = 0x104,
80 VSC_SATA_SCR_CONTROL_OFFSET = 0x108,
81
82 /* Port stride */
83 VSC_SATA_PORT_OFFSET = 0x200,
84
85 /* Error interrupt status bit offsets */
86 VSC_SATA_INT_ERROR_CRC = 0x40,
87 VSC_SATA_INT_ERROR_T = 0x20,
88 VSC_SATA_INT_ERROR_P = 0x10,
89 VSC_SATA_INT_ERROR_R = 0x8,
90 VSC_SATA_INT_ERROR_E = 0x4,
91 VSC_SATA_INT_ERROR_M = 0x2,
92 VSC_SATA_INT_PHY_CHANGE = 0x1,
93 VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC | VSC_SATA_INT_ERROR_T | \
94 VSC_SATA_INT_ERROR_P | VSC_SATA_INT_ERROR_R | \
95 VSC_SATA_INT_ERROR_E | VSC_SATA_INT_ERROR_M | \
96 VSC_SATA_INT_PHY_CHANGE),
97};
98
99
100#define is_vsc_sata_int_err(port_idx, int_status) \
101 (int_status & (VSC_SATA_INT_ERROR << (8 * port_idx)))
102
103
104static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
105{
106 if (sc_reg > SCR_CONTROL)
107 return 0xffffffffU;
108 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
109}
110
111
112static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
113 u32 val)
114{
115 if (sc_reg > SCR_CONTROL)
116 return;
117 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
118}
119
120
121static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
122{
123 void __iomem *mask_addr;
124 u8 mask;
125
126 mask_addr = ap->host->mmio_base +
127 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
128 mask = readb(mask_addr);
129 if (ctl & ATA_NIEN)
130 mask |= 0x80;
131 else
132 mask &= 0x7F;
133 writeb(mask, mask_addr);
134}
135
136
137static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
138{
139 struct ata_ioports *ioaddr = &ap->ioaddr;
140 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
141
142 /*
143 * The only thing the ctl register is used for is SRST.
144 * That is not enabled or disabled via tf_load.
145 * However, if ATA_NIEN is changed, then we need to change the interrupt register.
146 */
147 if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
148 ap->last_ctl = tf->ctl;
149 vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
150 }
151 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
152 writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
153 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
154 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
155 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
156 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
157 } else if (is_addr) {
158 writew(tf->feature, ioaddr->feature_addr);
159 writew(tf->nsect, ioaddr->nsect_addr);
160 writew(tf->lbal, ioaddr->lbal_addr);
161 writew(tf->lbam, ioaddr->lbam_addr);
162 writew(tf->lbah, ioaddr->lbah_addr);
163 }
164
165 if (tf->flags & ATA_TFLAG_DEVICE)
166 writeb(tf->device, ioaddr->device_addr);
167
168 ata_wait_idle(ap);
169}
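vsc_sata_tf_load() exploits the chip's 16-bit-wide taskfile registers to
write an LBA48 register pair in a single store: the current value goes in
the low byte and the hob (previously latched, high-order) value in the high
byte. A runnable sketch of the packing:

    #include <stdio.h>

    /* One writew() carries both halves of an LBA48 register pair. */
    static unsigned short pack_tf(unsigned char cur, unsigned char hob)
    {
            return (unsigned short)(cur | ((unsigned short)hob << 8));
    }

    int main(void)
    {
            /* nsect = 0x34, hob_nsect = 0x12 -> one store of 0x1234 */
            printf("%#x\n", pack_tf(0x34, 0x12));
            return 0;
    }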
170
171
172static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
173{
174 struct ata_ioports *ioaddr = &ap->ioaddr;
175 u16 nsect, lbal, lbam, lbah, feature;
176
177 tf->command = ata_check_status(ap);
178 tf->device = readw(ioaddr->device_addr);
179 feature = readw(ioaddr->error_addr);
180 nsect = readw(ioaddr->nsect_addr);
181 lbal = readw(ioaddr->lbal_addr);
182 lbam = readw(ioaddr->lbam_addr);
183 lbah = readw(ioaddr->lbah_addr);
184
185 tf->feature = feature;
186 tf->nsect = nsect;
187 tf->lbal = lbal;
188 tf->lbam = lbam;
189 tf->lbah = lbah;
190
191 if (tf->flags & ATA_TFLAG_LBA48) {
192 tf->hob_feature = feature >> 8;
193 tf->hob_nsect = nsect >> 8;
194 tf->hob_lbal = lbal >> 8;
195 tf->hob_lbam = lbam >> 8;
196 tf->hob_lbah = lbah >> 8;
197 }
198}
199
200
201/*
202 * vsc_sata_interrupt
203 *
204 * Read the interrupt register and dispatch handling for each port that has an interrupt pending.
205 */
206static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
207 struct pt_regs *regs)
208{
209 struct ata_host *host = dev_instance;
210 unsigned int i;
211 unsigned int handled = 0;
212 u32 int_status;
213
214 spin_lock(&host->lock);
215
216 int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET);
217
218 for (i = 0; i < host->n_ports; i++) {
219 if (int_status & ((u32) 0xFF << (8 * i))) {
220 struct ata_port *ap;
221
222 ap = host->ports[i];
223
224 if (is_vsc_sata_int_err(i, int_status)) {
225 u32 err_status;
226 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
227 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
228				if (ap) vsc_sata_scr_write(ap, SCR_ERROR, err_status);
229 handled++;
230 }
231
232 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
233 struct ata_queued_cmd *qc;
234
235 qc = ata_qc_from_tag(ap, ap->active_tag);
236 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
237 handled += ata_host_intr(ap, qc);
238 else if (is_vsc_sata_int_err(i, int_status)) {
239 /*
240 * On some chips (i.e. Intel 31244), an error
241 * interrupt will sneak in at initialization
242 * time (phy state changes). Clearing the SCR
243 * error register is not required, but it prevents
244 * the phy state change interrupts from recurring
245 * later.
246 */
247 u32 err_status;
248 err_status = vsc_sata_scr_read(ap, SCR_ERROR);
249 printk(KERN_DEBUG "%s: clearing interrupt, "
250 "status %x; sata err status %x\n",
251 __FUNCTION__,
252 int_status, err_status);
253 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
254 /* Clear interrupt status */
255 ata_chk_status(ap);
256 handled++;
257 }
258 }
259 }
260 }
261
262 spin_unlock(&host->lock);
263
264 return IRQ_RETVAL(handled);
265}
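The handler treats the 32-bit interrupt status register as four per-port
bytes, with port i's bits at 8*i; is_vsc_sata_int_err() masks the
error/phy-change bits of one such byte. A runnable model of the dispatch
arithmetic, using the VSC_SATA_INT_* values from the enum above:

    #include <stdio.h>

    #define VSC_SATA_INT_ERROR 0x7f /* OR of all error + phy-change bits */

    /* Each port owns one byte of the 32-bit interrupt status register. */
    static unsigned int port_bits(unsigned int int_status, unsigned int port)
    {
            return (int_status >> (8 * port)) & 0xff;
    }

    static int port_has_error(unsigned int int_status, unsigned int port)
    {
            return (int_status & (VSC_SATA_INT_ERROR << (8 * port))) != 0;
    }

    int main(void)
    {
            /* port 2: CRC error (0x40); port 1: phy change (0x01) */
            unsigned int st = 0x00400100;

            printf("%#x %d\n", port_bits(st, 2), port_has_error(st, 2));
            return 0;
    }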
266
267
268static struct scsi_host_template vsc_sata_sht = {
269 .module = THIS_MODULE,
270 .name = DRV_NAME,
271 .ioctl = ata_scsi_ioctl,
272 .queuecommand = ata_scsi_queuecmd,
273 .can_queue = ATA_DEF_QUEUE,
274 .this_id = ATA_SHT_THIS_ID,
275 .sg_tablesize = LIBATA_MAX_PRD,
276 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
277 .emulated = ATA_SHT_EMULATED,
278 .use_clustering = ATA_SHT_USE_CLUSTERING,
279 .proc_name = DRV_NAME,
280 .dma_boundary = ATA_DMA_BOUNDARY,
281 .slave_configure = ata_scsi_slave_config,
282 .slave_destroy = ata_scsi_slave_destroy,
283 .bios_param = ata_std_bios_param,
284};
285
286
287static const struct ata_port_operations vsc_sata_ops = {
288 .port_disable = ata_port_disable,
289 .tf_load = vsc_sata_tf_load,
290 .tf_read = vsc_sata_tf_read,
291 .exec_command = ata_exec_command,
292 .check_status = ata_check_status,
293 .dev_select = ata_std_dev_select,
294 .bmdma_setup = ata_bmdma_setup,
295 .bmdma_start = ata_bmdma_start,
296 .bmdma_stop = ata_bmdma_stop,
297 .bmdma_status = ata_bmdma_status,
298 .qc_prep = ata_qc_prep,
299 .qc_issue = ata_qc_issue_prot,
300 .data_xfer = ata_mmio_data_xfer,
301 .freeze = ata_bmdma_freeze,
302 .thaw = ata_bmdma_thaw,
303 .error_handler = ata_bmdma_error_handler,
304 .post_internal_cmd = ata_bmdma_post_internal_cmd,
305 .irq_handler = vsc_sata_interrupt,
306 .irq_clear = ata_bmdma_irq_clear,
307 .scr_read = vsc_sata_scr_read,
308 .scr_write = vsc_sata_scr_write,
309 .port_start = ata_port_start,
310 .port_stop = ata_port_stop,
311 .host_stop = ata_pci_host_stop,
312};
313
314static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
315{
316 port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
317 port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
318 port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET;
319 port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET;
320 port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET;
321 port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET;
322 port->lbam_addr = base + VSC_SATA_TF_LBAM_OFFSET;
323 port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET;
324 port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET;
325 port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET;
326 port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET;
327 port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET;
328 port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
329 port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
330 port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
331 writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
332 writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
333}
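Note the stride: the probe routine below hands this function
base + (n + 1) * VSC_SATA_PORT_OFFSET for port n, leaving the first 0x200
bytes of the BAR to the chip-global registers (the interrupt status/mask
registers at offsets 0x00/0x04 live there). A small model of that layout:

    #include <stdio.h>

    #define VSC_SATA_PORT_OFFSET 0x200

    /* Port n's register file starts one stride past the chip-global area. */
    static unsigned long port_base(unsigned long chip_base, unsigned int port)
    {
            return chip_base + (port + 1) * VSC_SATA_PORT_OFFSET;
    }

    int main(void)
    {
            printf("%#lx\n", port_base(0xf0000000UL, 0));   /* 0xf0000200 */
            return 0;
    }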
334
335
336static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
337{
338 static int printed_version;
339 struct ata_probe_ent *probe_ent = NULL;
340 unsigned long base;
341 int pci_dev_busy = 0;
342 void __iomem *mmio_base;
343 int rc;
344
345 if (!printed_version++)
346 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
347
348 rc = pci_enable_device(pdev);
349 if (rc)
350 return rc;
351
352 /*
353 * Check if we have needed resource mapped.
354 */
355 if (pci_resource_len(pdev, 0) == 0) {
356 rc = -ENODEV;
357 goto err_out;
358 }
359
360 rc = pci_request_regions(pdev, DRV_NAME);
361 if (rc) {
362 pci_dev_busy = 1;
363 goto err_out;
364 }
365
366 /*
367	 * Use a 32-bit DMA mask, because 64-bit address support is poor.
368 */
369 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
370 if (rc)
371 goto err_out_regions;
372 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
373 if (rc)
374 goto err_out_regions;
375
376 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
377 if (probe_ent == NULL) {
378 rc = -ENOMEM;
379 goto err_out_regions;
380 }
381 memset(probe_ent, 0, sizeof(*probe_ent));
382 probe_ent->dev = pci_dev_to_dev(pdev);
383 INIT_LIST_HEAD(&probe_ent->node);
384
385 mmio_base = pci_iomap(pdev, 0, 0);
386 if (mmio_base == NULL) {
387 rc = -ENOMEM;
388 goto err_out_free_ent;
389 }
390 base = (unsigned long) mmio_base;
391
392 /*
393	 * Due to a bug in the chip, the default cache line size can't be used.
394 */
395 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
396
397 probe_ent->sht = &vsc_sata_sht;
398 probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
399 ATA_FLAG_MMIO;
400 probe_ent->port_ops = &vsc_sata_ops;
401 probe_ent->n_ports = 4;
402 probe_ent->irq = pdev->irq;
403 probe_ent->irq_flags = IRQF_SHARED;
404 probe_ent->mmio_base = mmio_base;
405
406 /* We don't care much about the PIO/UDMA masks, but the core won't like us
407	 * if we don't fill them in.
408 */
409 probe_ent->pio_mask = 0x1f;
410 probe_ent->mwdma_mask = 0x07;
411 probe_ent->udma_mask = 0x7f;
412
413 /* We have 4 ports per PCI function */
414 vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET);
415 vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET);
416 vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET);
417 vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET);
418
419 pci_set_master(pdev);
420
421 /*
422 * Config offset 0x98 is "Extended Control and Status Register 0"
423 * Default value is (1 << 28). All bits except bit 28 are reserved in
424 * DPA mode. If bit 28 is set, LED 0 reflects all ports' activity.
425 * If bit 28 is clear, each port has its own LED.
426 */
427 pci_write_config_dword(pdev, 0x98, 0);
428
429 /* FIXME: check ata_device_add return value */
430 ata_device_add(probe_ent);
431 kfree(probe_ent);
432
433 return 0;
434
435err_out_free_ent:
436 kfree(probe_ent);
437err_out_regions:
438 pci_release_regions(pdev);
439err_out:
440 if (!pci_dev_busy)
441 pci_disable_device(pdev);
442 return rc;
443}
444
445
446static const struct pci_device_id vsc_sata_pci_tbl[] = {
447 { PCI_VENDOR_ID_VITESSE, 0x7174,
448 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
449 { PCI_VENDOR_ID_INTEL, 0x3200,
450 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
451 { } /* terminate list */
452};
453
454
455static struct pci_driver vsc_sata_pci_driver = {
456 .name = DRV_NAME,
457 .id_table = vsc_sata_pci_tbl,
458 .probe = vsc_sata_init_one,
459 .remove = ata_pci_remove_one,
460};
461
462
463static int __init vsc_sata_init(void)
464{
465 return pci_register_driver(&vsc_sata_pci_driver);
466}
467
468
469static void __exit vsc_sata_exit(void)
470{
471 pci_unregister_driver(&vsc_sata_pci_driver);
472}
473
474
475MODULE_AUTHOR("Jeremy Higdon");
476MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
477MODULE_LICENSE("GPL");
478MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
479MODULE_VERSION(DRV_VERSION);
480
481module_init(vsc_sata_init);
482module_exit(vsc_sata_exit);