path: root/drivers/ata/ahci.c
author    Jeff Garzik <jeff@garzik.org>  2006-08-10 07:31:37 -0400
committer Jeff Garzik <jeff@garzik.org>  2006-08-10 07:31:37 -0400
commit    c6fd280766a050b13360d7c2d59a3d6bd3a27d9a (patch)
tree      fdbeab639bc3dec29267bbf4b32cff7c8dd03593 /drivers/ata/ahci.c
parent    79ed35a9f139ad2b2653dfdd5f45a8f1453e2cbb (diff)
Move libata to drivers/ata.
Diffstat (limited to 'drivers/ata/ahci.c')
-rw-r--r--  drivers/ata/ahci.c  1684
1 file changed, 1684 insertions(+), 0 deletions(-)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
new file mode 100644
index 00000000000..813031c01fb
--- /dev/null
+++ b/drivers/ata/ahci.c
@@ -0,0 +1,1684 @@
1/*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/blkdev.h>
40#include <linux/delay.h>
41#include <linux/interrupt.h>
42#include <linux/sched.h>
43#include <linux/dma-mapping.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_cmnd.h>
47#include <linux/libata.h>
48#include <asm/io.h>
49
50#define DRV_NAME "ahci"
51#define DRV_VERSION "2.0"
52
53
54enum {
55 AHCI_PCI_BAR = 5,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
59 AHCI_MAX_CMDS = 32,
60 AHCI_CMD_SZ = 32,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
62 AHCI_RX_FIS_SZ = 256,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
68 AHCI_RX_FIS_SZ,
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
75
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78
79 board_ahci = 0,
80 board_ahci_vt8251 = 1,
81
82 /* global controller registers */
83 HOST_CAP = 0x00, /* host capabilities */
84 HOST_CTL = 0x04, /* global host control */
85 HOST_IRQ_STAT = 0x08, /* interrupt status */
86 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
87 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
88
89 /* HOST_CTL bits */
90 HOST_RESET = (1 << 0), /* reset controller; self-clear */
91 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
92 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
93
94 /* HOST_CAP bits */
95 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
96 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
97 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
98 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
99 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
100
101 /* registers for each SATA port */
102 PORT_LST_ADDR = 0x00, /* command list DMA addr */
103 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
104 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
105 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
106 PORT_IRQ_STAT = 0x10, /* interrupt status */
107 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
108 PORT_CMD = 0x18, /* port command */
109 PORT_TFDATA = 0x20, /* taskfile data */
110 PORT_SIG = 0x24, /* device TF signature */
111 PORT_CMD_ISSUE = 0x38, /* command issue */
112 PORT_SCR = 0x28, /* SATA phy register block */
113 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
114 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
115 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
116 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
117
118 /* PORT_IRQ_{STAT,MASK} bits */
119 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
120 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
121 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
122 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
123 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
124 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
125 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
126 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
127
128 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
129 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
130 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
131 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
132 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
133 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
134 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
135 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
136 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
137
138 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
139 PORT_IRQ_IF_ERR |
140 PORT_IRQ_CONNECT |
141 PORT_IRQ_PHYRDY |
142 PORT_IRQ_UNK_FIS,
143 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
144 PORT_IRQ_TF_ERR |
145 PORT_IRQ_HBUS_DATA_ERR,
146 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
147 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
148 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
149
150 /* PORT_CMD bits */
151 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
152 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
153 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
154 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
155 PORT_CMD_CLO = (1 << 3), /* Command list override */
156 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
157 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
158 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
159
160 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
161 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
162 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
163 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
164
165 /* hpriv->flags bits */
166 AHCI_FLAG_MSI = (1 << 0),
167
168 /* ap->flags bits */
169 AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
170 AHCI_FLAG_NO_NCQ = (1 << 25),
171};
172
173struct ahci_cmd_hdr {
174 u32 opts;
175 u32 status;
176 u32 tbl_addr;
177 u32 tbl_addr_hi;
178 u32 reserved[4];
179};
180
181struct ahci_sg {
182 u32 addr;
183 u32 addr_hi;
184 u32 reserved;
185 u32 flags_size;
186};
187
188struct ahci_host_priv {
189 unsigned long flags;
190 u32 cap; /* cache of HOST_CAP register */
191 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
192};
193
194struct ahci_port_priv {
195 struct ahci_cmd_hdr *cmd_slot;
196 dma_addr_t cmd_slot_dma;
197 void *cmd_tbl;
198 dma_addr_t cmd_tbl_dma;
199 void *rx_fis;
200 dma_addr_t rx_fis_dma;
201};
202
203static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
204static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
205static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
206static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
207static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
208static void ahci_irq_clear(struct ata_port *ap);
209static int ahci_port_start(struct ata_port *ap);
210static void ahci_port_stop(struct ata_port *ap);
211static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
212static void ahci_qc_prep(struct ata_queued_cmd *qc);
213static u8 ahci_check_status(struct ata_port *ap);
214static void ahci_freeze(struct ata_port *ap);
215static void ahci_thaw(struct ata_port *ap);
216static void ahci_error_handler(struct ata_port *ap);
217static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
218static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
219static int ahci_port_resume(struct ata_port *ap);
220static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
221static int ahci_pci_device_resume(struct pci_dev *pdev);
222static void ahci_remove_one (struct pci_dev *pdev);
223
224static struct scsi_host_template ahci_sht = {
225 .module = THIS_MODULE,
226 .name = DRV_NAME,
227 .ioctl = ata_scsi_ioctl,
228 .queuecommand = ata_scsi_queuecmd,
229 .change_queue_depth = ata_scsi_change_queue_depth,
230 .can_queue = AHCI_MAX_CMDS - 1,
231 .this_id = ATA_SHT_THIS_ID,
232 .sg_tablesize = AHCI_MAX_SG,
233 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
234 .emulated = ATA_SHT_EMULATED,
235 .use_clustering = AHCI_USE_CLUSTERING,
236 .proc_name = DRV_NAME,
237 .dma_boundary = AHCI_DMA_BOUNDARY,
238 .slave_configure = ata_scsi_slave_config,
239 .slave_destroy = ata_scsi_slave_destroy,
240 .bios_param = ata_std_bios_param,
241 .suspend = ata_scsi_device_suspend,
242 .resume = ata_scsi_device_resume,
243};
244
245static const struct ata_port_operations ahci_ops = {
246 .port_disable = ata_port_disable,
247
248 .check_status = ahci_check_status,
249 .check_altstatus = ahci_check_status,
250 .dev_select = ata_noop_dev_select,
251
252 .tf_read = ahci_tf_read,
253
254 .qc_prep = ahci_qc_prep,
255 .qc_issue = ahci_qc_issue,
256
257 .irq_handler = ahci_interrupt,
258 .irq_clear = ahci_irq_clear,
259
260 .scr_read = ahci_scr_read,
261 .scr_write = ahci_scr_write,
262
263 .freeze = ahci_freeze,
264 .thaw = ahci_thaw,
265
266 .error_handler = ahci_error_handler,
267 .post_internal_cmd = ahci_post_internal_cmd,
268
269 .port_suspend = ahci_port_suspend,
270 .port_resume = ahci_port_resume,
271
272 .port_start = ahci_port_start,
273 .port_stop = ahci_port_stop,
274};
275
276static const struct ata_port_info ahci_port_info[] = {
277 /* board_ahci */
278 {
279 .sht = &ahci_sht,
280 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
281 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
282 ATA_FLAG_SKIP_D2H_BSY,
283 .pio_mask = 0x1f, /* pio0-4 */
284 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
285 .port_ops = &ahci_ops,
286 },
287 /* board_ahci_vt8251 */
288 {
289 .sht = &ahci_sht,
290 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
291 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
292 ATA_FLAG_SKIP_D2H_BSY |
293 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
294 .pio_mask = 0x1f, /* pio0-4 */
295 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
296 .port_ops = &ahci_ops,
297 },
298};
299
300static const struct pci_device_id ahci_pci_tbl[] = {
301 /* Intel */
302 { PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
303 board_ahci }, /* ICH6 */
304 { PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
305 board_ahci }, /* ICH6M */
306 { PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
307 board_ahci }, /* ICH7 */
308 { PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
309 board_ahci }, /* ICH7M */
310 { PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
311 board_ahci }, /* ICH7R */
312 { PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
313 board_ahci }, /* ULi M5288 */
314 { PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
315 board_ahci }, /* ESB2 */
316 { PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
317 board_ahci }, /* ESB2 */
318 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
319 board_ahci }, /* ESB2 */
320 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
321 board_ahci }, /* ICH7-M DH */
322 { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
323 board_ahci }, /* ICH8 */
324 { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
325 board_ahci }, /* ICH8 */
326 { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
327 board_ahci }, /* ICH8 */
328 { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
329 board_ahci }, /* ICH8M */
330 { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
331 board_ahci }, /* ICH8M */
332
333 /* JMicron */
334 { 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
335 board_ahci }, /* JMicron JMB360 */
336 { 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
337 board_ahci }, /* JMicron JMB361 */
338 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
339 board_ahci }, /* JMicron JMB363 */
340 { 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
341 board_ahci }, /* JMicron JMB365 */
342 { 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
343 board_ahci }, /* JMicron JMB366 */
344
345 /* ATI */
346 { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
347 board_ahci }, /* ATI SB600 non-raid */
348 { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
349 board_ahci }, /* ATI SB600 raid */
350
351 /* VIA */
352 { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
353 board_ahci_vt8251 }, /* VIA VT8251 */
354
355 /* NVIDIA */
356 { PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
357 board_ahci }, /* MCP65 */
358 { PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
359 board_ahci }, /* MCP65 */
360 { PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
361 board_ahci }, /* MCP65 */
362 { PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
363 board_ahci }, /* MCP65 */
364
365 /* SiS */
366 { PCI_VENDOR_ID_SI, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
367 board_ahci }, /* SiS 966 */
368 { PCI_VENDOR_ID_SI, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
369 board_ahci }, /* SiS 966 */
370 { PCI_VENDOR_ID_SI, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
371 board_ahci }, /* SiS 968 */
372
373 { } /* terminate list */
374};
375
376
377static struct pci_driver ahci_pci_driver = {
378 .name = DRV_NAME,
379 .id_table = ahci_pci_tbl,
380 .probe = ahci_init_one,
381 .suspend = ahci_pci_device_suspend,
382 .resume = ahci_pci_device_resume,
383 .remove = ahci_remove_one,
384};
385
386
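/*
 * Per-port register windows: port 0's registers start at ABAR offset
 * 0x100 and each subsequent port's block sits 0x80 bytes further on,
 * matching the AHCI register layout.
 */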
387static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
388{
389 return base + 0x100 + (port * 0x80);
390}
391
392static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
393{
394 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
395}
396
397static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
398{
399 unsigned int sc_reg;
400
401 switch (sc_reg_in) {
402 case SCR_STATUS: sc_reg = 0; break;
403 case SCR_CONTROL: sc_reg = 1; break;
404 case SCR_ERROR: sc_reg = 2; break;
405 case SCR_ACTIVE: sc_reg = 3; break;
406 default:
407 return 0xffffffffU;
408 }
409
410 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
411}
412
413
414static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
415 u32 val)
416{
417 unsigned int sc_reg;
418
419 switch (sc_reg_in) {
420 case SCR_STATUS: sc_reg = 0; break;
421 case SCR_CONTROL: sc_reg = 1; break;
422 case SCR_ERROR: sc_reg = 2; break;
423 case SCR_ACTIVE: sc_reg = 3; break;
424 default:
425 return;
426 }
427
428 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
429}
430
431static void ahci_start_engine(void __iomem *port_mmio)
432{
433 u32 tmp;
434
435 /* start DMA */
436 tmp = readl(port_mmio + PORT_CMD);
437 tmp |= PORT_CMD_START;
438 writel(tmp, port_mmio + PORT_CMD);
439 readl(port_mmio + PORT_CMD); /* flush */
440}
441
442static int ahci_stop_engine(void __iomem *port_mmio)
443{
444 u32 tmp;
445
446 tmp = readl(port_mmio + PORT_CMD);
447
448 /* check if the HBA is idle */
449 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
450 return 0;
451
452 /* setting HBA to idle */
453 tmp &= ~PORT_CMD_START;
454 writel(tmp, port_mmio + PORT_CMD);
455
456 /* wait for engine to stop. This could be as long as 500 msec */
457 tmp = ata_wait_register(port_mmio + PORT_CMD,
458 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
459 if (tmp & PORT_CMD_LIST_ON)
460 return -EIO;
461
462 return 0;
463}
464
465static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
466 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
467{
468 u32 tmp;
469
470 /* set FIS registers */
471 if (cap & HOST_CAP_64)
472 writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
473 writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
474
475 if (cap & HOST_CAP_64)
476 writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
477 writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
478
479 /* enable FIS reception */
480 tmp = readl(port_mmio + PORT_CMD);
481 tmp |= PORT_CMD_FIS_RX;
482 writel(tmp, port_mmio + PORT_CMD);
483
484 /* flush */
485 readl(port_mmio + PORT_CMD);
486}
487
488static int ahci_stop_fis_rx(void __iomem *port_mmio)
489{
490 u32 tmp;
491
492 /* disable FIS reception */
493 tmp = readl(port_mmio + PORT_CMD);
494 tmp &= ~PORT_CMD_FIS_RX;
495 writel(tmp, port_mmio + PORT_CMD);
496
497 /* wait for completion, spec says 500ms, give it 1000 */
498 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
499 PORT_CMD_FIS_ON, 10, 1000);
500 if (tmp & PORT_CMD_FIS_ON)
501 return -EBUSY;
502
503 return 0;
504}
505
506static void ahci_power_up(void __iomem *port_mmio, u32 cap)
507{
508 u32 cmd;
509
510 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
511
512 /* spin up device */
513 if (cap & HOST_CAP_SSS) {
514 cmd |= PORT_CMD_SPIN_UP;
515 writel(cmd, port_mmio + PORT_CMD);
516 }
517
518 /* wake up link */
519 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
520}
521
522static void ahci_power_down(void __iomem *port_mmio, u32 cap)
523{
524 u32 cmd, scontrol;
525
526 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
527
528 if (cap & HOST_CAP_SSC) {
529 /* enable transitions to slumber mode */
530 scontrol = readl(port_mmio + PORT_SCR_CTL);
531 if ((scontrol & 0x0f00) > 0x100) {
532 scontrol &= ~0xf00;
533 writel(scontrol, port_mmio + PORT_SCR_CTL);
534 }
535
536 /* put device into slumber mode */
537 writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
538
539 /* wait for the transition to complete */
540 ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
541 PORT_CMD_ICC_SLUMBER, 1, 50);
542 }
543
544 /* put device into listen mode */
545 if (cap & HOST_CAP_SSS) {
546 /* first set PxSCTL.DET to 0 */
547 scontrol = readl(port_mmio + PORT_SCR_CTL);
548 scontrol &= ~0xf;
549 writel(scontrol, port_mmio + PORT_SCR_CTL);
550
551 /* then set PxCMD.SUD to 0 */
552 cmd &= ~PORT_CMD_SPIN_UP;
553 writel(cmd, port_mmio + PORT_CMD);
554 }
555}
556
557static void ahci_init_port(void __iomem *port_mmio, u32 cap,
558 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
559{
560 /* power up */
561 ahci_power_up(port_mmio, cap);
562
563 /* enable FIS reception */
564 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
565
566 /* enable DMA */
567 ahci_start_engine(port_mmio);
568}
569
570static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
571{
572 int rc;
573
574 /* disable DMA */
575 rc = ahci_stop_engine(port_mmio);
576 if (rc) {
577 *emsg = "failed to stop engine";
578 return rc;
579 }
580
581 /* disable FIS reception */
582 rc = ahci_stop_fis_rx(port_mmio);
583 if (rc) {
584 *emsg = "failed stop FIS RX";
585 return rc;
586 }
587
588 /* put device into slumber mode */
589 ahci_power_down(port_mmio, cap);
590
591 return 0;
592}
593
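/*
 * Global HBA reset.  A reset can clear firmware-initialized HOST_CAP
 * bits, so bits 17 and 28 (port multiplier and mechanical presence
 * switch support) are saved beforehand and written back, with bit 27
 * (staggered spin-up) forced on.  On Intel parts the per-port enable
 * bits in the PCS register at PCI config offset 0x92 are also set.
 */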
594static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
595{
596 u32 cap_save, tmp;
597
598 cap_save = readl(mmio + HOST_CAP);
599 cap_save &= ( (1<<28) | (1<<17) );
600 cap_save |= (1 << 27);
601
602 /* global controller reset */
603 tmp = readl(mmio + HOST_CTL);
604 if ((tmp & HOST_RESET) == 0) {
605 writel(tmp | HOST_RESET, mmio + HOST_CTL);
606 readl(mmio + HOST_CTL); /* flush */
607 }
608
609 /* reset must complete within 1 second, or
610 * the hardware should be considered fried.
611 */
612 ssleep(1);
613
614 tmp = readl(mmio + HOST_CTL);
615 if (tmp & HOST_RESET) {
616 dev_printk(KERN_ERR, &pdev->dev,
617 "controller reset failed (0x%x)\n", tmp);
618 return -EIO;
619 }
620
621 writel(HOST_AHCI_EN, mmio + HOST_CTL);
622 (void) readl(mmio + HOST_CTL); /* flush */
623 writel(cap_save, mmio + HOST_CAP);
624 writel(0xf, mmio + HOST_PORTS_IMPL);
625 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
626
627 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
628 u16 tmp16;
629
630 /* configure PCS */
631 pci_read_config_word(pdev, 0x92, &tmp16);
632 tmp16 |= 0xf;
633 pci_write_config_word(pdev, 0x92, tmp16);
634 }
635
636 return 0;
637}
638
639static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
640 int n_ports, u32 cap)
641{
642 int i, rc;
643 u32 tmp;
644
645 for (i = 0; i < n_ports; i++) {
646 void __iomem *port_mmio = ahci_port_base(mmio, i);
647 const char *emsg = NULL;
648
649#if 0 /* BIOSen initialize this incorrectly */
650 if (!(hpriv->port_map & (1 << i)))
651 continue;
652#endif
653
654 /* make sure port is not active */
655 rc = ahci_deinit_port(port_mmio, cap, &emsg);
656 if (rc)
657 dev_printk(KERN_WARNING, &pdev->dev,
658 "%s (%d)\n", emsg, rc);
659
660 /* clear SError */
661 tmp = readl(port_mmio + PORT_SCR_ERR);
662 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
663 writel(tmp, port_mmio + PORT_SCR_ERR);
664
665 /* clear port IRQ */
666 tmp = readl(port_mmio + PORT_IRQ_STAT);
667 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
668 if (tmp)
669 writel(tmp, port_mmio + PORT_IRQ_STAT);
670
671 writel(1 << i, mmio + HOST_IRQ_STAT);
672 }
673
674 tmp = readl(mmio + HOST_CTL);
675 VPRINTK("HOST_CTL 0x%x\n", tmp);
676 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
677 tmp = readl(mmio + HOST_CTL);
678 VPRINTK("HOST_CTL 0x%x\n", tmp);
679}
680
681static unsigned int ahci_dev_classify(struct ata_port *ap)
682{
683 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
684 struct ata_taskfile tf;
685 u32 tmp;
686
687 tmp = readl(port_mmio + PORT_SIG);
688 tf.lbah = (tmp >> 24) & 0xff;
689 tf.lbam = (tmp >> 16) & 0xff;
690 tf.lbal = (tmp >> 8) & 0xff;
691 tf.nsect = (tmp) & 0xff;
692
693 return ata_dev_classify(&tf);
694}
695
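/*
 * Each of the 32 command slots holds a 32-byte header: a flags/length
 * word (command FIS length in dwords in the low bits, PRD entry count
 * in the upper 16 bits), a status word the HBA updates with the byte
 * count transferred, and the 64-bit address of the slot's command
 * table.  The "(x >> 16) >> 16" idiom extracts the high dword without
 * performing a 32-bit shift on a dma_addr_t that may itself be only
 * 32 bits wide.
 */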
696static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
697 u32 opts)
698{
699 dma_addr_t cmd_tbl_dma;
700
701 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
702
703 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
704 pp->cmd_slot[tag].status = 0;
705 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
706 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
707}
708
709static int ahci_clo(struct ata_port *ap)
710{
711 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
712 struct ahci_host_priv *hpriv = ap->host_set->private_data;
713 u32 tmp;
714
715 if (!(hpriv->cap & HOST_CAP_CLO))
716 return -EOPNOTSUPP;
717
718 tmp = readl(port_mmio + PORT_CMD);
719 tmp |= PORT_CMD_CLO;
720 writel(tmp, port_mmio + PORT_CMD);
721
722 tmp = ata_wait_register(port_mmio + PORT_CMD,
723 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
724 if (tmp & PORT_CMD_CLO)
725 return -EIO;
726
727 return 0;
728}
729
730static int ahci_prereset(struct ata_port *ap)
731{
732 if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
733 (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
734 /* ATA_BUSY hasn't cleared, so send a CLO */
735 ahci_clo(ap);
736 }
737
738 return ata_std_prereset(ap);
739}
740
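/*
 * Software reset follows AHCI 1.1 section 10.4.1: stop the command
 * engine, clear any stuck BSY/DRQ with a Command List Override if the
 * HBA supports it, restart the engine, then issue two Host-to-Device
 * Register FISes from command slot 0 -- the first with SRST set, the
 * second with SRST cleared -- and finally classify the device from
 * the signature it returns.
 */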
741static int ahci_softreset(struct ata_port *ap, unsigned int *class)
742{
743 struct ahci_port_priv *pp = ap->private_data;
744 void __iomem *mmio = ap->host_set->mmio_base;
745 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
746 const u32 cmd_fis_len = 5; /* five dwords */
747 const char *reason = NULL;
748 struct ata_taskfile tf;
749 u32 tmp;
750 u8 *fis;
751 int rc;
752
753 DPRINTK("ENTER\n");
754
755 if (ata_port_offline(ap)) {
756 DPRINTK("PHY reports no device\n");
757 *class = ATA_DEV_NONE;
758 return 0;
759 }
760
761 /* prepare for SRST (AHCI-1.1 10.4.1) */
762 rc = ahci_stop_engine(port_mmio);
763 if (rc) {
764 reason = "failed to stop engine";
765 goto fail_restart;
766 }
767
768 /* check BUSY/DRQ, perform Command List Override if necessary */
769 ahci_tf_read(ap, &tf);
770 if (tf.command & (ATA_BUSY | ATA_DRQ)) {
771 rc = ahci_clo(ap);
772
773 if (rc == -EOPNOTSUPP) {
774 reason = "port busy but CLO unavailable";
775 goto fail_restart;
776 } else if (rc) {
777 reason = "port busy but CLO failed";
778 goto fail_restart;
779 }
780 }
781
782 /* restart engine */
783 ahci_start_engine(port_mmio);
784
785 ata_tf_init(ap->device, &tf);
786 fis = pp->cmd_tbl;
787
788 /* issue the first D2H Register FIS */
789 ahci_fill_cmd_slot(pp, 0,
790 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
791
792 tf.ctl |= ATA_SRST;
793 ata_tf_to_fis(&tf, fis, 0);
794 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
795
796 writel(1, port_mmio + PORT_CMD_ISSUE);
797
798 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
799 if (tmp & 0x1) {
800 rc = -EIO;
801 reason = "1st FIS failed";
802 goto fail;
803 }
804
805 /* spec says at least 5us, but be generous and sleep for 1ms */
806 msleep(1);
807
808 /* issue the second D2H Register FIS */
809 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
810
811 tf.ctl &= ~ATA_SRST;
812 ata_tf_to_fis(&tf, fis, 0);
813 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
814
815 writel(1, port_mmio + PORT_CMD_ISSUE);
816 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
817
818 /* spec mandates ">= 2ms" before checking status.
819 * We wait 150ms, because that was the magic delay used for
820 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
821 * between when the ATA command register is written, and then
822 * status is checked. Because waiting for "a while" before
823 * checking status is fine, post SRST, we perform this magic
824 * delay here as well.
825 */
826 msleep(150);
827
828 *class = ATA_DEV_NONE;
829 if (ata_port_online(ap)) {
830 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
831 rc = -EIO;
832 reason = "device not ready";
833 goto fail;
834 }
835 *class = ahci_dev_classify(ap);
836 }
837
838 DPRINTK("EXIT, class=%u\n", *class);
839 return 0;
840
841 fail_restart:
842 ahci_start_engine(port_mmio);
843 fail:
844 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
845 return rc;
846}
847
848static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
849{
850 struct ahci_port_priv *pp = ap->private_data;
851 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
852 struct ata_taskfile tf;
853 void __iomem *mmio = ap->host_set->mmio_base;
854 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
855 int rc;
856
857 DPRINTK("ENTER\n");
858
859 ahci_stop_engine(port_mmio);
860
861 /* clear D2H reception area to properly wait for D2H FIS */
862 ata_tf_init(ap->device, &tf);
863 tf.command = 0xff;
864 ata_tf_to_fis(&tf, d2h_fis, 0);
865
866 rc = sata_std_hardreset(ap, class);
867
868 ahci_start_engine(port_mmio);
869
870 if (rc == 0 && ata_port_online(ap))
871 *class = ahci_dev_classify(ap);
872 if (*class == ATA_DEV_UNKNOWN)
873 *class = ATA_DEV_NONE;
874
875 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
876 return rc;
877}
878
879static void ahci_postreset(struct ata_port *ap, unsigned int *class)
880{
881 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
882 u32 new_tmp, tmp;
883
884 ata_std_postreset(ap, class);
885
886 /* Make sure port's ATAPI bit is set appropriately */
887 new_tmp = tmp = readl(port_mmio + PORT_CMD);
888 if (*class == ATA_DEV_ATAPI)
889 new_tmp |= PORT_CMD_ATAPI;
890 else
891 new_tmp &= ~PORT_CMD_ATAPI;
892 if (new_tmp != tmp) {
893 writel(new_tmp, port_mmio + PORT_CMD);
894 readl(port_mmio + PORT_CMD); /* flush */
895 }
896}
897
898static u8 ahci_check_status(struct ata_port *ap)
899{
900 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
901
902 return readl(mmio + PORT_TFDATA) & 0xFF;
903}
904
905static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
906{
907 struct ahci_port_priv *pp = ap->private_data;
908 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
909
910 ata_tf_from_fis(d2h_fis, tf);
911}
912
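/*
 * Build the PRD (scatter/gather) table that follows the 0x80-byte
 * command FIS area inside the command table.  Each 16-byte entry
 * carries a 64-bit buffer address and, in flags_size, the byte count
 * encoded as length minus one; bit 31 (AHCI_IRQ_ON_SG) would request
 * an interrupt when that entry completes, but it is not used here.
 */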
913static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
914{
915 struct scatterlist *sg;
916 struct ahci_sg *ahci_sg;
917 unsigned int n_sg = 0;
918
919 VPRINTK("ENTER\n");
920
921 /*
922 * Next, the S/G list.
923 */
924 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
925 ata_for_each_sg(sg, qc) {
926 dma_addr_t addr = sg_dma_address(sg);
927 u32 sg_len = sg_dma_len(sg);
928
929 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
930 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
931 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
932
933 ahci_sg++;
934 n_sg++;
935 }
936
937 return n_sg;
938}
939
940static void ahci_qc_prep(struct ata_queued_cmd *qc)
941{
942 struct ata_port *ap = qc->ap;
943 struct ahci_port_priv *pp = ap->private_data;
944 int is_atapi = is_atapi_taskfile(&qc->tf);
945 void *cmd_tbl;
946 u32 opts;
947 const u32 cmd_fis_len = 5; /* five dwords */
948 unsigned int n_elem;
949
950 /*
951 * Fill in command table information. First, the header,
952 * a SATA Register - Host to Device command FIS.
953 */
954 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
955
956 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
957 if (is_atapi) {
958 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
959 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
960 }
961
962 n_elem = 0;
963 if (qc->flags & ATA_QCFLAG_DMAMAP)
964 n_elem = ahci_fill_sg(qc, cmd_tbl);
965
966 /*
967 * Fill in command slot information.
968 */
969 opts = cmd_fis_len | n_elem << 16;
970 if (qc->tf.flags & ATA_TFLAG_WRITE)
971 opts |= AHCI_CMD_WRITE;
972 if (is_atapi)
973 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
974
975 ahci_fill_cmd_slot(pp, qc->tag, opts);
976}
977
978static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
979{
980 struct ahci_port_priv *pp = ap->private_data;
981 struct ata_eh_info *ehi = &ap->eh_info;
982 unsigned int err_mask = 0, action = 0;
983 struct ata_queued_cmd *qc;
984 u32 serror;
985
986 ata_ehi_clear_desc(ehi);
987
988 /* AHCI needs SError cleared; otherwise, it might lock up */
989 serror = ahci_scr_read(ap, SCR_ERROR);
990 ahci_scr_write(ap, SCR_ERROR, serror);
991
992 /* analyze @irq_stat */
993 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
994
995 if (irq_stat & PORT_IRQ_TF_ERR)
996 err_mask |= AC_ERR_DEV;
997
998 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
999 err_mask |= AC_ERR_HOST_BUS;
1000 action |= ATA_EH_SOFTRESET;
1001 }
1002
1003 if (irq_stat & PORT_IRQ_IF_ERR) {
1004 err_mask |= AC_ERR_ATA_BUS;
1005 action |= ATA_EH_SOFTRESET;
1006 ata_ehi_push_desc(ehi, ", interface fatal error");
1007 }
1008
1009 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1010 ata_ehi_hotplugged(ehi);
1011 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
1012 "connection status changed" : "PHY RDY changed");
1013 }
1014
1015 if (irq_stat & PORT_IRQ_UNK_FIS) {
1016 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1017
1018 err_mask |= AC_ERR_HSM;
1019 action |= ATA_EH_SOFTRESET;
1020 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
1021 unk[0], unk[1], unk[2], unk[3]);
1022 }
1023
1024 /* okay, let's hand over to EH */
1025 ehi->serror |= serror;
1026 ehi->action |= action;
1027
1028 qc = ata_qc_from_tag(ap, ap->active_tag);
1029 if (qc)
1030 qc->err_mask |= err_mask;
1031 else
1032 ehi->err_mask |= err_mask;
1033
1034 if (irq_stat & PORT_IRQ_FREEZE)
1035 ata_port_freeze(ap);
1036 else
1037 ata_port_abort(ap);
1038}
1039
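/*
 * Per-port interrupt handling: read and acknowledge PORT_IRQ_STAT,
 * hand fatal conditions to ahci_error_intr(), and otherwise complete
 * finished commands by comparing the outstanding tags against
 * PORT_SCR_ACT (for NCQ) or PORT_CMD_ISSUE (for non-queued commands).
 */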
1040static void ahci_host_intr(struct ata_port *ap)
1041{
1042 void __iomem *mmio = ap->host_set->mmio_base;
1043 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1044 struct ata_eh_info *ehi = &ap->eh_info;
1045 u32 status, qc_active;
1046 int rc;
1047
1048 status = readl(port_mmio + PORT_IRQ_STAT);
1049 writel(status, port_mmio + PORT_IRQ_STAT);
1050
1051 if (unlikely(status & PORT_IRQ_ERROR)) {
1052 ahci_error_intr(ap, status);
1053 return;
1054 }
1055
1056 if (ap->sactive)
1057 qc_active = readl(port_mmio + PORT_SCR_ACT);
1058 else
1059 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1060
1061 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
1062 if (rc > 0)
1063 return;
1064 if (rc < 0) {
1065 ehi->err_mask |= AC_ERR_HSM;
1066 ehi->action |= ATA_EH_SOFTRESET;
1067 ata_port_freeze(ap);
1068 return;
1069 }
1070
 1071	/* hmmm... a spurious interrupt */
1072
1073 /* some devices send D2H reg with I bit set during NCQ command phase */
1074 if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
1075 return;
1076
1077 /* ignore interim PIO setup fis interrupts */
1078 if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
1079 return;
1080
1081 if (ata_ratelimit())
1082 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
1083 "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
1084 status, ap->active_tag, ap->sactive);
1085}
1086
1087static void ahci_irq_clear(struct ata_port *ap)
1088{
1089 /* TODO */
1090}
1091
1092static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1093{
1094 struct ata_host_set *host_set = dev_instance;
1095 struct ahci_host_priv *hpriv;
1096 unsigned int i, handled = 0;
1097 void __iomem *mmio;
1098 u32 irq_stat, irq_ack = 0;
1099
1100 VPRINTK("ENTER\n");
1101
1102 hpriv = host_set->private_data;
1103 mmio = host_set->mmio_base;
1104
1105 /* sigh. 0xffffffff is a valid return from h/w */
1106 irq_stat = readl(mmio + HOST_IRQ_STAT);
1107 irq_stat &= hpriv->port_map;
1108 if (!irq_stat)
1109 return IRQ_NONE;
1110
1111 spin_lock(&host_set->lock);
1112
1113 for (i = 0; i < host_set->n_ports; i++) {
1114 struct ata_port *ap;
1115
1116 if (!(irq_stat & (1 << i)))
1117 continue;
1118
1119 ap = host_set->ports[i];
1120 if (ap) {
1121 ahci_host_intr(ap);
1122 VPRINTK("port %u\n", i);
1123 } else {
1124 VPRINTK("port %u (no irq)\n", i);
1125 if (ata_ratelimit())
1126 dev_printk(KERN_WARNING, host_set->dev,
1127 "interrupt on disabled port %u\n", i);
1128 }
1129
1130 irq_ack |= (1 << i);
1131 }
1132
1133 if (irq_ack) {
1134 writel(irq_ack, mmio + HOST_IRQ_STAT);
1135 handled = 1;
1136 }
1137
1138 spin_unlock(&host_set->lock);
1139
1140 VPRINTK("EXIT\n");
1141
1142 return IRQ_RETVAL(handled);
1143}
1144
1145static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1146{
1147 struct ata_port *ap = qc->ap;
1148 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1149
1150 if (qc->tf.protocol == ATA_PROT_NCQ)
1151 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1152 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1153 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1154
1155 return 0;
1156}
1157
1158static void ahci_freeze(struct ata_port *ap)
1159{
1160 void __iomem *mmio = ap->host_set->mmio_base;
1161 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1162
1163 /* turn IRQ off */
1164 writel(0, port_mmio + PORT_IRQ_MASK);
1165}
1166
1167static void ahci_thaw(struct ata_port *ap)
1168{
1169 void __iomem *mmio = ap->host_set->mmio_base;
1170 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1171 u32 tmp;
1172
1173 /* clear IRQ */
1174 tmp = readl(port_mmio + PORT_IRQ_STAT);
1175 writel(tmp, port_mmio + PORT_IRQ_STAT);
1176 writel(1 << ap->id, mmio + HOST_IRQ_STAT);
1177
1178 /* turn IRQ back on */
1179 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1180}
1181
1182static void ahci_error_handler(struct ata_port *ap)
1183{
1184 void __iomem *mmio = ap->host_set->mmio_base;
1185 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1186
1187 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1188 /* restart engine */
1189 ahci_stop_engine(port_mmio);
1190 ahci_start_engine(port_mmio);
1191 }
1192
1193 /* perform recovery */
1194 ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
1195 ahci_postreset);
1196}
1197
1198static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1199{
1200 struct ata_port *ap = qc->ap;
1201 void __iomem *mmio = ap->host_set->mmio_base;
1202 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1203
1204 if (qc->flags & ATA_QCFLAG_FAILED)
1205 qc->err_mask |= AC_ERR_OTHER;
1206
1207 if (qc->err_mask) {
1208 /* make DMA engine forget about the failed command */
1209 ahci_stop_engine(port_mmio);
1210 ahci_start_engine(port_mmio);
1211 }
1212}
1213
1214static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1215{
1216 struct ahci_host_priv *hpriv = ap->host_set->private_data;
1217 struct ahci_port_priv *pp = ap->private_data;
1218 void __iomem *mmio = ap->host_set->mmio_base;
1219 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1220 const char *emsg = NULL;
1221 int rc;
1222
1223 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1224 if (rc) {
1225 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
1226 ahci_init_port(port_mmio, hpriv->cap,
1227 pp->cmd_slot_dma, pp->rx_fis_dma);
1228 }
1229
1230 return rc;
1231}
1232
1233static int ahci_port_resume(struct ata_port *ap)
1234{
1235 struct ahci_port_priv *pp = ap->private_data;
1236 struct ahci_host_priv *hpriv = ap->host_set->private_data;
1237 void __iomem *mmio = ap->host_set->mmio_base;
1238 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1239
1240 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1241
1242 return 0;
1243}
1244
1245static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1246{
1247 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
1248 void __iomem *mmio = host_set->mmio_base;
1249 u32 ctl;
1250
1251 if (mesg.event == PM_EVENT_SUSPEND) {
1252 /* AHCI spec rev1.1 section 8.3.3:
1253 * Software must disable interrupts prior to requesting a
1254 * transition of the HBA to D3 state.
1255 */
1256 ctl = readl(mmio + HOST_CTL);
1257 ctl &= ~HOST_IRQ_EN;
1258 writel(ctl, mmio + HOST_CTL);
1259 readl(mmio + HOST_CTL); /* flush */
1260 }
1261
1262 return ata_pci_device_suspend(pdev, mesg);
1263}
1264
1265static int ahci_pci_device_resume(struct pci_dev *pdev)
1266{
1267 struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
1268 struct ahci_host_priv *hpriv = host_set->private_data;
1269 void __iomem *mmio = host_set->mmio_base;
1270 int rc;
1271
1272 ata_pci_device_do_resume(pdev);
1273
1274 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1275 rc = ahci_reset_controller(mmio, pdev);
1276 if (rc)
1277 return rc;
1278
1279 ahci_init_controller(mmio, pdev, host_set->n_ports, hpriv->cap);
1280 }
1281
1282 ata_host_set_resume(host_set);
1283
1284 return 0;
1285}
1286
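/*
 * Allocate one coherent DMA region per port and carve it up as the
 * layout constants above describe: a 1 KB command list (32 headers of
 * 32 bytes each), a 256-byte received-FIS area, then 32 command
 * tables, each large enough for the command FIS, the ATAPI CDB and
 * AHCI_MAX_SG PRD entries.
 */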
1287static int ahci_port_start(struct ata_port *ap)
1288{
1289 struct device *dev = ap->host_set->dev;
1290 struct ahci_host_priv *hpriv = ap->host_set->private_data;
1291 struct ahci_port_priv *pp;
1292 void __iomem *mmio = ap->host_set->mmio_base;
1293 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1294 void *mem;
1295 dma_addr_t mem_dma;
1296 int rc;
1297
1298 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
1299 if (!pp)
1300 return -ENOMEM;
1301 memset(pp, 0, sizeof(*pp));
1302
1303 rc = ata_pad_alloc(ap, dev);
1304 if (rc) {
1305 kfree(pp);
1306 return rc;
1307 }
1308
1309 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
1310 if (!mem) {
1311 ata_pad_free(ap, dev);
1312 kfree(pp);
1313 return -ENOMEM;
1314 }
1315 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
1316
1317 /*
1318 * First item in chunk of DMA memory: 32-slot command table,
1319 * 32 bytes each in size
1320 */
1321 pp->cmd_slot = mem;
1322 pp->cmd_slot_dma = mem_dma;
1323
1324 mem += AHCI_CMD_SLOT_SZ;
1325 mem_dma += AHCI_CMD_SLOT_SZ;
1326
1327 /*
1328 * Second item: Received-FIS area
1329 */
1330 pp->rx_fis = mem;
1331 pp->rx_fis_dma = mem_dma;
1332
1333 mem += AHCI_RX_FIS_SZ;
1334 mem_dma += AHCI_RX_FIS_SZ;
1335
1336 /*
1337 * Third item: data area for storing a single command
1338 * and its scatter-gather table
1339 */
1340 pp->cmd_tbl = mem;
1341 pp->cmd_tbl_dma = mem_dma;
1342
1343 ap->private_data = pp;
1344
1345 /* initialize port */
1346 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1347
1348 return 0;
1349}
1350
1351static void ahci_port_stop(struct ata_port *ap)
1352{
1353 struct device *dev = ap->host_set->dev;
1354 struct ahci_host_priv *hpriv = ap->host_set->private_data;
1355 struct ahci_port_priv *pp = ap->private_data;
1356 void __iomem *mmio = ap->host_set->mmio_base;
1357 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1358 const char *emsg = NULL;
1359 int rc;
1360
1361 /* de-initialize port */
1362 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1363 if (rc)
1364 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1365
1366 ap->private_data = NULL;
1367 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
1368 pp->cmd_slot, pp->cmd_slot_dma);
1369 ata_pad_free(ap, dev);
1370 kfree(pp);
1371}
1372
1373static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1374 unsigned int port_idx)
1375{
1376 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1377 base = ahci_port_base_ul(base, port_idx);
1378 VPRINTK("base now==0x%lx\n", base);
1379
1380 port->cmd_addr = base;
1381 port->scr_addr = base + PORT_SCR;
1382
1383 VPRINTK("EXIT\n");
1384}
1385
1386static int ahci_host_init(struct ata_probe_ent *probe_ent)
1387{
1388 struct ahci_host_priv *hpriv = probe_ent->private_data;
1389 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1390 void __iomem *mmio = probe_ent->mmio_base;
1391 unsigned int i, using_dac;
1392 int rc;
1393
1394 rc = ahci_reset_controller(mmio, pdev);
1395 if (rc)
1396 return rc;
1397
1398 hpriv->cap = readl(mmio + HOST_CAP);
1399 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1400 probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
1401
1402 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1403 hpriv->cap, hpriv->port_map, probe_ent->n_ports);
1404
1405 using_dac = hpriv->cap & HOST_CAP_64;
1406 if (using_dac &&
1407 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1408 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1409 if (rc) {
1410 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1411 if (rc) {
1412 dev_printk(KERN_ERR, &pdev->dev,
1413 "64-bit DMA enable failed\n");
1414 return rc;
1415 }
1416 }
1417 } else {
1418 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1419 if (rc) {
1420 dev_printk(KERN_ERR, &pdev->dev,
1421 "32-bit DMA enable failed\n");
1422 return rc;
1423 }
1424 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1425 if (rc) {
1426 dev_printk(KERN_ERR, &pdev->dev,
1427 "32-bit consistent DMA enable failed\n");
1428 return rc;
1429 }
1430 }
1431
1432 for (i = 0; i < probe_ent->n_ports; i++)
1433 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
1434
1435 ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
1436
1437 pci_set_master(pdev);
1438
1439 return 0;
1440}
1441
1442static void ahci_print_info(struct ata_probe_ent *probe_ent)
1443{
1444 struct ahci_host_priv *hpriv = probe_ent->private_data;
1445 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1446 void __iomem *mmio = probe_ent->mmio_base;
1447 u32 vers, cap, impl, speed;
1448 const char *speed_s;
1449 u16 cc;
1450 const char *scc_s;
1451
1452 vers = readl(mmio + HOST_VERSION);
1453 cap = hpriv->cap;
1454 impl = hpriv->port_map;
1455
1456 speed = (cap >> 20) & 0xf;
1457 if (speed == 1)
1458 speed_s = "1.5";
1459 else if (speed == 2)
1460 speed_s = "3";
1461 else
1462 speed_s = "?";
1463
1464 pci_read_config_word(pdev, 0x0a, &cc);
1465 if (cc == 0x0101)
1466 scc_s = "IDE";
1467 else if (cc == 0x0106)
1468 scc_s = "SATA";
1469 else if (cc == 0x0104)
1470 scc_s = "RAID";
1471 else
1472 scc_s = "unknown";
1473
1474 dev_printk(KERN_INFO, &pdev->dev,
1475 "AHCI %02x%02x.%02x%02x "
1476 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1477 ,
1478
1479 (vers >> 24) & 0xff,
1480 (vers >> 16) & 0xff,
1481 (vers >> 8) & 0xff,
1482 vers & 0xff,
1483
1484 ((cap >> 8) & 0x1f) + 1,
1485 (cap & 0x1f) + 1,
1486 speed_s,
1487 impl,
1488 scc_s);
1489
1490 dev_printk(KERN_INFO, &pdev->dev,
1491 "flags: "
1492 "%s%s%s%s%s%s"
1493 "%s%s%s%s%s%s%s\n"
1494 ,
1495
1496 cap & (1 << 31) ? "64bit " : "",
1497 cap & (1 << 30) ? "ncq " : "",
1498 cap & (1 << 28) ? "ilck " : "",
1499 cap & (1 << 27) ? "stag " : "",
1500 cap & (1 << 26) ? "pm " : "",
1501 cap & (1 << 25) ? "led " : "",
1502
1503 cap & (1 << 24) ? "clo " : "",
1504 cap & (1 << 19) ? "nz " : "",
1505 cap & (1 << 18) ? "only " : "",
1506 cap & (1 << 17) ? "pmp " : "",
1507 cap & (1 << 15) ? "pio " : "",
1508 cap & (1 << 14) ? "slum " : "",
1509 cap & (1 << 13) ? "part " : ""
1510 );
1511}
1512
1513static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1514{
1515 static int printed_version;
1516 struct ata_probe_ent *probe_ent = NULL;
1517 struct ahci_host_priv *hpriv;
1518 unsigned long base;
1519 void __iomem *mmio_base;
1520 unsigned int board_idx = (unsigned int) ent->driver_data;
1521 int have_msi, pci_dev_busy = 0;
1522 int rc;
1523
1524 VPRINTK("ENTER\n");
1525
1526 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
1527
1528 if (!printed_version++)
1529 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1530
1531 /* JMicron-specific fixup: make sure we're in AHCI mode */
1532 /* This is protected from races with ata_jmicron by the pci probe
1533 locking */
1534 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
1535 /* AHCI enable, AHCI on function 0 */
1536 pci_write_config_byte(pdev, 0x41, 0xa1);
1537 /* Function 1 is the PATA controller */
1538 if (PCI_FUNC(pdev->devfn))
1539 return -ENODEV;
1540 }
1541
1542 rc = pci_enable_device(pdev);
1543 if (rc)
1544 return rc;
1545
1546 rc = pci_request_regions(pdev, DRV_NAME);
1547 if (rc) {
1548 pci_dev_busy = 1;
1549 goto err_out;
1550 }
1551
1552 if (pci_enable_msi(pdev) == 0)
1553 have_msi = 1;
1554 else {
1555 pci_intx(pdev, 1);
1556 have_msi = 0;
1557 }
1558
1559 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1560 if (probe_ent == NULL) {
1561 rc = -ENOMEM;
1562 goto err_out_msi;
1563 }
1564
1565 memset(probe_ent, 0, sizeof(*probe_ent));
1566 probe_ent->dev = pci_dev_to_dev(pdev);
1567 INIT_LIST_HEAD(&probe_ent->node);
1568
1569 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1570 if (mmio_base == NULL) {
1571 rc = -ENOMEM;
1572 goto err_out_free_ent;
1573 }
1574 base = (unsigned long) mmio_base;
1575
1576 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1577 if (!hpriv) {
1578 rc = -ENOMEM;
1579 goto err_out_iounmap;
1580 }
1581 memset(hpriv, 0, sizeof(*hpriv));
1582
1583 probe_ent->sht = ahci_port_info[board_idx].sht;
1584 probe_ent->host_flags = ahci_port_info[board_idx].host_flags;
1585 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1586 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1587 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
1588
1589 probe_ent->irq = pdev->irq;
1590 probe_ent->irq_flags = IRQF_SHARED;
1591 probe_ent->mmio_base = mmio_base;
1592 probe_ent->private_data = hpriv;
1593
1594 if (have_msi)
1595 hpriv->flags |= AHCI_FLAG_MSI;
1596
1597 /* initialize adapter */
1598 rc = ahci_host_init(probe_ent);
1599 if (rc)
1600 goto err_out_hpriv;
1601
1602 if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
1603 (hpriv->cap & HOST_CAP_NCQ))
1604 probe_ent->host_flags |= ATA_FLAG_NCQ;
1605
1606 ahci_print_info(probe_ent);
1607
1608 /* FIXME: check ata_device_add return value */
1609 ata_device_add(probe_ent);
1610 kfree(probe_ent);
1611
1612 return 0;
1613
1614err_out_hpriv:
1615 kfree(hpriv);
1616err_out_iounmap:
1617 pci_iounmap(pdev, mmio_base);
1618err_out_free_ent:
1619 kfree(probe_ent);
1620err_out_msi:
1621 if (have_msi)
1622 pci_disable_msi(pdev);
1623 else
1624 pci_intx(pdev, 0);
1625 pci_release_regions(pdev);
1626err_out:
1627 if (!pci_dev_busy)
1628 pci_disable_device(pdev);
1629 return rc;
1630}
1631
1632static void ahci_remove_one (struct pci_dev *pdev)
1633{
1634 struct device *dev = pci_dev_to_dev(pdev);
1635 struct ata_host_set *host_set = dev_get_drvdata(dev);
1636 struct ahci_host_priv *hpriv = host_set->private_data;
1637 unsigned int i;
1638 int have_msi;
1639
1640 for (i = 0; i < host_set->n_ports; i++)
1641 ata_port_detach(host_set->ports[i]);
1642
1643 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1644 free_irq(host_set->irq, host_set);
1645
1646 for (i = 0; i < host_set->n_ports; i++) {
1647 struct ata_port *ap = host_set->ports[i];
1648
1649 ata_scsi_release(ap->host);
1650 scsi_host_put(ap->host);
1651 }
1652
1653 kfree(hpriv);
1654 pci_iounmap(pdev, host_set->mmio_base);
1655 kfree(host_set);
1656
1657 if (have_msi)
1658 pci_disable_msi(pdev);
1659 else
1660 pci_intx(pdev, 0);
1661 pci_release_regions(pdev);
1662 pci_disable_device(pdev);
1663 dev_set_drvdata(dev, NULL);
1664}
1665
1666static int __init ahci_init(void)
1667{
1668 return pci_register_driver(&ahci_pci_driver);
1669}
1670
1671static void __exit ahci_exit(void)
1672{
1673 pci_unregister_driver(&ahci_pci_driver);
1674}
1675
1676
1677MODULE_AUTHOR("Jeff Garzik");
1678MODULE_DESCRIPTION("AHCI SATA low-level driver");
1679MODULE_LICENSE("GPL");
1680MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1681MODULE_VERSION(DRV_VERSION);
1682
1683module_init(ahci_init);
1684module_exit(ahci_exit);