author     Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 18:58:44 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 18:58:44 -0400
commit     6edad161cd4dfe1df772e7a74ab63cab53b5e8c1 (patch)
tree       389d6daa728b2ba1bd8c2180cab705706449f62a /drivers/scsi/ahci.c
parent     236ee8c33277ab48671995f26dc68a4639936418 (diff)
parent     0dd4b21f517e138ea113db255645fbae1bf5eef3 (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev: (258 commits)
[libata] conversion to new debug scheme, part 1 of $N
[PATCH] libata: Add ata_scsi_dev_disabled
[libata] Add host lock to struct ata_port
[PATCH] libata: implement per-dev EH action mask eh_info->dev_action[]
[PATCH] libata-dev: move the CDB-intr DMA blacklisting
[PATCH] ahci: disable NCQ support on vt8251
[libata] ahci: add JMicron PCI IDs
[libata] sata_nv: add PCI IDs
[libata] ahci: Add NVIDIA PCI IDs.
[PATCH] libata: convert several bmdma-style controllers to new EH, take #3
[PATCH] sata_via: convert to new EH, take #3
[libata] sata_nv: s/spin_lock_irqsave/spin_lock/ in irq handler
[PATCH] sata_nv: add hotplug support
[PATCH] sata_nv: convert to new EH
[PATCH] sata_nv: better irq handlers
[PATCH] sata_nv: simplify constants
[PATCH] sata_nv: kill struct nv_host_desc and nv_host
[PATCH] sata_nv: kill not-working hotplug code
[libata] Update docs to reflect current driver API
[PATCH] libata: add host_set->next for legacy two host_sets case, take #3
...
Diffstat (limited to 'drivers/scsi/ahci.c')
-rw-r--r--    drivers/scsi/ahci.c    503
1 files changed, 313 insertions, 190 deletions
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index b4f8fb1d628b..4bb77f62b3b9 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -48,7 +48,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME "ahci"
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "1.3"
 
 
 enum {
@@ -56,12 +56,15 @@ enum {
 	AHCI_MAX_SG = 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY = 0xffffffff,
 	AHCI_USE_CLUSTERING = 0,
-	AHCI_CMD_SLOT_SZ = 32 * 32,
+	AHCI_MAX_CMDS = 32,
+	AHCI_CMD_SZ = 32,
+	AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
 	AHCI_RX_FIS_SZ = 256,
-	AHCI_CMD_TBL_HDR = 0x80,
 	AHCI_CMD_TBL_CDB = 0x40,
-	AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR + (AHCI_MAX_SG * 16),
-	AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_SZ +
+	AHCI_CMD_TBL_HDR_SZ = 0x80,
+	AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+	AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+	AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
 				AHCI_RX_FIS_SZ,
 	AHCI_IRQ_ON_SG = (1 << 31),
 	AHCI_CMD_ATAPI = (1 << 5),
@@ -71,8 +74,10 @@ enum {
 	AHCI_CMD_CLR_BUSY = (1 << 10),
 
 	RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+	RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
 
 	board_ahci = 0,
+	board_ahci_vt8251 = 1,
 
 	/* global controller registers */
 	HOST_CAP = 0x00, /* host capabilities */
@@ -87,8 +92,9 @@ enum {
 	HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
 
 	/* HOST_CAP bits */
-	HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
 	HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+	HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+	HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
 
 	/* registers for each SATA port */
 	PORT_LST_ADDR = 0x00, /* command list DMA addr */
@@ -127,15 +133,17 @@ enum {
 	PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
 	PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
 
-	PORT_IRQ_FATAL = PORT_IRQ_TF_ERR |
-			 PORT_IRQ_HBUS_ERR |
-			 PORT_IRQ_HBUS_DATA_ERR |
-			 PORT_IRQ_IF_ERR,
-	DEF_PORT_IRQ = PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
-		       PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
-		       PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
-		       PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
-		       PORT_IRQ_D2H_REG_FIS,
+	PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+			  PORT_IRQ_IF_ERR |
+			  PORT_IRQ_CONNECT |
+			  PORT_IRQ_PHYRDY |
+			  PORT_IRQ_UNK_FIS,
+	PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+			 PORT_IRQ_TF_ERR |
+			 PORT_IRQ_HBUS_DATA_ERR,
+	DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+		       PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+		       PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
 
 	/* PORT_CMD bits */
 	PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
@@ -153,6 +161,10 @@ enum {
 
 	/* hpriv->flags bits */
 	AHCI_FLAG_MSI = (1 << 0),
+
+	/* ap->flags bits */
+	AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
+	AHCI_FLAG_NO_NCQ = (1 << 25),
 };
 
 struct ahci_cmd_hdr {
@@ -181,7 +193,6 @@ struct ahci_port_priv {
 	dma_addr_t cmd_slot_dma;
 	void *cmd_tbl;
 	dma_addr_t cmd_tbl_dma;
-	struct ahci_sg *cmd_tbl_sg;
 	void *rx_fis;
 	dma_addr_t rx_fis_dma;
 };
@@ -191,15 +202,16 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
-static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static u8 ahci_check_status(struct ata_port *ap);
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static void ahci_remove_one (struct pci_dev *pdev);
 
 static struct scsi_host_template ahci_sht = {
@@ -207,7 +219,8 @@ static struct scsi_host_template ahci_sht = {
 	.name = DRV_NAME,
 	.ioctl = ata_scsi_ioctl,
 	.queuecommand = ata_scsi_queuecmd,
-	.can_queue = ATA_DEF_QUEUE,
+	.change_queue_depth = ata_scsi_change_queue_depth,
+	.can_queue = AHCI_MAX_CMDS - 1,
 	.this_id = ATA_SHT_THIS_ID,
 	.sg_tablesize = AHCI_MAX_SG,
 	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@@ -216,6 +229,7 @@ static struct scsi_host_template ahci_sht = {
 	.proc_name = DRV_NAME,
 	.dma_boundary = AHCI_DMA_BOUNDARY,
 	.slave_configure = ata_scsi_slave_config,
+	.slave_destroy = ata_scsi_slave_destroy,
 	.bios_param = ata_std_bios_param,
 };
 
@@ -228,19 +242,21 @@ static const struct ata_port_operations ahci_ops = {
 
 	.tf_read = ahci_tf_read,
 
-	.probe_reset = ahci_probe_reset,
-
 	.qc_prep = ahci_qc_prep,
 	.qc_issue = ahci_qc_issue,
 
-	.eng_timeout = ahci_eng_timeout,
-
 	.irq_handler = ahci_interrupt,
 	.irq_clear = ahci_irq_clear,
 
 	.scr_read = ahci_scr_read,
 	.scr_write = ahci_scr_write,
 
+	.freeze = ahci_freeze,
+	.thaw = ahci_thaw,
+
+	.error_handler = ahci_error_handler,
+	.post_internal_cmd = ahci_post_internal_cmd,
+
 	.port_start = ahci_port_start,
 	.port_stop = ahci_port_stop,
 };
@@ -250,7 +266,19 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		.sht = &ahci_sht,
 		.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-			      ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+			      ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+			      ATA_FLAG_SKIP_D2H_BSY,
+		.pio_mask = 0x1f, /* pio0-4 */
+		.udma_mask = 0x7f, /* udma0-6 ; FIXME */
+		.port_ops = &ahci_ops,
+	},
+	/* board_ahci_vt8251 */
+	{
+		.sht = &ahci_sht,
+		.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+			      ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+			      ATA_FLAG_SKIP_D2H_BSY |
+			      AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = 0x7f, /* udma0-6 ; FIXME */
 		.port_ops = &ahci_ops,
@@ -258,6 +286,7 @@ static const struct ata_port_info ahci_port_info[] = {
 };
 
 static const struct pci_device_id ahci_pci_tbl[] = {
+	/* Intel */
 	{ PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ICH6 */
 	{ PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
@@ -288,14 +317,39 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  board_ahci }, /* ICH8M */
 	{ PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ICH8M */
+
+	/* JMicron */
 	{ 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* JMicron JMB360 */
+	{ 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* JMicron JMB361 */
 	{ 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* JMicron JMB363 */
+	{ 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* JMicron JMB365 */
+	{ 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* JMicron JMB366 */
+
+	/* ATI */
 	{ PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ATI SB600 non-raid */
 	{ PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ATI SB600 raid */
+
+	/* VIA */
+	{ PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci_vt8251 }, /* VIA VT8251 */
+
+	/* NVIDIA */
+	{ PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* MCP65 */
+	{ PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* MCP65 */
+	{ PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* MCP65 */
+	{ PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* MCP65 */
+
 	{ } /* terminate list */
 };
 
@@ -374,8 +428,6 @@ static int ahci_port_start(struct ata_port *ap)
 	pp->cmd_tbl = mem;
 	pp->cmd_tbl_dma = mem_dma;
 
-	pp->cmd_tbl_sg = mem + AHCI_CMD_TBL_HDR;
-
 	ap->private_data = pp;
 
 	if (hpriv->cap & HOST_CAP_64)
@@ -508,46 +560,71 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
 	return ata_dev_classify(&tf);
 }
 
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+			       u32 opts)
 {
-	pp->cmd_slot[0].opts = cpu_to_le32(opts);
-	pp->cmd_slot[0].status = 0;
-	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
-	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
+	dma_addr_t cmd_tbl_dma;
+
+	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+	pp->cmd_slot[tag].status = 0;
+	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
 }
 
-static int ahci_poll_register(void __iomem *reg, u32 mask, u32 val,
-			      unsigned long interval_msec,
-			      unsigned long timeout_msec)
+static int ahci_clo(struct ata_port *ap)
 {
-	unsigned long timeout;
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	struct ahci_host_priv *hpriv = ap->host_set->private_data;
 	u32 tmp;
 
-	timeout = jiffies + (timeout_msec * HZ) / 1000;
-	do {
-		tmp = readl(reg);
-		if ((tmp & mask) == val)
-			return 0;
-		msleep(interval_msec);
-	} while (time_before(jiffies, timeout));
+	if (!(hpriv->cap & HOST_CAP_CLO))
+		return -EOPNOTSUPP;
 
-	return -1;
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_CLO;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	tmp = ata_wait_register(port_mmio + PORT_CMD,
+				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+	if (tmp & PORT_CMD_CLO)
+		return -EIO;
+
+	return 0;
 }
 
-static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_prereset(struct ata_port *ap)
+{
+	if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
+	    (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
+		/* ATA_BUSY hasn't cleared, so send a CLO */
+		ahci_clo(ap);
+	}
+
+	return ata_std_prereset(ap);
+}
+
+static int ahci_softreset(struct ata_port *ap, unsigned int *class)
 {
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
 	struct ahci_port_priv *pp = ap->private_data;
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	const u32 cmd_fis_len = 5; /* five dwords */
 	const char *reason = NULL;
 	struct ata_taskfile tf;
+	u32 tmp;
 	u8 *fis;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
+	if (ata_port_offline(ap)) {
+		DPRINTK("PHY reports no device\n");
+		*class = ATA_DEV_NONE;
+		return 0;
+	}
+
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
 	rc = ahci_stop_engine(ap);
 	if (rc) {
@@ -558,23 +635,13 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	/* check BUSY/DRQ, perform Command List Override if necessary */
 	ahci_tf_read(ap, &tf);
 	if (tf.command & (ATA_BUSY | ATA_DRQ)) {
-		u32 tmp;
-
-		if (!(hpriv->cap & HOST_CAP_CLO)) {
-			rc = -EIO;
-			reason = "port busy but no CLO";
+		rc = ahci_clo(ap);
+
+		if (rc == -EOPNOTSUPP) {
+			reason = "port busy but CLO unavailable";
 			goto fail_restart;
-		}
-
-		tmp = readl(port_mmio + PORT_CMD);
-		tmp |= PORT_CMD_CLO;
-		writel(tmp, port_mmio + PORT_CMD);
-		readl(port_mmio + PORT_CMD); /* flush */
-
-		if (ahci_poll_register(port_mmio + PORT_CMD, PORT_CMD_CLO, 0x0,
-				       1, 500)) {
-			rc = -EIO;
-			reason = "CLO failed";
+		} else if (rc) {
+			reason = "port busy but CLO failed";
 			goto fail_restart;
 		}
 	}
@@ -582,20 +649,21 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	/* restart engine */
 	ahci_start_engine(ap);
 
-	ata_tf_init(ap, &tf, 0);
+	ata_tf_init(ap->device, &tf);
 	fis = pp->cmd_tbl;
 
 	/* issue the first D2H Register FIS */
-	ahci_fill_cmd_slot(pp, cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
+	ahci_fill_cmd_slot(pp, 0,
+			   cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
 
 	tf.ctl |= ATA_SRST;
 	ata_tf_to_fis(&tf, fis, 0);
 	fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
 
 	writel(1, port_mmio + PORT_CMD_ISSUE);
-	readl(port_mmio + PORT_CMD_ISSUE); /* flush */
 
-	if (ahci_poll_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x0, 1, 500)) {
+	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
+	if (tmp & 0x1) {
 		rc = -EIO;
 		reason = "1st FIS failed";
 		goto fail;
@@ -605,7 +673,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	msleep(1);
 
 	/* issue the second D2H Register FIS */
-	ahci_fill_cmd_slot(pp, cmd_fis_len);
+	ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
 
 	tf.ctl &= ~ATA_SRST;
 	ata_tf_to_fis(&tf, fis, 0);
@@ -625,7 +693,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	msleep(150);
 
 	*class = ATA_DEV_NONE;
-	if (sata_dev_present(ap)) {
+	if (ata_port_online(ap)) {
 		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
 			rc = -EIO;
 			reason = "device not ready";
@@ -640,25 +708,31 @@
 fail_restart:
 	ahci_start_engine(ap);
 fail:
-	if (verbose)
-		printk(KERN_ERR "ata%u: softreset failed (%s)\n",
-		       ap->id, reason);
-	else
-		DPRINTK("EXIT, rc=%d reason=\"%s\"\n", rc, reason);
+	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
 	return rc;
 }
 
-static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
 {
+	struct ahci_port_priv *pp = ap->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	struct ata_taskfile tf;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
 	ahci_stop_engine(ap);
-	rc = sata_std_hardreset(ap, verbose, class);
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(ap->device, &tf);
+	tf.command = 0xff;
+	ata_tf_to_fis(&tf, d2h_fis, 0);
+
+	rc = sata_std_hardreset(ap, class);
+
 	ahci_start_engine(ap);
 
-	if (rc == 0)
+	if (rc == 0 && ata_port_online(ap))
 		*class = ahci_dev_classify(ap);
 	if (*class == ATA_DEV_UNKNOWN)
 		*class = ATA_DEV_NONE;
@@ -686,13 +760,6 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 	}
 }
 
-static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
-{
-	return ata_drive_probe_reset(ap, ata_std_probeinit,
-				     ahci_softreset, ahci_hardreset,
-				     ahci_postreset, classes);
-}
-
 static u8 ahci_check_status(struct ata_port *ap)
 {
 	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -708,9 +775,8 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 	ata_tf_from_fis(d2h_fis, tf);
 }
 
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 {
-	struct ahci_port_priv *pp = qc->ap->private_data;
 	struct scatterlist *sg;
 	struct ahci_sg *ahci_sg;
 	unsigned int n_sg = 0;
@@ -720,7 +786,7 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
 	/*
 	 * Next, the S/G list.
 	 */
-	ahci_sg = pp->cmd_tbl_sg;
+	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
 	ata_for_each_sg(sg, qc) {
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
@@ -741,6 +807,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
 	int is_atapi = is_atapi_taskfile(&qc->tf);
+	void *cmd_tbl;
 	u32 opts;
 	const u32 cmd_fis_len = 5; /* five dwords */
 	unsigned int n_elem;
@@ -749,16 +816,17 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	 * Fill in command table information. First, the header,
 	 * a SATA Register - Host to Device command FIS.
 	 */
-	ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
+	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
 	if (is_atapi) {
-		memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
-		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
-		       qc->dev->cdb_len);
+		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
 	}
 
 	n_elem = 0;
 	if (qc->flags & ATA_QCFLAG_DMAMAP)
-		n_elem = ahci_fill_sg(qc);
+		n_elem = ahci_fill_sg(qc, cmd_tbl);
 
 	/*
 	 * Fill in command slot information.
@@ -769,112 +837,122 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	if (is_atapi)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
-	ahci_fill_cmd_slot(pp, opts);
+	ahci_fill_cmd_slot(pp, qc->tag, opts);
 }
 
-static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 {
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 tmp;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ata_eh_info *ehi = &ap->eh_info;
+	unsigned int err_mask = 0, action = 0;
+	struct ata_queued_cmd *qc;
+	u32 serror;
 
-	if ((ap->device[0].class != ATA_DEV_ATAPI) ||
-	    ((irq_stat & PORT_IRQ_TF_ERR) == 0))
-		printk(KERN_WARNING "ata%u: port reset, "
-		       "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
-			ap->id,
-			irq_stat,
-			readl(mmio + HOST_IRQ_STAT),
-			readl(port_mmio + PORT_IRQ_STAT),
-			readl(port_mmio + PORT_CMD),
-			readl(port_mmio + PORT_TFDATA),
-			readl(port_mmio + PORT_SCR_STAT),
-			readl(port_mmio + PORT_SCR_ERR));
-
-	/* stop DMA */
-	ahci_stop_engine(ap);
+	ata_ehi_clear_desc(ehi);
 
-	/* clear SATA phy error, if any */
-	tmp = readl(port_mmio + PORT_SCR_ERR);
-	writel(tmp, port_mmio + PORT_SCR_ERR);
+	/* AHCI needs SError cleared; otherwise, it might lock up */
+	serror = ahci_scr_read(ap, SCR_ERROR);
+	ahci_scr_write(ap, SCR_ERROR, serror);
 
-	/* if DRQ/BSY is set, device needs to be reset.
-	 * if so, issue COMRESET
-	 */
-	tmp = readl(port_mmio + PORT_TFDATA);
-	if (tmp & (ATA_BUSY | ATA_DRQ)) {
-		writel(0x301, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
-		udelay(10);
-		writel(0x300, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
+	/* analyze @irq_stat */
+	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+
+	if (irq_stat & PORT_IRQ_TF_ERR)
+		err_mask |= AC_ERR_DEV;
+
+	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+		err_mask |= AC_ERR_HOST_BUS;
+		action |= ATA_EH_SOFTRESET;
 	}
 
-	/* re-start DMA */
-	ahci_start_engine(ap);
-}
+	if (irq_stat & PORT_IRQ_IF_ERR) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", interface fatal error");
+	}
 
-static void ahci_eng_timeout(struct ata_port *ap)
-{
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
+	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+		ata_ehi_hotplugged(ehi);
+		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+			"connection status changed" : "PHY RDY changed");
+	}
+
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
 
-	printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);
+		err_mask |= AC_ERR_HSM;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
+				  unk[0], unk[1], unk[2], unk[3]);
+	}
 
-	spin_lock_irqsave(&host_set->lock, flags);
+	/* okay, let's hand over to EH */
+	ehi->serror |= serror;
+	ehi->action |= action;
 
-	ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	qc->err_mask |= AC_ERR_TIMEOUT;
-
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	if (qc)
+		qc->err_mask |= err_mask;
+	else
+		ehi->err_mask |= err_mask;
 
-	ata_eh_qc_complete(qc);
+	if (irq_stat & PORT_IRQ_FREEZE)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+static void ahci_host_intr(struct ata_port *ap)
 {
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 status, serr, ci;
-
-	serr = readl(port_mmio + PORT_SCR_ERR);
-	writel(serr, port_mmio + PORT_SCR_ERR);
+	struct ata_eh_info *ehi = &ap->eh_info;
+	u32 status, qc_active;
+	int rc;
 
 	status = readl(port_mmio + PORT_IRQ_STAT);
 	writel(status, port_mmio + PORT_IRQ_STAT);
 
-	ci = readl(port_mmio + PORT_CMD_ISSUE);
-	if (likely((ci & 0x1) == 0)) {
-		if (qc) {
-			WARN_ON(qc->err_mask);
-			ata_qc_complete(qc);
-			qc = NULL;
-		}
+	if (unlikely(status & PORT_IRQ_ERROR)) {
+		ahci_error_intr(ap, status);
+		return;
 	}
 
-	if (status & PORT_IRQ_FATAL) {
-		unsigned int err_mask;
-		if (status & PORT_IRQ_TF_ERR)
-			err_mask = AC_ERR_DEV;
-		else if (status & PORT_IRQ_IF_ERR)
-			err_mask = AC_ERR_ATA_BUS;
-		else
-			err_mask = AC_ERR_HOST_BUS;
-
-		/* command processing has stopped due to error; restart */
-		ahci_restart_port(ap, status);
-
-		if (qc) {
-			qc->err_mask |= err_mask;
-			ata_qc_complete(qc);
-		}
+	if (ap->sactive)
+		qc_active = readl(port_mmio + PORT_SCR_ACT);
+	else
+		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+
+	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
+	if (rc > 0)
+		return;
+	if (rc < 0) {
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_SOFTRESET;
+		ata_port_freeze(ap);
+		return;
+	}
+
+	/* hmmm... a spurious interupt */
+
+	/* some devices send D2H reg with I bit set during NCQ command phase */
+	if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
+		return;
+
+	/* ignore interim PIO setup fis interrupts */
+	if (ata_tag_valid(ap->active_tag)) {
+		struct ata_queued_cmd *qc =
+			ata_qc_from_tag(ap, ap->active_tag);
+
+		if (qc && qc->tf.protocol == ATA_PROT_PIO &&
+		    (status & PORT_IRQ_PIOS_FIS))
+			return;
 	}
 
-	return 1;
+	if (ata_ratelimit())
+		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+				"(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
+				status, ap->active_tag, ap->sactive);
 }
 
 static void ahci_irq_clear(struct ata_port *ap)
@@ -882,7 +960,7 @@ static void ahci_irq_clear(struct ata_port *ap)
 	/* TODO */
 }
 
-static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 {
 	struct ata_host_set *host_set = dev_instance;
 	struct ahci_host_priv *hpriv;
@@ -911,14 +989,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 
 		ap = host_set->ports[i];
 		if (ap) {
-			struct ata_queued_cmd *qc;
-			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (!ahci_host_intr(ap, qc))
-				if (ata_ratelimit())
-					dev_printk(KERN_WARNING, host_set->dev,
-						"unhandled interrupt on port %u\n",
-						i);
-
+			ahci_host_intr(ap);
 			VPRINTK("port %u\n", i);
 		} else {
 			VPRINTK("port %u (no irq)\n", i);
@@ -935,7 +1006,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 		handled = 1;
 	}
 
-        spin_unlock(&host_set->lock);
+	spin_unlock(&host_set->lock);
 
 	VPRINTK("EXIT\n");
 
@@ -947,12 +1018,65 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
-	writel(1, port_mmio + PORT_CMD_ISSUE);
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
 	readl(port_mmio + PORT_CMD_ISSUE); /* flush */
 
 	return 0;
 }
 
+static void ahci_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+
+	/* turn IRQ off */
+	writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	u32 tmp;
+
+	/* clear IRQ */
+	tmp = readl(port_mmio + PORT_IRQ_STAT);
+	writel(tmp, port_mmio + PORT_IRQ_STAT);
+	writel(1 << ap->id, mmio + HOST_IRQ_STAT);
+
+	/* turn IRQ back on */
+	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+	if (!(ap->flags & ATA_FLAG_FROZEN)) {
+		/* restart engine */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
+		  ahci_postreset);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		qc->err_mask |= AC_ERR_OTHER;
+
+	if (qc->err_mask) {
+		/* make DMA engine forget about the failed command */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+}
+
 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
 			    unsigned int port_idx)
 {
@@ -1097,9 +1221,6 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
 		writel(tmp, port_mmio + PORT_IRQ_STAT);
 
 		writel(1 << i, mmio + HOST_IRQ_STAT);
-
-		/* set irq mask (enables interrupts) */
-		writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
 	}
 
 	tmp = readl(mmio + HOST_CTL);
@@ -1197,6 +1318,8 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	VPRINTK("ENTER\n");
 
+	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
@@ -1264,6 +1387,10 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto err_out_hpriv;
 
+	if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
+	    (hpriv->cap & HOST_CAP_NCQ))
+		probe_ent->host_flags |= ATA_FLAG_NCQ;
+
 	ahci_print_info(probe_ent);
 
 	/* FIXME: check ata_device_add return value */
@@ -1295,21 +1422,17 @@ static void ahci_remove_one (struct pci_dev *pdev)
 	struct device *dev = pci_dev_to_dev(pdev);
 	struct ata_host_set *host_set = dev_get_drvdata(dev);
 	struct ahci_host_priv *hpriv = host_set->private_data;
-	struct ata_port *ap;
 	unsigned int i;
 	int have_msi;
 
-	for (i = 0; i < host_set->n_ports; i++) {
-		ap = host_set->ports[i];
-
-		scsi_remove_host(ap->host);
-	}
+	for (i = 0; i < host_set->n_ports; i++)
+		ata_port_detach(host_set->ports[i]);
 
 	have_msi = hpriv->flags & AHCI_FLAG_MSI;
 	free_irq(host_set->irq, host_set);
 
 	for (i = 0; i < host_set->n_ports; i++) {
-		ap = host_set->ports[i];
+		struct ata_port *ap = host_set->ports[i];
 
 		ata_scsi_release(ap->host);
 		scsi_host_put(ap->host);