author    | Jeff Garzik <jeff@garzik.org> | 2007-07-12 14:34:26 -0400
committer | Jeff Garzik <jeff@garzik.org> | 2007-07-12 14:34:26 -0400
commit    | bdd4dddee325a7dce3e84cf48201a06aa8508aa4 (patch)
tree      | 2669f6a2b5f74ded67cd96be2a4e59e0ac1f9696 /drivers/ata
parent    | 4537deb5e90b717a725b3d74b58b4bb1d28443d0 (diff)
[libata] sata_mv: Convert to new exception handling (EH) infrastructure
This makes hotplug, NCQ, etc. possible, and removes one of the few
remaining old-EH drivers.
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/ata')
-rw-r--r-- | drivers/ata/sata_mv.c | 703
1 file changed, 456 insertions, 247 deletions
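For readers skimming the diff below, the heart of the conversion is visible in the driver's ata_port_operations tables: the old-EH hooks (.phy_reset, .eng_timeout) are removed and the new-EH set (.freeze, .thaw, .error_handler, .post_internal_cmd) is wired in, with the error handler delegating to libata's EH core via ata_do_eh() and the driver's own prereset/hardreset/postreset callbacks. The following is a condensed sketch of that wiring, abbreviated from the patch itself (field order and surrounding members are trimmed; it is not a verbatim excerpt):

```c
/* Condensed sketch of the new-EH hookup introduced by this patch
 * (abbreviated; see the full mv6_ops table in the diff below).
 */
static const struct ata_port_operations mv6_ops = {
	/* ...taskfile, qc_prep/qc_issue, and IRQ hooks unchanged... */

	/* old-EH hooks removed by this patch:
	 *   .phy_reset   = mv_phy_reset,
	 *   .eng_timeout = mv_eng_timeout,
	 */

	/* new-EH hooks added by this patch: */
	.freeze            = mv_eh_freeze,
	.thaw              = mv_eh_thaw,
	.error_handler     = mv_error_handler,
	.post_internal_cmd = mv_post_int_cmd,
};

/* The error handler simply hands libata's EH core the driver's reset
 * methods; libata then drives probing, recovery, and hotplug from there.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
```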
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 7fa42c36c417..d40c41c5f16c 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -29,11 +29,6 @@ | |||
29 | I distinctly remember a couple workarounds (one related to PCI-X) | 29 | I distinctly remember a couple workarounds (one related to PCI-X) |
30 | are still needed. | 30 | are still needed. |
31 | 31 | ||
32 | 2) Convert to LibATA new EH. Required for hotplug, NCQ, and sane | ||
33 | probing/error handling in general. MUST HAVE. | ||
34 | |||
35 | 3) Add hotplug support (easy, once new-EH support appears) | ||
36 | |||
37 | 4) Add NCQ support (easy to intermediate, once new-EH support appears) | 32 | 4) Add NCQ support (easy to intermediate, once new-EH support appears) |
38 | 33 | ||
39 | 5) Investigate problems with PCI Message Signalled Interrupts (MSI). | 34 | 5) Investigate problems with PCI Message Signalled Interrupts (MSI). |
@@ -132,8 +127,8 @@ enum { | |||
132 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ | 127 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ |
133 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | 128 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ |
134 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 129 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
135 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | 130 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | |
136 | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING, | 131 | ATA_FLAG_PIO_POLLING, |
137 | MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, | 132 | MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, |
138 | 133 | ||
139 | CRQB_FLAG_READ = (1 << 0), | 134 | CRQB_FLAG_READ = (1 << 0), |
@@ -254,13 +249,31 @@ enum { | |||
254 | EDMA_ERR_TRANS_PROTO = (1 << 31), | 249 | EDMA_ERR_TRANS_PROTO = (1 << 31), |
255 | EDMA_ERR_OVERRUN_5 = (1 << 5), | 250 | EDMA_ERR_OVERRUN_5 = (1 << 5), |
256 | EDMA_ERR_UNDERRUN_5 = (1 << 6), | 251 | EDMA_ERR_UNDERRUN_5 = (1 << 6), |
257 | EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | | 252 | EDMA_EH_FREEZE = EDMA_ERR_D_PAR | |
258 | EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR | | 253 | EDMA_ERR_PRD_PAR | |
259 | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR | | 254 | EDMA_ERR_DEV_DCON | |
260 | EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 | | 255 | EDMA_ERR_DEV_CON | |
261 | EDMA_ERR_LNK_DATA_RX | | 256 | EDMA_ERR_SERR | |
262 | EDMA_ERR_LNK_DATA_TX | | 257 | EDMA_ERR_SELF_DIS | |
263 | EDMA_ERR_TRANS_PROTO), | 258 | EDMA_ERR_CRBQ_PAR | |
259 | EDMA_ERR_CRPB_PAR | | ||
260 | EDMA_ERR_INTRL_PAR | | ||
261 | EDMA_ERR_IORDY | | ||
262 | EDMA_ERR_LNK_CTRL_RX_2 | | ||
263 | EDMA_ERR_LNK_DATA_RX | | ||
264 | EDMA_ERR_LNK_DATA_TX | | ||
265 | EDMA_ERR_TRANS_PROTO, | ||
266 | EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | | ||
267 | EDMA_ERR_PRD_PAR | | ||
268 | EDMA_ERR_DEV_DCON | | ||
269 | EDMA_ERR_DEV_CON | | ||
270 | EDMA_ERR_OVERRUN_5 | | ||
271 | EDMA_ERR_UNDERRUN_5 | | ||
272 | EDMA_ERR_SELF_DIS_5 | | ||
273 | EDMA_ERR_CRBQ_PAR | | ||
274 | EDMA_ERR_CRPB_PAR | | ||
275 | EDMA_ERR_INTRL_PAR | | ||
276 | EDMA_ERR_IORDY, | ||
264 | 277 | ||
265 | EDMA_REQ_Q_BASE_HI_OFS = 0x10, | 278 | EDMA_REQ_Q_BASE_HI_OFS = 0x10, |
266 | EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */ | 279 | EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */ |
@@ -359,6 +372,10 @@ struct mv_port_priv { | |||
359 | dma_addr_t crpb_dma; | 372 | dma_addr_t crpb_dma; |
360 | struct mv_sg *sg_tbl; | 373 | struct mv_sg *sg_tbl; |
361 | dma_addr_t sg_tbl_dma; | 374 | dma_addr_t sg_tbl_dma; |
375 | |||
376 | unsigned int req_idx; | ||
377 | unsigned int resp_idx; | ||
378 | |||
362 | u32 pp_flags; | 379 | u32 pp_flags; |
363 | }; | 380 | }; |
364 | 381 | ||
@@ -391,14 +408,15 @@ static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); | |||
391 | static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); | 408 | static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); |
392 | static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in); | 409 | static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in); |
393 | static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); | 410 | static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); |
394 | static void mv_phy_reset(struct ata_port *ap); | ||
395 | static void __mv_phy_reset(struct ata_port *ap, int can_sleep); | ||
396 | static int mv_port_start(struct ata_port *ap); | 411 | static int mv_port_start(struct ata_port *ap); |
397 | static void mv_port_stop(struct ata_port *ap); | 412 | static void mv_port_stop(struct ata_port *ap); |
398 | static void mv_qc_prep(struct ata_queued_cmd *qc); | 413 | static void mv_qc_prep(struct ata_queued_cmd *qc); |
399 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc); | 414 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc); |
400 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); | 415 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); |
401 | static void mv_eng_timeout(struct ata_port *ap); | 416 | static void mv_error_handler(struct ata_port *ap); |
417 | static void mv_post_int_cmd(struct ata_queued_cmd *qc); | ||
418 | static void mv_eh_freeze(struct ata_port *ap); | ||
419 | static void mv_eh_thaw(struct ata_port *ap); | ||
402 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 420 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
403 | 421 | ||
404 | static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | 422 | static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
@@ -422,7 +440,6 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); | |||
422 | static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio); | 440 | static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio); |
423 | static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | 441 | static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, |
424 | unsigned int port_no); | 442 | unsigned int port_no); |
425 | static void mv_stop_and_reset(struct ata_port *ap); | ||
426 | 443 | ||
427 | static struct scsi_host_template mv5_sht = { | 444 | static struct scsi_host_template mv5_sht = { |
428 | .module = THIS_MODULE, | 445 | .module = THIS_MODULE, |
@@ -469,19 +486,21 @@ static const struct ata_port_operations mv5_ops = { | |||
469 | .exec_command = ata_exec_command, | 486 | .exec_command = ata_exec_command, |
470 | .dev_select = ata_std_dev_select, | 487 | .dev_select = ata_std_dev_select, |
471 | 488 | ||
472 | .phy_reset = mv_phy_reset, | ||
473 | .cable_detect = ata_cable_sata, | 489 | .cable_detect = ata_cable_sata, |
474 | 490 | ||
475 | .qc_prep = mv_qc_prep, | 491 | .qc_prep = mv_qc_prep, |
476 | .qc_issue = mv_qc_issue, | 492 | .qc_issue = mv_qc_issue, |
477 | .data_xfer = ata_data_xfer, | 493 | .data_xfer = ata_data_xfer, |
478 | 494 | ||
479 | .eng_timeout = mv_eng_timeout, | ||
480 | |||
481 | .irq_clear = mv_irq_clear, | 495 | .irq_clear = mv_irq_clear, |
482 | .irq_on = ata_irq_on, | 496 | .irq_on = ata_irq_on, |
483 | .irq_ack = ata_irq_ack, | 497 | .irq_ack = ata_irq_ack, |
484 | 498 | ||
499 | .error_handler = mv_error_handler, | ||
500 | .post_internal_cmd = mv_post_int_cmd, | ||
501 | .freeze = mv_eh_freeze, | ||
502 | .thaw = mv_eh_thaw, | ||
503 | |||
485 | .scr_read = mv5_scr_read, | 504 | .scr_read = mv5_scr_read, |
486 | .scr_write = mv5_scr_write, | 505 | .scr_write = mv5_scr_write, |
487 | 506 | ||
@@ -498,19 +517,21 @@ static const struct ata_port_operations mv6_ops = { | |||
498 | .exec_command = ata_exec_command, | 517 | .exec_command = ata_exec_command, |
499 | .dev_select = ata_std_dev_select, | 518 | .dev_select = ata_std_dev_select, |
500 | 519 | ||
501 | .phy_reset = mv_phy_reset, | ||
502 | .cable_detect = ata_cable_sata, | 520 | .cable_detect = ata_cable_sata, |
503 | 521 | ||
504 | .qc_prep = mv_qc_prep, | 522 | .qc_prep = mv_qc_prep, |
505 | .qc_issue = mv_qc_issue, | 523 | .qc_issue = mv_qc_issue, |
506 | .data_xfer = ata_data_xfer, | 524 | .data_xfer = ata_data_xfer, |
507 | 525 | ||
508 | .eng_timeout = mv_eng_timeout, | ||
509 | |||
510 | .irq_clear = mv_irq_clear, | 526 | .irq_clear = mv_irq_clear, |
511 | .irq_on = ata_irq_on, | 527 | .irq_on = ata_irq_on, |
512 | .irq_ack = ata_irq_ack, | 528 | .irq_ack = ata_irq_ack, |
513 | 529 | ||
530 | .error_handler = mv_error_handler, | ||
531 | .post_internal_cmd = mv_post_int_cmd, | ||
532 | .freeze = mv_eh_freeze, | ||
533 | .thaw = mv_eh_thaw, | ||
534 | |||
514 | .scr_read = mv_scr_read, | 535 | .scr_read = mv_scr_read, |
515 | .scr_write = mv_scr_write, | 536 | .scr_write = mv_scr_write, |
516 | 537 | ||
@@ -527,19 +548,21 @@ static const struct ata_port_operations mv_iie_ops = { | |||
527 | .exec_command = ata_exec_command, | 548 | .exec_command = ata_exec_command, |
528 | .dev_select = ata_std_dev_select, | 549 | .dev_select = ata_std_dev_select, |
529 | 550 | ||
530 | .phy_reset = mv_phy_reset, | ||
531 | .cable_detect = ata_cable_sata, | 551 | .cable_detect = ata_cable_sata, |
532 | 552 | ||
533 | .qc_prep = mv_qc_prep_iie, | 553 | .qc_prep = mv_qc_prep_iie, |
534 | .qc_issue = mv_qc_issue, | 554 | .qc_issue = mv_qc_issue, |
535 | .data_xfer = ata_data_xfer, | 555 | .data_xfer = ata_data_xfer, |
536 | 556 | ||
537 | .eng_timeout = mv_eng_timeout, | ||
538 | |||
539 | .irq_clear = mv_irq_clear, | 557 | .irq_clear = mv_irq_clear, |
540 | .irq_on = ata_irq_on, | 558 | .irq_on = ata_irq_on, |
541 | .irq_ack = ata_irq_ack, | 559 | .irq_ack = ata_irq_ack, |
542 | 560 | ||
561 | .error_handler = mv_error_handler, | ||
562 | .post_internal_cmd = mv_post_int_cmd, | ||
563 | .freeze = mv_eh_freeze, | ||
564 | .thaw = mv_eh_thaw, | ||
565 | |||
543 | .scr_read = mv_scr_read, | 566 | .scr_read = mv_scr_read, |
544 | .scr_write = mv_scr_write, | 567 | .scr_write = mv_scr_write, |
545 | 568 | ||
@@ -738,35 +761,40 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio, | |||
738 | struct mv_host_priv *hpriv, | 761 | struct mv_host_priv *hpriv, |
739 | struct mv_port_priv *pp) | 762 | struct mv_port_priv *pp) |
740 | { | 763 | { |
764 | u32 index; | ||
765 | |||
741 | /* | 766 | /* |
742 | * initialize request queue | 767 | * initialize request queue |
743 | */ | 768 | */ |
769 | index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT; | ||
770 | |||
744 | WARN_ON(pp->crqb_dma & 0x3ff); | 771 | WARN_ON(pp->crqb_dma & 0x3ff); |
745 | writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); | 772 | writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); |
746 | writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, | 773 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, |
747 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 774 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); |
748 | 775 | ||
749 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) | 776 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) |
750 | writelfl(pp->crqb_dma & 0xffffffff, | 777 | writelfl((pp->crqb_dma & 0xffffffff) | index, |
751 | port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | 778 | port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); |
752 | else | 779 | else |
753 | writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | 780 | writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); |
754 | 781 | ||
755 | /* | 782 | /* |
756 | * initialize response queue | 783 | * initialize response queue |
757 | */ | 784 | */ |
785 | index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT; | ||
786 | |||
758 | WARN_ON(pp->crpb_dma & 0xff); | 787 | WARN_ON(pp->crpb_dma & 0xff); |
759 | writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); | 788 | writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); |
760 | 789 | ||
761 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) | 790 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) |
762 | writelfl(pp->crpb_dma & 0xffffffff, | 791 | writelfl((pp->crpb_dma & 0xffffffff) | index, |
763 | port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | 792 | port_mmio + EDMA_RSP_Q_IN_PTR_OFS); |
764 | else | 793 | else |
765 | writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | 794 | writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); |
766 | 795 | ||
767 | writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, | 796 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, |
768 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 797 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
769 | |||
770 | } | 798 | } |
771 | 799 | ||
772 | /** | 800 | /** |
@@ -784,6 +812,11 @@ static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv, | |||
784 | struct mv_port_priv *pp) | 812 | struct mv_port_priv *pp) |
785 | { | 813 | { |
786 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { | 814 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { |
815 | /* clear EDMA event indicators, if any */ | ||
816 | writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS); | ||
817 | |||
818 | mv_set_edma_ptrs(base, hpriv, pp); | ||
819 | |||
787 | writelfl(EDMA_EN, base + EDMA_CMD_OFS); | 820 | writelfl(EDMA_EN, base + EDMA_CMD_OFS); |
788 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; | 821 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; |
789 | } | 822 | } |
@@ -827,7 +860,6 @@ static int mv_stop_dma(struct ata_port *ap) | |||
827 | 860 | ||
828 | if (reg & EDMA_EN) { | 861 | if (reg & EDMA_EN) { |
829 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); | 862 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); |
830 | /* FIXME: Consider doing a reset here to recover */ | ||
831 | err = -EIO; | 863 | err = -EIO; |
832 | } | 864 | } |
833 | 865 | ||
@@ -1101,11 +1133,6 @@ static unsigned int mv_fill_sg(struct ata_queued_cmd *qc) | |||
1101 | return n_sg; | 1133 | return n_sg; |
1102 | } | 1134 | } |
1103 | 1135 | ||
1104 | static inline unsigned mv_inc_q_index(unsigned index) | ||
1105 | { | ||
1106 | return (index + 1) & MV_MAX_Q_DEPTH_MASK; | ||
1107 | } | ||
1108 | |||
1109 | static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) | 1136 | static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) |
1110 | { | 1137 | { |
1111 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | | 1138 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | |
@@ -1145,9 +1172,8 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
1145 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1172 | flags |= qc->tag << CRQB_TAG_SHIFT; |
1146 | flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/ | 1173 | flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/ |
1147 | 1174 | ||
1148 | /* get current queue index from hardware */ | 1175 | /* get current queue index from software */ |
1149 | in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) | 1176 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; |
1150 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1151 | 1177 | ||
1152 | pp->crqb[in_index].sg_addr = | 1178 | pp->crqb[in_index].sg_addr = |
1153 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | 1179 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); |
@@ -1237,12 +1263,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1237 | 1263 | ||
1238 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1264 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
1239 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1265 | flags |= qc->tag << CRQB_TAG_SHIFT; |
1240 | flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really- | 1266 | flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really- |
1241 | what we use as our tag */ | 1267 | what we use as our tag */ |
1242 | 1268 | ||
1243 | /* get current queue index from hardware */ | 1269 | /* get current queue index from software */ |
1244 | in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) | 1270 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; |
1245 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1246 | 1271 | ||
1247 | crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; | 1272 | crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; |
1248 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | 1273 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); |
@@ -1294,8 +1319,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1294 | void __iomem *port_mmio = mv_ap_base(ap); | 1319 | void __iomem *port_mmio = mv_ap_base(ap); |
1295 | struct mv_port_priv *pp = ap->private_data; | 1320 | struct mv_port_priv *pp = ap->private_data; |
1296 | struct mv_host_priv *hpriv = ap->host->private_data; | 1321 | struct mv_host_priv *hpriv = ap->host->private_data; |
1297 | unsigned in_index; | 1322 | u32 in_index; |
1298 | u32 in_ptr; | ||
1299 | 1323 | ||
1300 | if (qc->tf.protocol != ATA_PROT_DMA) { | 1324 | if (qc->tf.protocol != ATA_PROT_DMA) { |
1301 | /* We're about to send a non-EDMA capable command to the | 1325 | /* We're about to send a non-EDMA capable command to the |
@@ -1306,69 +1330,26 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1306 | return ata_qc_issue_prot(qc); | 1330 | return ata_qc_issue_prot(qc); |
1307 | } | 1331 | } |
1308 | 1332 | ||
1309 | in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 1333 | mv_start_dma(port_mmio, hpriv, pp); |
1310 | in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | 1334 | |
1335 | in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; | ||
1311 | 1336 | ||
1312 | /* until we do queuing, the queue should be empty at this point */ | 1337 | /* until we do queuing, the queue should be empty at this point */ |
1313 | WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) | 1338 | WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) |
1314 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); | 1339 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); |
1315 | 1340 | ||
1316 | in_index = mv_inc_q_index(in_index); /* now incr producer index */ | 1341 | pp->req_idx++; |
1317 | 1342 | ||
1318 | mv_start_dma(port_mmio, hpriv, pp); | 1343 | in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT; |
1319 | 1344 | ||
1320 | /* and write the request in pointer to kick the EDMA to life */ | 1345 | /* and write the request in pointer to kick the EDMA to life */ |
1321 | in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; | 1346 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, |
1322 | in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT; | 1347 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); |
1323 | writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
1324 | 1348 | ||
1325 | return 0; | 1349 | return 0; |
1326 | } | 1350 | } |
1327 | 1351 | ||
1328 | /** | 1352 | /** |
1329 | * mv_get_crpb_status - get status from most recently completed cmd | ||
1330 | * @ap: ATA channel to manipulate | ||
1331 | * | ||
1332 | * This routine is for use when the port is in DMA mode, when it | ||
1333 | * will be using the CRPB (command response block) method of | ||
1334 | * returning command completion information. We check indices | ||
1335 | * are good, grab status, and bump the response consumer index to | ||
1336 | * prove that we're up to date. | ||
1337 | * | ||
1338 | * LOCKING: | ||
1339 | * Inherited from caller. | ||
1340 | */ | ||
1341 | static u8 mv_get_crpb_status(struct ata_port *ap) | ||
1342 | { | ||
1343 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1344 | struct mv_port_priv *pp = ap->private_data; | ||
1345 | unsigned out_index; | ||
1346 | u32 out_ptr; | ||
1347 | u8 ata_status; | ||
1348 | |||
1349 | out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
1350 | out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1351 | |||
1352 | ata_status = le16_to_cpu(pp->crpb[out_index].flags) | ||
1353 | >> CRPB_FLAG_STATUS_SHIFT; | ||
1354 | |||
1355 | /* increment our consumer index... */ | ||
1356 | out_index = mv_inc_q_index(out_index); | ||
1357 | |||
1358 | /* and, until we do NCQ, there should only be 1 CRPB waiting */ | ||
1359 | WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) | ||
1360 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); | ||
1361 | |||
1362 | /* write out our inc'd consumer index so EDMA knows we're caught up */ | ||
1363 | out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; | ||
1364 | out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT; | ||
1365 | writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
1366 | |||
1367 | /* Return ATA status register for completed CRPB */ | ||
1368 | return ata_status; | ||
1369 | } | ||
1370 | |||
1371 | /** | ||
1372 | * mv_err_intr - Handle error interrupts on the port | 1353 | * mv_err_intr - Handle error interrupts on the port |
1373 | * @ap: ATA channel to manipulate | 1354 | * @ap: ATA channel to manipulate |
1374 | * @reset_allowed: bool: 0 == don't trigger from reset here | 1355 | * @reset_allowed: bool: 0 == don't trigger from reset here |
@@ -1382,30 +1363,191 @@ static u8 mv_get_crpb_status(struct ata_port *ap) | |||
1382 | * LOCKING: | 1363 | * LOCKING: |
1383 | * Inherited from caller. | 1364 | * Inherited from caller. |
1384 | */ | 1365 | */ |
1385 | static void mv_err_intr(struct ata_port *ap, int reset_allowed) | 1366 | static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) |
1386 | { | 1367 | { |
1387 | void __iomem *port_mmio = mv_ap_base(ap); | 1368 | void __iomem *port_mmio = mv_ap_base(ap); |
1388 | u32 edma_err_cause, serr = 0; | 1369 | u32 edma_err_cause, eh_freeze_mask, serr = 0; |
1370 | struct mv_port_priv *pp = ap->private_data; | ||
1371 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1372 | unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); | ||
1373 | unsigned int action = 0, err_mask = 0; | ||
1374 | struct ata_eh_info *ehi = &ap->eh_info; | ||
1389 | 1375 | ||
1390 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1376 | ata_ehi_clear_desc(ehi); |
1391 | 1377 | ||
1392 | if (EDMA_ERR_SERR & edma_err_cause) { | 1378 | if (!edma_enabled) { |
1379 | /* just a guess: do we need to do this? should we | ||
1380 | * expand this, and do it in all cases? | ||
1381 | */ | ||
1393 | sata_scr_read(ap, SCR_ERROR, &serr); | 1382 | sata_scr_read(ap, SCR_ERROR, &serr); |
1394 | sata_scr_write_flush(ap, SCR_ERROR, serr); | 1383 | sata_scr_write_flush(ap, SCR_ERROR, serr); |
1395 | } | 1384 | } |
1396 | if (EDMA_ERR_SELF_DIS & edma_err_cause) { | 1385 | |
1397 | struct mv_port_priv *pp = ap->private_data; | 1386 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
1398 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 1387 | |
1388 | ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause); | ||
1389 | |||
1390 | /* | ||
1391 | * all generations share these EDMA error cause bits | ||
1392 | */ | ||
1393 | |||
1394 | if (edma_err_cause & EDMA_ERR_DEV) | ||
1395 | err_mask |= AC_ERR_DEV; | ||
1396 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | | ||
1397 | EDMA_ERR_CRBQ_PAR | EDMA_ERR_CRPB_PAR | | ||
1398 | EDMA_ERR_INTRL_PAR)) { | ||
1399 | err_mask |= AC_ERR_ATA_BUS; | ||
1400 | action |= ATA_EH_HARDRESET; | ||
1401 | ata_ehi_push_desc(ehi, ", parity error"); | ||
1402 | } | ||
1403 | if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { | ||
1404 | ata_ehi_hotplugged(ehi); | ||
1405 | ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? | ||
1406 | ", dev disconnect" : ", dev connect"); | ||
1407 | } | ||
1408 | |||
1409 | if (IS_50XX(hpriv)) { | ||
1410 | eh_freeze_mask = EDMA_EH_FREEZE_5; | ||
1411 | |||
1412 | if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { | ||
1413 | struct mv_port_priv *pp = ap->private_data; | ||
1414 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | ||
1415 | ata_ehi_push_desc(ehi, ", EDMA self-disable"); | ||
1416 | } | ||
1417 | } else { | ||
1418 | eh_freeze_mask = EDMA_EH_FREEZE; | ||
1419 | |||
1420 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { | ||
1421 | struct mv_port_priv *pp = ap->private_data; | ||
1422 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | ||
1423 | ata_ehi_push_desc(ehi, ", EDMA self-disable"); | ||
1424 | } | ||
1425 | |||
1426 | if (edma_err_cause & EDMA_ERR_SERR) { | ||
1427 | sata_scr_read(ap, SCR_ERROR, &serr); | ||
1428 | sata_scr_write_flush(ap, SCR_ERROR, serr); | ||
1429 | err_mask = AC_ERR_ATA_BUS; | ||
1430 | action |= ATA_EH_HARDRESET; | ||
1431 | } | ||
1399 | } | 1432 | } |
1400 | DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x " | ||
1401 | "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr); | ||
1402 | 1433 | ||
1403 | /* Clear EDMA now that SERR cleanup done */ | 1434 | /* Clear EDMA now that SERR cleanup done */ |
1404 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1435 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
1405 | 1436 | ||
1406 | /* check for fatal here and recover if needed */ | 1437 | if (!err_mask) { |
1407 | if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause)) | 1438 | err_mask = AC_ERR_OTHER; |
1408 | mv_stop_and_reset(ap); | 1439 | action |= ATA_EH_HARDRESET; |
1440 | } | ||
1441 | |||
1442 | ehi->serror |= serr; | ||
1443 | ehi->action |= action; | ||
1444 | |||
1445 | if (qc) | ||
1446 | qc->err_mask |= err_mask; | ||
1447 | else | ||
1448 | ehi->err_mask |= err_mask; | ||
1449 | |||
1450 | if (edma_err_cause & eh_freeze_mask) | ||
1451 | ata_port_freeze(ap); | ||
1452 | else | ||
1453 | ata_port_abort(ap); | ||
1454 | } | ||
1455 | |||
1456 | static void mv_intr_pio(struct ata_port *ap) | ||
1457 | { | ||
1458 | struct ata_queued_cmd *qc; | ||
1459 | u8 ata_status; | ||
1460 | |||
1461 | /* ignore spurious intr if drive still BUSY */ | ||
1462 | ata_status = readb(ap->ioaddr.status_addr); | ||
1463 | if (unlikely(ata_status & ATA_BUSY)) | ||
1464 | return; | ||
1465 | |||
1466 | /* get active ATA command */ | ||
1467 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
1468 | if (unlikely(!qc)) /* no active tag */ | ||
1469 | return; | ||
1470 | if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */ | ||
1471 | return; | ||
1472 | |||
1473 | /* and finally, complete the ATA command */ | ||
1474 | qc->err_mask |= ac_err_mask(ata_status); | ||
1475 | ata_qc_complete(qc); | ||
1476 | } | ||
1477 | |||
1478 | static void mv_intr_edma(struct ata_port *ap) | ||
1479 | { | ||
1480 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1481 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1482 | struct mv_port_priv *pp = ap->private_data; | ||
1483 | struct ata_queued_cmd *qc; | ||
1484 | u32 out_index, in_index; | ||
1485 | bool work_done = false; | ||
1486 | |||
1487 | /* get h/w response queue pointer */ | ||
1488 | in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) | ||
1489 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1490 | |||
1491 | while (1) { | ||
1492 | u16 status; | ||
1493 | |||
1494 | /* get s/w response queue last-read pointer, and compare */ | ||
1495 | out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK; | ||
1496 | if (in_index == out_index) | ||
1497 | break; | ||
1498 | |||
1499 | |||
1500 | /* 50xx: get active ATA command */ | ||
1501 | if (IS_GEN_I(hpriv)) | ||
1502 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
1503 | |||
1504 | /* 60xx: get active ATA command via tag, to enable support | ||
1505 | * for queueing. this works transparently for queued and | ||
1506 | * non-queued modes. | ||
1507 | */ | ||
1508 | else { | ||
1509 | unsigned int tag; | ||
1510 | |||
1511 | if (IS_GEN_II(hpriv)) | ||
1512 | tag = (le16_to_cpu(pp->crpb[out_index].id) | ||
1513 | >> CRPB_IOID_SHIFT_6) & 0x3f; | ||
1514 | else | ||
1515 | tag = (le16_to_cpu(pp->crpb[out_index].id) | ||
1516 | >> CRPB_IOID_SHIFT_7) & 0x3f; | ||
1517 | |||
1518 | qc = ata_qc_from_tag(ap, tag); | ||
1519 | } | ||
1520 | |||
1521 | /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS | ||
1522 | * bits (WARNING: might not necessarily be associated | ||
1523 | * with this command), which -should- be clear | ||
1524 | * if all is well | ||
1525 | */ | ||
1526 | status = le16_to_cpu(pp->crpb[out_index].flags); | ||
1527 | if (unlikely(status & 0xff)) { | ||
1528 | mv_err_intr(ap, qc); | ||
1529 | return; | ||
1530 | } | ||
1531 | |||
1532 | /* and finally, complete the ATA command */ | ||
1533 | if (qc) { | ||
1534 | qc->err_mask |= | ||
1535 | ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT); | ||
1536 | ata_qc_complete(qc); | ||
1537 | } | ||
1538 | |||
1539 | /* advance software response queue pointer, to | ||
1540 | * indicate (after the loop completes) to hardware | ||
1541 | * that we have consumed a response queue entry. | ||
1542 | */ | ||
1543 | work_done = true; | ||
1544 | pp->resp_idx++; | ||
1545 | } | ||
1546 | |||
1547 | if (work_done) | ||
1548 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | | ||
1549 | (out_index << EDMA_RSP_Q_PTR_SHIFT), | ||
1550 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | ||
1409 | } | 1551 | } |
1410 | 1552 | ||
1411 | /** | 1553 | /** |
@@ -1428,11 +1570,8 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) | |||
1428 | { | 1570 | { |
1429 | void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; | 1571 | void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; |
1430 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); | 1572 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); |
1431 | struct ata_queued_cmd *qc; | ||
1432 | u32 hc_irq_cause; | 1573 | u32 hc_irq_cause; |
1433 | int port, port0; | 1574 | int port, port0; |
1434 | int shift, hard_port, handled; | ||
1435 | unsigned int err_mask; | ||
1436 | 1575 | ||
1437 | if (hc == 0) | 1576 | if (hc == 0) |
1438 | port0 = 0; | 1577 | port0 = 0; |
@@ -1441,72 +1580,89 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) | |||
1441 | 1580 | ||
1442 | /* we'll need the HC success int register in most cases */ | 1581 | /* we'll need the HC success int register in most cases */ |
1443 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); | 1582 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); |
1444 | if (hc_irq_cause) | 1583 | if (!hc_irq_cause) |
1445 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | 1584 | return; |
1585 | |||
1586 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
1446 | 1587 | ||
1447 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", | 1588 | VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", |
1448 | hc,relevant,hc_irq_cause); | 1589 | hc,relevant,hc_irq_cause); |
1449 | 1590 | ||
1450 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { | 1591 | for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { |
1451 | u8 ata_status = 0; | ||
1452 | struct ata_port *ap = host->ports[port]; | 1592 | struct ata_port *ap = host->ports[port]; |
1453 | struct mv_port_priv *pp = ap->private_data; | 1593 | struct mv_port_priv *pp = ap->private_data; |
1594 | int have_err_bits, hard_port, shift; | ||
1595 | |||
1596 | if ((!ap) || (ap->flags & ATA_FLAG_DISABLED)) | ||
1597 | continue; | ||
1598 | |||
1599 | shift = port << 1; /* (port * 2) */ | ||
1600 | if (port >= MV_PORTS_PER_HC) { | ||
1601 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | ||
1602 | } | ||
1603 | have_err_bits = ((PORT0_ERR << shift) & relevant); | ||
1604 | |||
1605 | if (unlikely(have_err_bits)) { | ||
1606 | struct ata_queued_cmd *qc; | ||
1607 | |||
1608 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
1609 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) | ||
1610 | continue; | ||
1611 | |||
1612 | mv_err_intr(ap, qc); | ||
1613 | continue; | ||
1614 | } | ||
1454 | 1615 | ||
1455 | hard_port = mv_hardport_from_port(port); /* range 0..3 */ | 1616 | hard_port = mv_hardport_from_port(port); /* range 0..3 */ |
1456 | handled = 0; /* ensure ata_status is set if handled++ */ | ||
1457 | 1617 | ||
1458 | /* Note that DEV_IRQ might happen spuriously during EDMA, | ||
1459 | * and should be ignored in such cases. | ||
1460 | * The cause of this is still under investigation. | ||
1461 | */ | ||
1462 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { | 1618 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
1463 | /* EDMA: check for response queue interrupt */ | 1619 | if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) |
1464 | if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { | 1620 | mv_intr_edma(ap); |
1465 | ata_status = mv_get_crpb_status(ap); | ||
1466 | handled = 1; | ||
1467 | } | ||
1468 | } else { | 1621 | } else { |
1469 | /* PIO: check for device (drive) interrupt */ | 1622 | if ((DEV_IRQ << hard_port) & hc_irq_cause) |
1470 | if ((DEV_IRQ << hard_port) & hc_irq_cause) { | 1623 | mv_intr_pio(ap); |
1471 | ata_status = readb(ap->ioaddr.status_addr); | ||
1472 | handled = 1; | ||
1473 | /* ignore spurious intr if drive still BUSY */ | ||
1474 | if (ata_status & ATA_BUSY) { | ||
1475 | ata_status = 0; | ||
1476 | handled = 0; | ||
1477 | } | ||
1478 | } | ||
1479 | } | 1624 | } |
1625 | } | ||
1626 | VPRINTK("EXIT\n"); | ||
1627 | } | ||
1480 | 1628 | ||
1481 | if (ap && (ap->flags & ATA_FLAG_DISABLED)) | 1629 | static void mv_pci_error(struct ata_host *host, void __iomem *mmio) |
1482 | continue; | 1630 | { |
1631 | struct ata_port *ap; | ||
1632 | struct ata_queued_cmd *qc; | ||
1633 | struct ata_eh_info *ehi; | ||
1634 | unsigned int i, err_mask, printed = 0; | ||
1635 | u32 err_cause; | ||
1483 | 1636 | ||
1484 | err_mask = ac_err_mask(ata_status); | 1637 | err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS); |
1485 | 1638 | ||
1486 | shift = port << 1; /* (port * 2) */ | 1639 | dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", |
1487 | if (port >= MV_PORTS_PER_HC) { | 1640 | err_cause); |
1488 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | ||
1489 | } | ||
1490 | if ((PORT0_ERR << shift) & relevant) { | ||
1491 | mv_err_intr(ap, 1); | ||
1492 | err_mask |= AC_ERR_OTHER; | ||
1493 | handled = 1; | ||
1494 | } | ||
1495 | 1641 | ||
1496 | if (handled) { | 1642 | DPRINTK("All regs @ PCI error\n"); |
1643 | mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); | ||
1644 | |||
1645 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); | ||
1646 | |||
1647 | for (i = 0; i < host->n_ports; i++) { | ||
1648 | ap = host->ports[i]; | ||
1649 | if (!ata_port_offline(ap)) { | ||
1650 | ehi = &ap->eh_info; | ||
1651 | ata_ehi_clear_desc(ehi); | ||
1652 | if (!printed++) | ||
1653 | ata_ehi_push_desc(ehi, | ||
1654 | "PCI err cause 0x%08x", err_cause); | ||
1655 | err_mask = AC_ERR_HOST_BUS; | ||
1656 | ehi->action = ATA_EH_HARDRESET; | ||
1497 | qc = ata_qc_from_tag(ap, ap->active_tag); | 1657 | qc = ata_qc_from_tag(ap, ap->active_tag); |
1498 | if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) { | 1658 | if (qc) |
1499 | VPRINTK("port %u IRQ found for qc, " | 1659 | qc->err_mask |= err_mask; |
1500 | "ata_status 0x%x\n", port,ata_status); | 1660 | else |
1501 | /* mark qc status appropriately */ | 1661 | ehi->err_mask |= err_mask; |
1502 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) { | 1662 | |
1503 | qc->err_mask |= err_mask; | 1663 | ata_port_freeze(ap); |
1504 | ata_qc_complete(qc); | ||
1505 | } | ||
1506 | } | ||
1507 | } | 1664 | } |
1508 | } | 1665 | } |
1509 | VPRINTK("EXIT\n"); | ||
1510 | } | 1666 | } |
1511 | 1667 | ||
1512 | /** | 1668 | /** |
@@ -1541,24 +1697,21 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) | |||
1541 | n_hcs = mv_get_hc_count(host->ports[0]->flags); | 1697 | n_hcs = mv_get_hc_count(host->ports[0]->flags); |
1542 | spin_lock(&host->lock); | 1698 | spin_lock(&host->lock); |
1543 | 1699 | ||
1700 | if (unlikely(irq_stat & PCI_ERR)) { | ||
1701 | mv_pci_error(host, mmio); | ||
1702 | handled = 1; | ||
1703 | goto out_unlock; /* skip all other HC irq handling */ | ||
1704 | } | ||
1705 | |||
1544 | for (hc = 0; hc < n_hcs; hc++) { | 1706 | for (hc = 0; hc < n_hcs; hc++) { |
1545 | u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); | 1707 | u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); |
1546 | if (relevant) { | 1708 | if (relevant) { |
1547 | mv_host_intr(host, relevant, hc); | 1709 | mv_host_intr(host, relevant, hc); |
1548 | handled++; | 1710 | handled = 1; |
1549 | } | 1711 | } |
1550 | } | 1712 | } |
1551 | 1713 | ||
1552 | if (PCI_ERR & irq_stat) { | 1714 | out_unlock: |
1553 | printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", | ||
1554 | readl(mmio + PCI_IRQ_CAUSE_OFS)); | ||
1555 | |||
1556 | DPRINTK("All regs @ PCI error\n"); | ||
1557 | mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); | ||
1558 | |||
1559 | writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); | ||
1560 | handled++; | ||
1561 | } | ||
1562 | spin_unlock(&host->lock); | 1715 | spin_unlock(&host->lock); |
1563 | 1716 | ||
1564 | return IRQ_RETVAL(handled); | 1717 | return IRQ_RETVAL(handled); |
@@ -1967,28 +2120,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1967 | mdelay(1); | 2120 | mdelay(1); |
1968 | } | 2121 | } |
1969 | 2122 | ||
1970 | static void mv_stop_and_reset(struct ata_port *ap) | ||
1971 | { | ||
1972 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1973 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | ||
1974 | |||
1975 | mv_stop_dma(ap); | ||
1976 | |||
1977 | mv_channel_reset(hpriv, mmio, ap->port_no); | ||
1978 | |||
1979 | __mv_phy_reset(ap, 0); | ||
1980 | } | ||
1981 | |||
1982 | static inline void __msleep(unsigned int msec, int can_sleep) | ||
1983 | { | ||
1984 | if (can_sleep) | ||
1985 | msleep(msec); | ||
1986 | else | ||
1987 | mdelay(msec); | ||
1988 | } | ||
1989 | |||
1990 | /** | 2123 | /** |
1991 | * __mv_phy_reset - Perform eDMA reset followed by COMRESET | 2124 | * mv_phy_reset - Perform eDMA reset followed by COMRESET |
1992 | * @ap: ATA channel to manipulate | 2125 | * @ap: ATA channel to manipulate |
1993 | * | 2126 | * |
1994 | * Part of this is taken from __sata_phy_reset and modified to | 2127 | * Part of this is taken from __sata_phy_reset and modified to |
@@ -1998,14 +2131,12 @@ static inline void __msleep(unsigned int msec, int can_sleep) | |||
1998 | * Inherited from caller. This is coded to safe to call at | 2131 | * Inherited from caller. This is coded to safe to call at |
1999 | * interrupt level, i.e. it does not sleep. | 2132 | * interrupt level, i.e. it does not sleep. |
2000 | */ | 2133 | */ |
2001 | static void __mv_phy_reset(struct ata_port *ap, int can_sleep) | 2134 | static void mv_phy_reset(struct ata_port *ap, unsigned int *class, |
2135 | unsigned long deadline) | ||
2002 | { | 2136 | { |
2003 | struct mv_port_priv *pp = ap->private_data; | 2137 | struct mv_port_priv *pp = ap->private_data; |
2004 | struct mv_host_priv *hpriv = ap->host->private_data; | 2138 | struct mv_host_priv *hpriv = ap->host->private_data; |
2005 | void __iomem *port_mmio = mv_ap_base(ap); | 2139 | void __iomem *port_mmio = mv_ap_base(ap); |
2006 | struct ata_taskfile tf; | ||
2007 | struct ata_device *dev = &ap->device[0]; | ||
2008 | unsigned long deadline; | ||
2009 | int retry = 5; | 2140 | int retry = 5; |
2010 | u32 sstatus; | 2141 | u32 sstatus; |
2011 | 2142 | ||
@@ -2018,18 +2149,17 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep) | |||
2018 | /* Issue COMRESET via SControl */ | 2149 | /* Issue COMRESET via SControl */ |
2019 | comreset_retry: | 2150 | comreset_retry: |
2020 | sata_scr_write_flush(ap, SCR_CONTROL, 0x301); | 2151 | sata_scr_write_flush(ap, SCR_CONTROL, 0x301); |
2021 | __msleep(1, can_sleep); | 2152 | msleep(1); |
2022 | 2153 | ||
2023 | sata_scr_write_flush(ap, SCR_CONTROL, 0x300); | 2154 | sata_scr_write_flush(ap, SCR_CONTROL, 0x300); |
2024 | __msleep(20, can_sleep); | 2155 | msleep(20); |
2025 | 2156 | ||
2026 | deadline = jiffies + msecs_to_jiffies(200); | ||
2027 | do { | 2157 | do { |
2028 | sata_scr_read(ap, SCR_STATUS, &sstatus); | 2158 | sata_scr_read(ap, SCR_STATUS, &sstatus); |
2029 | if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) | 2159 | if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) |
2030 | break; | 2160 | break; |
2031 | 2161 | ||
2032 | __msleep(1, can_sleep); | 2162 | msleep(1); |
2033 | } while (time_before(jiffies, deadline)); | 2163 | } while (time_before(jiffies, deadline)); |
2034 | 2164 | ||
2035 | /* work around errata */ | 2165 | /* work around errata */ |
@@ -2042,13 +2172,8 @@ comreset_retry: | |||
2042 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), | 2172 | "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), |
2043 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); | 2173 | mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); |
2044 | 2174 | ||
2045 | if (ata_port_online(ap)) { | 2175 | if (ata_port_offline(ap)) { |
2046 | ata_port_probe(ap); | 2176 | *class = ATA_DEV_NONE; |
2047 | } else { | ||
2048 | sata_scr_read(ap, SCR_STATUS, &sstatus); | ||
2049 | ata_port_printk(ap, KERN_INFO, | ||
2050 | "no device found (phy stat %08x)\n", sstatus); | ||
2051 | ata_port_disable(ap); | ||
2052 | return; | 2177 | return; |
2053 | } | 2178 | } |
2054 | 2179 | ||
@@ -2062,68 +2187,152 @@ comreset_retry: | |||
2062 | u8 drv_stat = ata_check_status(ap); | 2187 | u8 drv_stat = ata_check_status(ap); |
2063 | if ((drv_stat != 0x80) && (drv_stat != 0x7f)) | 2188 | if ((drv_stat != 0x80) && (drv_stat != 0x7f)) |
2064 | break; | 2189 | break; |
2065 | __msleep(500, can_sleep); | 2190 | msleep(500); |
2066 | if (retry-- <= 0) | 2191 | if (retry-- <= 0) |
2067 | break; | 2192 | break; |
2193 | if (time_after(jiffies, deadline)) | ||
2194 | break; | ||
2068 | } | 2195 | } |
2069 | 2196 | ||
2070 | tf.lbah = readb(ap->ioaddr.lbah_addr); | 2197 | /* FIXME: if we passed the deadline, the following |
2071 | tf.lbam = readb(ap->ioaddr.lbam_addr); | 2198 | * code probably produces an invalid result |
2072 | tf.lbal = readb(ap->ioaddr.lbal_addr); | 2199 | */ |
2073 | tf.nsect = readb(ap->ioaddr.nsect_addr); | ||
2074 | 2200 | ||
2075 | dev->class = ata_dev_classify(&tf); | 2201 | /* finally, read device signature from TF registers */ |
2076 | if (!ata_dev_enabled(dev)) { | 2202 | *class = ata_dev_try_classify(ap, 0, NULL); |
2077 | VPRINTK("Port disabled post-sig: No device present.\n"); | ||
2078 | ata_port_disable(ap); | ||
2079 | } | ||
2080 | 2203 | ||
2081 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 2204 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
2082 | 2205 | ||
2083 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 2206 | WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN); |
2084 | 2207 | ||
2085 | VPRINTK("EXIT\n"); | 2208 | VPRINTK("EXIT\n"); |
2086 | } | 2209 | } |
2087 | 2210 | ||
2088 | static void mv_phy_reset(struct ata_port *ap) | 2211 | static int mv_prereset(struct ata_port *ap, unsigned long deadline) |
2089 | { | 2212 | { |
2090 | __mv_phy_reset(ap, 1); | 2213 | struct mv_port_priv *pp = ap->private_data; |
2214 | struct ata_eh_context *ehc = &ap->eh_context; | ||
2215 | int rc; | ||
2216 | |||
2217 | rc = mv_stop_dma(ap); | ||
2218 | if (rc) | ||
2219 | ehc->i.action |= ATA_EH_HARDRESET; | ||
2220 | |||
2221 | if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) { | ||
2222 | pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET; | ||
2223 | ehc->i.action |= ATA_EH_HARDRESET; | ||
2224 | } | ||
2225 | |||
2226 | /* if we're about to do hardreset, nothing more to do */ | ||
2227 | if (ehc->i.action & ATA_EH_HARDRESET) | ||
2228 | return 0; | ||
2229 | |||
2230 | if (ata_port_online(ap)) | ||
2231 | rc = ata_wait_ready(ap, deadline); | ||
2232 | else | ||
2233 | rc = -ENODEV; | ||
2234 | |||
2235 | return rc; | ||
2091 | } | 2236 | } |
2092 | 2237 | ||
2093 | /** | 2238 | static int mv_hardreset(struct ata_port *ap, unsigned int *class, |
2094 | * mv_eng_timeout - Routine called by libata when SCSI times out I/O | 2239 | unsigned long deadline) |
2095 | * @ap: ATA channel to manipulate | ||
2096 | * | ||
2097 | * Intent is to clear all pending error conditions, reset the | ||
2098 | * chip/bus, fail the command, and move on. | ||
2099 | * | ||
2100 | * LOCKING: | ||
2101 | * This routine holds the host lock while failing the command. | ||
2102 | */ | ||
2103 | static void mv_eng_timeout(struct ata_port *ap) | ||
2104 | { | 2240 | { |
2241 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
2105 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | 2242 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; |
2106 | struct ata_queued_cmd *qc; | ||
2107 | unsigned long flags; | ||
2108 | 2243 | ||
2109 | ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n"); | 2244 | mv_stop_dma(ap); |
2110 | DPRINTK("All regs @ start of eng_timeout\n"); | ||
2111 | mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev)); | ||
2112 | 2245 | ||
2113 | qc = ata_qc_from_tag(ap, ap->active_tag); | 2246 | mv_channel_reset(hpriv, mmio, ap->port_no); |
2114 | printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", | ||
2115 | mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd); | ||
2116 | 2247 | ||
2117 | spin_lock_irqsave(&ap->host->lock, flags); | 2248 | mv_phy_reset(ap, class, deadline); |
2118 | mv_err_intr(ap, 0); | 2249 | |
2119 | mv_stop_and_reset(ap); | 2250 | return 0; |
2120 | spin_unlock_irqrestore(&ap->host->lock, flags); | 2251 | } |
2252 | |||
2253 | static void mv_postreset(struct ata_port *ap, unsigned int *classes) | ||
2254 | { | ||
2255 | u32 serr; | ||
2256 | |||
2257 | /* print link status */ | ||
2258 | sata_print_link_status(ap); | ||
2259 | |||
2260 | /* clear SError */ | ||
2261 | sata_scr_read(ap, SCR_ERROR, &serr); | ||
2262 | sata_scr_write_flush(ap, SCR_ERROR, serr); | ||
2263 | |||
2264 | /* bail out if no device is present */ | ||
2265 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | ||
2266 | DPRINTK("EXIT, no device\n"); | ||
2267 | return; | ||
2268 | } | ||
2269 | |||
2270 | /* set up device control */ | ||
2271 | iowrite8(ap->ctl, ap->ioaddr.ctl_addr); | ||
2272 | } | ||
2273 | |||
2274 | static void mv_error_handler(struct ata_port *ap) | ||
2275 | { | ||
2276 | ata_do_eh(ap, mv_prereset, ata_std_softreset, | ||
2277 | mv_hardreset, mv_postreset); | ||
2278 | } | ||
2279 | |||
2280 | static void mv_post_int_cmd(struct ata_queued_cmd *qc) | ||
2281 | { | ||
2282 | mv_stop_dma(qc->ap); | ||
2283 | } | ||
2284 | |||
2285 | static void mv_eh_freeze(struct ata_port *ap) | ||
2286 | { | ||
2287 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | ||
2288 | unsigned int hc = (ap->port_no > 3) ? 1 : 0; | ||
2289 | u32 tmp, mask; | ||
2290 | unsigned int shift; | ||
2291 | |||
2292 | /* FIXME: handle coalescing completion events properly */ | ||
2293 | |||
2294 | shift = ap->port_no * 2; | ||
2295 | if (hc > 0) | ||
2296 | shift++; | ||
2121 | 2297 | ||
2122 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); | 2298 | mask = 0x3 << shift; |
2123 | if (qc->flags & ATA_QCFLAG_ACTIVE) { | 2299 | |
2124 | qc->err_mask |= AC_ERR_TIMEOUT; | 2300 | /* disable assertion of portN err, done events */ |
2125 | ata_eh_qc_complete(qc); | 2301 | tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS); |
2302 | writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS); | ||
2303 | } | ||
2304 | |||
2305 | static void mv_eh_thaw(struct ata_port *ap) | ||
2306 | { | ||
2307 | void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR]; | ||
2308 | unsigned int hc = (ap->port_no > 3) ? 1 : 0; | ||
2309 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); | ||
2310 | void __iomem *port_mmio = mv_ap_base(ap); | ||
2311 | u32 tmp, mask, hc_irq_cause; | ||
2312 | unsigned int shift, hc_port_no = ap->port_no; | ||
2313 | |||
2314 | /* FIXME: handle coalescing completion events properly */ | ||
2315 | |||
2316 | shift = ap->port_no * 2; | ||
2317 | if (hc > 0) { | ||
2318 | shift++; | ||
2319 | hc_port_no -= 4; | ||
2126 | } | 2320 | } |
2321 | |||
2322 | mask = 0x3 << shift; | ||
2323 | |||
2324 | /* clear EDMA errors on this port */ | ||
2325 | writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | ||
2326 | |||
2327 | /* clear pending irq events */ | ||
2328 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); | ||
2329 | hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */ | ||
2330 | hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */ | ||
2331 | writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
2332 | |||
2333 | /* enable assertion of portN err, done events */ | ||
2334 | tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS); | ||
2335 | writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS); | ||
2127 | } | 2336 | } |
2128 | 2337 | ||
2129 | /** | 2338 | /** |