diff options
| author | Mark Lord <liml@rtr.ca> | 2008-05-02 02:15:37 -0400 |
|---|---|---|
| committer | Jeff Garzik <jgarzik@redhat.com> | 2008-05-06 11:38:24 -0400 |
| commit | 29d187bb1e30682e228ce461c487d78d945c3e4f (patch) | |
| tree | bd43da31c7a0853438f0593f553b0dd019f0bd33 | |
| parent | 10acf3b0d3b46c6ef5d6f0722f72ad9b743ea848 (diff) | |
sata_mv delayed eh handling
Introduce a new "delayed error handling" mechanism in sata_mv,
to enable us to eventually deal with multiple simultaneous NCQ
failures on a single host link when a PM is present.
This involves a port flag (MV_PP_FLAG_DELAYED_EH) to prevent new
commands from being queued, and a pmp bitmap to indicate which pmp links
had NCQ errors.
The new mv_pmp_error_handler() uses those values to invoke
ata_eh_analyze_ncq_error() on each failed link, prior to freezing
the port and passing control to sata_pmp_error_handler().
This is based upon a strategy suggested by Tejun.
For now, we just implement the delayed mechanism.
The next patch in this series will add the multiple-NCQ EH code
to take advantage of it.
Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
| -rw-r--r-- | drivers/ata/sata_mv.c | 38 |
1 file changed, 37 insertions, 1 deletion
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 803578ef22f8..1991eb22e388 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
| @@ -367,6 +367,7 @@ enum { | |||
| 367 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ | 367 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ |
| 368 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ | 368 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ |
| 369 | MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ | 369 | MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ |
| 370 | MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ | ||
| 370 | }; | 371 | }; |
| 371 | 372 | ||
| 372 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) | 373 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) |
| @@ -447,6 +448,7 @@ struct mv_port_priv { | |||
| 447 | unsigned int resp_idx; | 448 | unsigned int resp_idx; |
| 448 | 449 | ||
| 449 | u32 pp_flags; | 450 | u32 pp_flags; |
| 451 | unsigned int delayed_eh_pmp_map; | ||
| 450 | }; | 452 | }; |
| 451 | 453 | ||
| 452 | struct mv_port_signal { | 454 | struct mv_port_signal { |
| @@ -542,6 +544,7 @@ static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, | |||
| 542 | unsigned long deadline); | 544 | unsigned long deadline); |
| 543 | static int mv_softreset(struct ata_link *link, unsigned int *class, | 545 | static int mv_softreset(struct ata_link *link, unsigned int *class, |
| 544 | unsigned long deadline); | 546 | unsigned long deadline); |
| 547 | static void mv_pmp_error_handler(struct ata_port *ap); | ||
| 545 | 548 | ||
| 546 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below | 549 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below |
| 547 | * because we have to allow room for worst case splitting of | 550 | * because we have to allow room for worst case splitting of |
| @@ -589,7 +592,7 @@ static struct ata_port_operations mv6_ops = { | |||
| 589 | .pmp_hardreset = mv_pmp_hardreset, | 592 | .pmp_hardreset = mv_pmp_hardreset, |
| 590 | .pmp_softreset = mv_softreset, | 593 | .pmp_softreset = mv_softreset, |
| 591 | .softreset = mv_softreset, | 594 | .softreset = mv_softreset, |
| 592 | .error_handler = sata_pmp_error_handler, | 595 | .error_handler = mv_pmp_error_handler, |
| 593 | }; | 596 | }; |
| 594 | 597 | ||
| 595 | static struct ata_port_operations mv_iie_ops = { | 598 | static struct ata_port_operations mv_iie_ops = { |
| @@ -1098,6 +1101,12 @@ static int mv_qc_defer(struct ata_queued_cmd *qc) | |||
| 1098 | struct mv_port_priv *pp = ap->private_data; | 1101 | struct mv_port_priv *pp = ap->private_data; |
| 1099 | 1102 | ||
| 1100 | /* | 1103 | /* |
| 1104 | * Don't allow new commands if we're in a delayed EH state | ||
| 1105 | * for NCQ and/or FIS-based switching. | ||
| 1106 | */ | ||
| 1107 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) | ||
| 1108 | return ATA_DEFER_PORT; | ||
| 1109 | /* | ||
| 1101 | * If the port is completely idle, then allow the new qc. | 1110 | * If the port is completely idle, then allow the new qc. |
| 1102 | */ | 1111 | */ |
| 1103 | if (ap->nr_active_links == 0) | 1112 | if (ap->nr_active_links == 0) |
| @@ -1591,6 +1600,33 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) | |||
| 1591 | return qc; | 1600 | return qc; |
| 1592 | } | 1601 | } |
| 1593 | 1602 | ||
| 1603 | static void mv_pmp_error_handler(struct ata_port *ap) | ||
| 1604 | { | ||
| 1605 | unsigned int pmp, pmp_map; | ||
| 1606 | struct mv_port_priv *pp = ap->private_data; | ||
| 1607 | |||
| 1608 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { | ||
| 1609 | /* | ||
| 1610 | * Perform NCQ error analysis on failed PMPs | ||
| 1611 | * before we freeze the port entirely. | ||
| 1612 | * | ||
| 1613 | * The failed PMPs are marked earlier by mv_pmp_eh_prep(). | ||
| 1614 | */ | ||
| 1615 | pmp_map = pp->delayed_eh_pmp_map; | ||
| 1616 | pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; | ||
| 1617 | for (pmp = 0; pmp_map != 0; pmp++) { | ||
| 1618 | unsigned int this_pmp = (1 << pmp); | ||
| 1619 | if (pmp_map & this_pmp) { | ||
| 1620 | struct ata_link *link = &ap->pmp_link[pmp]; | ||
| 1621 | pmp_map &= ~this_pmp; | ||
| 1622 | ata_eh_analyze_ncq_error(link); | ||
| 1623 | } | ||
| 1624 | } | ||
| 1625 | ata_port_freeze(ap); | ||
| 1626 | } | ||
| 1627 | sata_pmp_error_handler(ap); | ||
| 1628 | } | ||
| 1629 | |||
| 1594 | static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) | 1630 | static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) |
| 1595 | { | 1631 | { |
| 1596 | struct ata_eh_info *ehi = &ap->link.eh_info; | 1632 | struct ata_eh_info *ehi = &ap->link.eh_info; |
