author	Mark Lord <liml@rtr.ca>	2008-05-02 02:10:02 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-05-06 11:37:39 -0400
commit	3e4a139107e497a741c26f8a377a10f214d63ec1 (patch)
tree	9ef418131b7ce4243413f2229d566b49e0eff832 /drivers
parent	9b2c4e0bae854fb5e88c9cacc0dacf21631c5cb0 (diff)
sata_mv new mv_qc_defer method
The EDMA engine cannot tolerate a mix of NCQ/non-NCQ commands, and cannot be used for PIO at all. So we need to prevent libata from trying to feed us such mixtures. Introduce mv_qc_defer() for this purpose, and use it for all chip versions.

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
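Before the patch itself, here is a minimal standalone sketch of the defer decision that the new mv_qc_defer() hook implements. The names used here (may_issue, port_idle, edma_on, ncq_mode, enum proto) are illustrative only and are not libata or sata_mv identifiers; the real logic in the patch below works on struct ata_port and struct mv_port_priv state.

#include <stdio.h>
#include <stdbool.h>

/*
 * Simplified, standalone model of the defer decision: given the current
 * port/queue state and the protocol of an incoming command, decide
 * whether the command may be issued now or must be deferred.
 */
enum proto { PROTO_NCQ, PROTO_DMA, PROTO_PIO };

/* Return true if a new command may be issued now, false to defer it. */
static bool may_issue(bool port_idle, bool edma_on, bool ncq_mode,
		      enum proto proto)
{
	if (port_idle)
		return true;		/* an idle port accepts any command */
	if (!edma_on)
		return false;		/* non-EDMA activity in flight: defer */
	if (ncq_mode)
		return proto == PROTO_NCQ;	/* NCQ queue takes only NCQ */
	return proto == PROTO_DMA;		/* non-NCQ EDMA takes only DMA */
}

int main(void)
{
	/* A PIO command against an active NCQ queue must be deferred. */
	printf("PIO vs NCQ queue: %s\n",
	       may_issue(false, true, true, PROTO_PIO) ? "issue" : "defer");
	/* Another NCQ command against the same queue may go ahead. */
	printf("NCQ vs NCQ queue: %s\n",
	       may_issue(false, true, true, PROTO_NCQ) ? "issue" : "defer");
	return 0;
}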
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/ata/sata_mv.c	43
1 file changed, 41 insertions(+), 2 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 692996216b1a..0545a4916100 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -492,6 +492,7 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
+static int mv_qc_defer(struct ata_queued_cmd *qc);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
@@ -561,6 +562,7 @@ static struct scsi_host_template mv6_sht = {
 static struct ata_port_operations mv5_ops = {
 	.inherits		= &ata_sff_port_ops,
 
+	.qc_defer		= mv_qc_defer,
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
 
@@ -579,7 +581,6 @@ static struct ata_port_operations mv5_ops = {
 
 static struct ata_port_operations mv6_ops = {
 	.inherits		= &mv5_ops,
-	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
 	.dev_config		= mv6_dev_config,
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
@@ -592,7 +593,6 @@ static struct ata_port_operations mv6_ops = {
 
 static struct ata_port_operations mv_iie_ops = {
 	.inherits		= &mv6_ops,
-	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
 	.dev_config		= ATA_OP_NULL,
 	.qc_prep		= mv_qc_prep_iie,
 };
@@ -1090,6 +1090,45 @@ static void mv6_dev_config(struct ata_device *adev)
 	}
 }
 
+static int mv_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+	struct ata_port *ap = link->ap;
+	struct mv_port_priv *pp = ap->private_data;
+
+	/*
+	 * If the port is completely idle, then allow the new qc.
+	 */
+	if (ap->nr_active_links == 0)
+		return 0;
+
+	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+		/*
+		 * The port is operating in host queuing mode (EDMA).
+		 * It can accomodate a new qc if the qc protocol
+		 * is compatible with the current host queue mode.
+		 */
+		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
+			/*
+			 * The host queue (EDMA) is in NCQ mode.
+			 * If the new qc is also an NCQ command,
+			 * then allow the new qc.
+			 */
+			if (qc->tf.protocol == ATA_PROT_NCQ)
+				return 0;
+		} else {
+			/*
+			 * The host queue (EDMA) is in non-NCQ, DMA mode.
+			 * If the new qc is also a non-NCQ, DMA command,
+			 * then allow the new qc.
+			 */
+			if (qc->tf.protocol == ATA_PROT_DMA)
+				return 0;
+		}
+	}
+	return ATA_DEFER_PORT;
+}
+
 static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
 {
 	u32 old_fiscfg, new_fiscfg, old_ltmode, new_ltmode;