author    Mark Lord <liml@rtr.ca>          2008-01-26 18:33:18 -0500
committer Jeff Garzik <jeff@garzik.org>    2008-02-01 11:29:49 -0500
commit    138bfdd03f2c08cc62b6af3900fb7be1c696315b (patch)
tree      fe90bcc4662b93011a733869fa9fac8e87da69c3 /drivers/ata/sata_mv.c
parent    eb73d558d1c1c931de0b3a86af962c77d74ef688 (diff)

sata_mv ncq Enable NCQ operation

Final changes to actually turn on NCQ in the driver for GEN_II/IIE hardware.

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/ata/sata_mv.c')

 drivers/ata/sata_mv.c | 33
 1 file changed, 17 insertions(+), 16 deletions(-)
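The recurring change in the qc_prep/qc_issue hunks below is that the EDMA fast path now accepts NCQ taskfiles in addition to plain DMA ones, while the host template and port flags advertise the hardware queue to SCSI and libata. A minimal stand-alone sketch of that eligibility test follows; it uses stand-in enum values rather than the real ATA_PROT_* constants and only illustrates the shape of the check:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in protocol tags for illustration; the driver tests the real
 * ATA_PROT_DMA / ATA_PROT_NCQ values from the qc's taskfile. */
enum prot { PROT_PIO, PROT_DMA, PROT_NCQ };

/* Before this patch only PROT_DMA went through EDMA; with NCQ enabled,
 * queued commands take the same hardware-queued path. */
static bool edma_capable(enum prot p)
{
	return p == PROT_DMA || p == PROT_NCQ;
}

int main(void)
{
	printf("PIO -> EDMA? %d\n", edma_capable(PROT_PIO)); /* 0: EDMA is shut off first */
	printf("NCQ -> EDMA? %d\n", edma_capable(PROT_NCQ)); /* 1: new with this patch */
	return 0;
}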
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index ea7af1f16844..817595cfc2f7 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -510,7 +510,8 @@ static struct scsi_host_template mv6_sht = {
 	.name = DRV_NAME,
 	.ioctl = ata_scsi_ioctl,
 	.queuecommand = ata_scsi_queuecmd,
-	.can_queue = ATA_DEF_QUEUE,
+	.change_queue_depth = ata_scsi_change_queue_depth,
+	.can_queue = MV_MAX_Q_DEPTH - 1,
 	.this_id = ATA_SHT_THIS_ID,
 	.sg_tablesize = MV_MAX_SG_CT / 2,
 	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@@ -572,6 +573,7 @@ static const struct ata_port_operations mv6_ops = {
 	.post_internal_cmd = mv_post_int_cmd,
 	.freeze = mv_eh_freeze,
 	.thaw = mv_eh_thaw,
+	.qc_defer = ata_std_qc_defer,
 
 	.scr_read = mv_scr_read,
 	.scr_write = mv_scr_write,
@@ -600,6 +602,7 @@ static const struct ata_port_operations mv_iie_ops = {
 	.post_internal_cmd = mv_post_int_cmd,
 	.freeze = mv_eh_freeze,
 	.thaw = mv_eh_thaw,
+	.qc_defer = ata_std_qc_defer,
 
 	.scr_read = mv_scr_read,
 	.scr_write = mv_scr_write,
@@ -628,26 +631,29 @@ static const struct ata_port_info mv_port_info[] = {
 		.port_ops = &mv5_ops,
 	},
 	{ /* chip_604x */
-		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+			 ATA_FLAG_NCQ,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = ATA_UDMA6,
 		.port_ops = &mv6_ops,
 	},
 	{ /* chip_608x */
 		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-			 MV_FLAG_DUAL_HC,
+			 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = ATA_UDMA6,
 		.port_ops = &mv6_ops,
 	},
 	{ /* chip_6042 */
-		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+			 ATA_FLAG_NCQ,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = ATA_UDMA6,
 		.port_ops = &mv_iie_ops,
 	},
 	{ /* chip_7042 */
-		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+			 ATA_FLAG_NCQ,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = ATA_UDMA6,
 		.port_ops = &mv_iie_ops,
@@ -1295,7 +1301,8 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	u16 flags = 0;
 	unsigned in_index;
 
-	if (qc->tf.protocol != ATA_PROT_DMA)
+	if ((qc->tf.protocol != ATA_PROT_DMA) &&
+	    (qc->tf.protocol != ATA_PROT_NCQ))
 		return;
 
 	/* Fill in command request block
@@ -1331,13 +1338,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	case ATA_CMD_WRITE_FUA_EXT:
 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
 		break;
-#ifdef LIBATA_NCQ	/* FIXME: remove this line when NCQ added */
 	case ATA_CMD_FPDMA_READ:
 	case ATA_CMD_FPDMA_WRITE:
 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
 		break;
-#endif			/* FIXME: remove this line when NCQ added */
 	default:
 		/* The only other commands EDMA supports in non-queued and
 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
@@ -1386,7 +1391,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	unsigned in_index;
 	u32 flags = 0;
 
-	if (qc->tf.protocol != ATA_PROT_DMA)
+	if ((qc->tf.protocol != ATA_PROT_DMA) &&
+	    (qc->tf.protocol != ATA_PROT_NCQ))
 		return;
 
 	/* Fill in Gen IIE command request block
@@ -1452,7 +1458,8 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 	struct mv_port_priv *pp = ap->private_data;
 	u32 in_index;
 
-	if (qc->tf.protocol != ATA_PROT_DMA) {
+	if ((qc->tf.protocol != ATA_PROT_DMA) &&
+	    (qc->tf.protocol != ATA_PROT_NCQ)) {
 		/* We're about to send a non-EDMA capable command to the
 		 * port. Turn off EDMA so there won't be problems accessing
 		 * shadow block, etc registers.
@@ -1463,12 +1470,6 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
 	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
 
-	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
-
-	/* until we do queuing, the queue should be empty at this point */
-	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
-		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
-
 	pp->req_idx++;
 
 	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
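With queuing enabled, several commands may be outstanding at once, so the WARN_ON that insisted the request queue be empty at issue time is dropped; the issue path now simply bumps req_idx and shifts the wrapped index into position for the EDMA request-queue in-pointer. A small stand-alone model of that wrap-and-shift arithmetic follows; MV_MAX_Q_DEPTH = 32 and EDMA_REQ_Q_PTR_SHIFT = 5 are assumed here purely for illustration and should be checked against the driver's own definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed illustrative values; the real constants are defined in sata_mv.c. */
#define MV_MAX_Q_DEPTH       32U
#define MV_MAX_Q_DEPTH_MASK  (MV_MAX_Q_DEPTH - 1)
#define EDMA_REQ_Q_PTR_SHIFT 5

int main(void)
{
	unsigned int req_idx = 30;	/* pretend the ring is nearly full */

	for (int issued = 0; issued < 4; issued++) {
		req_idx++;
		/* Same expression as the issue path: wrap the software index,
		 * then shift it into the in-pointer bit field. */
		uint32_t in_index =
			(req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
		printf("req_idx=%u -> in-pointer field=0x%03x\n", req_idx, in_index);
	}
	return 0;
}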