Diffstat (limited to 'drivers/ata/sata_mv.c')
 drivers/ata/sata_mv.c | 162 +++++++++++++++++++++++++++++++++---------------
 1 files changed, 105 insertions(+), 57 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 3873b29c80d6..8c554f2e69b0 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -108,8 +108,6 @@ enum {
 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
 
-	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,
-
 	MV_MAX_Q_DEPTH		= 32,
 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
 
@@ -133,18 +131,22 @@ enum {
 	/* Host Flags */
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
-	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
+	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
+				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
 
 	CRQB_FLAG_READ		= (1 << 0),
 	CRQB_TAG_SHIFT		= 1,
+	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
+	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
 	CRQB_CMD_ADDR_SHIFT	= 8,
 	CRQB_CMD_CS		= (0x2 << 11),
 	CRQB_CMD_LAST		= (1 << 15),
 
 	CRPB_FLAG_STATUS_SHIFT	= 8,
+	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
+	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
 
 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
 
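The new CRQB_IOID_SHIFT and CRQB_HOSTQ_SHIFT constants describe where the IO Id and host queue tag would sit in a Gen-II/IIE request-block word. A rough sketch of how such a word could be packed (hypothetical helper, not part of this patch):

    /* Hypothetical illustration, not from the patch: packing a
     * Gen-II/IIE CRQB word from the shift constants above. */
    static u32 crqb_pack_word(u32 tag, u32 ioid, int is_read)
    {
    	u32 w = 0;

    	if (is_read)
    		w |= CRQB_FLAG_READ;		/* bit 0: read vs. write */
    	w |= tag  << CRQB_TAG_SHIFT;		/* command tag above bit 1 */
    	w |= ioid << CRQB_IOID_SHIFT;		/* IO Id from bit 6 */
    	w |= tag  << CRQB_HOSTQ_SHIFT;		/* host queue tag from bit 17 */
    	return w;
    }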
@@ -236,8 +238,10 @@ enum {
 	EDMA_ERR_DEV_DCON	= (1 << 3),
 	EDMA_ERR_DEV_CON	= (1 << 4),
 	EDMA_ERR_SERR		= (1 << 5),
-	EDMA_ERR_SELF_DIS	= (1 << 7),
+	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
+	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
 	EDMA_ERR_BIST_ASYNC	= (1 << 8),
+	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
 	EDMA_ERR_CRBQ_PAR	= (1 << 9),
 	EDMA_ERR_CRPB_PAR	= (1 << 10),
 	EDMA_ERR_INTRL_PAR	= (1 << 11),
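Note that bit 8 of the EDMA error cause register is now overloaded: it means self-disable on Gen I (EDMA_ERR_SELF_DIS_5), BIST-async on Gen II, and a transport-layer irq on Gen IIE (EDMA_ERR_TRANS_IRQ_7). Error handling therefore has to pick the mask per chip generation; a minimal sketch (hypothetical helper, not in the patch):

    /* Hypothetical helper, not in the patch: choose the self-disable
     * bit for the chip generation, since bit 8 is overloaded. */
    static u32 mv_self_dis_bit(struct mv_host_priv *hpriv)
    {
    	return IS_GEN_I(hpriv) ? EDMA_ERR_SELF_DIS_5 : EDMA_ERR_SELF_DIS;
    }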
@@ -248,6 +252,8 @@ enum {
 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
 	EDMA_ERR_TRANS_PROTO	= (1 << 31),
+	EDMA_ERR_OVERRUN_5	= (1 << 5),
+	EDMA_ERR_UNDERRUN_5	= (1 << 6),
 	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
 				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
 				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
@@ -288,6 +294,7 @@ enum {
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),
 	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
+	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),
 };
 
 #define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
@@ -417,12 +424,30 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 			     unsigned int port_no);
 static void mv_stop_and_reset(struct ata_port *ap);
 
-static struct scsi_host_template mv_sht = {
+static struct scsi_host_template mv5_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= MV_MAX_SG_CT,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= 1,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= MV_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct scsi_host_template mv6_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= MV_USE_Q_DEPTH,
+	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= MV_MAX_SG_CT,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
@@ -530,38 +555,38 @@ static const struct ata_port_info mv_port_info[] = {
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_508x */
-		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_5080 */
-		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_604x */
-		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_608x */
-		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				   MV_FLAG_DUAL_HC),
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_6042 */
-		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_7042 */
-		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
@@ -709,6 +734,41 @@ static void mv_irq_clear(struct ata_port *ap)
 {
 }
 
+static void mv_set_edma_ptrs(void __iomem *port_mmio,
+			     struct mv_host_priv *hpriv,
+			     struct mv_port_priv *pp)
+{
+	/*
+	 * initialize request queue
+	 */
+	WARN_ON(pp->crqb_dma & 0x3ff);
+	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
+	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
+		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+		writelfl(pp->crqb_dma & 0xffffffff,
+			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+	else
+		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+
+	/*
+	 * initialize response queue
+	 */
+	WARN_ON(pp->crpb_dma & 0xff);
+	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+
+	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+		writelfl(pp->crpb_dma & 0xffffffff,
+			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+	else
+		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+
+	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
+		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+
+}
+
 /**
  * mv_start_dma - Enable eDMA engine
  * @base: port base address
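In the new mv_set_edma_ptrs(), `(pp->crqb_dma >> 16) >> 16` takes the upper 32 bits of the queue's bus address. dma_addr_t may be a 32-bit type, and shifting a 32-bit value by 32 is undefined in C, so the shift is split in two; an equivalent standalone sketch of the idiom:

    /* Sketch of the "(x >> 16) >> 16" idiom: portable upper-32-bit
     * extraction when the address type might be only 32 bits wide. */
    static inline u32 dma_upper32(dma_addr_t addr)
    {
    	return (addr >> 16) >> 16;	/* 0 when dma_addr_t is 32-bit */
    }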
@@ -720,9 +780,10 @@ static void mv_irq_clear(struct ata_port *ap)
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
+static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
+			 struct mv_port_priv *pp)
 {
-	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
+	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
 	}
@@ -739,12 +800,12 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_stop_dma(struct ata_port *ap)
+static int mv_stop_dma(struct ata_port *ap)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct mv_port_priv *pp	= ap->private_data;
 	u32 reg;
-	int i;
+	int i, err = 0;
 
 	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
 		/* Disable EDMA if active.   The disable bit auto clears.
@@ -764,10 +825,13 @@ static void mv_stop_dma(struct ata_port *ap)
 		udelay(100);
 	}
 
-	if (EDMA_EN & reg) {
+	if (reg & EDMA_EN) {
 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
 		/* FIXME: Consider doing a reset here to recover */
+		err = -EIO;
 	}
+
+	return err;
 }
 
 #ifdef ATA_DEBUG
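With mv_stop_dma() now reporting whether the engine actually stopped, callers can react instead of silently carrying on. A hypothetical caller (not in this patch):

    /* Hypothetical caller, not in the patch: propagate the failure
     * rather than issue commands while eDMA may still be running. */
    rc = mv_stop_dma(ap);
    if (rc)
    	return rc;	/* -EIO: engine refused to stop */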
@@ -884,12 +948,13 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 	writelfl(val, mv_ap_base(ap) + ofs);
 }
 
-static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
+static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
+			void __iomem *port_mmio)
 {
 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
 
 	/* set up non-NCQ EDMA configuration */
-	cfg &= ~(1 << 9);	/* disable equeue */
+	cfg &= ~(1 << 9);	/* disable eQue */
 
 	if (IS_GEN_I(hpriv)) {
 		cfg &= ~0x1f;		/* clear queue depth */
@@ -971,28 +1036,9 @@ static int mv_port_start(struct ata_port *ap)
 	pp->sg_tbl = mem;
 	pp->sg_tbl_dma = mem_dma;
 
-	mv_edma_cfg(hpriv, port_mmio);
-
-	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
-	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
-		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
-
-	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-		writelfl(pp->crqb_dma & 0xffffffff,
-			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
-	else
-		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
-
-	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+	mv_edma_cfg(ap, hpriv, port_mmio);
 
-	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-		writelfl(pp->crpb_dma & 0xffffffff,
-			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-	else
-		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-
-	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
-		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+	mv_set_edma_ptrs(port_mmio, hpriv, pp);
 
 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
 	 * we'll be unable to send non-data, PIO, etc due to restricted access
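Factoring the queue-pointer setup out of mv_port_start() into mv_set_edma_ptrs() leaves the behavior here unchanged, but makes the ring setup reusable; for instance, a reset path could re-arm the hardware queue pointers the same way. A hypothetical sequence (not in this patch):

    /* Hypothetical reuse, not in the patch: re-arm the EDMA rings
     * after a channel reset, then restart the engine. */
    mv_channel_reset(hpriv, mmio, ap->port_no);
    mv_set_edma_ptrs(port_mmio, hpriv, pp);
    mv_start_dma(port_mmio, hpriv, pp);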
@@ -1088,7 +1134,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	u16 flags = 0;
 	unsigned in_index;
 
-	if (ATA_PROT_DMA != qc->tf.protocol)
+	if (qc->tf.protocol != ATA_PROT_DMA)
 		return;
 
 	/* Fill in command request block
@@ -1180,7 +1226,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	unsigned in_index;
 	u32 flags = 0;
 
-	if (ATA_PROT_DMA != qc->tf.protocol)
+	if (qc->tf.protocol != ATA_PROT_DMA)
 		return;
 
 	/* Fill in Gen IIE command request block
@@ -1241,17 +1287,19 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
  */
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 {
-	void __iomem *port_mmio = mv_ap_base(qc->ap);
-	struct mv_port_priv *pp = qc->ap->private_data;
+	struct ata_port *ap = qc->ap;
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
 	unsigned in_index;
 	u32 in_ptr;
 
-	if (ATA_PROT_DMA != qc->tf.protocol) {
+	if (qc->tf.protocol != ATA_PROT_DMA) {
 		/* We're about to send a non-EDMA capable command to the
 		 * port.  Turn off EDMA so there won't be problems accessing
 		 * shadow block, etc registers.
 		 */
-		mv_stop_dma(qc->ap);
+		mv_stop_dma(ap);
 		return ata_qc_issue_prot(qc);
 	}
 
@@ -1264,7 +1312,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
 	in_index = mv_inc_q_index(in_index);	/* now incr producer index */
 
-	mv_start_dma(port_mmio, pp);
+	mv_start_dma(port_mmio, hpriv, pp);
 
 	/* and write the request in pointer to kick the EDMA to life */
 	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
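The producer index wraps within the 32-entry ring via mv_inc_q_index(); with MV_MAX_Q_DEPTH a power of two, the wrap reduces to a mask. A sketch of what that helper presumably looks like, reconstructed from the constants above (not shown in this diff):

    /* Sketch: ring-index increment with a power-of-two queue depth. */
    static inline unsigned mv_inc_q_index(unsigned index)
    {
    	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
    }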
@@ -1379,7 +1427,8 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
 	struct ata_queued_cmd *qc;
 	u32 hc_irq_cause;
-	int shift, port, port0, hard_port, handled;
+	int port, port0;
+	int shift, hard_port, handled;
 	unsigned int err_mask;
 
 	if (hc == 0)
@@ -1458,10 +1507,9 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 }
 
 /**
- * mv_interrupt -
+ * mv_interrupt - Main interrupt event handler
  * @irq: unused
  * @dev_instance: private data; in this case the host structure
- * @regs: unused
  *
  * Read the read only register to determine if any host
  * controllers have pending interrupts.  If so, call lower level
@@ -1965,7 +2013,7 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct ata_taskfile tf;
 	struct ata_device *dev = &ap->device[0];
-	unsigned long timeout;
+	unsigned long deadline;
 	int retry = 5;
 	u32 sstatus;
 
@@ -1983,14 +2031,14 @@ comreset_retry:
 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
 	__msleep(20, can_sleep);
 
-	timeout = jiffies + msecs_to_jiffies(200);
+	deadline = jiffies + msecs_to_jiffies(200);
 	do {
 		sata_scr_read(ap, SCR_STATUS, &sstatus);
 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
 			break;
 
 		__msleep(1, can_sleep);
-	} while (time_before(jiffies, timeout));
+	} while (time_before(jiffies, deadline));
 
 	/* work around errata */
 	if (IS_60XX(hpriv) &&
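The poll above watches the SStatus DET field (its low bits): 0 means no device detected, 3 means a device is present with phy communication established; either is a stable state worth exiting on. As a hypothetical predicate (not in this patch):

    /* Hypothetical helper, not in the patch: true once the link has
     * settled after COMRESET (SStatus DET == 0 or 3). */
    static inline int mv_link_settled(u32 sstatus)
    {
    	return ((sstatus & 0x3) == 0) || ((sstatus & 0x3) == 3);
    }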
@@ -2427,7 +2475,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_master(pdev);
 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
-				 &mv_sht);
+				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
 }
 
 static int __init mv_init(void)