author		Mark Lord <liml@rtr.ca>		2006-05-19 16:36:36 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-05-20 00:31:45 -0400
commit		a6432436c5e14b416f27c8f87c5bf0bc36771f49 (patch)
tree		c87e8148240a4efefeba72a239ee049e6d88bb0c /drivers
parent		e857f141945f29c16f72ffcfdbce097f8be6c4e9 (diff)
[PATCH] sata_mv: remove local copy of queue indexes
The driver currently keeps local copies of the hardware request/response
queue indexes.  But it expends significant effort ensuring consistency
between the two views, and still gets it wrong after an error or reset
occurs.

This patch removes the local copies, in favour of just accessing the
hardware whenever we need them.  Eventually this may need to be tweaked
again for NCQ, but for now this works and solves problems some users
were seeing.

Signed-off-by: Mark Lord <liml@rtr.ca>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
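The pattern the patch standardizes on is easiest to see in isolation: derive the queue index from the EDMA pointer register at the moment it is needed, rather than trusting a cached copy that can go stale across an error or reset. Below is a minimal sketch of that read, using the driver's own register and mask names; the helper mv_req_q_in_index() is hypothetical, distilled from the hunks that follow rather than added by the patch:

static inline unsigned mv_req_q_in_index(void __iomem *port_mmio)
{
        /* read the hardware producer index each time; never cache it */
        u32 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        return (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
}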
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/sata_mv.c	76
1 file changed, 33 insertions(+), 43 deletions(-)
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 65dc65304f51..dea9d4e42586 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -308,9 +308,6 @@ struct mv_port_priv {
 	dma_addr_t		crpb_dma;
 	struct mv_sg		*sg_tbl;
 	dma_addr_t		sg_tbl_dma;
-
-	unsigned		req_producer;	/* cp of req_in_ptr */
-	unsigned		rsp_consumer;	/* cp of rsp_out_ptr */
 	u32			pp_flags;
 };
 
@@ -943,8 +940,6 @@ static int mv_port_start(struct ata_port *ap)
 	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
-	pp->req_producer = pp->rsp_consumer = 0;
-
 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
 	 * we'll be unable to send non-data, PIO, etc due to restricted access
 	 * to shadow regs.
@@ -1028,10 +1023,9 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
 	}
 }
 
-static inline unsigned mv_inc_q_index(unsigned *index)
+static inline unsigned mv_inc_q_index(unsigned index)
 {
-	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
-	return *index;
+	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
 }
 
 static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
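With mv_inc_q_index() reworked above to take and return a value instead of mutating through a pointer, each caller now advances a locally derived index and writes it back to hardware explicitly. A quick illustration of the masked wraparound, assuming the driver's 32-entry queues (so MV_MAX_Q_DEPTH_MASK would be 0x1f; the depth is an assumption here, not stated in this hunk):

        unsigned idx = 31;              /* last slot of a 32-entry queue */
        idx = mv_inc_q_index(idx);      /* masked increment wraps: idx == 0 */
        idx = mv_inc_q_index(idx);      /* idx == 1 */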
@@ -1059,15 +1053,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	u16 *cw;
 	struct ata_taskfile *tf;
 	u16 flags = 0;
+	unsigned in_index;
 
 	if (ATA_PROT_DMA != qc->tf.protocol)
 		return;
 
-	/* the req producer index should be the same as we remember it */
-	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->req_producer);
-
 	/* Fill in command request block
 	 */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1075,13 +1065,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 
-	pp->crqb[pp->req_producer].sg_addr =
+	/* get current queue index from hardware */
+	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
+		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+	pp->crqb[in_index].sg_addr =
 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
-	pp->crqb[pp->req_producer].sg_addr_hi =
+	pp->crqb[in_index].sg_addr_hi =
 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
-	pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
+	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
 
-	cw = &pp->crqb[pp->req_producer].ata_cmd[0];
+	cw = &pp->crqb[in_index].ata_cmd[0];
 	tf = &qc->tf;
 
 	/* Sadly, the CRQB cannot accomodate all registers--there are
@@ -1150,16 +1144,12 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	struct mv_port_priv *pp = ap->private_data;
 	struct mv_crqb_iie *crqb;
 	struct ata_taskfile *tf;
+	unsigned in_index;
 	u32 flags = 0;
 
 	if (ATA_PROT_DMA != qc->tf.protocol)
 		return;
 
-	/* the req producer index should be the same as we remember it */
-	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->req_producer);
-
 	/* Fill in Gen IIE command request block
 	 */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1168,7 +1158,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 
-	crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
+	/* get current queue index from hardware */
+	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
+		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
 	crqb->flags = cpu_to_le32(flags);
@@ -1216,6 +1210,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 {
 	void __iomem *port_mmio = mv_ap_base(qc->ap);
 	struct mv_port_priv *pp = qc->ap->private_data;
+	unsigned in_index;
 	u32 in_ptr;
 
 	if (ATA_PROT_DMA != qc->tf.protocol) {
@@ -1227,23 +1222,20 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 		return ata_qc_issue_prot(qc);
 	}
 
 	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
 
-	/* the req producer index should be the same as we remember it */
-	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->req_producer);
 	/* until we do queuing, the queue should be empty at this point */
-	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
-		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
+		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
 
-	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */
+	in_index = mv_inc_q_index(in_index);	/* now incr producer index */
 
 	mv_start_dma(port_mmio, pp);
 
 	/* and write the request in pointer to kick the EDMA to life */
 	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
-	in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
+	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
 	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 
 	return 0;
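Note the shape of the writeback above: per the masks, the IN-pointer register also carries low bits of the queue base address, so the code preserves those via EDMA_REQ_Q_BASE_LO_MASK and splices only the index field back in. Condensed from the hunk, variable names as above, illustrative only:

        in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;              /* keep queue-base bits */
        in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;     /* replace only the index field */
        writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);   /* flushed write kicks EDMA */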
@@ -1266,28 +1258,26 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct mv_port_priv *pp = ap->private_data;
+	unsigned out_index;
 	u32 out_ptr;
 	u8 ata_status;
 
 	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
-
-	/* the response consumer index should be the same as we remember it */
-	WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->rsp_consumer);
+	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
 
-	ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;
+	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
+			>> CRPB_FLAG_STATUS_SHIFT;
 
 	/* increment our consumer index... */
-	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
+	out_index = mv_inc_q_index(out_index);
 
 	/* and, until we do NCQ, there should only be 1 CRPB waiting */
-	WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
-		  EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-		pp->rsp_consumer);
+	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
+		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
 
 	/* write out our inc'd consumer index so EDMA knows we're caught up */
 	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
-	out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
+	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
 	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
 	/* Return ATA status register for completed CRPB */
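One subtlety folded into this last hunk: the CRPB flags word now passes through le16_to_cpu() before the status shift, where the old code read the little-endian DMA memory raw, so the status byte also comes out right on big-endian hosts. Condensed, the consumer path after the patch reads as follows (excerpted from the hunk above, illustrative only):

        out_ptr   = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
        out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

        /* byte-swap (on big-endian hosts) before extracting the ATA status */
        ata_status = le16_to_cpu(pp->crpb[out_index].flags) >> CRPB_FLAG_STATUS_SHIFT;

        out_index = mv_inc_q_index(out_index);          /* consume one CRPB */

        out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;             /* keep queue-base bits */
        out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;   /* publish new consumer index */
        writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);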