author		Tejun Heo <htejun@gmail.com>	2006-02-11 05:11:13 -0500
committer	Jeff Garzik <jgarzik@pobox.com>	2006-02-11 17:51:57 -0500
commit		beec7dbc6ff003bbc94de62b3323519c878fb2ac (patch)
tree		f1cf8dee4f271356f2025a386eaa746aabafd1fe /drivers
parent		a46314744d8fadb91451bf2e5d2fd949c4a752d8 (diff)
[PATCH] libata: convert assert(xxx)'s in low-level drivers to WARN_ON(!xxx)'s
This patch converts all assert(xxx)'s in low-level drivers to
WARN_ON(!xxx)'s.  After this patch, there is no in-kernel user of the
libata assert() macro.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
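For readers not familiar with the two macros: libata's assert(expr) complained when its expression was false, while the kernel's WARN_ON(cond) complains (with a backtrace) when its condition is true, so every check touched below has its condition logically inverted, e.g. assert(x == 0) becomes WARN_ON(x) and assert(a > b) becomes WARN_ON(a <= b). The standalone sketch below is not part of the patch; the old_assert()/warn_on() macros only emulate the behaviour for illustration, and the sample values are made up.

#include <stdio.h>

/* old-style libata check: complain when the expression is FALSE */
#define old_assert(expr) \
	do { if (!(expr)) printf("Assertion failed: %s\n", #expr); } while (0)

/* WARN_ON-style check: complain when the condition is TRUE */
#define warn_on(cond) \
	do { if ((cond)) printf("WARNING: %s\n", #cond); } while (0)

int main(void)
{
	unsigned int err_mask = 0x04;		/* pretend a command failed */
	unsigned int tag = 40, max_depth = 32;	/* pretend the tag is out of range */

	/* each pair fires under the same bad state; only the sense of
	 * the tested condition is inverted */
	old_assert(err_mask == 0);		/* before the patch */
	warn_on(err_mask);			/* after the patch  */

	old_assert(max_depth > tag);		/* before */
	warn_on(max_depth <= tag);		/* after  */

	return 0;
}

Both forms only report the problem and let execution continue, which is why the conversion is purely mechanical.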
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/ahci.c		2
-rw-r--r--	drivers/scsi/sata_mv.c		50
-rw-r--r--	drivers/scsi/sata_qstor.c	4
-rw-r--r--	drivers/scsi/sata_sx4.c		2
4 files changed, 29 insertions, 29 deletions
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 86bccb7128f9..24a54a5a91b8 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -714,7 +714,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 	ci = readl(port_mmio + PORT_CMD_ISSUE);
 	if (likely((ci & 0x1) == 0)) {
 		if (qc) {
-			assert(qc->err_mask == 0);
+			WARN_ON(qc->err_mask);
 			ata_qc_complete(qc);
 			qc = NULL;
 		}
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 6c80527ddd21..732cb64a4d1b 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -632,8 +632,8 @@ static void mv_irq_clear(struct ata_port *ap)
  *	@base: port base address
  *	@pp: port private data
  *
- *	Verify the local cache of the eDMA state is accurate with an
- *	assert.
+ *	Verify the local cache of the eDMA state is accurate with a
+ *	WARN_ON.
  *
  *	LOCKING:
  *	Inherited from caller.
@@ -644,15 +644,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
 	}
-	assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
+	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
 }

 /**
  *	mv_stop_dma - Disable eDMA engine
  *	@ap: ATA channel to manipulate
  *
- *	Verify the local cache of the eDMA state is accurate with an
- *	assert.
+ *	Verify the local cache of the eDMA state is accurate with a
+ *	WARN_ON.
  *
  *	LOCKING:
  *	Inherited from caller.
@@ -670,7 +670,7 @@ static void mv_stop_dma(struct ata_port *ap)
 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 	} else {
-		assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
+		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
 	}

 	/* now properly wait for the eDMA to stop */
@@ -1061,15 +1061,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 		return;

 	/* the req producer index should be the same as we remember it */
-	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->req_producer);

 	/* Fill in command request block
 	 */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;
-	assert(MV_MAX_Q_DEPTH > qc->tag);
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;

 	pp->crqb[pp->req_producer].sg_addr =
@@ -1152,16 +1152,16 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 		return;

 	/* the req producer index should be the same as we remember it */
-	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->req_producer);

 	/* Fill in Gen IIE command request block
 	 */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;

-	assert(MV_MAX_Q_DEPTH > qc->tag);
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;

 	crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
@@ -1226,12 +1226,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

 	/* the req producer index should be the same as we remember it */
-	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->req_producer);
 	/* until we do queuing, the queue should be empty at this point */
-	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
 		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

 	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */

@@ -1251,7 +1251,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
  *
  *	This routine is for use when the port is in DMA mode, when it
  *	will be using the CRPB (command response block) method of
- *	returning command completion information.  We assert indices
+ *	returning command completion information.  We check indices
  *	are good, grab status, and bump the response consumer index to
  *	prove that we're up to date.
  *
@@ -1267,16 +1267,16 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
 	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

 	/* the response consumer index should be the same as we remember it */
-	assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->rsp_consumer);

 	/* increment our consumer index... */
 	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);

 	/* and, until we do NCQ, there should only be 1 CRPB waiting */
-	assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
-		 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
+		  EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->rsp_consumer);

 	/* write out our inc'd consumer index so EDMA knows we're caught up */
 	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 82c3df7048ea..955131b43206 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -277,8 +277,8 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
 	unsigned int nelem;
 	u8 *prd = pp->pkt + QS_CPB_BYTES;

-	assert(qc->__sg != NULL);
-	assert(qc->n_elem > 0);
+	WARN_ON(qc->__sg == NULL);
+	WARN_ON(qc->n_elem == 0);

 	nelem = 0;
 	ata_for_each_sg(sg, qc) {
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 212cff4fe5f5..e158f7a34d62 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -461,7 +461,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
 	unsigned int i, idx, total_len = 0, sgt_len;
 	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

-	assert(qc->flags & ATA_QCFLAG_DMAMAP);
+	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

 	VPRINTK("ata%u: ENTER\n", ap->id);
