author     Tejun Heo <tj@kernel.org>                        2009-01-09 05:19:14 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-09 14:54:22 -0500
commit     efcb3cf7f00c3c424db012380a8a974c2676a3c8 (patch)
tree       eaff0a3d6fed469629b81e8781ac80ca7c75eec1
parent     43529c97122f2c851126447963eedcb8cba74fbe (diff)
libata: use WARN_ON_ONCE on hot paths
Convert WARN_ON() on command issue/completion paths to WARN_ON_ONCE()
so that libata doesn't spam the machine even when one of those
conditions triggers repeatedly.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  drivers/ata/libata-core.c | 16
-rw-r--r--  drivers/ata/libata-sff.c  | 24
2 files changed, 20 insertions(+), 20 deletions(-)
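Why the conversion matters: WARN_ON() prints a full warning with a stack dump every time its condition evaluates true, so a check that keeps tripping on the command issue/completion hot path floods the kernel log, while WARN_ON_ONCE() latches a per-callsite flag and reports only the first hit. The following is a minimal, self-contained userspace sketch of that behaviour, not the actual kernel macros; the warn_on()/warn_on_once() names are invented for illustration and the macros use GCC statement expressions, mirroring kernel style.

/* Build with: gcc -Wall -o warn_once_demo warn_once_demo.c */
#include <stdbool.h>
#include <stdio.h>

/* Reports every time the condition is true. */
#define warn_on(cond)						\
	({							\
		bool __hit = !!(cond);				\
		if (__hit)					\
			fprintf(stderr, "WARN: %s (%s:%d)\n",	\
				#cond, __FILE__, __LINE__);	\
		__hit;						\
	})

/* Reports only the first time; the static flag is per expansion site. */
#define warn_on_once(cond)					\
	({							\
		static bool __warned;				\
		bool __hit = !!(cond);				\
		if (__hit && !__warned) {			\
			__warned = true;			\
			fprintf(stderr, "WARN once: %s (%s:%d)\n",\
				#cond, __FILE__, __LINE__);	\
		}						\
		__hit;						\
	})

int main(void)
{
	for (int i = 0; i < 3; i++) {
		warn_on(1);		/* prints on every iteration */
		warn_on_once(1);	/* prints only on the first iteration */
	}
	return 0;
}

Because the latch is per callsite, each converted WARN_ON_ONCE() in the hunks below still gets to report once; only the repeated log traffic from a persistently failing check goes away.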
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 175df54eb66..c507a9ac78f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4556,7 +4556,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
 	struct scatterlist *sg = qc->sg;
 	int dir = qc->dma_dir;
 
-	WARN_ON(sg == NULL);
+	WARN_ON_ONCE(sg == NULL);
 
 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	unsigned int tag;
 
-	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
 
 	qc->flags = 0;
 	tag = qc->tag;
@@ -4791,8 +4791,8 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct ata_link *link = qc->dev->link;
 
-	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
-	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
 
 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
 		ata_sg_clean(qc);
@@ -4878,7 +4878,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 	struct ata_device *dev = qc->dev;
 	struct ata_eh_info *ehi = &dev->link->eh_info;
 
-	WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
+	WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
 
 	if (unlikely(qc->err_mask))
 		qc->flags |= ATA_QCFLAG_FAILED;
@@ -5000,16 +5000,16 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	 * check is skipped for old EH because it reuses active qc to
 	 * request ATAPI sense.
 	 */
-	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
+	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
 
 	if (ata_is_ncq(prot)) {
-		WARN_ON(link->sactive & (1 << qc->tag));
+		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
 
 		if (!link->sactive)
 			ap->nr_active_links++;
 		link->sactive |= 1 << qc->tag;
 	} else {
-		WARN_ON(link->sactive);
+		WARN_ON_ONCE(link->sactive);
 
 		ap->nr_active_links++;
 		link->active_tag = qc->tag;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index c59ad76c84b..0eae9b45355 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -578,7 +578,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 	}
 
 	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-		WARN_ON(!ioaddr->ctl_addr);
+		WARN_ON_ONCE(!ioaddr->ctl_addr);
 		iowrite8(tf->hob_feature, ioaddr->feature_addr);
 		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
 		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
@@ -651,7 +651,7 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 			iowrite8(tf->ctl, ioaddr->ctl_addr);
 			ap->last_ctl = tf->ctl;
 		} else
-			WARN_ON(1);
+			WARN_ON_ONCE(1);
 	}
 }
 EXPORT_SYMBOL_GPL(ata_sff_tf_read);
@@ -891,7 +891,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
 		/* READ/WRITE MULTIPLE */
 		unsigned int nsect;
 
-		WARN_ON(qc->dev->multi_count == 0);
+		WARN_ON_ONCE(qc->dev->multi_count == 0);
 
 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
 			    qc->dev->multi_count);
@@ -918,7 +918,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 {
 	/* send SCSI cdb */
 	DPRINTK("send cdb\n");
-	WARN_ON(qc->dev->cdb_len < 12);
+	WARN_ON_ONCE(qc->dev->cdb_len < 12);
 
 	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
 	ata_sff_sync(ap);
@@ -1014,7 +1014,7 @@ next_sg:
 	}
 
 	/* consumed can be larger than count only for the last transfer */
-	WARN_ON(qc->cursg && count != consumed);
+	WARN_ON_ONCE(qc->cursg && count != consumed);
 
 	if (bytes)
 		goto next_sg;
@@ -1172,13 +1172,13 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 	unsigned long flags = 0;
 	int poll_next;
 
-	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 
 	/* Make sure ata_sff_qc_issue() does not throw things
 	 * like DMA polling into the workqueue. Notice that
 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
 	 */
-	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
+	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
 
 fsm_start:
 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
@@ -1387,7 +1387,7 @@ fsm_start:
 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
 			ap->print_id, qc->dev->devno, status);
 
-		WARN_ON(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
+		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
 
 		ap->hsm_task_state = HSM_ST_IDLE;
 
@@ -1423,7 +1423,7 @@ void ata_pio_task(struct work_struct *work)
 	int poll_next;
 
 fsm_start:
-	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
+	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
 
 	/*
 	 * This is purely heuristic. This is a fast path.
@@ -1512,7 +1512,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		break;
 
 	case ATA_PROT_DMA:
-		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 
 		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
 		ap->ops->bmdma_setup(qc);		/* set up bmdma */
@@ -1564,7 +1564,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		break;
 
 	case ATAPI_PROT_DMA:
-		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 
 		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
 		ap->ops->bmdma_setup(qc);		/* set up bmdma */
@@ -1576,7 +1576,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		break;
 
 	default:
-		WARN_ON(1);
+		WARN_ON_ONCE(1);
 		return AC_ERR_SYSTEM;
 	}
 
