diff options
author | Jeff Garzik <jeff@garzik.org> | 2007-10-18 16:21:18 -0400 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2007-10-18 16:21:18 -0400 |
commit | 3be6cbd73f74b4a3da82cc7d6e1688a4ae595fc7 (patch) | |
tree | a077cf70d1cd438db34f5def23dd8f72f42b5a8c /drivers/ata | |
parent | 858c9c406688bc7244986b5836265071edfd1d3f (diff) |
[libata] kill ata_sg_is_last()
Short term, this works around a bug introduced by early sg-chaining
work.
Long term, removing this function eliminates a branch from a hot
path loop in each scatter/gather table build. Also, as this code
demonstrates, we don't need to _track_ the end of the s/g list, as
long as we mark it in some way. And doing so programmatically is nice.
So it's a useful cleanup, regardless of its short-term effects.
Based conceptually on a quick patch by Jens Axboe.
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/ata')
-rw-r--r-- | drivers/ata/pdc_adma.c | 9 | ||||
-rw-r--r-- | drivers/ata/sata_mv.c | 26 | ||||
-rw-r--r-- | drivers/ata/sata_sil24.c | 11 |
3 files changed, 20 insertions, 26 deletions
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index 8d1b03d5bcb1..199f7e150eb3 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c | |||
@@ -318,7 +318,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) | |||
318 | struct scatterlist *sg; | 318 | struct scatterlist *sg; |
319 | struct ata_port *ap = qc->ap; | 319 | struct ata_port *ap = qc->ap; |
320 | struct adma_port_priv *pp = ap->private_data; | 320 | struct adma_port_priv *pp = ap->private_data; |
321 | u8 *buf = pp->pkt; | 321 | u8 *buf = pp->pkt, *last_buf = NULL; |
322 | int i = (2 + buf[3]) * 8; | 322 | int i = (2 + buf[3]) * 8; |
323 | u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0); | 323 | u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0); |
324 | 324 | ||
@@ -334,8 +334,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) | |||
334 | *(__le32 *)(buf + i) = cpu_to_le32(len); | 334 | *(__le32 *)(buf + i) = cpu_to_le32(len); |
335 | i += 4; | 335 | i += 4; |
336 | 336 | ||
337 | if (ata_sg_is_last(sg, qc)) | 337 | last_buf = &buf[i]; |
338 | pFLAGS |= pEND; | ||
339 | buf[i++] = pFLAGS; | 338 | buf[i++] = pFLAGS; |
340 | buf[i++] = qc->dev->dma_mode & 0xf; | 339 | buf[i++] = qc->dev->dma_mode & 0xf; |
341 | buf[i++] = 0; /* pPKLW */ | 340 | buf[i++] = 0; /* pPKLW */ |
@@ -348,6 +347,10 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) | |||
348 | VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4, | 347 | VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4, |
349 | (unsigned long)addr, len); | 348 | (unsigned long)addr, len); |
350 | } | 349 | } |
350 | |||
351 | if (likely(last_buf)) | ||
352 | *last_buf |= pEND; | ||
353 | |||
351 | return i; | 354 | return i; |
352 | } | 355 | } |
353 | 356 | ||
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 4df8311968e9..7f1b13e89cf7 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -421,7 +421,6 @@ static void mv_error_handler(struct ata_port *ap); | |||
421 | static void mv_post_int_cmd(struct ata_queued_cmd *qc); | 421 | static void mv_post_int_cmd(struct ata_queued_cmd *qc); |
422 | static void mv_eh_freeze(struct ata_port *ap); | 422 | static void mv_eh_freeze(struct ata_port *ap); |
423 | static void mv_eh_thaw(struct ata_port *ap); | 423 | static void mv_eh_thaw(struct ata_port *ap); |
424 | static int mv_slave_config(struct scsi_device *sdev); | ||
425 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 424 | static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
426 | 425 | ||
427 | static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | 426 | static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
@@ -459,7 +458,7 @@ static struct scsi_host_template mv5_sht = { | |||
459 | .use_clustering = 1, | 458 | .use_clustering = 1, |
460 | .proc_name = DRV_NAME, | 459 | .proc_name = DRV_NAME, |
461 | .dma_boundary = MV_DMA_BOUNDARY, | 460 | .dma_boundary = MV_DMA_BOUNDARY, |
462 | .slave_configure = mv_slave_config, | 461 | .slave_configure = ata_scsi_slave_config, |
463 | .slave_destroy = ata_scsi_slave_destroy, | 462 | .slave_destroy = ata_scsi_slave_destroy, |
464 | .bios_param = ata_std_bios_param, | 463 | .bios_param = ata_std_bios_param, |
465 | }; | 464 | }; |
@@ -477,7 +476,7 @@ static struct scsi_host_template mv6_sht = { | |||
477 | .use_clustering = 1, | 476 | .use_clustering = 1, |
478 | .proc_name = DRV_NAME, | 477 | .proc_name = DRV_NAME, |
479 | .dma_boundary = MV_DMA_BOUNDARY, | 478 | .dma_boundary = MV_DMA_BOUNDARY, |
480 | .slave_configure = mv_slave_config, | 479 | .slave_configure = ata_scsi_slave_config, |
481 | .slave_destroy = ata_scsi_slave_destroy, | 480 | .slave_destroy = ata_scsi_slave_destroy, |
482 | .bios_param = ata_std_bios_param, | 481 | .bios_param = ata_std_bios_param, |
483 | }; | 482 | }; |
@@ -756,17 +755,6 @@ static void mv_irq_clear(struct ata_port *ap) | |||
756 | { | 755 | { |
757 | } | 756 | } |
758 | 757 | ||
759 | static int mv_slave_config(struct scsi_device *sdev) | ||
760 | { | ||
761 | int rc = ata_scsi_slave_config(sdev); | ||
762 | if (rc) | ||
763 | return rc; | ||
764 | |||
765 | blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2); | ||
766 | |||
767 | return 0; /* scsi layer doesn't check return value, sigh */ | ||
768 | } | ||
769 | |||
770 | static void mv_set_edma_ptrs(void __iomem *port_mmio, | 758 | static void mv_set_edma_ptrs(void __iomem *port_mmio, |
771 | struct mv_host_priv *hpriv, | 759 | struct mv_host_priv *hpriv, |
772 | struct mv_port_priv *pp) | 760 | struct mv_port_priv *pp) |
@@ -1138,7 +1126,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) | |||
1138 | { | 1126 | { |
1139 | struct mv_port_priv *pp = qc->ap->private_data; | 1127 | struct mv_port_priv *pp = qc->ap->private_data; |
1140 | struct scatterlist *sg; | 1128 | struct scatterlist *sg; |
1141 | struct mv_sg *mv_sg; | 1129 | struct mv_sg *mv_sg, *last_sg = NULL; |
1142 | 1130 | ||
1143 | mv_sg = pp->sg_tbl; | 1131 | mv_sg = pp->sg_tbl; |
1144 | ata_for_each_sg(sg, qc) { | 1132 | ata_for_each_sg(sg, qc) { |
@@ -1159,13 +1147,13 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) | |||
1159 | sg_len -= len; | 1147 | sg_len -= len; |
1160 | addr += len; | 1148 | addr += len; |
1161 | 1149 | ||
1162 | if (!sg_len && ata_sg_is_last(sg, qc)) | 1150 | last_sg = mv_sg; |
1163 | mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); | ||
1164 | |||
1165 | mv_sg++; | 1151 | mv_sg++; |
1166 | } | 1152 | } |
1167 | |||
1168 | } | 1153 | } |
1154 | |||
1155 | if (likely(last_sg)) | ||
1156 | last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); | ||
1169 | } | 1157 | } |
1170 | 1158 | ||
1171 | static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) | 1159 | static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) |
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index b0619278454a..26ebffc10f3e 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -796,16 +796,19 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc, | |||
796 | struct sil24_sge *sge) | 796 | struct sil24_sge *sge) |
797 | { | 797 | { |
798 | struct scatterlist *sg; | 798 | struct scatterlist *sg; |
799 | struct sil24_sge *last_sge = NULL; | ||
799 | 800 | ||
800 | ata_for_each_sg(sg, qc) { | 801 | ata_for_each_sg(sg, qc) { |
801 | sge->addr = cpu_to_le64(sg_dma_address(sg)); | 802 | sge->addr = cpu_to_le64(sg_dma_address(sg)); |
802 | sge->cnt = cpu_to_le32(sg_dma_len(sg)); | 803 | sge->cnt = cpu_to_le32(sg_dma_len(sg)); |
803 | if (ata_sg_is_last(sg, qc)) | 804 | sge->flags = 0; |
804 | sge->flags = cpu_to_le32(SGE_TRM); | 805 | |
805 | else | 806 | last_sge = sge; |
806 | sge->flags = 0; | ||
807 | sge++; | 807 | sge++; |
808 | } | 808 | } |
809 | |||
810 | if (likely(last_sge)) | ||
811 | last_sge->flags = cpu_to_le32(SGE_TRM); | ||
809 | } | 812 | } |
810 | 813 | ||
811 | static int sil24_qc_defer(struct ata_queued_cmd *qc) | 814 | static int sil24_qc_defer(struct ata_queued_cmd *qc) |