aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/ata/sata_mv.c
diff options
context:
space:
mode:
authorJeff Garzik <jeff@garzik.org>2007-10-18 16:21:18 -0400
committerJeff Garzik <jeff@garzik.org>2007-10-18 16:21:18 -0400
commit3be6cbd73f74b4a3da82cc7d6e1688a4ae595fc7 (patch)
treea077cf70d1cd438db34f5def23dd8f72f42b5a8c /drivers/ata/sata_mv.c
parent858c9c406688bc7244986b5836265071edfd1d3f (diff)
[libata] kill ata_sg_is_last()
Short term, this works around a bug introduced by early sg-chaining work. Long term, removing this function eliminates a branch from a hot path loop in each scatter/gather table build. Also, as this code demonstrates, we don't need to _track_ the end of the s/g list, as long as we mark it in some way. And doing so programmatically is nice. So it's a useful cleanup, regardless of its short term effects. Based conceptually on a quick patch by Jens Axboe. Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--drivers/ata/sata_mv.c26
1 files changed, 7 insertions, 19 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 4df8311968e9..7f1b13e89cf7 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -421,7 +421,6 @@ static void mv_error_handler(struct ata_port *ap);
421static void mv_post_int_cmd(struct ata_queued_cmd *qc); 421static void mv_post_int_cmd(struct ata_queued_cmd *qc);
422static void mv_eh_freeze(struct ata_port *ap); 422static void mv_eh_freeze(struct ata_port *ap);
423static void mv_eh_thaw(struct ata_port *ap); 423static void mv_eh_thaw(struct ata_port *ap);
424static int mv_slave_config(struct scsi_device *sdev);
425static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 424static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
426 425
427static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 426static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
@@ -459,7 +458,7 @@ static struct scsi_host_template mv5_sht = {
459 .use_clustering = 1, 458 .use_clustering = 1,
460 .proc_name = DRV_NAME, 459 .proc_name = DRV_NAME,
461 .dma_boundary = MV_DMA_BOUNDARY, 460 .dma_boundary = MV_DMA_BOUNDARY,
462 .slave_configure = mv_slave_config, 461 .slave_configure = ata_scsi_slave_config,
463 .slave_destroy = ata_scsi_slave_destroy, 462 .slave_destroy = ata_scsi_slave_destroy,
464 .bios_param = ata_std_bios_param, 463 .bios_param = ata_std_bios_param,
465}; 464};
@@ -477,7 +476,7 @@ static struct scsi_host_template mv6_sht = {
477 .use_clustering = 1, 476 .use_clustering = 1,
478 .proc_name = DRV_NAME, 477 .proc_name = DRV_NAME,
479 .dma_boundary = MV_DMA_BOUNDARY, 478 .dma_boundary = MV_DMA_BOUNDARY,
480 .slave_configure = mv_slave_config, 479 .slave_configure = ata_scsi_slave_config,
481 .slave_destroy = ata_scsi_slave_destroy, 480 .slave_destroy = ata_scsi_slave_destroy,
482 .bios_param = ata_std_bios_param, 481 .bios_param = ata_std_bios_param,
483}; 482};
@@ -756,17 +755,6 @@ static void mv_irq_clear(struct ata_port *ap)
756{ 755{
757} 756}
758 757
759static int mv_slave_config(struct scsi_device *sdev)
760{
761 int rc = ata_scsi_slave_config(sdev);
762 if (rc)
763 return rc;
764
765 blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);
766
767 return 0; /* scsi layer doesn't check return value, sigh */
768}
769
770static void mv_set_edma_ptrs(void __iomem *port_mmio, 758static void mv_set_edma_ptrs(void __iomem *port_mmio,
771 struct mv_host_priv *hpriv, 759 struct mv_host_priv *hpriv,
772 struct mv_port_priv *pp) 760 struct mv_port_priv *pp)
@@ -1138,7 +1126,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
1138{ 1126{
1139 struct mv_port_priv *pp = qc->ap->private_data; 1127 struct mv_port_priv *pp = qc->ap->private_data;
1140 struct scatterlist *sg; 1128 struct scatterlist *sg;
1141 struct mv_sg *mv_sg; 1129 struct mv_sg *mv_sg, *last_sg = NULL;
1142 1130
1143 mv_sg = pp->sg_tbl; 1131 mv_sg = pp->sg_tbl;
1144 ata_for_each_sg(sg, qc) { 1132 ata_for_each_sg(sg, qc) {
@@ -1159,13 +1147,13 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
1159 sg_len -= len; 1147 sg_len -= len;
1160 addr += len; 1148 addr += len;
1161 1149
1162 if (!sg_len && ata_sg_is_last(sg, qc)) 1150 last_sg = mv_sg;
1163 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1164
1165 mv_sg++; 1151 mv_sg++;
1166 } 1152 }
1167
1168 } 1153 }
1154
1155 if (likely(last_sg))
1156 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1169} 1157}
1170 1158
1171static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) 1159static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)