author     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>  2009-03-31 14:15:24 -0400
committer  Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>  2009-03-31 14:15:24 -0400
commit     f094d4d83bccee9277ddb6aadccf35747889426b (patch)
tree       a4cd886f42d6ab13b507c23b08f2064ceba04623 /drivers/ide/ide-dma.c
parent     88b4132e101e60e8fa67996ae3072ab6b71e8500 (diff)
ide: sanitize ide_build_sglist() and ide_destroy_dmatable()
* Move ide_map_sg() calls out from ide_build_sglist() to ide_dma_prepare().

* Pass command to ide_destroy_dmatable().

* Rename ide_build_sglist() to ide_dma_map_sg() and ide_destroy_dmatable()
  to ide_dma_unmap_sg().

There should be no functional changes caused by this patch.

Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
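For reference, here is a condensed sketch of the resulting helper contract. The helper names are taken from this patch; example_dma_cycle() and the trimmed error handling are purely illustrative, not verbatim driver code:

/* Illustrative sketch: after this patch the caller builds the scatterlist
 * with ide_map_sg() before ide_dma_map_sg() creates the DMA mapping, and
 * ide_dma_unmap_sg() takes the command explicitly instead of reading it
 * from hwif->cmd itself.  example_dma_cycle() is a made-up wrapper. */
static int example_dma_cycle(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_map_sg(drive, cmd);			/* build the scatterlist */
	if (ide_dma_map_sg(drive, cmd) == 0)	/* was ide_build_sglist() */
		return 1;			/* mapping failed: fall back to PIO */

	/* ... program the controller and wait for the DMA completion ... */

	ide_dma_unmap_sg(drive, cmd);		/* was ide_destroy_dmatable(drive) */
	return 0;
}

The final hunk of the diff below shows the real error-unwind sequence that ide_dma_prepare() now uses (the out_dma_unmap/out_map/out labels).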
Diffstat (limited to 'drivers/ide/ide-dma.c')
-rw-r--r--  drivers/ide/ide-dma.c | 50
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 4d3102887d9c..a5612eadc306 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -89,17 +89,16 @@ static const struct drive_list_entry drive_blacklist[] = {
 ide_startstop_t ide_dma_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
+	struct ide_cmd *cmd = &hwif->cmd;
 	u8 stat = 0, dma_stat = 0;
 
 	drive->waiting_for_dma = 0;
 	dma_stat = hwif->dma_ops->dma_end(drive);
-	ide_destroy_dmatable(drive);
+	ide_dma_unmap_sg(drive, cmd);
 	stat = hwif->tp_ops->read_status(hwif);
 
 	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
 		if (!dma_stat) {
-			struct ide_cmd *cmd = &hwif->cmd;
-
 			if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
 				ide_finish_cmd(drive, cmd, stat);
 			else
@@ -119,8 +118,8 @@ int ide_dma_good_drive(ide_drive_t *drive)
 }
 
 /**
- * ide_build_sglist - map IDE scatter gather for DMA I/O
- * @drive: the drive to build the DMA table for
+ * ide_dma_map_sg - map IDE scatter gather for DMA I/O
+ * @drive: the drive to map the DMA table for
  * @cmd: command
  *
  * Perform the DMA mapping magic necessary to access the source or
@@ -129,23 +128,19 @@ int ide_dma_good_drive(ide_drive_t *drive)
  * operate in a portable fashion.
  */
 
-static int ide_build_sglist(ide_drive_t *drive, struct ide_cmd *cmd)
+static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct scatterlist *sg = hwif->sg_table;
 	int i;
 
-	ide_map_sg(drive, cmd);
-
 	if (cmd->tf_flags & IDE_TFLAG_WRITE)
 		cmd->sg_dma_direction = DMA_TO_DEVICE;
 	else
 		cmd->sg_dma_direction = DMA_FROM_DEVICE;
 
 	i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
-	if (i == 0)
-		ide_map_sg(drive, cmd);
-	else {
+	if (i) {
 		cmd->orig_sg_nents = cmd->sg_nents;
 		cmd->sg_nents = i;
 	}
@@ -154,7 +149,7 @@ static int ide_build_sglist(ide_drive_t *drive, struct ide_cmd *cmd)
 }
 
 /**
- * ide_destroy_dmatable - clean up DMA mapping
+ * ide_dma_unmap_sg - clean up DMA mapping
  * @drive: The drive to unmap
  *
  * Teardown mappings after DMA has completed. This must be called
@@ -164,15 +159,14 @@ static int ide_build_sglist(ide_drive_t *drive, struct ide_cmd *cmd)
  * time.
  */
 
-void ide_destroy_dmatable(ide_drive_t *drive)
+void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct ide_cmd *cmd = &hwif->cmd;
 
 	dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
 		     cmd->sg_dma_direction);
 }
-EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
+EXPORT_SYMBOL_GPL(ide_dma_unmap_sg);
 
 /**
  * ide_dma_off_quietly - Generic DMA kill
@@ -471,6 +465,7 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	const struct ide_dma_ops *dma_ops = hwif->dma_ops;
+	struct ide_cmd *cmd = &hwif->cmd;
 	struct request *rq;
 	ide_startstop_t ret = ide_stopped;
 
@@ -482,7 +477,7 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
 		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
 		drive->waiting_for_dma = 0;
 		(void)dma_ops->dma_end(drive);
-		ide_destroy_dmatable(drive);
+		ide_dma_unmap_sg(drive, cmd);
 		ret = ide_error(drive, "dma timeout error",
 				hwif->tp_ops->read_status(hwif));
 	} else {
@@ -495,7 +490,7 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
 					hwif->tp_ops->read_status(hwif));
 			drive->waiting_for_dma = 0;
 			(void)dma_ops->dma_end(drive);
-			ide_destroy_dmatable(drive);
+			ide_dma_unmap_sg(drive, cmd);
 		}
 	}
 
@@ -572,14 +567,19 @@ int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd)
 	const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops;
 
 	if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
-	    (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)) ||
-	    ide_build_sglist(drive, cmd) == 0)
-		return 1;
-	if (dma_ops->dma_setup(drive, cmd)) {
-		ide_destroy_dmatable(drive);
-		ide_map_sg(drive, cmd);
-		return 1;
-	}
+	    (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)))
+		goto out;
+	ide_map_sg(drive, cmd);
+	if (ide_dma_map_sg(drive, cmd) == 0)
+		goto out_map;
+	if (dma_ops->dma_setup(drive, cmd))
+		goto out_dma_unmap;
 	drive->waiting_for_dma = 1;
 	return 0;
+out_dma_unmap:
+	ide_dma_unmap_sg(drive, cmd);
+out_map:
+	ide_map_sg(drive, cmd);
+out:
+	return 1;
 }