Diffstat (limited to 'drivers/ide/ide-dma.c')
 -rw-r--r--	drivers/ide/ide-dma.c	118
 1 file changed, 69 insertions, 49 deletions
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 3dbf80c15491..a0b8cab1d9a6 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -89,15 +89,16 @@ static const struct drive_list_entry drive_blacklist[] = {
 ide_startstop_t ide_dma_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
+	struct ide_cmd *cmd = &hwif->cmd;
 	u8 stat = 0, dma_stat = 0;
 
+	drive->waiting_for_dma = 0;
 	dma_stat = hwif->dma_ops->dma_end(drive);
+	ide_dma_unmap_sg(drive, cmd);
 	stat = hwif->tp_ops->read_status(hwif);
 
 	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
 		if (!dma_stat) {
-			struct ide_cmd *cmd = &hwif->cmd;
-
 			if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
 				ide_finish_cmd(drive, cmd, stat);
 			else
@@ -117,8 +118,8 @@ int ide_dma_good_drive(ide_drive_t *drive)
 }
 
 /**
- * ide_build_sglist - map IDE scatter gather for DMA I/O
- * @drive: the drive to build the DMA table for
+ * ide_dma_map_sg - map IDE scatter gather for DMA I/O
+ * @drive: the drive to map the DMA table for
  * @cmd: command
  *
  * Perform the DMA mapping magic necessary to access the source or
@@ -127,23 +128,19 @@ int ide_dma_good_drive(ide_drive_t *drive)
  * operate in a portable fashion.
  */
 
-int ide_build_sglist(ide_drive_t *drive, struct ide_cmd *cmd)
+static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct scatterlist *sg = hwif->sg_table;
 	int i;
 
-	ide_map_sg(drive, cmd);
-
 	if (cmd->tf_flags & IDE_TFLAG_WRITE)
 		cmd->sg_dma_direction = DMA_TO_DEVICE;
 	else
 		cmd->sg_dma_direction = DMA_FROM_DEVICE;
 
 	i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
-	if (i == 0)
-		ide_map_sg(drive, cmd);
-	else {
+	if (i) {
 		cmd->orig_sg_nents = cmd->sg_nents;
 		cmd->sg_nents = i;
 	}
@@ -152,7 +149,7 @@ int ide_build_sglist(ide_drive_t *drive, struct ide_cmd *cmd)
 }
 
 /**
- * ide_destroy_dmatable - clean up DMA mapping
+ * ide_dma_unmap_sg - clean up DMA mapping
  * @drive: The drive to unmap
  *
  * Teardown mappings after DMA has completed. This must be called
@@ -162,15 +159,14 @@ int ide_build_sglist(ide_drive_t *drive, struct ide_cmd *cmd)
  * time.
  */
 
-void ide_destroy_dmatable(ide_drive_t *drive)
+void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct ide_cmd *cmd = &hwif->cmd;
 
 	dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
 		     cmd->sg_dma_direction);
 }
-EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
+EXPORT_SYMBOL_GPL(ide_dma_unmap_sg);
 
 /**
  * ide_dma_off_quietly - Generic DMA kill
@@ -249,12 +245,11 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 	case XFER_UDMA_0:
 		if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
 			break;
-
+		mask = id[ATA_ID_UDMA_MODES];
 		if (port_ops && port_ops->udma_filter)
-			mask = port_ops->udma_filter(drive);
+			mask &= port_ops->udma_filter(drive);
 		else
-			mask = hwif->ultra_mask;
-		mask &= id[ATA_ID_UDMA_MODES];
+			mask &= hwif->ultra_mask;
 
 		/*
 		 * avoid false cable warning from eighty_ninty_three()
@@ -265,18 +260,23 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 		}
 		break;
 	case XFER_MW_DMA_0:
-		if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
-			break;
+		mask = id[ATA_ID_MWDMA_MODES];
+
+		/* Also look for the CF specific MWDMA modes... */
+		if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 0x38)) {
+			u8 mode = ((id[ATA_ID_CFA_MODES] & 0x38) >> 3) - 1;
+
+			mask |= ((2 << mode) - 1) << 3;
+		}
+
 		if (port_ops && port_ops->mdma_filter)
-			mask = port_ops->mdma_filter(drive);
+			mask &= port_ops->mdma_filter(drive);
 		else
-			mask = hwif->mwdma_mask;
-		mask &= id[ATA_ID_MWDMA_MODES];
+			mask &= hwif->mwdma_mask;
 		break;
 	case XFER_SW_DMA_0:
-		if (id[ATA_ID_FIELD_VALID] & 2) {
-			mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
-		} else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
+		mask = id[ATA_ID_SWDMA_MODES];
+		if (!(mask & ATA_SWDMA2) && (id[ATA_ID_OLD_DMA_MODES] >> 8)) {
 			u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;
 
 			/*
@@ -284,8 +284,9 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 			 * (the maximum allowed mode is XFER_SW_DMA_2)
 			 */
 			if (mode <= 2)
-				mask = ((2 << mode) - 1) & hwif->swdma_mask;
+				mask = (2 << mode) - 1;
 		}
+		mask &= hwif->swdma_mask;
 		break;
 	default:
 		BUG();
@@ -402,11 +403,10 @@ int ide_id_dma_bug(ide_drive_t *drive)
 		if ((id[ATA_ID_UDMA_MODES] >> 8) &&
 		    (id[ATA_ID_MWDMA_MODES] >> 8))
 			goto err_out;
-	} else if (id[ATA_ID_FIELD_VALID] & 2) {
-		if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
-		    (id[ATA_ID_SWDMA_MODES] >> 8))
-			goto err_out;
-	}
+	} else if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
+		   (id[ATA_ID_SWDMA_MODES] >> 8))
+		goto err_out;
+
 	return 0;
 err_out:
 	printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
@@ -460,21 +460,6 @@ void ide_dma_lost_irq(ide_drive_t *drive)
 }
 EXPORT_SYMBOL_GPL(ide_dma_lost_irq);
 
-void ide_dma_timeout(ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-
-	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
-
-	if (hwif->dma_ops->dma_test_irq(drive))
-		return;
-
-	ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
-
-	hwif->dma_ops->dma_end(drive);
-}
-EXPORT_SYMBOL_GPL(ide_dma_timeout);
-
 /*
  * un-busy the port etc, and clear any pending DMA status. we want to
  * retry the current request in pio mode instead of risking tossing it
@@ -483,6 +468,8 @@ EXPORT_SYMBOL_GPL(ide_dma_timeout);
 ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
 {
 	ide_hwif_t *hwif = drive->hwif;
+	const struct ide_dma_ops *dma_ops = hwif->dma_ops;
+	struct ide_cmd *cmd = &hwif->cmd;
 	struct request *rq;
 	ide_startstop_t ret = ide_stopped;
 
@@ -492,12 +479,23 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
 
 	if (error < 0) {
 		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
-		(void)hwif->dma_ops->dma_end(drive);
+		drive->waiting_for_dma = 0;
+		(void)dma_ops->dma_end(drive);
+		ide_dma_unmap_sg(drive, cmd);
 		ret = ide_error(drive, "dma timeout error",
 				hwif->tp_ops->read_status(hwif));
 	} else {
 		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
-		hwif->dma_ops->dma_timeout(drive);
+		if (dma_ops->dma_clear)
+			dma_ops->dma_clear(drive);
+		printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
+		if (dma_ops->dma_test_irq(drive) == 0) {
+			ide_dump_status(drive, "DMA timeout",
+					hwif->tp_ops->read_status(hwif));
+			drive->waiting_for_dma = 0;
+			(void)dma_ops->dma_end(drive);
+			ide_dma_unmap_sg(drive, cmd);
+		}
 	}
 
 	/*
@@ -567,3 +565,25 @@ int ide_allocate_dma_engine(ide_hwif_t *hwif)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
+
+int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd)
+{
+	const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops;
+
+	if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
+	    (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)))
+		goto out;
+	ide_map_sg(drive, cmd);
+	if (ide_dma_map_sg(drive, cmd) == 0)
+		goto out_map;
+	if (dma_ops->dma_setup(drive, cmd))
+		goto out_dma_unmap;
+	drive->waiting_for_dma = 1;
+	return 0;
+out_dma_unmap:
+	ide_dma_unmap_sg(drive, cmd);
+out_map:
+	ide_map_sg(drive, cmd);
+out:
+	return 1;
+}
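
Two details above reward a closer look. The mode-mask arithmetic relies on the idiom "(2 << mode) - 1", which turns a highest-supported-mode number into a bitmask with bits 0..mode set; in the CompactFlash branch the result is then shifted left by 3 so the CF-specific MWDMA3/MWDMA4 capability bits land above the standard MWDMA0-2 bits. A minimal userspace sketch of the bit math (illustration only, not kernel code):

#include <stdio.h>

/* Mask with bits 0..mode set, e.g. mode 2 -> 0b111 (0x07). */
static unsigned int modes_up_to(unsigned int mode)
{
	return (2u << mode) - 1;
}

int main(void)
{
	unsigned int mode;

	for (mode = 0; mode <= 2; mode++) {
		/* SWDMA/old-DMA path: modes 0..mode become bits 0..mode */
		unsigned int low_bits = modes_up_to(mode);
		/* CF path: advanced MWDMA modes start at bit 3 */
		unsigned int cf_bits = modes_up_to(mode) << 3;

		printf("mode %u: low mask 0x%02x, CF MWDMA bits 0x%02x\n",
		       mode, low_bits, cf_bits);
	}
	return 0;
}

This prints 0x01/0x08, 0x03/0x18 and 0x07/0x38: a CF device reporting advanced mode n contributes mask bits 3..(3+n) on top of the standard MWDMA bits. The new ide_dma_prepare() helper is also worth a note: it folds the map/setup sequence and its unwind paths into one goto ladder, so a request handler (its call site is outside this diff) can presumably fall back to PIO with a single test along the lines of "if (ide_dma_prepare(drive, cmd)) { /* issue the command in PIO mode */ }". When DMA mapping or setup fails after the scatterlist has been mapped, the helper re-runs ide_map_sg(), leaving the scatter/gather list valid for the PIO path.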