Diffstat (limited to 'drivers/ata/libata-sff.c')
-rw-r--r--  drivers/ata/libata-sff.c  115
1 files changed, 100 insertions, 15 deletions
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 3c2d2289f85e..215d18672a5a 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -247,7 +247,7 @@ u8 ata_sff_check_status(struct ata_port *ap)
  * LOCKING:
  * Inherited from caller.
  */
-u8 ata_sff_altstatus(struct ata_port *ap)
+static u8 ata_sff_altstatus(struct ata_port *ap)
 {
 	if (ap->ops->sff_check_altstatus)
 		return ap->ops->sff_check_altstatus(ap);
@@ -256,6 +256,93 @@ u8 ata_sff_altstatus(struct ata_port *ap)
 }
 
 /**
+ * ata_sff_irq_status - Check if the device is busy
+ * @ap: port where the device is
+ *
+ * Determine if the port is currently busy. Uses altstatus
+ * if available in order to avoid clearing shared IRQ status
+ * when finding an IRQ source. Non ctl capable devices don't
+ * share interrupt lines fortunately for us.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static u8 ata_sff_irq_status(struct ata_port *ap)
+{
+	u8 status;
+
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		status = ata_sff_altstatus(ap);
+		/* Not us: We are busy */
+		if (status & ATA_BUSY)
+			return status;
+	}
+	/* Clear INTRQ latch */
+	status = ap->ops->sff_check_status(ap);
+	return status;
+}
+
+/**
+ * ata_sff_sync - Flush writes
+ * @ap: Port to wait for.
+ *
+ * CAUTION:
+ * If we have an mmio device with no ctl and no altstatus
+ * method this will fail. No such devices are known to exist.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+static void ata_sff_sync(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus)
+		ap->ops->sff_check_altstatus(ap);
+	else if (ap->ioaddr.altstatus_addr)
+		ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ * ata_sff_pause - Flush writes and wait 400nS
+ * @ap: Port to pause for.
+ *
+ * CAUTION:
+ * If we have an mmio device with no ctl and no altstatus
+ * method this will fail. No such devices are known to exist.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_sff_pause(struct ata_port *ap)
+{
+	ata_sff_sync(ap);
+	ndelay(400);
+}
+
+/**
+ * ata_sff_dma_pause - Pause before commencing DMA
+ * @ap: Port to pause for.
+ *
+ * Perform I/O fencing and ensure sufficient cycle delays occur
+ * for the HDMA1:0 transition
+ */
+
+void ata_sff_dma_pause(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		/* An altstatus read will cause the needed delay without
+		   messing up the IRQ status */
+		ata_sff_altstatus(ap);
+		return;
+	}
+	/* There are no DMA controllers without ctl. BUG here to ensure
+	   we never violate the HDMA1:0 transition timing and risk
+	   corruption. */
+	BUG();
+}
+
+/**
  * ata_sff_busy_sleep - sleep until BSY clears, or timeout
  * @ap: port containing status register to be polled
  * @tmout_pat: impatience timeout
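Note on usage: with ata_sff_altstatus() now static, code outside libata-sff.c gets the exported ata_sff_pause() (flush plus 400ns settle) and ata_sff_dma_pause() instead. A minimal sketch of the intended call pattern follows; the mydrv_exec_command name is invented for illustration and is not part of this patch.

#include <linux/libata.h>

/* Hypothetical ->sff_exec_command hook, shown only to illustrate ata_sff_pause(). */
static void mydrv_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	/* On MMIO ports this write may be posted in a bridge. */
	iowrite8(tf->command, ap->ioaddr.command_addr);

	/* Flush the posted write, then wait the 400ns the ATA spec requires
	 * before the status register may be sampled again. */
	ata_sff_pause(ap);
}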
@@ -742,7 +829,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
 	} else
 		ata_pio_sector(qc);
 
-	ata_sff_altstatus(qc->ap); /* flush */
+	ata_sff_sync(qc->ap); /* flush */
 }
 
 /**
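The flush swapped in here matters on MMIO ports: the PIO data writes can still be sitting in a write buffer when the state machine moves on to poll Status, and reading any device register forces them out. A rough sketch of the hazard, written as if it lived inside libata-sff.c (ata_sff_sync() is static there; external code would use ata_sff_pause()); the example_pio_write_block name is invented.

/* Illustrative only: not part of the patch. */
static void example_pio_write_block(struct ata_port *ap, const u16 *buf,
				    unsigned int words)
{
	/* Push one block of PIO data to the device's data register. */
	iowrite16_rep(ap->ioaddr.data_addr, buf, words);

	/* Without a read-back these writes may still be posted when Status
	 * is next polled; reading the alternate status register (which does
	 * not clear INTRQ) drains them. */
	ata_sff_sync(ap);
}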
@@ -763,8 +850,9 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 	WARN_ON(qc->dev->cdb_len < 12);
 
 	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
-	ata_sff_altstatus(ap); /* flush */
-
+	ata_sff_sync(ap);
+	/* FIXME: If the CDB is for DMA do we need to do the transition delay
+	   or is bmdma_start guaranteed to do it ? */
 	switch (qc->tf.protocol) {
 	case ATAPI_PROT_PIO:
 		ap->hsm_task_state = HSM_ST;
@@ -905,7 +993,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 
 	if (unlikely(__atapi_pio_bytes(qc, bytes)))
 		goto err_out;
-	ata_sff_altstatus(ap); /* flush */
+	ata_sff_sync(ap); /* flush */
 
 	return;
 
@@ -1489,14 +1577,10 @@ inline unsigned int ata_sff_host_intr(struct ata_port *ap,
 		goto idle_irq;
 	}
 
-	/* check altstatus */
-	status = ata_sff_altstatus(ap);
-	if (status & ATA_BUSY)
-		goto idle_irq;
 
-	/* check main status, clearing INTRQ */
-	status = ap->ops->sff_check_status(ap);
-	if (unlikely(status & ATA_BUSY))
+	/* check main status, clearing INTRQ if needed */
+	status = ata_sff_irq_status(ap);
+	if (status & ATA_BUSY)
 		goto idle_irq;
 
 	/* ack bmdma irq events */
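The dropped altstatus read was unconditional; when no ->sff_check_altstatus op is set it falls through to an ioread8() of ap->ioaddr.altstatus_addr, which simple ISA/PNP-style ports that decode only the eight taskfile registers leave NULL. ata_sff_irq_status() checks for that before touching altstatus. A hedged sketch of such a port's I/O setup; the driver and function names are invented, and the ATA_REG_* offsets come from <linux/ata.h>.

/* Hypothetical port setup for a controller with no control block. */
static void mydrv_setup_ioaddr(struct ata_port *ap, void __iomem *cmd_base)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr     = cmd_base;
	ioaddr->data_addr    = cmd_base + ATA_REG_DATA;
	ioaddr->error_addr   = cmd_base + ATA_REG_ERR;
	ioaddr->feature_addr = cmd_base + ATA_REG_FEATURE;
	ioaddr->nsect_addr   = cmd_base + ATA_REG_NSECT;
	ioaddr->lbal_addr    = cmd_base + ATA_REG_LBAL;
	ioaddr->lbam_addr    = cmd_base + ATA_REG_LBAM;
	ioaddr->lbah_addr    = cmd_base + ATA_REG_LBAH;
	ioaddr->device_addr  = cmd_base + ATA_REG_DEVICE;
	ioaddr->status_addr  = cmd_base + ATA_REG_STATUS;
	ioaddr->command_addr = cmd_base + ATA_REG_CMD;

	/* No ctl/altstatus decode: ctl_addr and altstatus_addr stay NULL,
	 * the case ata_sff_irq_status() and ata_sff_sync() now tolerate. */
}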
@@ -2030,7 +2114,7 @@ void ata_sff_error_handler(struct ata_port *ap)
 		ap->ops->bmdma_stop(qc);
 	}
 
-	ata_sff_altstatus(ap);
+	ata_sff_sync(ap); /* FIXME: We don't need this */
 	ap->ops->sff_check_status(ap);
 	ap->ops->sff_irq_clear(ap);
 
@@ -2203,7 +2287,7 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 		 mmio + ATA_DMA_CMD);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_altstatus(ap); /* dummy read */
+	ata_sff_dma_pause(ap);
 }
 
 /**
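ata_sff_dma_pause() is exported (see the symbol changes below) so that drivers carrying their own ->bmdma_stop can provide the same one-PIO-cycle delay without poking altstatus by hand. A sketch under the assumption of an invented driver name:

/* Hypothetical ->bmdma_stop implementation using the new helper. */
static void mydrv_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *dma_cmd = ap->ioaddr.bmdma_addr + ATA_DMA_CMD;

	/* Clear the start/stop bit to halt the DMA engine. */
	iowrite8(ioread8(dma_cmd) & ~ATA_DMA_START, dma_cmd);

	/* One-PIO-cycle guaranteed wait, per spec, for the HDMA1:0
	 * transition; the helper BUG()s if the port has no register
	 * that could provide the delay. */
	ata_sff_dma_pause(ap);
}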
@@ -2722,7 +2806,8 @@ EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
 EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
 EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 EXPORT_SYMBOL_GPL(ata_sff_check_status);
-EXPORT_SYMBOL_GPL(ata_sff_altstatus);
+EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
+EXPORT_SYMBOL_GPL(ata_sff_pause);
 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
 EXPORT_SYMBOL_GPL(ata_sff_tf_load);