Diffstat (limited to 'drivers/ata/libata-sff.c')
 drivers/ata/libata-sff.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 54 insertions(+), 18 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 8a1396f52a3a..e78ad76861f4 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -40,6 +40,8 @@
 
 #include "libata.h"
 
+static struct workqueue_struct *ata_sff_wq;
+
 const struct ata_port_operations ata_sff_port_ops = {
 	.inherits		= &ata_base_port_ops,
 
@@ -1293,7 +1295,7 @@ fsm_start:
 		if (in_wq)
 			spin_unlock_irqrestore(ap->lock, flags);
 
-		/* if polling, ata_pio_task() handles the rest.
+		/* if polling, ata_sff_pio_task() handles the rest.
 		 * otherwise, interrupt handler takes over from here.
 		 */
 		break;
@@ -1458,14 +1460,38 @@ fsm_start:
 }
 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
 
-void ata_pio_task(struct work_struct *work)
+void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+{
+	/* may fail if ata_sff_flush_pio_task() in progress */
+	queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
+			   msecs_to_jiffies(delay));
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
+
+void ata_sff_flush_pio_task(struct ata_port *ap)
+{
+	DPRINTK("ENTER\n");
+
+	cancel_rearming_delayed_work(&ap->sff_pio_task);
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	if (ata_msg_ctl(ap))
+		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
+}
+
+static void ata_sff_pio_task(struct work_struct *work)
 {
 	struct ata_port *ap =
-		container_of(work, struct ata_port, port_task.work);
-	struct ata_queued_cmd *qc = ap->port_task_data;
+		container_of(work, struct ata_port, sff_pio_task.work);
+	struct ata_queued_cmd *qc;
 	u8 status;
 	int poll_next;
 
+	/* qc can be NULL if timeout occurred */
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	if (!qc)
+		return;
+
 fsm_start:
 	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
 
@@ -1481,7 +1507,7 @@ fsm_start:
 		msleep(2);
 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
 		if (status & ATA_BUSY) {
-			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
+			ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
 			return;
 		}
 	}
@@ -1551,7 +1577,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		ap->hsm_task_state = HSM_ST_LAST;
 
 		if (qc->tf.flags & ATA_TFLAG_POLLING)
-			ata_pio_queue_task(ap, qc, 0);
+			ata_sff_queue_pio_task(ap, 0);
 
 		break;
 
@@ -1573,20 +1599,21 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
 			/* PIO data out protocol */
 			ap->hsm_task_state = HSM_ST_FIRST;
-			ata_pio_queue_task(ap, qc, 0);
+			ata_sff_queue_pio_task(ap, 0);
 
-			/* always send first data block using
-			 * the ata_pio_task() codepath.
+			/* always send first data block using the
+			 * ata_sff_pio_task() codepath.
 			 */
 		} else {
 			/* PIO data in protocol */
 			ap->hsm_task_state = HSM_ST;
 
 			if (qc->tf.flags & ATA_TFLAG_POLLING)
-				ata_pio_queue_task(ap, qc, 0);
+				ata_sff_queue_pio_task(ap, 0);
 
-			/* if polling, ata_pio_task() handles the rest.
-			 * otherwise, interrupt handler takes over from here.
+			/* if polling, ata_sff_pio_task() handles the
+			 * rest. otherwise, interrupt handler takes
+			 * over from here.
 			 */
 		}
 
@@ -1604,7 +1631,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		/* send cdb by polling if no cdb interrupt */
 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
 		    (qc->tf.flags & ATA_TFLAG_POLLING))
-			ata_pio_queue_task(ap, qc, 0);
+			ata_sff_queue_pio_task(ap, 0);
 		break;
 
 	case ATAPI_PROT_DMA:
@@ -1616,7 +1643,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 
 		/* send cdb by polling if no cdb interrupt */
 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-			ata_pio_queue_task(ap, qc, 0);
+			ata_sff_queue_pio_task(ap, 0);
 		break;
 
 	default:
@@ -2360,8 +2387,6 @@ void ata_sff_error_handler(struct ata_port *ap)
 	/* reset PIO HSM and stop DMA engine */
 	spin_lock_irqsave(ap->lock, flags);
 
-	ap->hsm_task_state = HSM_ST_IDLE;
-
 	if (ap->ioaddr.bmdma_addr &&
 	    qc && (qc->tf.protocol == ATA_PROT_DMA ||
 		   qc->tf.protocol == ATAPI_PROT_DMA)) {
@@ -2432,8 +2457,6 @@ void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
 
 	spin_lock_irqsave(ap->lock, flags);
 
-	ap->hsm_task_state = HSM_ST_IDLE;
-
 	if (ap->ioaddr.bmdma_addr)
 		ap->ops->bmdma_stop(qc);
 
@@ -3074,15 +3097,28 @@ EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
  */
 void ata_sff_port_init(struct ata_port *ap)
 {
+	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
 	ap->ctl = ATA_DEVCTL_OBS;
 	ap->last_ctl = 0xFF;
 }
 
 int __init ata_sff_init(void)
 {
+	/*
+	 * FIXME: In UP case, there is only one workqueue thread and if you
+	 * have more than one PIO device, latency is bloody awful, with
+	 * occasional multi-second "hiccups" as one PIO device waits for
+	 * another. It's an ugly wart that users DO occasionally complain
+	 * about; luckily most users have at most one PIO polled device.
+	 */
+	ata_sff_wq = create_workqueue("ata_sff");
+	if (!ata_sff_wq)
+		return -ENOMEM;
+
 	return 0;
 }
 
 void __exit ata_sff_exit(void)
 {
+	destroy_workqueue(ata_sff_wq);
 }
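
For reference, below is a minimal sketch (not part of the patch) of the queue/flush pattern that the new ata_sff_queue_pio_task() and ata_sff_flush_pio_task() helpers follow, written against the workqueue API of this kernel era. The my_wq, my_port, my_poll_task, my_queue_poll, my_flush_poll, my_port_init and my_module_init names are hypothetical and exist only for illustration.

/*
 * Illustrative sketch only: mirrors the pattern used above, where a
 * per-port delayed work item polls hardware on a private workqueue,
 * re-queues itself while the device is busy, and is flushed (waited
 * for and cancelled) before the port is declared idle.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *my_wq;		/* cf. ata_sff_wq */

struct my_port {
	struct delayed_work	poll_task;	/* cf. ap->sff_pio_task */
};

static void my_poll_task(struct work_struct *work)
{
	struct my_port *p = container_of(work, struct my_port,
					 poll_task.work);
	int still_busy = 0;	/* a real driver reads hardware state here */

	if (still_busy)
		queue_delayed_work(my_wq, &p->poll_task,
				   msecs_to_jiffies(1));
}

/* cf. ata_sff_queue_pio_task(): may fail harmlessly if a flush is running */
static void my_queue_poll(struct my_port *p, unsigned long delay)
{
	queue_delayed_work(my_wq, &p->poll_task, msecs_to_jiffies(delay));
}

/* cf. ata_sff_flush_pio_task(): waits for a running instance and
 * cancels any pending re-queue before marking the port idle
 */
static void my_flush_poll(struct my_port *p)
{
	cancel_rearming_delayed_work(&p->poll_task);
}

/* cf. ata_sff_port_init() */
static void my_port_init(struct my_port *p)
{
	INIT_DELAYED_WORK(&p->poll_task, my_poll_task);
}

/* cf. ata_sff_init() */
static int __init my_module_init(void)
{
	my_wq = create_workqueue("my_poll");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}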