author	Jan Glauber <jang@linux.vnet.ibm.com>	2010-09-07 17:14:39 -0400
committer	David S. Miller <davem@davemloft.net>	2010-09-08 17:31:00 -0400
commit	d36deae75011a7890f0e730dd0f867c64081cb50 (patch)
tree	d24672cf5cc40c637186362187450362cabefd24 /drivers/s390/cio/qdio_main.c
parent	e508be174ad36b0cf9b324cd04978c2b13c21502 (diff)
qdio: extend API to allow polling
Extend the qdio API to allow polling in the upper-layer driver. This is needed
by qeth to use NAPI.

To use the new interface the upper-layer driver must specify the
queue_start_poll() callback. This callback is used to signal the upper-layer
driver that it has the initiative and must process the inbound queue by
calling qdio_get_next_buffers(). If the upper-layer driver wants to stop
polling it calls qdio_start_irq().

Since adapter interrupts are not completely stoppable, qdio implements a
software bit QDIO_QUEUE_IRQS_DISABLED to safely disable interrupts for an
input queue.

The old interface is preserved and will be used as is by zfcp.

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
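For illustration only, a minimal sketch of how an upper-layer driver could wire
up the new interface; struct my_card and my_queue_start_poll() are hypothetical
names for this example, not code from the patch. The callback signature follows
the call site in the hunk below (cdev, queue number, int_parm), and the driver
is assumed to register it via the queue_start_poll() hook mentioned above and
to hand the actual work to a NAPI instance:

#include <linux/netdevice.h>	/* struct napi_struct, napi_schedule() */
#include <asm/ccwdev.h>		/* struct ccw_device */

/* Hypothetical per-device state, for illustration only. */
struct my_card {
	struct ccw_device *cdev;
	struct napi_struct napi;
};

/*
 * queue_start_poll() callback: qdio calls this instead of running its
 * inbound tasklet, handing the initiative to the upper-layer driver.
 * int_parm is whatever the driver passed in its qdio_initialize data.
 */
static void my_queue_start_poll(struct ccw_device *cdev, int queue,
				unsigned long int_parm)
{
	struct my_card *card = (struct my_card *)int_parm;

	/* defer the buffer processing to the NAPI poll routine */
	napi_schedule(&card->napi);
}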
Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r--	drivers/s390/cio/qdio_main.c	138
1 file changed, 136 insertions(+), 2 deletions(-)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 00520f9a7a8e..5fcfa7f9e9ef 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -884,8 +884,19 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
 		return;
 
-	for_each_input_queue(irq_ptr, q, i)
-		tasklet_schedule(&q->tasklet);
+	for_each_input_queue(irq_ptr, q, i) {
+		if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in work */
+			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+				     &q->u.in.queue_irq_state)) {
+				qperf_inc(q, int_discarded);
+				continue;
+			}
+			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+						 q->irq_ptr->int_parm);
+		} else
+			tasklet_schedule(&q->tasklet);
+	}
 
 	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
 		return;
@@ -1519,6 +1530,129 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 }
 EXPORT_SYMBOL_GPL(do_QDIO);
 
+/**
+ * qdio_start_irq - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - success
+ *   1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev, int nr)
+{
+	struct qdio_q *q;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+
+	WARN_ON(queue_irqs_enabled(q));
+
+	if (!shared_ind(q->irq_ptr))
+		xchg(q->irq_ptr->dsci, 0);
+
+	qdio_stop_polling(q);
+	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
+		goto rescan;
+	if (!qdio_inbound_q_done(q))
+		goto rescan;
+	return 0;
+
+rescan:
+	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+			     &q->u.in.queue_irq_state))
+		return 0;
+	else
+		return 1;
+
+}
+EXPORT_SYMBOL(qdio_start_irq);
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ *   < 0 - error
+ *   = 0 - no new buffers found
+ *   > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+			  int *error)
+{
+	struct qdio_q *q;
+	int start, end;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+	WARN_ON(queue_irqs_enabled(q));
+
+	qdio_sync_after_thinint(q);
+
+	/*
+	 * The interrupt could be caused by a PCI request. Check the
+	 * PCI capable outbound queues.
+	 */
+	qdio_check_outbound_after_thinint(q);
+
+	if (!qdio_inbound_q_moved(q))
+		return 0;
+
+	/* Note: upper-layer MUST stop processing immediately here ... */
+	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+		return -EIO;
+
+	start = q->first_to_kick;
+	end = q->first_to_check;
+	*bufnr = start;
+	*error = q->qdio_error;
+
+	/* for the next time */
+	q->first_to_kick = end;
+	q->qdio_error = 0;
+	return sub_buf(end, start);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - interrupts were already disabled
+ *   1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev, int nr)
+{
+	struct qdio_q *q;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+
+	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+			     &q->u.in.queue_irq_state))
+		return 0;
+	else
+		return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
+
 static int __init init_QDIO(void)
 {
 	int rc;
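
To round out the picture, a hedged sketch of a NAPI poll routine built on the
qdio_get_next_buffers()/qdio_start_irq() pair added above. It continues the
hypothetical struct my_card from the sketch after the commit message;
my_process_buffers() and the fixed input queue number 0 are placeholders, not
code from this patch:

#include <asm/qdio.h>		/* qdio_get_next_buffers(), qdio_start_irq() */

/* Placeholder: hand count buffers starting at bufnr to the stack. */
static int my_process_buffers(struct my_card *card, int bufnr, int count,
			      int error)
{
	return count;
}

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_card *card = container_of(napi, struct my_card, napi);
	int work_done = 0;
	int bufnr, error, n;

	while (work_done < budget) {
		n = qdio_get_next_buffers(card->cdev, 0, &bufnr, &error);
		if (n < 0)
			break;	/* error: stop processing immediately */
		if (n == 0) {
			/* queue looks empty: leave polling mode ... */
			napi_complete(napi);
			/* ... but keep the initiative if new data raced in
			 * (qdio_start_irq() returns 1 in that case) */
			if (qdio_start_irq(card->cdev, 0) > 0)
				napi_schedule(napi);
			break;
		}
		work_done += my_process_buffers(card, bufnr, n, error);
	}
	return work_done;
}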