author		Stephen Hemminger <shemminger@osdl.org>	2007-01-08 14:26:30 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-05 16:58:46 -0500
commit		3de00b8958b12d62712ae9500968c65d3b43bb27 (patch)
tree		6e4ba6acdc8745ad1e5890c517783ae304652697 /drivers/net/chelsio
parent		4d22de3e6cc4a09c369b504cd8bcde3385a974cd (diff)
chelsio: NAPI speed improvement
Speed up and clean up the receive processing by eliminating the mmio read
and a lock round trip.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
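The core of the change is the new responses_pending() helper: rather than reading the interrupt cause register over mmio (and taking async_lock) just to decide whether the SGE has work, the fast path now checks the generation bit of the next response-queue entry in host memory. Below is a minimal userspace sketch of that generation-bit convention; the respq/respq_entry structures and field names are simplified stand-ins for the driver's respQ/respQ_e types, and only the "entry generation bit == queue genbit" comparison mirrors the patch.

/*
 * Sketch of the generation-bit test behind responses_pending().
 * In a generation-bit ring the producer stamps each new entry with the
 * current generation value and flips it when it wraps; an entry whose
 * bit matches the consumer's expected genbit has been written since the
 * consumer last caught up, so no register read is needed to detect work.
 */
#include <stdio.h>

struct respq_entry {			/* stand-in for struct respQ_e */
	unsigned int generation_bit;
	unsigned int data_valid;
};

struct respq {				/* stand-in for struct respQ */
	unsigned int cidx;		/* consumer index */
	unsigned int genbit;		/* generation expected for new entries */
	struct respq_entry *entries;
};

static int responses_pending(const struct respq *q)
{
	return q->entries[q->cidx].generation_bit == q->genbit;
}

int main(void)
{
	struct respq_entry ring[4] = { { .generation_bit = 1, .data_valid = 1 } };
	struct respq q = { .cidx = 0, .genbit = 1, .entries = ring };

	printf("pending: %d\n", responses_pending(&q));	/* 1: entry 0 is new */

	q.genbit ^= 1;	/* consumer wrapped; expected generation flips */
	printf("pending: %d\n", responses_pending(&q));	/* 0: nothing new yet */
	return 0;
}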
Diffstat (limited to 'drivers/net/chelsio')
-rw-r--r--	drivers/net/chelsio/sge.c	77
1 file changed, 35 insertions(+), 42 deletions(-)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index f94d6397164..8e287e79e4e 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1559,6 +1559,14 @@ static int process_responses(struct adapter *adapter, int budget)
 	return budget;
 }
 
+static inline int responses_pending(const struct adapter *adapter)
+{
+	const struct respQ *Q = &adapter->sge->respQ;
+	const struct respQ_e *e = &Q->entries[Q->cidx];
+
+	return (e->GenerationBit == Q->genbit);
+}
+
 #ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
@@ -1568,13 +1576,16 @@ static int process_responses(struct adapter *adapter, int budget)
  * which the caller must ensure is a valid pure response. Returns 1 if it
  * encounters a valid data-carrying response, 0 otherwise.
  */
-static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
+static int process_pure_responses(struct adapter *adapter)
 {
 	struct sge *sge = adapter->sge;
 	struct respQ *q = &sge->respQ;
+	struct respQ_e *e = &q->entries[q->cidx];
 	unsigned int flags = 0;
 	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
 
+	if (e->DataValid)
+		return 1;
 	do {
 		flags |= e->Qsleeping;
 
@@ -1610,23 +1621,20 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
 int t1_poll(struct net_device *dev, int *budget)
 {
 	struct adapter *adapter = dev->priv;
-	int effective_budget = min(*budget, dev->quota);
-	int work_done = process_responses(adapter, effective_budget);
+	int work_done;
 
+	work_done = process_responses(adapter, min(*budget, dev->quota));
 	*budget -= work_done;
 	dev->quota -= work_done;
 
-	if (work_done >= effective_budget)
+	if (unlikely(responses_pending(adapter)))
 		return 1;
 
-	spin_lock_irq(&adapter->async_lock);
-	__netif_rx_complete(dev);
+	netif_rx_complete(dev);
 	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-	       adapter->regs + A_PL_ENABLE);
-	spin_unlock_irq(&adapter->async_lock);
 
 	return 0;
+
 }
 
 /*
@@ -1635,44 +1643,33 @@ int t1_poll(struct net_device *dev, int *budget)
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
-	struct net_device *dev = adapter->sge->netdev;
 	struct sge *sge = adapter->sge;
-	u32 cause;
-	int handled = 0;
-
-	cause = readl(adapter->regs + A_PL_CAUSE);
-	if (cause == 0 || cause == ~0)
-		return IRQ_NONE;
+	int handled;
 
-	spin_lock(&adapter->async_lock);
-	if (cause & F_PL_INTR_SGE_DATA) {
-		struct respQ *q = &adapter->sge->respQ;
-		struct respQ_e *e = &q->entries[q->cidx];
+	if (likely(responses_pending(adapter))) {
+		struct net_device *dev = sge->netdev;
 
-		handled = 1;
 		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
 
-		if (e->GenerationBit == q->genbit &&
-		    __netif_rx_schedule_prep(dev)) {
-			if (e->DataValid || process_pure_responses(adapter, e)) {
-				/* mask off data IRQ */
-				writel(adapter->slow_intr_mask,
-				       adapter->regs + A_PL_ENABLE);
-				__netif_rx_schedule(sge->netdev);
-				goto unlock;
+		if (__netif_rx_schedule_prep(dev)) {
+			if (process_pure_responses(adapter))
+				__netif_rx_schedule(dev);
+			else {
+				/* no data, no NAPI needed */
+				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+				netif_poll_enable(dev);	/* undo schedule_prep */
 			}
-			/* no data, no NAPI needed */
-			netif_poll_enable(dev);
-
 		}
-		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-	} else
-		handled = t1_slow_intr_handler(adapter);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&adapter->async_lock);
+	handled = t1_slow_intr_handler(adapter);
+	spin_unlock(&adapter->async_lock);
 
 	if (!handled)
 		sge->stats.unhandled_irqs++;
-unlock:
-	spin_unlock(&adapter->async_lock);
+
 	return IRQ_RETVAL(handled != 0);
 }
 
@@ -1695,17 +1692,13 @@ unlock:
 irqreturn_t t1_interrupt(int irq, void *cookie)
 {
 	int work_done;
-	struct respQ_e *e;
 	struct adapter *adapter = cookie;
-	struct respQ *Q = &adapter->sge->respQ;
 
 	spin_lock(&adapter->async_lock);
-	e = &Q->entries[Q->cidx];
-	prefetch(e);
 
 	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
 
-	if (likely(e->GenerationBit == Q->genbit))
+	if (likely(responses_pending(adapter)))
 		work_done = process_responses(adapter, -1);
 	else
 		work_done = t1_slow_intr_handler(adapter);