author     Stephen Hemminger <shemminger@osdl.org>   2006-12-08 14:08:33 -0500
committer  Jeff Garzik <jeff@garzik.org>             2006-12-11 09:51:07 -0500
commit     7fe26a60e08f38c797851fb3b444d753af616112
tree       d112dd98b9db72805e57c157ac670f74cd41ad0e  /drivers/net
parent     0f0d84e52cb2a6e0b1d101484a92121410135da1
[PATCH] chelsio: working NAPI

This driver tries to enable/disable NAPI at runtime, but does so in an
unsafe manner, and the NAPI interrupt handling is a mess.  Replace it
with a compile-time selected NAPI implementation.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
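The patch boils down to one pattern: the handler variant is fixed at build
time by the new Kconfig symbol, so callers always reference a single
t1_interrupt. A minimal sketch of that pattern against the 2.6.19-era
network API, with hypothetical my_* names standing in for the driver's
real code:

/*
 * Sketch only: compile-time selection of the interrupt handling
 * strategy, mirroring the patch.  All my_* names are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

#ifdef CONFIG_CHELSIO_T1_NAPI
static irqreturn_t my_intr(int irq, void *data)
{
	struct net_device *dev = data;

	/* NAPI build: defer the real RX work to dev->poll */
	if (likely(__netif_rx_schedule_prep(dev)))
		__netif_rx_schedule(dev);
	return IRQ_HANDLED;
}
#else
static irqreturn_t my_intr(int irq, void *data)
{
	/* non-NAPI build: drain the RX queue here and deliver
	 * packets inline via netif_rx() */
	return IRQ_HANDLED;
}
#endif

/* callers see a single symbol; no runtime handler switching */
static int my_up(struct net_device *dev)
{
	return request_irq(dev->irq, my_intr, IRQF_SHARED, dev->name, dev);
}

With both variants behind one symbol, request_irq() needs no runtime
indirection, which is what made the old t1_select_intr_handler() approach
unsafe to switch while the device was live.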
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig           8
-rw-r--r--  drivers/net/chelsio/cxgb2.c  23
-rw-r--r--  drivers/net/chelsio/sge.c   115
-rw-r--r--  drivers/net/chelsio/sge.h     4
4 files changed, 67 insertions(+), 83 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9de0eed6755b..8aa8dd02b910 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2384,6 +2384,14 @@ config CHELSIO_T1_1G
 	  Enables support for Chelsio's gigabit Ethernet PCI cards. If you
 	  are using only 10G cards say 'N' here.
 
+config CHELSIO_T1_NAPI
+	bool "Use Rx Polling (NAPI)"
+	depends on CHELSIO_T1
+	default y
+	help
+	  NAPI is a driver API designed to reduce CPU and interrupt load
+	  when the driver is receiving lots of packets from the card.
+
 config EHEA
 	tristate "eHEA Ethernet support"
 	depends on IBMEBUS
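The help text above describes the intent; the mechanics live in the
dev->poll callback that the NAPI build installs. A hedged sketch of the
two-counter poll contract that t1_poll (in sge.c below) follows, with
my_process_rx() as a placeholder for the driver's real response
processing:

/*
 * Sketch of the 2.6.19-era dev->poll contract; my_process_rx() is a
 * placeholder, not a real driver or kernel function.
 */
#include <linux/netdevice.h>

static int my_process_rx(struct net_device *dev, int quota);

static int my_poll(struct net_device *dev, int *budget)
{
	int quota = min(*budget, dev->quota);
	int done = my_process_rx(dev, quota);	/* handles at most quota */

	*budget -= done;
	dev->quota -= done;

	if (done >= quota)
		return 1;	/* more work pending: stay on the poll list */

	/* queue drained: leave the poll list and re-enable the
	 * device's RX interrupt before returning */
	netif_rx_complete(dev);
	return 0;
}

Returning 1 keeps the device on the poll list; returning 0 after
netif_rx_complete() hands flow control back to the interrupt.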
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index de48eadddbc4..fd5d821f3f2a 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -220,9 +220,8 @@ static int cxgb_up(struct adapter *adapter)
 
 	t1_interrupts_clear(adapter);
 
-	adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0;
-	err = request_irq(adapter->pdev->irq,
-			  t1_select_intr_handler(adapter),
+	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
+	err = request_irq(adapter->pdev->irq, t1_interrupt,
 			  adapter->params.has_msi ? 0 : IRQF_SHARED,
 			  adapter->name, adapter);
 	if (err) {
@@ -764,18 +763,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 {
 	struct adapter *adapter = dev->priv;
 
-	/*
-	 * If RX coalescing is requested we use NAPI, otherwise interrupts.
-	 * This choice can be made only when all ports and the TOE are off.
-	 */
-	if (adapter->open_device_map == 0)
-		adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
-
-	if (adapter->params.sge.polling) {
-		adapter->params.sge.rx_coalesce_usecs = 0;
-	} else {
-		adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
-	}
+	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
@@ -944,7 +932,7 @@ static void t1_netpoll(struct net_device *dev)
 	struct adapter *adapter = dev->priv;
 
 	local_irq_save(flags);
-	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
+	t1_interrupt(adapter->pdev->irq, adapter);
 	local_irq_restore(flags);
 }
 #endif
@@ -1165,7 +1153,10 @@ static int __devinit init_one(struct pci_dev *pdev,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 		netdev->poll_controller = t1_netpoll;
 #endif
+#ifdef CONFIG_CHELSIO_T1_NAPI
 		netdev->weight = 64;
+		netdev->poll = t1_poll;
+#endif
 
 		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
 	}
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 0ca8d876e16f..659cb2252e44 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1413,16 +1413,20 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 
 	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
 		st->vlan_xtract++;
-		if (adapter->params.sge.polling)
-			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
-						 ntohs(p->vlan));
-		else
-			vlan_hwaccel_rx(skb, adapter->vlan_grp,
-					ntohs(p->vlan));
-	} else if (adapter->params.sge.polling)
-		netif_receive_skb(skb);
-	else
-		netif_rx(skb);
+#ifdef CONFIG_CHELSIO_T1_NAPI
+		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+					 ntohs(p->vlan));
+#else
+		vlan_hwaccel_rx(skb, adapter->vlan_grp,
+				ntohs(p->vlan));
+#endif
+	} else {
+#ifdef CONFIG_CHELSIO_T1_NAPI
+		netif_receive_skb(skb);
+#else
+		netif_rx(skb);
+#endif
+	}
 	return 0;
 }
 
@@ -1572,6 +1576,7 @@ static int process_responses(struct adapter *adapter, int budget)
 	return budget;
 }
 
+#ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
  * non data-carrying) responses. Such respones are too light-weight to justify
@@ -1619,92 +1624,76 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
  * or protection from interrupts as data interrupts are off at this point and
  * other adapter interrupts do not interfere.
  */
-static int t1_poll(struct net_device *dev, int *budget)
+int t1_poll(struct net_device *dev, int *budget)
 {
 	struct adapter *adapter = dev->priv;
 	int effective_budget = min(*budget, dev->quota);
-
 	int work_done = process_responses(adapter, effective_budget);
+
 	*budget -= work_done;
 	dev->quota -= work_done;
 
 	if (work_done >= effective_budget)
 		return 1;
 
+	spin_lock_irq(&adapter->async_lock);
 	__netif_rx_complete(dev);
-
-	/*
-	 * Because we don't atomically flush the following write it is
-	 * possible that in very rare cases it can reach the device in a way
-	 * that races with a new response being written plus an error interrupt
-	 * causing the NAPI interrupt handler below to return unhandled status
-	 * to the OS. To protect against this would require flushing the write
-	 * and doing both the write and the flush with interrupts off. Way too
-	 * expensive and unjustifiable given the rarity of the race.
-	 */
 	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-	return 0;
-}
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
 
-/*
- * Returns true if the device is already scheduled for polling.
- */
-static inline int napi_is_scheduled(struct net_device *dev)
-{
-	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+	return 0;
 }
 
 /*
  * NAPI version of the main interrupt handler.
  */
-static irqreturn_t t1_interrupt_napi(int irq, void *data)
+irqreturn_t t1_interrupt(int irq, void *data)
 {
-	int handled;
 	struct adapter *adapter = data;
+	struct net_device *dev = adapter->sge->netdev;
 	struct sge *sge = adapter->sge;
-	struct respQ *q = &adapter->sge->respQ;
+	u32 cause;
+	int handled = 0;
 
-	/*
-	 * Clear the SGE_DATA interrupt first thing. Normally the NAPI
-	 * handler has control of the response queue and the interrupt handler
-	 * can look at the queue reliably only once it knows NAPI is off.
-	 * We can't wait that long to clear the SGE_DATA interrupt because we
-	 * could race with t1_poll rearming the SGE interrupt, so we need to
-	 * clear the interrupt speculatively and really early on.
-	 */
-	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+	cause = readl(adapter->regs + A_PL_CAUSE);
+	if (cause == 0 || cause == ~0)
+		return IRQ_NONE;
 
 	spin_lock(&adapter->async_lock);
-	if (!napi_is_scheduled(sge->netdev)) {
+	if (cause & F_PL_INTR_SGE_DATA) {
+		struct respQ *q = &adapter->sge->respQ;
 		struct respQ_e *e = &q->entries[q->cidx];
 
-		if (e->GenerationBit == q->genbit) {
-			if (e->DataValid ||
-			    process_pure_responses(adapter, e)) {
-				if (likely(__netif_rx_schedule_prep(sge->netdev)))
-					__netif_rx_schedule(sge->netdev);
-				else if (net_ratelimit())
-					printk(KERN_INFO
-					       "NAPI schedule failure!\n");
-			} else
-				writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-
-			handled = 1;
-			goto unlock;
-		} else
-			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-	} else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) {
-		printk(KERN_ERR "data interrupt while NAPI running\n");
-	}
-
-	handled = t1_slow_intr_handler(adapter);
+		handled = 1;
+		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+		if (e->GenerationBit == q->genbit &&
+		    __netif_rx_schedule_prep(dev)) {
+			if (e->DataValid || process_pure_responses(adapter, e)) {
+				/* mask off data IRQ */
+				writel(adapter->slow_intr_mask,
+				       adapter->regs + A_PL_ENABLE);
+				__netif_rx_schedule(sge->netdev);
+				goto unlock;
+			}
+			/* no data, no NAPI needed */
+			netif_poll_enable(dev);
+
+		}
+		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+	} else
+		handled = t1_slow_intr_handler(adapter);
+
 	if (!handled)
 		sge->stats.unhandled_irqs++;
 unlock:
 	spin_unlock(&adapter->async_lock);
 	return IRQ_RETVAL(handled != 0);
 }
 
+#else
 /*
  * Main interrupt handler, optimized assuming that we took a 'DATA'
  * interrupt.
@@ -1720,7 +1709,7 @@ static irqreturn_t t1_interrupt_napi(int irq, void *data)
  * 5. If we took an interrupt, but no valid respQ descriptors was found we
  *    let the slow_intr_handler run and do error handling.
  */
-static irqreturn_t t1_interrupt(int irq, void *cookie)
+irqreturn_t t1_interrupt(int irq, void *cookie)
 {
 	int work_done;
 	struct respQ_e *e;
@@ -1752,11 +1741,7 @@ static irqreturn_t t1_interrupt(int irq, void *cookie)
 	spin_unlock(&adapter->async_lock);
 	return IRQ_RETVAL(work_done != 0);
 }
-
-irq_handler_t t1_select_intr_handler(adapter_t *adapter)
-{
-	return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
-}
+#endif
 
 /*
  * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
@@ -2033,7 +2018,6 @@ static void sge_tx_reclaim_cb(unsigned long data)
  */
 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
 {
-	sge->netdev->poll = t1_poll;
 	sge->fixed_intrtimer = p->rx_coalesce_usecs *
 		core_ticks_per_usec(sge->adapter);
 	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
@@ -2234,7 +2218,6 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
 
 	p->coalesce_enable = 0;
 	p->sample_interval_usecs = 0;
-	p->polling = 0;
 
 	return sge;
 nomem_port:
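A detail worth noting in the sge.c changes above: the interrupt handler
masks the data interrupt before scheduling NAPI, and t1_poll re-enables
it under async_lock once the response queue is drained. A sketch of that
mask/re-arm handshake, assuming placeholder register names (MY_ENABLE,
MY_DATA_BIT) rather than the chip's real offsets:

/*
 * Sketch of the IRQ-mask handshake; MY_ENABLE and MY_DATA_BIT are
 * placeholder register definitions, not the chip's real layout.
 */
#include <linux/io.h>
#include <linux/netdevice.h>

#define MY_ENABLE	0x10	/* interrupt-enable register (placeholder) */
#define MY_DATA_BIT	0x01	/* RX-data interrupt bit (placeholder) */

struct my_adapter {
	void __iomem *regs;
	u32 slow_mask;			/* always-enabled "slow" interrupt bits */
	struct net_device *netdev;
};

/* handler side: stop further data interrupts, then hand off to NAPI */
static void my_schedule_napi(struct my_adapter *a)
{
	writel(a->slow_mask, a->regs + MY_ENABLE);	/* data bit dropped */
	netif_rx_schedule(a->netdev);
}

/* poll side, queue drained (interrupts off, as under spin_lock_irq):
 * leave the poll list and re-arm the data interrupt */
static void my_napi_done(struct my_adapter *a)
{
	__netif_rx_complete(a->netdev);
	writel(a->slow_mask | MY_DATA_BIT, a->regs + MY_ENABLE);
}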
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index 7ceb0117d039..d132a0ef2a22 100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
@@ -76,7 +76,9 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *);
 int t1_sge_configure(struct sge *, struct sge_params *);
 int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
 void t1_sge_destroy(struct sge *);
-irq_handler_t t1_select_intr_handler(adapter_t *adapter);
+irqreturn_t t1_interrupt(int irq, void *cookie);
+int t1_poll(struct net_device *, int *);
+
 int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
 void t1_set_vlan_accel(struct adapter *adapter, int on_off);
 void t1_sge_start(struct sge *);