path: root/drivers/net/via-rhine.c
author	Roger Luethi <rl@hellgate.ch>	2006-08-15 02:00:17 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-08-19 17:44:29 -0400
commit	633949a145df19d6d338b0f227f033a0a1c5562a (patch)
tree	d000e88532f0420eeb42c2525ae3a8d44818a29f /drivers/net/via-rhine.c
parent	a4d09272480e57cd4aa1156cac7aba9b08885bf9 (diff)
[PATCH] via-rhine: NAPI support
Add NAPI support to the via-rhine driver so that it can handle higher speeds and doesn't get overloaded by interrupts as easily.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Roger Luethi <rl@hellgate.ch>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
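The poll handler added below follows the 2.6.x NAPI contract (dev->poll, dev->weight, dev->quota; replaced by struct napi_struct in later kernels): the interrupt handler masks the RX interrupt sources and calls netif_rx_schedule(dev), and the poll callback drains up to min(dev->quota, *budget) frames, returning 0 once the ring is empty (after netif_rx_complete() and re-enabling interrupts) or 1 while work remains. A minimal sketch of such a poll callback follows; foo_rx() and foo_unmask_rx_irqs() are assumed driver-local helpers for illustration, not via-rhine functions.

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Sketch only: foo_rx() and foo_unmask_rx_irqs() are hypothetical helpers. */
static int foo_poll(struct net_device *dev, int *budget)
{
	/* Process no more frames than either the per-device quota or the
	 * softirq-wide budget allows. */
	int limit = min(dev->quota, *budget);
	int done = foo_rx(dev, limit);	/* returns number of frames handled */

	*budget -= done;
	dev->quota -= done;

	if (done < limit) {
		/* Ring drained: leave the poll list and let interrupts
		 * drive RX processing again. */
		netif_rx_complete(dev);
		foo_unmask_rx_irqs(dev);
		return 0;
	}
	return 1;	/* more work pending; stay on the poll list */
}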
Diffstat (limited to 'drivers/net/via-rhine.c')
-rw-r--r--	drivers/net/via-rhine.c	77
1 file changed, 65 insertions(+), 12 deletions(-)
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index d3d0ec970318..fee824002d10 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
 */
 
 #define DRV_NAME "via-rhine"
-#define DRV_VERSION "1.4.0"
-#define DRV_RELDATE "June-27-2006"
+#define DRV_VERSION "1.4.1"
+#define DRV_RELDATE "July-24-2006"
 
 
 /* A few user-configurable values.
@@ -63,7 +63,11 @@ static const int multicast_filter_limit = 32;
    There are no ill effects from too-large receive rings. */
 #define TX_RING_SIZE 16
 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#ifdef CONFIG_VIA_RHINE_NAPI
+#define RX_RING_SIZE 64
+#else
 #define RX_RING_SIZE 16
+#endif
 
 
 /* Operational parameters that usually are not changed. */
@@ -396,7 +400,7 @@ static void rhine_tx_timeout(struct net_device *dev);
 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static void rhine_tx(struct net_device *dev);
-static void rhine_rx(struct net_device *dev);
+static int rhine_rx(struct net_device *dev, int limit);
 static void rhine_error(struct net_device *dev, int intr_status);
 static void rhine_set_rx_mode(struct net_device *dev);
 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
@@ -564,6 +568,32 @@ static void rhine_poll(struct net_device *dev)
 }
 #endif
 
+#ifdef CONFIG_VIA_RHINE_NAPI
+static int rhine_napipoll(struct net_device *dev, int *budget)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	void __iomem *ioaddr = rp->base;
+	int done, limit = min(dev->quota, *budget);
+
+	done = rhine_rx(dev, limit);
+	*budget -= done;
+	dev->quota -= done;
+
+	if (done < limit) {
+		netif_rx_complete(dev);
+
+		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
+			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
+			  IntrTxDone | IntrTxError | IntrTxUnderrun |
+			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
+			  ioaddr + IntrEnable);
+		return 0;
+	}
+	else
+		return 1;
+}
+#endif
+
 static void rhine_hw_init(struct net_device *dev, long pioaddr)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -744,6 +774,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = rhine_poll;
 #endif
+#ifdef CONFIG_VIA_RHINE_NAPI
+	dev->poll = rhine_napipoll;
+	dev->weight = 64;
+#endif
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
@@ -1165,6 +1199,7 @@ static void rhine_tx_timeout(struct net_device *dev)
 	dev->trans_start = jiffies;
 	rp->stats.tx_errors++;
 	netif_wake_queue(dev);
+	netif_poll_enable(dev);
 }
 
 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
@@ -1268,8 +1303,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
 		       dev->name, intr_status);
 
 	if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
-			   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
-		rhine_rx(dev);
+			   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
+#ifdef CONFIG_VIA_RHINE_NAPI
+		iowrite16(IntrTxAborted |
+			  IntrTxDone | IntrTxError | IntrTxUnderrun |
+			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
+			  ioaddr + IntrEnable);
+
+		netif_rx_schedule(dev);
+#else
+		rhine_rx(dev, RX_RING_SIZE);
+#endif
+	}
 
 	if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
 		if (intr_status & IntrTxErrSummary) {
@@ -1367,13 +1412,12 @@ static void rhine_tx(struct net_device *dev)
 	spin_unlock(&rp->lock);
 }
 
-/* This routine is logically part of the interrupt handler, but isolated
-   for clarity and better register allocation. */
-static void rhine_rx(struct net_device *dev)
+/* Process up to limit frames from receive ring */
+static int rhine_rx(struct net_device *dev, int limit)
 {
 	struct rhine_private *rp = netdev_priv(dev);
+	int count;
 	int entry = rp->cur_rx % RX_RING_SIZE;
-	int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
 
 	if (debug > 4) {
 		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
@@ -1382,16 +1426,18 @@ static void rhine_rx(struct net_device *dev)
 	}
 
 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
-	while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+	for (count = 0; count < limit; ++count) {
 		struct rx_desc *desc = rp->rx_head_desc;
 		u32 desc_status = le32_to_cpu(desc->rx_status);
 		int data_size = desc_status >> 16;
 
+		if (desc_status & DescOwn)
+			break;
+
 		if (debug > 4)
 			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
 			       desc_status);
-		if (--boguscnt < 0)
-			break;
+
 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
 			if ((desc_status & RxWholePkt) != RxWholePkt) {
 				printk(KERN_WARNING "%s: Oversized Ethernet "
@@ -1460,7 +1506,11 @@ static void rhine_rx(struct net_device *dev)
 						       PCI_DMA_FROMDEVICE);
 			}
 			skb->protocol = eth_type_trans(skb, dev);
+#ifdef CONFIG_VIA_RHINE_NAPI
+			netif_receive_skb(skb);
+#else
 			netif_rx(skb);
+#endif
 			dev->last_rx = jiffies;
 			rp->stats.rx_bytes += pkt_len;
 			rp->stats.rx_packets++;
@@ -1487,6 +1537,8 @@ static void rhine_rx(struct net_device *dev)
 		}
 		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
 	}
+
+	return count;
 }
 
 /*
@@ -1776,6 +1828,7 @@ static int rhine_close(struct net_device *dev)
 	spin_lock_irq(&rp->lock);
 
 	netif_stop_queue(dev);
+	netif_poll_disable(dev);
 
 	if (debug > 1)
 		printk(KERN_DEBUG "%s: Shutting down ethercard, "