author    Eugene Konev <ejka@imfi.kspu.ru>      2007-10-23 22:42:02 -0400
committer Jeff Garzik <jeff@garzik.org>         2007-10-25 03:31:15 -0400
commit    67d129d14da1555bb3eaca754f6f81c02cacbe0e
tree      632990a74134cd7709995824e4d0654cf24f01d4 /drivers/net/cpmac.c
parent    df523b5cd9950485350fb1b7d97d5b8882d94a4e
cpmac: convert to napi_struct interface
Convert cpmac to the new napi_struct API introduced by commit
bea3348eef27e6044b6161fd04c3152215f96411 ("[NET]: Make NAPI polling
independent of struct net_device objects."). Only disable rx interrupts
if napi actually has been scheduled.

Signed-off-by: Eugene Konev <ejka@imfi.kspu.ru>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
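For context, the shape of the conversion is sketched below: the poll callback
moves from struct net_device (dev->poll, dev->weight, dev->quota) onto an
embedded struct napi_struct. This is a minimal 2.6.24-era sketch, not cpmac
code; my_priv, my_poll and my_rx_one are hypothetical names, while
container_of() and netif_rx_complete() are the same helpers the patch uses.

#include <linux/netdevice.h>

/* Hypothetical private struct: the napi_struct is embedded in the
 * driver's own state, replacing the old dev->poll / dev->weight. */
struct my_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

static int my_rx_one(struct my_priv *priv);	/* hypothetical rx helper */

static int my_poll(struct napi_struct *napi, int budget)
{
	/* context is recovered from the embedded napi_struct,
	 * not from netdev_priv(dev) as in the old dev->poll callback */
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int received = 0;

	/* old API: quota = min(dev->quota, *budget), with *budget and
	 * dev->quota decremented by hand; new API: a plain budget int */
	while (received < budget && my_rx_one(priv))
		received++;

	if (received < budget)
		/* ring drained: leave polled mode, rx interrupts resume */
		netif_rx_complete(priv->dev, napi);

	return received;	/* packets processed, not a done flag */
}

Registration then happens once at probe time, with
netif_napi_add(dev, &priv->napi, my_poll, 64) replacing the dev->poll and
dev->weight assignments, exactly as the last hunk of this patch does.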
Diffstat (limited to 'drivers/net/cpmac.c')
-rw-r--r--	drivers/net/cpmac.c	68
1 file changed, 37 insertions(+), 31 deletions(-)
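The "only disable rx interrupts if napi actually has been scheduled" part of
the change corresponds to the netif_rx_schedule_prep()/__netif_rx_schedule()
pair the patch introduces in cpmac_irq(). The general pattern, again as a
hedged sketch with the hypothetical my_* names from above and the same
2.6.24-era helpers the diff uses:

#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED */

static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *priv = netdev_priv(dev);

	/* netif_rx_schedule_prep() succeeds only if NAPI is enabled and
	 * not already scheduled, so the ack/disable below cannot clobber
	 * the interrupt state under a poll that is still in flight */
	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		/* device-specific: ack and mask the rx interrupt here */
		__netif_rx_schedule(dev, &priv->napi);
	}

	return IRQ_HANDLED;
}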
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 0598d4d438f5..486c82b2dab6 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -205,6 +205,7 @@ struct cpmac_priv {
 	struct net_device *dev;
 	struct work_struct reset_work;
 	struct platform_device *pdev;
+	struct napi_struct napi;
 };
 
 static irqreturn_t cpmac_irq(int, void *);
@@ -356,47 +357,48 @@ static void cpmac_set_multicast_list(struct net_device *dev)
 	}
 }
 
-static struct sk_buff *cpmac_rx_one(struct net_device *dev,
-				    struct cpmac_priv *priv,
+static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
 				    struct cpmac_desc *desc)
 {
 	struct sk_buff *skb, *result = NULL;
 
 	if (unlikely(netif_msg_hw(priv)))
-		cpmac_dump_desc(dev, desc);
+		cpmac_dump_desc(priv->dev, desc);
 	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
 	if (unlikely(!desc->datalen)) {
 		if (netif_msg_rx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
-			       dev->name);
+			       priv->dev->name);
 		return NULL;
 	}
 
-	skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+	skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
 	if (likely(skb)) {
 		skb_reserve(skb, 2);
 		skb_put(desc->skb, desc->datalen);
-		desc->skb->protocol = eth_type_trans(desc->skb, dev);
+		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
 		desc->skb->ip_summed = CHECKSUM_NONE;
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += desc->datalen;
+		priv->dev->stats.rx_packets++;
+		priv->dev->stats.rx_bytes += desc->datalen;
 		result = desc->skb;
-		dma_unmap_single(&dev->dev, desc->data_mapping, CPMAC_SKB_SIZE,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
+				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
 		desc->skb = skb;
-		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
+		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
 						    CPMAC_SKB_SIZE,
 						    DMA_FROM_DEVICE);
 		desc->hw_data = (u32)desc->data_mapping;
 		if (unlikely(netif_msg_pktdata(priv))) {
-			printk(KERN_DEBUG "%s: received packet:\n", dev->name);
-			cpmac_dump_skb(dev, result);
+			printk(KERN_DEBUG "%s: received packet:\n",
+			       priv->dev->name);
+			cpmac_dump_skb(priv->dev, result);
 		}
 	} else {
 		if (netif_msg_rx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING
-			       "%s: low on skbs, dropping packet\n", dev->name);
-		dev->stats.rx_dropped++;
+			       "%s: low on skbs, dropping packet\n",
+			       priv->dev->name);
+		priv->dev->stats.rx_dropped++;
 	}
 
 	desc->buflen = CPMAC_SKB_SIZE;
@@ -405,25 +407,25 @@ static struct sk_buff *cpmac_rx_one(struct net_device *dev,
 	return result;
 }
 
-static int cpmac_poll(struct net_device *dev, int *budget)
+static int cpmac_poll(struct napi_struct *napi, int budget)
 {
 	struct sk_buff *skb;
 	struct cpmac_desc *desc;
-	int received = 0, quota = min(dev->quota, *budget);
-	struct cpmac_priv *priv = netdev_priv(dev);
+	int received = 0;
+	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
 
 	spin_lock(&priv->rx_lock);
 	if (unlikely(!priv->rx_head)) {
 		if (netif_msg_rx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
-			       dev->name);
-		netif_rx_complete(dev);
+			       priv->dev->name);
+		netif_rx_complete(priv->dev, napi);
 		return 0;
 	}
 
 	desc = priv->rx_head;
-	while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) {
-		skb = cpmac_rx_one(dev, priv, desc);
+	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+		skb = cpmac_rx_one(priv, desc);
 		if (likely(skb)) {
 			netif_receive_skb(skb);
 			received++;
@@ -433,13 +435,11 @@ static int cpmac_poll(struct net_device *dev, int *budget)
 
 	priv->rx_head = desc;
 	spin_unlock(&priv->rx_lock);
-	*budget -= received;
-	dev->quota -= received;
 	if (unlikely(netif_msg_rx_status(priv)))
-		printk(KERN_DEBUG "%s: poll processed %d packets\n", dev->name,
-		       received);
+		printk(KERN_DEBUG "%s: poll processed %d packets\n",
+		       priv->dev->name, received);
 	if (desc->dataflags & CPMAC_OWN) {
-		netif_rx_complete(dev);
+		netif_rx_complete(priv->dev, napi);
 		cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
 		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
 		return 0;
@@ -649,6 +649,7 @@ static void cpmac_hw_error(struct work_struct *work)
 	spin_unlock(&priv->rx_lock);
 	cpmac_clear_tx(priv->dev);
 	cpmac_hw_start(priv->dev);
+	napi_enable(&priv->napi);
 	netif_start_queue(priv->dev);
 }
 
@@ -675,8 +676,10 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
 
 	if (status & MAC_INT_RX) {
 		queue = (status >> 8) & 7;
-		netif_rx_schedule(dev);
-		cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+		if (netif_rx_schedule_prep(dev, &priv->napi)) {
+			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+			__netif_rx_schedule(dev, &priv->napi);
+		}
 	}
 
 	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
@@ -686,6 +689,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
 		printk(KERN_ERR "%s: hw error, resetting...\n",
 		       dev->name);
 		netif_stop_queue(dev);
+		napi_disable(&priv->napi);
 		cpmac_hw_stop(dev);
 		schedule_work(&priv->reset_work);
 		if (unlikely(netif_msg_hw(priv)))
@@ -921,6 +925,7 @@ static int cpmac_open(struct net_device *dev)
 	INIT_WORK(&priv->reset_work, cpmac_hw_error);
 	cpmac_hw_start(dev);
 
+	napi_enable(&priv->napi);
 	priv->phy->state = PHY_CHANGELINK;
 	phy_start(priv->phy);
 
@@ -959,6 +964,7 @@ static int cpmac_stop(struct net_device *dev)
 	netif_stop_queue(dev);
 
 	cancel_work_sync(&priv->reset_work);
+	napi_disable(&priv->napi);
 	phy_stop(priv->phy);
 	phy_disconnect(priv->phy);
 	priv->phy = NULL;
@@ -1048,10 +1054,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 	dev->set_multicast_list = cpmac_set_multicast_list;
 	dev->tx_timeout = cpmac_tx_timeout;
 	dev->ethtool_ops = &cpmac_ethtool_ops;
-	dev->poll = cpmac_poll;
-	dev->weight = 64;
 	dev->features |= NETIF_F_MULTI_QUEUE;
 
+	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
+
 	spin_lock_init(&priv->lock);
 	spin_lock_init(&priv->rx_lock);
 	priv->dev = dev;